diff --git "a/428.jsonl" "b/428.jsonl" new file mode 100644--- /dev/null +++ "b/428.jsonl" @@ -0,0 +1,712 @@ +{"seq_id":"71518304082","text":"class InvalidCountryException(Exception):\n def __str__(self):\n return (\"User Outside India cannot be registered.\")\n\nclass UserRegistration:\n def registerUser(self, userName, userCountry):\n if (userCountry==\"India\"):\n print(\"User registration done successfully.\")\n else:\n raise InvalidCountryException\n\nuser=UserRegistration()\nuser.registerUser(\"Mini\",\"India\")\nuser.registerUser(\"Mickey\",\"US\")\n","repo_name":"MandeepLamba/Python","sub_path":"assignment2/exception.py","file_name":"exception.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17461040905","text":"DEFAULT_INPUT_FILE_NAME = 'input.csv'\nDEFAULT_OUTPUT_FILE_NAME = 'output.json'\nDEFAULT_SEPARATOR = ','\n\n\nclass Converter:\n\n @staticmethod\n def convert(input_file_path: str = DEFAULT_INPUT_FILE_NAME,\n output_file_path: str = DEFAULT_OUTPUT_FILE_NAME):\n\n with open(input_file_path, \"r\") as f:\n content = f.readlines()\n\n header = content[0]\n data = content[1:]\n\n ans = []\n for record in data:\n l = []\n for pair in zip(header.split(DEFAULT_SEPARATOR), record.split(DEFAULT_SEPARATOR)):\n l.append(f'''\"{pair[0].strip()}\": \"{pair[1].strip()}\"''')\n tmp = f'''{{{\", \".join(l)}}}'''\n ans.append(tmp)\n\n json_string = f'''[{\", \".join(ans)}]'''\n\n with open(output_file_path, \"w\") as f:\n f.write(json_string)\n","repo_name":"akira225/python-exercises","sub_path":"(2) csv_json_conv/ManualConv.py","file_name":"ManualConv.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29976002279","text":"from time import sleep\nfrom random import randint\nfrom colorama import Fore, Style\nimport os\n\n# text1 = input(\"Enter holiday message: \")\ntext1 = \"Buon natale Alessandra \"\ntext2 = \"Buon natale Antonio \"\ntext3 = \"Buon natale Andrea \"\ntext4 = \"Buon natale Paolo \"\ntext5 = \"Buon natale Luca \"\n\nbase1 = len(text1)\nbase1 += 1\nbase2 = len(text2)\nbase2 += 1\nbase3 = len(text3)\nbase3 += 1\nbase4 = len(text4)\nbase4 += 1\nbase5 = len(text5)\nbase5 += 1\n\ni = 1\nslp = .70\n\n\ndef printSpace():\n print()\n print()\n print()\n print()\n # os.system(\"cls\")\n\n\ndef one():\n for x in range(1, base1, 2):\n y = randint(2, 12)\n if x == 1:\n print(Style.BRIGHT + Fore.YELLOW + \"{:^40}\".format(\"\\u2721\"))\n elif y % 5 == 0:\n print(Fore.RED + \"{:^40}\".format(text1[:x]))\n elif y % 3 == 0:\n print(Fore.GREEN + \"{:^40}\".format(text1[:x]))\n else:\n print(Fore.WHITE + \"{:^40}\".format(text1[:x]))\n print(Fore.WHITE + \"{:^39}\".format(\"||||\"))\n print(Fore.WHITE + \"{:^39}\".format(\"||||\"))\n # printSpace()\n sleep(slp)\n\n\ndef two():\n for x in range(1, base1, 2):\n y = randint(2, 12)\n if x == 1:\n print(Style.BRIGHT + Fore.YELLOW + \"{:^40}\".format(\"\\u2721\"))\n elif y % 5 == 0:\n print(Fore.RED + \"{:^40}\".format(text2[:x]))\n elif y % 3 == 0:\n print(Fore.GREEN + \"{:^40}\".format(text2[:x]))\n else:\n print(Fore.WHITE + \"{:^40}\".format(text2[:x]))\n print(Fore.WHITE + \"{:^39}\".format(\"||||\"))\n print(Fore.WHITE + \"{:^39}\".format(\"||||\"))\n # printSpace()\n sleep(slp)\n\n\ndef three():\n for x in range(1, base1, 2):\n y = randint(2, 12)\n if x == 1:\n print(Style.BRIGHT + Fore.YELLOW + \"{:^40}\".format(\"\\u2721\"))\n elif y % 
5 == 0:\n print(Fore.RED + \"{:^40}\".format(text3[:x]))\n elif y % 3 == 0:\n print(Fore.GREEN + \"{:^40}\".format(text3[:x]))\n else:\n print(Fore.WHITE + \"{:^40}\".format(text3[:x]))\n print(Fore.WHITE + \"{:^39}\".format(\"||||\"))\n print(Fore.WHITE + \"{:^39}\".format(\"||||\"))\n # printSpace()\n sleep(slp)\n\n\ndef four():\n for x in range(1, base1, 2):\n y = randint(2, 12)\n if x == 1:\n print(Style.BRIGHT + Fore.YELLOW + \"{:^40}\".format(\"\\u2721\"))\n elif y % 5 == 0:\n print(Fore.RED + \"{:^40}\".format(text4[:x]))\n elif y % 3 == 0:\n print(Fore.GREEN + \"{:^40}\".format(text4[:x]))\n else:\n print(Fore.WHITE + \"{:^40}\".format(text4[:x]))\n print(Fore.WHITE + \"{:^39}\".format(\"||||\"))\n print(Fore.WHITE + \"{:^39}\".format(\"||||\"))\n # printSpace()\n sleep(slp)\n\n\ndef five():\n for x in range(1, base1, 2):\n y = randint(2, 12)\n if x == 1:\n print(Style.BRIGHT + Fore.YELLOW + \"{:^40}\".format(\"\\u2721\"))\n elif y % 5 == 0:\n print(Fore.RED + \"{:^40}\".format(text5[:x]))\n elif y % 3 == 0:\n print(Fore.GREEN + \"{:^40}\".format(text5[:x]))\n else:\n print(Fore.WHITE + \"{:^40}\".format(text5[:x]))\n print(Fore.WHITE + \"{:^39}\".format(\"||||\"))\n print(Fore.WHITE + \"{:^39}\".format(\"||||\"))\n # printSpace()\n sleep(slp)\n\n\ndef switch_demo(i):\n switcher = {\n 1: one(),\n 2: two(),\n 3: three(),\n 4: four(),\n 5: five(),\n }\n switcher.get(i, \"Invalid month\")\n\n\nCiclo = True\nNatale = 0\nwhile Ciclo:\n switch_demo(i)\n i = randint(1, 5)\n Natale += Natale\n if (Natale == 2019):\n Ciclo = False\n elif Natale == 170:\n slp = 1\n elif Natale == 300:\n slp = 2\n elif Natale == 560:\n slp = .50\n elif Natale == 770:\n slp = 1\n elif Natale == 800:\n slp = 2\n elif Natale == 960:\n slp = .50\n\n# K = randint(50, 100)\n# slp = K - ( 1 / K)\n","repo_name":"AndreaDagg/Christmas-Tree","sub_path":"Tree/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25508146148","text":"# Basic math operations\n# Module import example\n\nimport BasicMath as bmath\n\na = 10\nb = 5\n\ns = bmath.sum_(a, b)\nsub = bmath.subtract(a, b)\nm = bmath.multiply(a, b)\nd = bmath.divide(a, b)\n\nprint(f'Basic Operation Resulto for a = {a} and b = {b} are: '\n f'sum = {s}, subtract = {sub}, multiply = {m} and division = {d}')\n","repo_name":"vmunozrivera/PyProjects","sub_path":"BasicMathOperations/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39844035775","text":"#prog que leia nome, idade e sexo de 4 pessoas\r\nsoma_idade = 0\r\nmedia_idade = 0\r\nmaior_idade_homem = 0\r\nnome_velho = ''\r\ntot_mulher20 = 0\r\nfor p in range(1, 5):\r\n print('------ {}° pessoa ------0'.format(p))\r\n nome = str(input('Nome: ')).strip()\r\n idade = int(input('Idade: '))\r\n sexo = str(input('Sexo [M/F]: ')).strip()\r\n soma_idade += idade\r\n if p == 1 and sexo in 'Mm':\r\n maior_idade_homem = idade\r\n nome_velho = nome\r\n if sexo in 'Mn' and idade > maior_idade_homem:\r\n maior_idade_homem = idade\r\n nome_velho = nome\r\n if sexo in 'Ff' and idade < 20:\r\n tot_mulher20 += 1\r\nmedia_idade = soma_idade / 4\r\nprint('A média de idade do grupo é {}'.format(media_idade))\r\nprint('O homem mais velho tem {} anos e se chama {}'.format(maior_idade_homem, nome_velho))\r\nprint('Ao todo {} mulheres tem 20 anos ou 
menos'.format(tot_mulher20))\r\n\r\n\r\n#soma = 0\r\n#novo = 0\r\n#velho = 0\r\n#for m in range(1,5):\r\n# nome = str(input('Diga o nome da {}° pessoa '.format(m)))\r\n# idade = int(input('Diga a idade da {}° pessoa '.format(m)))\r\n# #sexo = str(input('Diga o sexo da {}° pessoa '.format(m)))\r\n# soma += idade\r\n# if m == 1:\r\n# velho = idade\r\n## novo = idade\r\n # else:\r\n## if idade > velho:\r\n# velho = idade\r\n#print('A média de idade do grupo é {}'.format(soma / m))\r\n#print('O mais velho tem {} e é {}'.format(velho,))","repo_name":"savioricardog/Atividades_Python_Curso_em_Video","sub_path":"aula13_desafio56.py","file_name":"aula13_desafio56.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4780875698","text":"from websocket import create_connection\nimport json\n\nws = create_connection(\"wss://ws.kraken.com/\")\n\nsubscription_dict = {\n\t\"event\":\"subscribe\",\n\t\"subscription\":{\n\t\t\"name\":\"trade\"\n\t},\n\t\"pair\":[\"BTC/USD\"]\n}\n\nws.send(json.dumps(subscription_dict))\n\nwhile True:\n\tprint(ws.recv_data())\n","repo_name":"stew1922/Crypto_Bandyts","sub_path":"libs/trading/testing_86_del_trash/round_3/je_connection.py","file_name":"je_connection.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"31205211146","text":"from flask import Flask, request, render_template\nfrom faker import Faker\nimport csv\nimport requests\n\napp = Flask(__name__)\nfake = Faker()\n\n\n@app.route('/')\ndef hello_world():\n return render_template('hello.html')\n\n\n@app.route('/requirements/')\ndef requirements():\n with open('requirements.txt', 'r', encoding='UTF-8') as text:\n lst = text.readlines()\n lines = tuple(map(lambda value: value.strip('\\n'), lst))\n return render_template('parameters.html', line=lines)\n\n\n@app.route('/generate-users/')\ndef user_generate():\n value = request.args.get('count', 100)\n handbook = {}\n template_name = '{name:<20}'\n template_email = '{email:<15}'\n for _ in range(int(value)):\n person = fake.name()\n email = fake.email()\n line = {template_name.format(name=person): template_email.format(email=email)}\n handbook.update(line)\n return handbook\n\n\n@app.route('/mean/')\ndef parameters():\n with open('hw.csv', newline='') as csvfile:\n lines = csv.reader(csvfile.readlines()[1:25001], delimiter=',')\n height_inches = 0\n weight_pounds = 0\n for row in lines:\n height_inches += float(row[1])\n weight_pounds += float(row[2])\n height_cm = round(height_inches * 2.54, 2)\n weight_kg = round(weight_pounds / 2.2046, 2)\n return render_template('parameters.html', weight=weight_kg, height=height_cm)\n\n\n@app.route('/space/')\ndef astronaut():\n response = requests.get('http://api.open-notify.org/astros.json')\n number = response.json()['number']\n return render_template('parameters.html', number=number)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"Velociraptor16/Flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71453004562","text":"import sys\nsys.stdin = open(\"input6.txt\", \"r\")\nfor _ in range(10):\n TC = int(input())\n res = 0\n case = [[] for _ in range(100)]\n start_list = []\n for i in range(100):\n case[i] = list(map(int, input().split()))\n for i in range(100):\n if 
case[0][i]:\n start_list.append(i)\n\n move_count = [0]*len(start_list)\n cnt = 0\n result = []\n for s in start_list:\n x_axios = s\n for y_axios in range(1,99):\n if x_axios>0 and case[y_axios][x_axios-1]:\n while x_axios>0 and case[y_axios][x_axios-1]:\n x_axios -= 1\n move_count[cnt] +=1\n elif x_axios<99 and case[y_axios][x_axios+1]:\n while x_axios<99 and case[y_axios][x_axios+1]:\n x_axios += 1\n move_count[cnt] +=1\n move_count[cnt] += 1\n cnt += 1\n min_value = move_count[0]\n min_index = 0\n for i in range(1, len(move_count)):\n if move_count[i] < min_value:\n min_value = move_count[i]\n min_index = i\n print(f\"#{TC} {start_list[min_index]}\")","repo_name":"dowookims/ProblemSolving","sub_path":"swea/array/1211_ladder2.py","file_name":"1211_ladder2.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71960774483","text":"from django.utils.translation import gettext as _\nfrom applications.utils import base_models\nfrom django.db import models\nfrom solo.models import SingletonModel\n\n\nclass Contact(base_models.TimeStampedModelBase):\n \"\"\"\n Model to store contacts.\n \"\"\"\n name = models.CharField(_('Name'), max_length=50)\n email = models.EmailField(_('Email'))\n message = models.TextField(verbose_name='Message')\n\n class Meta:\n verbose_name = _('Contact')\n verbose_name_plural = _('Contacts')\n\n def __str__(self):\n return self.name\n\n\nclass AdvertiseraAddress(SingletonModel):\n \"\"\"\n To Store the Bank Address\n \"\"\"\n company = models.CharField(verbose_name=_('Company'), max_length=50)\n phone = models.CharField(verbose_name=_('Phone'), max_length=50)\n email = models.EmailField(verbose_name=_('Email'))\n address = models.TextField(verbose_name=_('Address'))\n\n class Meta:\n verbose_name = _(\"Advertisera Address\")\n\n def __str__(self):\n return self.company\n","repo_name":"merlinmathew/AdvertisEra","sub_path":"applications/contacts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35962104845","text":"from datetime import datetime\nimport json\nimport os\nimport requests\nfrom bs4 import BeautifulSoup, SoupStrainer\n\n\nURL = 'https://www.alko.fi/tuotteet/tuotelistaus/'\nALKO_PRODUCTS_PATH = f'resources/products-{datetime.now()}.json'\nALKO_PRODUCTS_PATH_FINAL = \"resources/products-sorted.json\"\n\ndef haeSivujenMaara():\n response1 = requests.get(URL)\n response1.raise_for_status()\n\n keitto = BeautifulSoup(response1.text, 'lxml')\n tuotemaararivi = keitto.find(\"h3\", class_=\"product-count\")\n\n #Selvittää tuotemäärän HTML:stä olettaen että .text palauttaa pelkkiä lukuja ja lopussa olevan sanan, joka unohdetaan.\n tuotemaara = int(''.join(tuotemaararivi.text.split()[0:-1]))\n\n #Yhdellä alkon sivulla on 12 tuotetta ja sivujen indeksointi alkaa nollasta.\n sivujenmaara = int(tuotemaara/12)\n\n return sivujenmaara\n\n\ndef run():\n start = datetime.now()\n\n sivujenmaara = haeSivujenMaara()\n\n params = {\n \"PageNumber\": sivujenmaara,\n \"SearchTerm\": \"*\",\n \"PageSize\": 12,\n }\n\n response = requests.get(URL, params=params)\n response.raise_for_status()\n\n data_time = datetime.now()\n print(f\"Datan haku kesti {data_time - start}\")\n\n only_divs = SoupStrainer('div')\n soup = BeautifulSoup(\n response.text,\n 'lxml',\n parse_only=only_divs\n )\n\n products = []\n product_divs = soup.find_all(\n 'div',\n 
attrs={\n            \"class\": \"product-data-container\",\n            \"data-alkoproduct\": True\n        }\n    )\n\n    for div in product_divs:\n        products.append({\n            'id': div[\"data-alkoproduct\"],\n            'name': div.a[\"title\"],\n        })\n\n    print(f\"Datan käsittelyssä kesti {datetime.now() - data_time}\")\n    print(f\"Koko operaatiossa kesti {datetime.now() - start}\")\n    print(f\"Ladattu {len(product_divs)} tuotetta\")\n\n    products.sort(key=lambda x: x[\"name\"])\n\n    with open(ALKO_PRODUCTS_PATH, 'w') as f:\n        json.dump(products, f)\n\n    os.remove(ALKO_PRODUCTS_PATH_FINAL)\n    os.rename(ALKO_PRODUCTS_PATH, ALKO_PRODUCTS_PATH_FINAL)\n\nif __name__ == \"__main__\":\n    run()\n","repo_name":"Huttusta/huttustutka","sub_path":"backend/scripts/get_products.py","file_name":"get_products.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"fi","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"73640356240","text":"# Project Euler #21\n# Amicable Numbers >>> d[sum(ProperDivisors(x))] = x\ndef ProperDivisors(n):\n    nums = [1]\n    if n == 1:\n        return nums\n    sq = int(n**0.5)\n    for x in range(2, sq + 1):\n        if n % x == 0:\n            nums.append(x)\n            if x != n // x:\n                nums.append(n // x)\n    return sorted(nums)\n\na = [0] + [sum(ProperDivisors(x)) for x in range(1, 10000)]\nprint(sum(x for i, x in enumerate(a) if x < len(a) and a[x] == i and x != i))\n","repo_name":"ZedOud/Project-Euler","sub_path":"problem 021/solution 21.py","file_name":"solution 21.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"19275491473","text":"class Solution:\n    def hIndex(self, citations: List[int]) -> int:\n        # Sort the citations in descending order\n        citations.sort(reverse=True)\n        # find the maximum x >= y point on the x=y line\n        h_index = 0\n        for citation in citations:\n            if citation > h_index:\n                h_index += 1\n            else:\n                break\n\n        return h_index\n","repo_name":"connectwithprakash/Leet-Code","sub_path":"problems/274_h_index.py","file_name":"274_h_index.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"21983511944","text":"#!/usr/bin/python\n\nimport sys\nimport urllib.request\nfrom bs4 import BeautifulSoup\nfrom nltk import pos_tag\nfrom nltk.tokenize import sent_tokenize,word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.probability import FreqDist\nfrom string import punctuation\nfrom heapq import nlargest\nfrom collections import defaultdict\n\ndef getTextWaPo(url):\n    page = urllib.request.urlopen(url).read().decode('utf8','ignore')\n    soup = BeautifulSoup(page,\"lxml\")\n    [s.extract() for s in soup('script')]\n    [s.extract() for s in soup('style')]\n    text = ' '.join(map(lambda body: body.text,soup.find_all('p')))\n    text = text.encode('ascii', errors='replace').replace(\"?\".encode(\"utf8\"),\" \".encode(\"utf8\"))\n    return text.decode('ascii', 'ignore')\n\ndef summarize(text,lang):\n    sents = sent_tokenize(text)\n    n = int(round(len(sents)*0.04,0))\n    if n < 1:\n        n=1\n    word_sent = word_tokenize(text.lower())\n\n    if lang==\"fr\":\n        _stopwords = set(stopwords.words('french') + list(punctuation))\n        word_sent = [word for word in word_sent if word not in _stopwords]\n    elif lang==\"en\":\n        _stopwords = set(stopwords.words('english') + list(punctuation))\n        word_tag = pos_tag(word_sent)\n        word_sent = [word[0] for word in word_tag if word[0] not in _stopwords and word[1] == \"NN\"]\n    else:\n        assert 1==0\n\n    ntags = len(word_sent)\n    if ntags > 5:\n        ntags 
= 5\n\n freq = FreqDist(word_sent)\n \n tags = nlargest(ntags, freq, key=freq.get)\n \n ranking = defaultdict(int)\n\n for i,sent in enumerate(sents):\n for v in word_tokenize(sent.lower()):\n if v in freq:\n ranking[i] += freq[v]\n \n sents_idx = nlargest(n, ranking, key=ranking.get)\n summary=\"\"\n for j in sorted(sents_idx):\n summary += sents[j]\n return [summary,tags]\n\ndef main(source,lang,content):\n text = \"\"\n if source == \"url\":\n text = getTextWaPo(content)\n elif source == \"text\":\n text = content\n else:\n assert 1==0\n return summarize(text,lang)","repo_name":"yahiaBoughezala/WeSaw-POC","sub_path":"wesaw-poc/summarizer.py","file_name":"summarizer.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"44414076778","text":"\ndef get_lat_lng(apiKey, address):\n \"\"\"\n Returns the latitude and longitude of a location using the Google Maps Geocoding API. \n API: https://developers.google.com/maps/documentation/geocoding/start\n \n # INPUT -------------------------------------------------------------------\n apiKey [str]\n address [str]\n\n # RETURN ------------------------------------------------------------------\n lat [float] \n lng [float] \n \"\"\"\n import requests\n url = ('https://maps.googleapis.com/maps/api/geocode/json?address={}&key={}'\n .format(address.replace(' ','+'), apiKey))\n try:\n response = requests.get(url)\n resp_json_payload = response.json()\n lat = resp_json_payload['results'][0]['geometry']['location']['lat']\n lng = resp_json_payload['results'][0]['geometry']['location']['lng']\n except:\n print('ERROR: {}'.format(address))\n lat = 0\n lng = 0\n return lat, lng\n\n\nif __name__ == '__main__':\n # get key\n fname = '/Users/matthewkudija/Desktop/GoogleMapsAPIKey.txt'\n file = open(fname, 'r')\n apiKey = file.read()\n\n # get coordinates \n address = '1 Rocket Road, Hawthorne, CA'\n lat, lng = get_lat_lng(apiKey, address)\n print('{} Coordinates:\\nLatitude: {}°\\nLongitude: {}°'.format(address,lat, lng))","repo_name":"mkudija/blog","sub_path":"content/downloads/code/google-maps-api/google-maps-geocoding.py","file_name":"google-maps-geocoding.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"} +{"seq_id":"72067625042","text":"from io import StringIO\nimport json\nfrom dynamic import api_test\n\n\nclass TestApi():\n def test_get_request_no_headers(self, monkeypatch):\n # Test to check get api response without header\n\n self.test_object = api_test.ApiTesting()\n test_uri = \"https://reqres.in/api/users/2\"\n test_headers = \"\"\n save_response = \"n\"\n inputs = StringIO(test_uri+\"\\n\"+test_headers+\"\\n\"+save_response)\n monkeypatch.setattr('sys.stdin', inputs)\n res = self.test_object.get_request()\n\n file = open(\n 'dynamic/tests/test_get/output.json')\n assert res == json.loads(file.read())\n","repo_name":"IndianOpenSourceFoundation/dynamic-cli","sub_path":"dynamic/tests/test_get/test_get_api.py","file_name":"test_get_api.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":169,"dataset":"github-code","pt":"3"} +{"seq_id":"4276619133","text":"def IUT_operator(num1, num2):\n res = ''\n for i, j in zip(str(num1), str(num2)):\n res += i + j\n return int(res)\n\n\ndef IUT_base_operator(num, base):\n if num == 0:\n return '0'\n digits = ''\n while num:\n digits += str(int(num % base))\n num //= base\n 
return digits[::-1]\n\n\ndef IUT_number(num):\n return num == int(str(num)[::-1])\n\n\na = int(input())\nb = int(input())\nc = int(input())\nd = int(input())\n\nx = int(IUT_base_operator(IUT_operator(a, b), c), d)\nprint(x)\nprint(IUT_number(x))\n","repo_name":"At1X/Exercises","sub_path":"Homework_1/Question_5.py","file_name":"Question_5.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"46231943808","text":"# Test file\n# Jakkawan Intaratchaiyakij 61070023\n\nimport pytest\nfrom Router import *\n\ndef create_router():\n r1 = Router(\"Huawei\", \"Special X\", \"R1\")\n r2 = Router(\"Cisco\", \"R7777\", \"R2\")\n r1.add_interface(\"G0/0\")\n r2.add_interface(\"G0/1\")\n return r1,r2\n\ndef test_add_interface():\n r1,r2 = create_router()\n assert r1.interfaces == {\"G0/0\": {'ip': 'unassigned', 'status': 'shutdown', 'connect': ['none', 'none']}}\n assert r2.interfaces == {\"G0/1\": {'ip': 'unassigned', 'status': 'shutdown', 'connect': ['none', 'none']}}\n\ndef test_connect_to():\n r1,r2 = create_router()\n r1.connect_to(\"G0/0\", r2, \"G0/1\")\n assert r1.interfaces[\"G0/0\"]['connect'] == [\"R2\", \"G0/1\"]\n\ndef test_add_ip():\n r1,r2 = create_router()\n r1.add_ip(\"G0/0\", \"192.168.1.1/24\")\n assert r1.interfaces['G0/0']['ip'] == \"192.168.1.1/24\"\n\ndef test_change_status():\n r1,r2 = create_router()\n r1.change_status(\"G0/0\", \"no-shutdown\")\n assert r1.interfaces[\"G0/0\"][\"status\"] == \"no-shutdown\"\n\ndef test_show_interfaces():\n r1,r2 = create_router()\n r1.add_interface(\"G0/1\")\n r1.add_interface(\"G0/2\")\n assert r1.show_interfaces() == \"Show Interfaces of R1\\nR1 has 3 interfaces\\nG0/0 IP-Address: unassigned \\\"shutdown\\\"\\nG0/1 IP-Address: unassigned \\\"shutdown\\\"\\nG0/2 IP-Address: unassigned \\\"shutdown\\\"\\n\"\n assert r2.show_interfaces() == \"Show Interfaces of R2\\nR2 has 1 interfaces\\nG0/1 IP-Address: unassigned \\\"shutdown\\\"\\n\"\n\ndef test_show_cdp():\n r1,r2 = create_router()\n r3 = Router(\"Amazon\", \"RX9898\", \"R3\")\n r1.add_interface(\"G0/1\")\n r3.add_interface(\"G0/0\")\n r1.connect_to(\"G0/0\", r2, \"G0/1\")\n r1.connect_to(\"G0/1\", r3, \"G0/0\")\n assert r1.show_cdp() == \"R1 interface G0/0 connect to R2 on interface G0/1\\nR1 interface G0/1 connect to R3 on interface G0/0\\n\"\n\ndef test_show_info():\n r1,r2 = create_router()\n assert r1.show_info() == \"Brand: Huawei\\nModel: Special X\\nHostname: R1\\n\"\n assert r2.show_info() == \"Brand: Cisco\\nModel: R7777\\nHostname: R2\\n\"\n","repo_name":"ZeroHX/NPA-3","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"86382640707","text":"import requests\nimport json\nimport pandas as pd\nimport sys\nimport datetime\nimport json\n\nfuentes = [[\"https://github.com/Sud-Austral/Fuego_LEAFLET/blob/main/SEMANARIO_INCENDIO/Consolidado/ConsolidadoPuntosFuego.xlsx?raw=true\",\n \"MODIS_24h\"],\n [\"https://github.com/Sud-Austral/Fuego_LEAFLET/blob/main/SEMANARIO_INCENDIO/Consolidado/ConsolidadoPuntosFuego.xlsx?raw=true\",\n \"SUOMI_24h\"],\n [\"https://github.com/Sud-Austral/Fuego_LEAFLET/blob/main/SEMANARIO_INCENDIO/Consolidado/ConsolidadoPuntosFuego.xlsx?raw=true\",\n \"J1_24h\"]]\n\ndef regiones(region):\n \n data = pd.read_excel('Incendios_24h/data.xlsx', sheet_name='regiones')\n\n try:\n data = data[data['COD_REGION'] == region]\n indx = 
data.index[0]\n \n resultado = data['REGION'][indx]\n\n except: \n resultado = ' '\n\n return resultado\n\ndef provincias(prov):\n \n data = pd.read_excel('Incendios_24h/data.xlsx', sheet_name='provincias')\n\n try:\n data = data[data['COD_PROVIN'] == prov]\n indx = data.index[0]\n\n resultado = data['PROVINCIA'][indx]\n\n except:\n resultado = ' '\n \n return resultado\n\ndef descarga(fuente):\n url = fuente[0]\n dataFuente = ''\n\n if(fuente[1] == 'MODIS_24h'):\n dataFuente = 'MODIS'\n\n if(fuente[1] == 'SUOMI_24h'):\n dataFuente = 'SUOMI'\n\n if(fuente[1] == 'J1_24h'):\n dataFuente = 'J1'\n\n print('FUENTE 24: ' + str(dataFuente)) \n \n today = str(datetime.datetime.today())[0:10]\n #yesterday = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\")\n\n new_today = [today]#,yesterday]\n \n dfDatas = pd.read_excel(url)\n\n dfAux = dfDatas[dfDatas['Coordenadas'] != '-27.12613,-109.28293000000001']\n dfAux2 = dfAux[dfAux['Coordenadas'] != '-27.126920000000002,-109.27901000000001']\n\n ##df = dfAux2[dfAux2['acq_date'] == today]\n df = dfAux2[dfAux2['acq_date'].apply(lambda x: x in new_today)]\n print(df[\"acq_date\"].head(5))\n print(\"a\")\n df['NOM_REGION'] = df['REGION'].apply(lambda x: regiones(x))\n print(\"b\")\n df['NOM_PROVINCIA'] = df['PROVINCIA'].apply(lambda x: provincias(x))\n print(\"c\")\n df = df[df['Fuente'] == dataFuente]\n print(\"d\")\n dfLat2 = df #.reset_index()\n print(\"e\")\n\n # AQUÍ SE PODRÍA AGREGAR LA INFORMACIÓN CALLE, COMUNA, PROVINCIA, REGIÓN.\n # CALLE, COMUNA, PROVINCIA, REGIÓN (INCLUIR JSON)\n print(\"Puntos de Calor\")\n print(len(dfLat2))\n dfLat2.to_csv(f\"Incendios_24h/Data/{fuente[1]}/Puntos_Diarios_{fuente[1]}.csv\")\n dfLat2.to_csv(f\"Incendios_24h/Data_Legacy/{fuente[1]}/Puntos_Diarios_{fuente[1]}_{datetime.datetime.now().strftime('%Y-%m-%d')}.csv\")\n \n\n \n return dfLat2\n\ndef getJSON(fuente):\n df = descarga(fuente)\n features = []\n features2 = []\n for i, j in df.iterrows():\n f = {'type': 'Feature', \\\n 'geometry': {'type': 'Point', 'coordinates': [j[\"longitude\"], j[\"latitude\"]]}, \\\n 'properties': {'acq_date': j[\"acq_date\"]}}\n features.append(f.copy())\n f = {'acq_date': j[\"acq_date\"],\"lat\":j[\"latitude\"],\"lng\":j[\"longitude\"], \"location\": j[\"Locacion\"], \"region\": j[\"NOM_REGION\"], \"provincia\": j[\"NOM_PROVINCIA\"], \"comuna\": j[\"Comuna\"]}\n features2.append(f.copy())\n salida = {\"type\":\"FeatureCollection\",\"features\":features}\n with open(f'Incendios_24h/Data/{fuente[1]}/heatmap_{fuente[1]}.json', 'w') as file:\n json.dump(salida, file, indent=4)\n with open(f'Incendios_24h/Data/{fuente[1]}/data_{fuente[1]}.json', 'w') as file:\n json.dump(features2, file, indent=4)\n return True\n\ndef proceso():\n for i in fuentes:\n #descarga(i)\n getJSON(i)\n\nif __name__ == '__main__':\n try:\n proceso()\n except:\n try:\n proceso()\n except:\n error = sys.exc_info()[1]\n print(error)","repo_name":"Sud-Austral/Fuego_LEAFLET","sub_path":"Incendios_24h/automatizacion_24h.py","file_name":"automatizacion_24h.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"11916418286","text":"from matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport csv\r\nfrom collections import Counter\r\n\r\nplt.style.use('seaborn')\r\nprint(plt.style.available)\r\n\r\nwith open ('data.csv') as csv_file:\r\n csv_reader = csv.DictReader(csv_file)\r\n\r\n language_counter = Counter()\r\n for row in csv_reader:\r\n 
language_counter.update(row['LanguagesWorkedWith'].split(';'))\r\n\r\nprint(language_counter)\r\n\r\nlanguages = []\r\npopularity = []\r\n\r\nfor item in language_counter.most_common(55):\r\n    languages.append(item[0])\r\n    popularity.append(item[1])\r\n\r\nplt.scatter(popularity, languages, s=50, c='blue', edgecolor='black', linewidth=1, alpha=0.5)\r\nplt.title('50 Popular Programming Languages')\r\nplt.xlabel('Number of people')\r\nplt.tight_layout()\r\nplt.show()","repo_name":"SamiAlJabar/data_analysis_matplotlib_python","sub_path":"splot.py","file_name":"splot.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"27113947032","text":"from flask import Flask, request\n\napp = Flask(__name__)\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef ussd_callback():\n    session_id = request.values.get(\"sessionId\", None)\n    serviceCode = request.values.get(\"serviceCode\", None)\n    phone_number = request.values.get(\"phoneNumber\", None)\n    text = request.values.get(\"text\", \"default\")\n\n    response = \"\"\n    if text == \"\":\n        response = \"CON Urakaza neza kuri Bafana. Injiza code yuwo wenda gutera inkunga\"\n        # Insert Session Id, Current State, and Next State\n    else:\n        # Get State From Recent Session (the session lookup is not part of this snippet)\n        next_state = None\n        if next_state == 2:\n            # Verify Team\n\n            # If team correct\n            response = \"CON Ugiye gutera inkuga [Insert] . Injiza inkuga ushaka gutanga.\"\n\n            # Update state\n            # If team not correct\n            response = \"CON Kode winjije ntango ariyo. Injiza code yuwo wenda gutera inkunga\"\n        elif next_state == 3:\n            response = \"END Urakoze gutanga inkuga kuri APR FC. Emeza kuri Mobile Money.\"\n\n    return response\n","repo_name":"Mugenga/bafana-ussd","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"33484792551","text":"from pymacaron.log import pymlogger\nimport json\nimport uuid\nimport os\nimport inspect\nimport sys\nimport traceback\nfrom flask import request\nfrom pymacaron.utils import get_container_version\nfrom pymacaron.utils import get_app_name\nfrom pymacaron.utils import is_ec2_instance\nfrom pymacaron.config import get_config\nfrom pymacaron.exceptions import PyMacaronException\n\n\nlog = pymlogger(__name__)\n\n\ntry:\n    from flask import _app_ctx_stack as stack\nexcept ImportError:\n    from flask import _request_ctx_stack as stack\n\n\ndef function_name(f):\n    return \"%s.%s\" % (inspect.getmodule(f).__name__, f.__name__)\n\n\n#\n# Default error reporting\n#\n\ndef default_error_reporter(title=None, data=None, exception=None):\n    \"\"\"By default, error messages are just logged\"\"\"\n    log.error(f\"error: {title}\")\n    log.error(f\"exception: {exception}\")\n    log.error(f\"details:\\n{json.dumps(data, indent=4, sort_keys=True)}\")\n\n\nerror_reporter = default_error_reporter\n\n\ndef set_error_reporter(f):\n    global error_reporter\n    error_reporter = f\n\n\ndef do_report_error(title=None, data=None, exception=None):\n    global error_reporter\n    log.info(\"Reporting error...\")\n\n    try:\n        error_reporter(\n            title=title,\n            data=data,\n            exception=exception,\n        )\n    except Exception as e:\n        # Don't block on replying to api caller\n        log.error(f\"An error occurred while trying to report this error: {e}\\n\")\n        exc_type, exc_value, exc_traceback = sys.exc_info()\n        trace = traceback.format_exception(exc_type, exc_value, exc_traceback, 30)\n        log.error(trace)\n\n\ndef postmortem(f=None, t0=None, t1=None, exception=None, args=[], kwargs={}):\n    \"\"\"Print the error's trace, and call the error reporter with a bunch of data on what happened\"\"\"\n\n    data = {}\n\n    # Gather data about the exception that occurred\n    exc_type, exc_value, exc_traceback = sys.exc_info()\n    trace = traceback.format_exception(exc_type, exc_value, exc_traceback, 30)\n\n    status = 500\n    if isinstance(exception, PyMacaronException):\n        status = exception.status\n\n    if status < 500:\n        return\n\n    str_trace = '\\n'.join(trace)\n    log.error(f\"ERROR - ERROR - ERROR - ERROR - ERROR - ERROR:\\n{str_trace}\")\n\n    data.update({\n        'trace': trace,\n\n        # Set only on the original error, not on forwarded ones, not on\n        # success responses\n        'error_id': str(uuid.uuid4()),\n\n        'is_fatal_error': True if status >= 500 else False,\n\n        # Call results\n        'time': {\n            'start': t0.isoformat(),\n            'end': t1.isoformat(),\n            'microsecs': (t1.timestamp() - t0.timestamp()) * 1000000,\n        },\n\n        # Response details\n        'response': {\n            'status': status,\n            'error_code': exception.code if hasattr(exception, 'code') else 'UNKNOWN',\n            'error_description': str(exception),\n            'user_message': exception.user_message if hasattr(exception, 'user_message') else None,\n        },\n    })\n\n    populate_error_report(data)\n\n    fname = function_name(f)\n    do_report_error(\n        title=f\"{fname}(): {exception}\",\n        data=data,\n        exception=exception,\n    )\n\n\ndef report_warning(title=None, data={}, exception=None):\n    populate_error_report(data)\n    do_report_error(\n        title=title,\n        data=data,\n    )\n\n\ndef populate_error_report(data):\n    \"\"\"Add generic stats to the error report\"\"\"\n\n    # Are we in aws?\n    data['is_ec2_instance'] = is_ec2_instance()\n\n    # If user is authenticated, get her id\n    user_data = {\n        'id': '',\n        'is_auth': 0,\n        'ip': '',\n    }\n\n    if stack.top:\n        # We are in a request context\n        user_data['ip'] = request.remote_addr\n\n        if 'X-Forwarded-For' in request.headers:\n            user_data['forwarded_ip'] = request.headers.get('X-Forwarded-For', '')\n\n        if 'User-Agent' in request.headers:\n            user_data['user_agent'] = request.headers.get('User-Agent', '')\n\n        if hasattr(stack.top, 'current_user'):\n            user_data['is_auth'] = 1\n            user_data['id'] = stack.top.current_user.get('sub', '')\n            for k in ('name', 'email', 'is_expert', 'is_admin', 'is_support', 'is_tester', 'language'):\n                v = stack.top.current_user.get(k, None)\n                if v:\n                    user_data[k] = v\n\n    data['user'] = user_data\n\n    # Server info\n    server = request.base_url\n    server = server.replace('http://', '')\n    server = server.replace('https://', '')\n    server = server.split('/')[0]\n    parts = server.split(':')\n    fqdn = parts[0]\n    port = parts[1] if len(parts) == 2 else ''\n\n    data['server'] = {\n        'fqdn': fqdn,\n        'port': port,\n        'api_name': get_app_name(),\n        'api_version': get_container_version(),\n        'PYM_ENV': os.environ.get('PYM_ENV', ''),\n    }\n\n    # Endpoint data\n    body_str = ''\n    query_str = ''\n    try:\n        body_str = str(request.get_data())\n        query_str = str(request.args)\n    except Exception:\n        pass\n\n    data['request'] = {\n        'id': f\"{get_app_name()}, {request.method}, {request.path}\",\n        'url': request.url,\n        'base_url': request.base_url,\n        'path': request.path,\n        'method': request.method,\n        'request_body': body_str,\n        'request_query': query_str,\n    }\n\n\n#\n# DEPRECATED\n#\n\ndef report_error(title=None, data={}, caught=None, is_fatal=False):\n    \"\"\"Format a crash report and send it somewhere relevant. 
There are two\n types of crashes: fatal crashes (backend errors) or non-fatal ones (just\n reporting a glitch, but the api call did not fail)\"\"\"\n\n log.info(\"Caught error: %s\\ndata=%s\" % (title, json.dumps(data, indent=4)))\n\n # Don't report errors if NO_ERROR_REPORTING set to 1 (set by run_acceptance_tests)\n if os.environ.get('DO_REPORT_ERROR', None):\n # Force error reporting\n pass\n elif os.environ.get('NO_ERROR_REPORTING', '') == '1':\n log.info(\"NO_ERROR_REPORTING is set: not reporting error!\")\n return\n elif 'is_ec2_instance' in data:\n if not data['is_ec2_instance']:\n # Not running on amazon: no reporting\n log.info(\"DATA[is_ec2_instance] is False: not reporting error!\")\n return\n elif not is_ec2_instance():\n log.info(\"Not running on an EC2 instance: not reporting error!\")\n return\n\n # Fill error report with tons of usefull data\n if 'user' not in data:\n populate_error_report(data)\n\n # Add the message\n data['title'] = title\n data['is_fatal_error'] = is_fatal\n\n # Add the error caught, if any:\n if caught:\n data['error_caught'] = \"%s\" % caught\n\n # Add a trace - Formatting traceback may raise a UnicodeDecodeError...\n data['stack'] = []\n try:\n data['stack'] = [l for l in traceback.format_stack()]\n except Exception:\n data['stack'] = 'Skipped trace - contained non-ascii chars'\n\n # inspect may raise a UnicodeDecodeError...\n fname = ''\n try:\n fname = inspect.stack()[1][3]\n except Exception as e:\n fname = 'unknown-method'\n\n # Format the error's title\n status, code = 'unknown_status', 'unknown_error_code'\n app_name = get_config().name\n if 'response' in data:\n status = data['response'].get('status', status)\n code = data['response'].get('error_code', code)\n title_details = \"%s %s %s\" % (app_name, status, code)\n else:\n title_details = \"%s %s()\" % (app_name, fname)\n\n if is_fatal:\n title_details = 'FATAL ERROR %s' % title_details\n else:\n title_details = 'NON-FATAL ERROR %s' % title_details\n\n if title:\n title = \"%s: %s\" % (title_details, title)\n else:\n title = title_details\n\n global error_reporter\n log.info(\"Reporting crash...\")\n\n try:\n error_reporter(title, json.dumps(data, sort_keys=True, indent=4))\n except Exception as e:\n # Don't block on replying to api caller\n log.error(\"Failed to send email report: %s\" % str(e))\n\n\n\n#\n# Generic crash-handler as a decorator\n#\n\ndef crash_handler(f):\n \"\"\"Decorate method with pymacaron's generic crash handler\"\"\"\n return generate_crash_handler_decorator(None)(f)\n","repo_name":"pymacaron/pymacaron","sub_path":"pymacaron/crash.py","file_name":"crash.py","file_ext":"py","file_size_in_byte":8159,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"3"} +{"seq_id":"41750968780","text":"import numpy as np\nimport os\nimport pyqmc.api as pyq\nimport copy\nimport h5py\n\n\ndef test_complex_linemin(H2_ccecp_rhf, optfile=\"linemin.hdf5\"):\n \"\"\"Test linemin for the case of complex orbital coefficients.\n We check whether it completes successfully and whether the energy has decreased.\n \"\"\"\n mol, mf = H2_ccecp_rhf\n mf = copy.copy(mf)\n noise = (np.random.random(mf.mo_coeff.shape) - 0.5) * 0.2\n mf.mo_coeff = mf.mo_coeff * 1j + noise\n\n slater_kws = {\"optimize_orbitals\": True}\n wf, to_opt = pyq.generate_wf(mol, mf, slater_kws=slater_kws)\n\n configs = pyq.initial_guess(mol, 100)\n acc = pyq.gradient_generator(mol, wf, to_opt)\n pyq.line_minimization(\n wf, configs, acc, verbose=True, hdf_file=optfile, max_iterations=5\n )\n 
assert os.path.isfile(optfile)\n    with h5py.File(optfile, \"r\") as f:\n        en = f[\"energy\"][()]\n    assert en[0] > en[-1]\n    os.remove(optfile)\n","repo_name":"WagnerGroup/pyqmc","sub_path":"tests/integration/test_complex_linemin.py","file_name":"test_complex_linemin.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"3"}
+{"seq_id":"11346334486","text":"import unittest\n\nfrom webkitpy.common.thread.messagepump import MessagePump, MessagePumpDelegate\nfrom webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue\n\n\nclass TestDelegate(MessagePumpDelegate):\n    def __init__(self):\n        self.log = []\n\n    def schedule(self, interval, callback):\n        self.callback = callback\n        self.log.append(\"schedule\")\n\n    def message_available(self, message):\n        self.log.append(\"message_available: %s\" % message)\n\n    def final_message_delivered(self):\n        self.log.append(\"final_message_delivered\")\n\n\nclass MessagePumpTest(unittest.TestCase):\n\n    def test_basic(self):\n        queue = ThreadedMessageQueue()\n        delegate = TestDelegate()\n        pump = MessagePump(delegate, queue)\n        self.assertEqual(delegate.log, [\n            'schedule'\n        ])\n        delegate.callback()\n        queue.post(\"Hello\")\n        queue.post(\"There\")\n        delegate.callback()\n        self.assertEqual(delegate.log, [\n            'schedule',\n            'schedule',\n            'message_available: Hello',\n            'message_available: There',\n            'schedule'\n        ])\n        queue.post(\"More\")\n        queue.post(\"Messages\")\n        queue.stop()\n        delegate.callback()\n        self.assertEqual(delegate.log, [\n            'schedule',\n            'schedule',\n            'message_available: Hello',\n            'message_available: There',\n            'schedule',\n            'message_available: More',\n            'message_available: Messages',\n            'final_message_delivered'\n        ])\n","repo_name":"WebKit/WebKit","sub_path":"Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py","file_name":"messagepump_unittest.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":6880,"dataset":"github-code","pt":"3"}
+{"seq_id":"30331953682","text":"import numpy as np\nimport pandas as pd\n\ndf = pd.read_csv('./data/train.csv')\ndf['Id_index'] = df['Id']\ndf = df.set_index('Id_index')\n\ndef rm_null(seq):\n    nb = len(seq['Ref'])\n    if (seq['Ref'].isnull()).sum() == nb:\n        return seq['Id'].values[0]\n    return -1\n\ndf_g = df.groupby(df.index)\ndf_id2rm = df_g.apply(rm_null)\n# -1 marks groups to keep; only drop the ids that were actually flagged\ndf_id2rm = df_id2rm[df_id2rm != -1]\ndf = df.drop(df_id2rm.values, axis=0)\n\ndf.to_csv('./data/train_nnull.csv')\n","repo_name":"Cadene/FDMS","sub_path":"TME3/rm_null_old.py","file_name":"rm_null_old.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"74726331600","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 20 18:00:18 2019\n\n@author: jagov\n\"\"\"\n\nfrom phe import paillier as phe\nimport numpy as np\nimport pheMat\nimport random\nimport time\n\nt0 = time.time()\n\npublic_key, private_key = phe.generate_paillier_keypair()\nr = random.SystemRandom().randrange(1,2**16)\n#a1 = public_key.encrypt(5)\n#b1 = private_key.decrypt(a1)\n#A1 = np.array([[1,2],[3,4],[5,6],[7,8]])\n#A2 = np.array([[8,-1],[-7,5],[2,3],[-4,6]])\n#A1_e = pheMat.encrypt_ndarray(public_key,A1)\n#A2_e = pheMat.encrypt_ndarray(public_key,A2)\n\nA = np.array([[1,0],[-1,0],[0,1],[0,-1]])\nb = np.array([[2],[-1],[1.5],[-.5]])\n\nQ = np.array([[6,2],[2,4]])\nQi = np.linalg.inv(Q)\nc = np.array([[2],[4]])\n\nn = 50\nmu1 = np.zeros((4,1))\nmu = np.zeros((4,n+2))\neta = 1\n\nb_e = 
pheMat.encrypt_ndarray(public_key,b)\nc_e = pheMat.encrypt_ndarray(public_key,c)\nmu1_e = pheMat.encrypt_ndarray(public_key,mu1)\n\nt1 = time.time()\n\nfor i in range(n):\n Dg = -A @Qi @(A.T @mu[:,[i]] + c) - b\n mu[:,[i+1]] = np.maximum(np.zeros((4,1)), mu[:,[i]] + eta*Dg)\n xs = -Qi @(A.T @mu[:,[i+1]] + c)\n Zs = .5*xs.T @Q @xs + c.T @xs\n \nt2 = time.time()\n\nfor i in range(n):\n tl0 = time.time()\n Dg_e = -A @Qi @(A.T @mu1_e + c_e) - b_e\n mu1_be = r*(mu1_e + eta*Dg_e)\n tl1 = time.time()\n mu1_b = np.maximum(np.zeros((4,1)),mu1_be.decrypt(private_key))\n tl2 = time.time()\n mu1_e = pheMat.encrypt_ndarray(public_key,1/r*mu1_b)\n tl3 = time.time()\n \nt3 = time.time()\n\nxs1_e = -Qi @(A.T @mu1_e + c_e)\nxs1 = xs1_e.decrypt(private_key)\nZs1 = .5*xs1.T @Q @xs1 + c.T @xs1 \n\n\nt4 = time.time()","repo_name":"jgovers/gitPHE","sub_path":"Python/QP_Shoukry/mainPheQpShoukry.py","file_name":"mainPheQpShoukry.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29056907207","text":"from mdh.core.changes import Create\n\n\ndef test_change_basics(init_user, init_resource, spawn_batch_change, init_batch):\n\n user = init_user()\n res = init_resource(user=user)\n\n change_1 = spawn_batch_change(type=Create, resource=res)\n\n batch = init_batch(creator=user, changes=[change_1])\n\n assert f'{change_1}'\n assert change_1.type_params == {'count': 1}\n assert change_1.is_draft\n\n batch.mark_published()\n change_1.refresh_from_db()\n assert change_1.is_published\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/tests/core/models/test_batch_change.py","file_name":"test_batch_change.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24342752074","text":"from Bio.ExPASy import ScanProsite\nimport numpy as np\nimport time\n\nprotStructure=np.load(\"../data/protStructure.npy\").item()\nkinaseDict={}\nATPDict ={}\n\ncount=0\nmissing=0\nfor key, sequence in protStructure.items():\n count=count+1\n print(count)\n if count % 50 ==0:\n time.sleep(60)# sleep 1 mn for very 50 query to avoid timeout\n handle = ScanProsite.scan(seq=sequence)\n result = ScanProsite.read(handle)\n for i in range(len(result)): #I am looping over all results but there should be only one that has kinase\n if result[i]['signature_ac']=='PS50011':# Protein kinase domain\n kinaseDict[key]=sequence[result[i]['start']:result[i]['stop']]\n elif result[i]['signature_ac']=='PS00107':# ATP binding pocket\n ATPDict[key]=sequence[result[i]['start']:result[i]['stop']]\n \nprint(missing)\nnp.save('kinaseDomain.npy', kinaseDict)\nnp.save('ATPDomain.npy', ATPDict)\n","repo_name":"marouenbg/IDG-challenge","sub_path":"lib/extractKinaseDomain.py","file_name":"extractKinaseDomain.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25043357187","text":"#### TfidfVectorizer ############\r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\n\r\ndef func_tfidf(docList):\r\n vectorizer = TfidfVectorizer()\r\n vectors = vectorizer.fit_transform(docList)\r\n feature_names = vectorizer.get_feature_names()\r\n dense = vectors.todense()\r\n denselist = dense.tolist()\r\n df = pd.DataFrame(denselist, columns=feature_names)\r\n return df\r\n\r\ndocList = ['the man went out for a walk', 'the children sat around 
the fire', 'Game of Thrones is an amazing tv series!', 'Game of Thrones is the best tv series!', 'Game of Thrones is so great']\r\nfunc_tfidf(docList)","repo_name":"NitinBhore/Natural-language-processing","sub_path":"preprocessing/TfidfVectorizer.py","file_name":"TfidfVectorizer.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"39498274103","text":"import logging\nimport rabbitpy\n\nfrom tasks.QueueAwaker import QueueAwaker\nfrom tasks.ExampleTask import ExampleTask\n\nfrom constants import RABBITMQ_URI, RABBITMQ_EXCHANGE\n\nfrom emitter import RabbitMqEmitter\n\nRABBITMQ_CALLBACKS = {\n 'QueueAwaker': QueueAwaker().rabbitpy_callback,\n 'ExampleTask': ExampleTask().rabbitpy_callback,\n}\n\n# RabbitMqEmitter is used to consume messages from a specific queue\n# It is advised to run only one listener per-container so that they can be easily scaled up and down\nclass RabbitMqListener(RabbitMqEmitter):\n\n def __init__(self, queue_name):\n super().__init__()\n self.logger = logging.getLogger('agent.agent.rabbit.RabbitMqEmitter')\n self.connection = rabbitpy.Connection(url=RABBITMQ_URI)\n self.channel = self.connection.channel()\n self.channel.enable_publisher_confirms()\n self.exchange = RABBITMQ_EXCHANGE\n self.queue_name = queue_name\n self.logger = logging.getLogger('agent.agent.rabbit.RabbitMqListener.%s' % queue_name)\n self.init_queue(queue_name)\n self.thread = None\n\n def init_exchange(self):\n if RABBITMQ_EXCHANGE != '':\n self.exchange = rabbitpy.Exchange(channel=self.channel, name=RABBITMQ_EXCHANGE)\n self.exchange.declare()\n\n def init_queues(self):\n for queue_name in RABBITMQ_CALLBACKS.keys():\n self.init_queue(queue_name=queue_name)\n\n def init_queue(self, queue_name):\n queue = rabbitpy.Queue(channel=self.channel, name=queue_name, durable=True)\n queue.declare()\n return queue\n\n def cleanup(self):\n if self.channel is not None:\n self.channel.close()\n self.channel = None\n if self.connection is not None:\n self.connection.close()\n self.connection = None\n\n def start_consuming(self):\n self.logger.debug(\" [✓] Started listening on queue: %s\" % self.queue_name)\n callback = RABBITMQ_CALLBACKS[self.queue_name]\n queue = self.init_queue(self.queue_name)\n\n # Consume the message\n for message in queue:\n # message.pprint(True)\n callback(message=message)\n\n def stop_consuming(self):\n self.logger.debug(\" [✓] Stopping listening on queue: %s\" % self.queue_name)\n if self.thread is not None:\n self.thread.join(timeout=10)\n self.logger.debug(' [×] Stopped listening on queue: %s' % self.queue_name)\n\n def close(self):\n self.stop_consuming()\n self.cleanup()\n","repo_name":"bodom0015/rabbitpy-example","sub_path":"agent/listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14687406115","text":"import pandas as pd\n\nfrom fastapi import FastAPI\nfrom models import Input, Output\nfrom predlib import get_model_response\n\n\napp = FastAPI()\n\nmodel_name = \"Iris Setosa\"\nversion = \"v1.0.0\"\n\n\n@app.get('/')\nasync def model_info():\n \"\"\"Return model information, version, how to call\"\"\"\n return {\n \"name\": model_name,\n \"version\": version\n }\n\n\n@app.get('/health')\nasync def service_health():\n \"\"\"Return service health\"\"\"\n return {\n \"ok\"\n }\n\n\n@app.post('/predict', response_model=Output)\nasync def 
model_predict(input: Input):\n \"\"\"Prediction\"\"\"\n X = pd.json_normalize(input.__dict__)\n X = X.to_numpy()\n X = X.tolist()\n response = get_model_response(X)\n return response\n","repo_name":"delcaidero/iris-api","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6468374649","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nfrom drugs.items import DiseaseMedicinesItem\nfrom scrapy.selector import Selector\nfrom scrapy.http import HtmlResponse\n\nclass DrugsDiseaseNameSpider(scrapy.Spider):\n name = 'drugs_disease_name'\n allowed_domains = ['drugs.com']\n first_page_url = ('https://www.drugs.com/condition/a.html')\n\n def __init__(self):\n self.condition_name = None\n self.dict_result = {}\n\n \"\"\" get results of first page \"\"\"\n def start_requests(self):\n request = scrapy.Request(\n url=DrugsDiseaseNameSpider.first_page_url,\n callback=self.parse_diseage_link,\n )\n yield request\n \n \"\"\" get the pages that should be scraped \"\"\"\n def parse_diseage_link(self, response):\n results = response.xpath('//div[@class=\"contentBox\"]/ul[@class=\"column-list-2\"]/li/a/@href')\n\n for disease_link in results: \n url_disease = 'https://www.drugs.com' + disease_link.extract() \n request = scrapy.Request(\n url=url_disease,\n callback=self.parse_page,\n )\n yield request\n\n \"\"\" get results of all pages \"\"\"\n def parse_page(self, response):\n diseaseMedicineItem = DiseaseMedicinesItem()\n url = self.configure_url(response.url)\n pages = 1\n\n if response.selector.xpath('//div[@class=\"contentBox\"]/div[@id=\"conditionBoxWrap\"]'):\n if response.selector.xpath('//div[@class=\"contentBox\"]/div[@id=\"conditionBoxWrap\"]/div[@class=\"paging-list paging-list-condition-list\"]'):\n pages = int(response.css('td.paging-list-index:nth-child(2) a::text')[-1].extract())\n \n for page in range(1, pages + 1):\n\n url_page = '{}?page_number={}' .format(url, page)\n print('\\n{}' .format(url_page))\n request = scrapy.Request(\n url=url_page,\n callback= self.parse_medications,\n dont_filter = True\n )\n yield request\n else:\n condition_name = response.css('h1::text').extract_first()\n self.dict_result[condition_name[19:]] = \"\"\n\n\n for num in range(0, len(self.dict_result)):\n item = self.dict_result.popitem()\n print('{} => {}' .format(item[0], item[1]))\n\n diseaseMedicineItem['disease'] = item[0]\n diseaseMedicineItem['medicine'] = item[1]\n # diseaseMedicineItem['amount'] = len(item[1])\n\n yield diseaseMedicineItem\n\n def parse_medications(self, response):\n\n condition_name = response.css('h1::text').extract_first()\n medications = response.xpath('//div[@class=\"contentBox\"]/div[@id=\"conditionBoxWrap\"]/table[@class=\"condition-table\"]/tbody/tr/td/span/a[@class=\"condition-table__drug-name__link\"]/text()')\n \n\n if condition_name[19:] in self.dict_result:\n for medication in medications:\n self.dict_result[condition_name[19:]].append(medication.extract())\n else:\n self.dict_result[condition_name[19:]] = medications.extract()\n \n def configure_url(self, url):\n cut_point = url.find('html')\n return url[:cut_point + 4]\n","repo_name":"viniciusRosa/Scrapy_drugs.com","sub_path":"drugs/spiders/drugs_disease_name.py","file_name":"drugs_disease_name.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"6882330765","text":"\"\"\"fix spelling\n\nRevision ID: b2da2bb279a5\nRevises: b81c52207a46\nCreate Date: 2021-11-22 10:15:44.602532\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b2da2bb279a5'\ndown_revision = 'b81c52207a46'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('candidates', sa.Column('company_id', sa.Integer(), nullable=False))\n op.drop_constraint('candidates_comapany_id_fkey', 'candidates', type_='foreignkey')\n op.create_foreign_key(None, 'candidates', 'companies', ['company_id'], ['id'])\n op.drop_column('candidates', 'comapany_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('candidates', sa.Column('comapany_id', sa.INTEGER(), autoincrement=False, nullable=False))\n op.drop_constraint(None, 'candidates', type_='foreignkey')\n op.create_foreign_key('candidates_comapany_id_fkey', 'candidates', 'companies', ['comapany_id'], ['id'])\n op.drop_column('candidates', 'company_id')\n # ### end Alembic commands ###\n","repo_name":"CamChandler98/BeatBreakdown","sub_path":"migrations/versions/20211122_101544_fix_spelling.py","file_name":"20211122_101544_fix_spelling.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9091708303","text":"import random\nimport operator\nimport matplotlib.pyplot\nimport time\n\n\nnum_of_agents = 10\nnum_of_iterations = 100\nagents = []\n\n# Make the agents.\nfor i in range(num_of_agents):\n agents.append([random.randint(0,99),random.randint(0,99)])\n\n# Move the agents.\nfor j in range(num_of_iterations):\n for i in range(num_of_agents):\n\n if random.random() < 0.5:\n agents[i][0] = (agents[i][0] + 1) % 100\n else:\n agents[i][0] = (agents[i][0] - 1) % 100\n\n if random.random() < 0.5:\n agents[i][1] = (agents[i][1] + 1) % 100\n else:\n agents[i][1] = (agents[i][1] - 1) % 100\n\n'''\nanswer = (((agents[0][0] - agents[1][0])**2) + ((agents[0][1] - agents[1][1])**2))**0.5\nprint(answer)\n'''\n\nmatplotlib.pyplot.ylim(0, 99)\nmatplotlib.pyplot.xlim(0, 99)\nfor i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i][1],agents[i][0])\nmatplotlib.pyplot.show()\n\n# calculating distance between 2 points\ndef distance_between(agents_row_a, agents_row_b):\n\treturn (((agents_row_a[0] - agents_row_b[0])**2) + ((agents_row_a[1] - agents_row_b[1])**2))**0.5\n\t\nstart=time.clock()\nend = time.clock()\nprint(\"time = \" + str(end - start))\n\n# new model.py file using agent \n\t\nimport random\nimport operator\nimport matplotlib.pyplot\nimport agentframework\nimport environment\nimport eat\n\ndef distance_between(agents_row_a, agents_row_b):\n return (((agents_row_a.x - agents_row_b.x)**2) + \n ((agents_row_a.y - agents_row_b.y)**2))**0.5\n\nnum_of_agents = 10\nnum_of_iterations = 100\nagents = []\n\n# Make the agents.\nfor i in range(num_of_agents):\n agents.append(agentframework.Agent())\n\n# Move the agents.\nfor j in range(num_of_iterations):\n for i in range(num_of_agents):\n agents[i].move()\n\nmatplotlib.pyplot.xlim(0, 99)\nmatplotlib.pyplot.ylim(0, 99)\nfor i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i].x, agents[i].y)\nmatplotlib.pyplot.show()\n\nfor agents_row_a in agents:\n for agents_row_b in agents:\n distance = distance_between(agents_row_a, 
agents_row_b)\n\t\t\n\n\t\t\n# interact with environment\nenvironment = []\nrowlist = []\nvalue=[]\nenvironment.append(rowlist)\nrowlist.append(value)\n\nfor j in range(num_of_iterations):\n for i in range(num_of_agents):\n agents[i].move()\n agents[i].eat()\n\nmatplotlib.pyplot.xlim(0, 99)\nmatplotlib.pyplot.ylim(0, 99)\nmatplotlib.pyplot.imshow(environment)\nfor i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i].x,agents[i].y)\nmatplotlib.pyplot.show()\n\nimport random\nimport operator\nimport matplotlib.pyplot\nimport matplotlib.animation \n\nnum_of_agents = 10\nnum_of_iterations = 100\nagents = []\n\nfig = matplotlib.pyplot.figure(figsize=(7, 7))\nax = fig.add_axes([0, 0, 1, 1])\n\n\n#ax.set_autoscale_on(False)\n\n# Make the agents.\nfor i in range(num_of_agents):\n agents.append([random.randint(0,100),random.randint(0,100)])\n\ncarry_on = True\t\n\t\ndef update(frame_number):\n \n fig.clear() \n global carry_on\n \n for i in range(num_of_agents):\n if random.random() < 0.5:\n agents[i][0] = (agents[i][0] + 1) % 99 \n else:\n agents[i][0] = (agents[i][0] - 1) % 99\n \n if random.random() < 0.5:\n agents[i][1] = (agents[i][1] + 1) % 99 \n else:\n agents[i][1] = (agents[i][1] - 1) % 99 \n \n if random.random() < 0.1:\n carry_on = False\n print(\"stopping condition\")\n \n for i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i][0],agents[i][1])\n #print(agents[i][0],agents[i][1])\n\n\t\t\ndef gen_function(b = [0]):\n a = 0\n global carry_on #Not actually needed as we're not assigning, but clearer\n while (a < 10) & (carry_on) :\n yield a\t\t\t# Returns control and waits next call.\n a = a + 1\n\n\n#animation = matplotlib.animation.FuncAnimation(fig, update, interval=1, repeat=False, frames=10)\nanimation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)\n\n\n\nmatplotlib.pyplot.show()\n","repo_name":"ericakufa/assignment","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11491543540","text":"import functions\n\nsource_image = r'monitor-1.jpg'\n\nheroes_to_find = functions.read_heroes_text(source_image)\n\nhero_models = functions.import_models()\n\npics_in_screenshot = functions.extract_miniatures(source_image)\nfound_positions = []\nfound_coords = []\n\nfor hero in heroes_to_find:\n found_positions.append(functions.find_hero_position(hero))\n\nfor position in found_positions:\n found_coords.append(functions.find_coords(position))\n\nglobal last_img\n\nfor coord in found_coords:\n last_img = functions.draw_box(coord)\n\nlast_img.show()\nprint(\"KONIEC\")\n","repo_name":"filiard/hero_finder","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32348332112","text":"class Solution:\n def maximumGap(self, nums: List[int]) -> int:\n if len(nums)<2:\n return 0\n if len(nums)==2:\n return nums[1]-nums[0]\n maxx=max(nums)\n minn=min(nums)\n if maxx==minn:\n return 0\n g=ceil((maxx-minn)/(len(nums)-1))\n size_bucket=((maxx-minn)//g) + 1\n buckets=[[] for i in range(size_bucket)]\n for i in buckets:\n i.append(float('inf'))\n i.append(float('-inf'))\n #[1,3],[4,6],[7,9],[10,12]\n for i in nums:\n bucknum=(i-minn)//g\n if ibuckets[bucknum][1]:\n buckets[bucknum][1]=i\n nbuckets=[]\n for i in buckets: \n if i[0]!=float('inf'):\n nbuckets.append(i)\n 
maxi=float('-inf')\n        for i in range(1,len(nbuckets)):\n            maxi=max(maxi,nbuckets[i][0]-nbuckets[i-1][1])\n        return maxi    \n        \n        ","repo_name":"mayank9200/My_codes","sub_path":"0164-maximum-gap/0164-maximum-gap.py","file_name":"0164-maximum-gap.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"15511918016","text":"'''\r\n    itertools.cycle() Function\r\n    ==========================\r\n    # The itertools function cycle() cycles through an iterator endlessly.\r\n    Syntax- \r\n        itertools.cycle(iterable)\r\n\r\n    # To stop the iteration we have to check for the value at which we want\r\n    to stop and break out of the loop through a condition.\r\n\r\n'''\r\n\r\n# working with string data_type\r\nfrom itertools import cycle\r\nfor i in cycle(\"Message\"):\r\n    print(i)\r\n    if i ==\"g\" : # reached the last character\r\n        break\r\n\r\n# working with list data_type\r\nfrom itertools import cycle\r\n# defining an iterable\r\nrandom_list_items = [1, 2, 4, \"a\", \"Orange\", \"Apple\"] \r\nfor i in cycle(random_list_items):\r\n    # print(i) # uncomment it and see it goes through endlessly\r\n    if i == \"a\":\r\n        break\r\n\r\n","repo_name":"JaberKh16/Python-Fundamentals-Concept-Practices","sub_path":"Section-16 Python Itertools Module Functions/16.2 itertools.cycle().py","file_name":"16.2 itertools.cycle().py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13715936735","text":"N = int(input())\n\ngrid = [\n    list(map(int,input().split()))\n    for _ in range(N)\n]\n\nr,c,m1,m2,m3,m4,dir = list(map(int,input().split()))\n\n# dir - 1: clockwise, 0 : counterclockwise\n\ndir_mapper = {\n    1:[(-1,1),(-1,-1),(1,-1),(1,1)], #cclk\n    0:[(-1,-1),(-1,1),(1,1),(1,-1)], #clk\n}\n\ndef rotate(dir):\n\n    dirs = dir_mapper[dir]\n    if dir == 0 :\n        pattern = [m4,m3,m2,m1]\n    elif dir == 1 :\n        pattern = [m1,m2,m3,m4]\n    cur_x,cur_y = r-1,c-1\n    temp = grid[cur_x][cur_y]\n    for (dx,dy), length in zip(dirs,pattern):\n        for _ in range(length):\n            nx,ny = cur_x + dx, cur_y + dy\n            grid[cur_x][cur_y] = grid[nx][ny] \n            cur_x,cur_y = nx,ny\n    if dir == 0:\n        grid[r-1-1][c-1+1] = temp\n    elif dir == 1 :\n        grid[r-1-1][c-1-1] = temp \n\nrotate(dir)\n\nfor row in grid:\n    print(*row)","repo_name":"SeongSuKim95/Python_practice","sub_path":"Implementation_practice/기울어진 직사각형 회전.py","file_name":"기울어진 직사각형 회전.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"30702762186","text":"# Matplot lib\nimport matplotlib.pyplot as plt\n# Bibtex parser\nimport bibtexparser\n# Graphs\nimport networkx as nx\n\nfrom networkx.drawing.layout import *\n\ndef main():\n    # Settings\n    cite_as_noun = True\n\n    # Open bibtex\n    with open('bibtex.bib') as bibtex_file:\n        bib_database = bibtexparser.load(bibtex_file)\n\n    print(bib_database.entries)\n    print(bib_database.comments)\n    print(bib_database.preambles)\n    print(bib_database.strings)\n\n    # Define graph\n    G = nx.DiGraph()\n    \n    dict_pos = {}\n    dict_qpycurr = {}\n    \n    initial_year = 2000\n    current_year = 2020\n    dict_qpy = {}\n    i = initial_year\n    while i <= current_year:\n        qtde = count_by_attribute(bib_database.entries,'year',str(i))\n        if qtde > 0:\n            dict_qpy[str(i)] = qtde\n            dict_qpycurr[str(i)] = 0\n        i += 1\n    \n    for e in dict_qpy:\n        initial_year = e\n        break\n    \n    for e in dict_qpy:\n        final_year = e\n    \n    \n    # Create nodes\n    mapping = {}\n    i = 1\n    for 
entry in bib_database.entries:\n node_color = 'blue' if 'citations' in entry else 'red'\n node_name = entry['ID']\n G.add_node(node_name, color=node_color)\n year = float(entry['year'])\n positionx = (dict_qpycurr[entry['year']])/dict_qpy[entry['year']]\n dict_qpycurr.update({entry['year']: dict_qpycurr[entry['year']]+1 })\n position = [positionx + (year - int(initial_year))/1000, (year - int(initial_year))/(int(final_year)-int(initial_year))+positionx/5]\n dict_pos[i] = position\n mapping[node_name] = i\n i+=1\n\n \n # Create edges\n for entry in bib_database.entries:\n if 'citations' in entry:\n print(entry['ID'])\n for citation_id in entry['citations'].split(','):\n print(' ' + citation_id)\n if citation_id:\n if cite_as_noun:\n citation_entry = find_by_attribute(bib_database.entries,'ID',citation_id)\n if citation_entry:\n node_name_a = entry['ID']\n node_name_b = citation_entry['ID']\n #print(node_name_a + ' : ' + node_name_b)\n G.add_edge(node_name_a, node_name_b)\n else:\n G.add_edge(entry['ID'], citation_id)\n\n\n \n \n \n # Calculate node colors as the number of citations it has\n node_colors = []\n for node in G.nodes:\n in_edges = G.in_degree(node)\n node_colors.append(in_edges)\n range_of_colors = max(node_colors) - min(node_colors)\n \n # Convert graph to figure\n plt.figure(figsize=(8,6))\n\n # Options\n \n #pos=nx.planar_layout(G)\n \n if G.number_of_nodes() <30:\n try: \n dict_pos = nx.planar_layout(G)\n nx.draw_networkx(G, with_labels=True, node_color=node_colors, cmap=plt.cm.Blues, vmin=min(node_colors)-range_of_colors*0.4, vmax=max(node_colors), font_weight='bold', pos=nx.planar_layout(G))\n except:\n dict_pos = nx.circular_layout(G)\n nx.draw_networkx(G, with_labels=True, node_color=node_colors, cmap=plt.cm.Blues, vmin=min(node_colors)-range_of_colors*0.4, vmax=max(node_colors), font_weight='bold', pos=nx.circular_layout(G))\n \n else:\n G = nx.relabel_nodes(G, mapping)\n i = 1\n for e in mapping:\n print(str(i)+': ' + e)\n i +=1\n nx.draw_networkx(G, with_labels=True, node_color=node_colors, cmap=plt.cm.Blues, vmin=min(node_colors)-range_of_colors*0.4, vmax=max(node_colors), font_weight='bold', pos=dict_pos)\n for i in range(int(initial_year), int(final_year)+1):\n plt.text(-0.2, (i - int(initial_year))/(int(final_year)-int(initial_year))-0.03, str(i)+':')\n \n maxDP = 0.0\n for e in dict_pos:\n if maxDP < dict_pos[e][1]:\n maxDP = dict_pos[e][1]\n for node in G:\n plt.text(dict_pos[node][0], dict_pos[node][1]-1.28/(32*maxDP), str(G.in_degree(node)), fontsize=4, color='red')\n \n \n \n \n \n plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n plt.tick_params(axis='y', which='both', right=False, left=False, labelleft=False)\n for pos in ['right','top','bottom','left']:\n plt.gca().spines[pos].set_visible(False)\n \n\n #plt.show()\n \n \n # Save plot\n plt.savefig(\"citation_graph.eps\")\n\n # End main\n return 0\n\n\ndef extract_author_string(entry):\n \"\"\"This extracts the authors from an entry\"\"\"\n authors = entry['author'].replace(';', ',').replace('and',',').split(',')\n citation_format = ''\n if authors:\n first_author_names = authors[0].split(' ')\n first_author_citation = first_author_names[0]\n if len(first_author_names) > 1:\n if len(first_author_names[1]) <= 3:\n first_author_citation += ' ' + first_author_names[1]\n else:\n first_author_citation += ' ' + first_author_names[1][0] + '.'\n citation_format = first_author_citation + (' et. 
al' if len(authors) > 1 else '')\n citation_format += ' (' + entry['year'] + ')'\n return citation_format\n\n\ndef find_by_attribute(entries,attribute_name,attribute_value):\n for entry in entries:\n if entry[attribute_name] == attribute_value:\n return entry\n return None\n\ndef count_by_attribute(entries,attribute_name,attribute_value):\n count = 0\n for entry in entries:\n if entry[attribute_name] == attribute_value:\n count = count + 1\n return count\n\n\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"NoemiHonorato10/citation_graph","sub_path":"citation_graph.py","file_name":"citation_graph.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3258241710","text":"import numpy as np\nimport pandas as pd\nimport streamlit as st\nimport pickle\nimport calendar\n\nst.set_page_config(layout=\"centered\", page_title=\"Time-series Sales Prediction\",\n menu_items={\n \"Get Help\": \"https://www.linkedin.com/in/andik-al-fauzi/\",\n \"Report a bug\": \"https://github.com/andik-alfauzi\",\n \"About\": \"### Time-series Sales Prediction App - By Andik Al Fauzi\"})\n\n# load the model \nwith open('SVMModel.pkl', 'rb') as file1:\n SVMModel = pickle.load(file1)\n\nwith open('Scaling.pkl', 'rb') as file1:\n scaler = pickle.load(file1)\n\n# Define main page\ndef run():\n # Read dataset\n data = pd.read_csv('https://raw.githubusercontent.com/andik-alfauzi/Final-Project/main/sample_dataset_timeseries_noarea.csv')\n data = data.groupby('week_end_date', as_index=False)['quantity'].sum()\n st.dataframe(data)\n\n # Change into datetime\n data['week_end_date'] = pd.to_datetime(data['week_end_date'], format='%Y-%m-%d')\n\n # Create a dataframe\n sales = data.groupby('week_end_date')['quantity'].sum()\n\n # Create A New Dataset with `window=4`\n window = 4\n X = []\n y = []\n\n for index in range(0, len(sales)-window):\n X.append(sales[index : window + index])\n y.append(sales[window + index])\n\n X = np.array(X)\n y = np.array(y)\n\n with st.form(key='time-series-prediction'):\n # Button submit\n submitted = st.form_submit_button('Predict 4 Next Week Sales')\n\n if submitted:\n # Define function forcasting\n def forecasting(week):\n sales_forecast = sales.copy()\n window = 4\n for i in range(week):\n X = np.array(sales_forecast[-window:].values).reshape(1, -1)\n X_scaled = scaler.transform(X)\n\n # add 7 last day into dataset\n last_date = sales_forecast.index[-1]\n new_date = last_date + pd.Timedelta(days=7)\n\n # make sure the date are valid\n while True:\n _, last_day = calendar.monthrange(new_date.year, new_date.month)\n if new_date.day <= last_day:\n break\n new_date -= pd.Timedelta(days=1)\n\n sales_forecast[new_date] = round(SVMModel.predict(X_scaled)[0])\n\n return sales_forecast\n \n # Forecasting sales for the Next 4 weeks\n sales_forecast = forecasting(4)\n \n # Displaying forecast\n st.dataframe(sales_forecast.tail(4))\n\nif __name__ == '__main__':\n run()","repo_name":"andik-alfauzi/Final-Project","sub_path":"deployment/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21139193410","text":"from typing import List\n\nimport numpy as np\nimport pandas as pd\nimport warnings as wrn\nfrom nilearn.connectome import ConnectivityMeasure\nfrom sklearn.decomposition import PCA\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import 
SGDClassifier\nfrom sklearn.metrics import accuracy_score, recall_score, precision_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC, SVC\n\nfrom icns.common import Phenotypic, Institute, Atlas, Features, Target\nfrom icns.data_functions import create_training_data\nfrom icns.plotting import plot_institute_scores\n\n\n# Helper functions\ndef create_classifier(classifier_name):\n    # compare strings with '==', not 'is': identity comparison of str literals is unreliable\n    if classifier_name == 'LinearSVC':\n        return LinearSVC(random_state=42)\n    elif classifier_name == 'RandomForest':\n        return RandomForestClassifier(random_state=42, n_estimators=200)\n    elif classifier_name == 'SVC':\n        return SVC(kernel='poly')\n\n\ndef execute_experiment(institutes: List[Institute],\n                       atlas: Atlas,\n                       connectivity_kind,\n                       pca_set_up,\n                       classifier_name,\n                       features_composition) -> None:\n    phenotypic_features = list() if features_composition is Features.TIME_SERIES else [Phenotypic.AGE,\n                                                                                       Phenotypic.GENDER,\n                                                                                       Phenotypic.FULL4_IQ]\n\n    institute_scores = dict()\n    for institute in institutes:\n\n        institute_data: dict = create_training_data(institute, atlas,\n                                                    phenotypic_features, smoothed=True)\n        time_series_list = list()\n        phenotypic_list = list()\n        adhd_labels = list()\n\n        for patient_id in institute_data.keys():\n            patient_data = institute_data[patient_id]\n            # Time-series\n            time_series_df: pd.DataFrame = patient_data['time_series']\n            time_series_matrix: np.ndarray = time_series_df.values\n            time_series_list.append(time_series_matrix)\n            # Target labels\n            phenotypic: pd.Series = patient_data['phenotypic']\n            if target_domain is Target.TD_ADHD:\n                phenotypic[Phenotypic.DX] = 1 if phenotypic[Phenotypic.DX] != 0 else phenotypic[Phenotypic.DX]\n            adhd_labels.append(phenotypic[Phenotypic.DX])\n            # phenotypic\n            phenotypic_list.append(phenotypic.values[:-1])\n\n        # Image features are a vectorization of the correlation from time series\n        correlation_measure = ConnectivityMeasure(kind=connectivity_kind, vectorize=True)\n        connectivity_vector = correlation_measure.fit_transform(time_series_list)\n\n        # Perform PCA\n        pca_transformed = connectivity_vector\n        if pca_set_up > 0:\n            pca = PCA(n_components=pca_set_up, random_state=42)\n            pca_transformed = pca.fit_transform(connectivity_vector)\n\n        # Optionally combine features\n        patient_features = None\n        if features_composition is Features.TIME_SERIES:\n            patient_features = pca_transformed\n        elif features_composition is Features.TIME_SERIES_AND_PHENOTYPIC:\n            scaled_phenotypic = StandardScaler().fit_transform(phenotypic_list)\n            patient_features = np.concatenate((pca_transformed, scaled_phenotypic), axis=1)\n\n        X_train, X_test, y_train, y_test = train_test_split(patient_features,\n                                                            adhd_labels,\n                                                            test_size=0.3,\n                                                            random_state=42)\n\n        # Train\n        classifier = create_classifier(classifier_name)\n        classifier.fit(X_train, y_train)\n\n        # Predict\n        y_train_predicted = classifier.predict(X_train)\n        y_majority = [np.argmax(np.bincount(y_train))] * len(y_test)\n        y_test_predicted = classifier.predict(X_test)\n\n        # Collect results\n        train_accuracy = accuracy_score(y_train, y_train_predicted)\n        chance_accuracy = accuracy_score(y_test, y_majority)\n        test_accuracy = accuracy_score(y_test, y_test_predicted)\n        precision = precision_score(y_test, y_test_predicted)\n        recall = recall_score(y_test, y_test_predicted)\n\n        print(f'Institute: {str(institute)}')\n        print(f'Train accuracy {train_accuracy}')\n        print(f'Chance accuracy {chance_accuracy}')\n        print(f'Test accuracy {test_accuracy}')\n        print(f'Precision 
{precision}')\n print(f'Recall {recall}')\n\n institute_scores[str(institute)] = {'accuracy': test_accuracy,\n 'precision': precision,\n 'recall': recall,\n 'chance': chance_accuracy}\n\n # Store results\n file_name = f'score_pca{pca_set_up}_{str(atlas)}_{classifier_name}_{connectivity_kind}_{str(features_composition)}'\n plot_institute_scores(institute_scores,\n filename=f'{file_name}.png'\n .replace(' ', '-'),\n do_show=False)\n\n\ntarget_domain = Target.TD_ADHD\n\n# Experiment domain\nconnectivity_kinds = ['partial correlation']\nex_classifiers = ['LinearSVC', 'RandomForest']\nex_institutes = [Institute.PEKING, Institute.NYU, Institute.OHSU]\natlas_types = [Atlas.AAL, Atlas.CC200]\nex_compositions = [Features.TIME_SERIES, Features.TIME_SERIES_AND_PHENOTYPIC]\npca_set_ups = [3, 4]\n\nfor ex_composition in ex_compositions:\n for ex_classifier in ex_classifiers:\n for ex_pca_set_up in pca_set_ups:\n for ex_connectivity_kind in connectivity_kinds:\n for ex_atlas in atlas_types:\n execute_experiment(ex_institutes,\n ex_atlas,\n ex_connectivity_kind,\n ex_pca_set_up,\n ex_classifier,\n ex_composition)\n","repo_name":"alessandrostranieri/icns_adhd_fmri","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":6239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"25612816905","text":"from graph.graph import Graph, Vertex, Edge\n\n\n# Node can be successfully added to the graph\ndef test_add_node_to_graph():\n graph = Graph()\n actual = graph.add_node('a').value\n expected = 'a'\n assert actual == expected\n\n\n# An edge can be successfully added to the graph\ndef test_add_edge_to_graph():\n graph = Graph()\n a = graph.add_node('a')\n b = graph.add_node('b')\n graph.add_edge(a, b)\n assert True\n\n\n# A collection of all nodes can be properly retrieved from the graph\ndef test_node_collection():\n graph = Graph()\n a = graph.add_node('a')\n b = graph.add_node('b')\n c = graph.add_node('c')\n actual = graph.get_nodes()\n assert actual == [a, b, c]\n\n\n# All appropriate neighbors can be retrieved from the graph\ndef test_get_neighbors():\n graph = Graph()\n a = graph.add_node('a')\n b = graph.add_node('b')\n c = graph.add_node('c')\n e_1 = graph.add_edge(a, b, 2)\n e_2 = graph.add_edge(a, c, 2)\n actual = graph.get_neighbors(a)\n assert actual == [e_1, e_2]\n\n\ndef test_get_neighbors_none():\n graph = Graph()\n a = graph.add_node('a')\n b = graph.add_node('b')\n c = graph.add_node('c')\n actual = graph.get_neighbors(a)\n assert actual == []\n\n\n# Neighbors are returned with the weight between nodes included\ndef test_get_neighbors_weight():\n graph = Graph()\n a = graph.add_node('a')\n b = graph.add_node('b')\n c = graph.add_node('c')\n e_1 = graph.add_edge(a, b, 3)\n e_2 = graph.add_edge(a, c, 4)\n neighbor_1 = graph.get_neighbors(a)[0]\n neighbor_2 = graph.get_neighbors(a)[1]\n assert neighbor_1.weight == 3 and neighbor_2.weight == 4\n\n\n# The proper size is returned, representing the number of nodes in the graph\ndef test_graph_size():\n graph = Graph()\n a = graph.add_node('a')\n b = graph.add_node('b')\n actual = graph.size()\n assert actual == 2\n\n\n# A graph with only one node and edge can be properly returned\ndef test_one_node_one_edge():\n graph = Graph()\n a = graph.add_node('a')\n edge = graph.add_edge(a, a)\n actual = graph.get_neighbors(a)\n assert actual == [edge]\n\n\n# An empty graph properly returns None\ndef test_empty_graph():\n graph = Graph()\n actual = graph.get_nodes()\n 
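# Note the contract asserted here: an empty graph presumably yields None rather\n    # than an empty list, while test_node_collection expects a list once nodes exist.\n    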
assert actual is None","repo_name":"WalaaAlrefai/data-structures-and-algorithms","sub_path":"tests/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"34178591809","text":"from os import name\nfrom motor.motor_asyncio import AsyncIOMotorCollection, AsyncIOMotorCursor\nfrom koala.storage.storage import *\nfrom motor.motor_asyncio import AsyncIOMotorClient\n\n\nclass RecordStorageMongo(RecordStorage[RecordType]):\n    def __init__(\n        self,\n        record_type: Type[RecordType],\n        meta: RecordMetaData,\n        db: AsyncIOMotorCollection,\n    ):\n        self.__record_type = record_type\n        self.__meta = meta\n        self.__db = db\n        pass\n\n    @property\n    def table_name(self) -> str:\n        return self.__meta.table_name\n\n    @property\n    def unique_key(self) -> KeyInfo:\n        return self.__meta.key_info\n\n    def __get_filter(\n        self,\n        record: Optional[dict],\n        key: Optional[TypeID] = None,\n        key2: Optional[TypeID] = None,\n    ) -> dict:\n        key_info = self.__meta.key_info\n\n        mongo_filter = {}\n        name_1 = key_info.key_name\n        mongo_filter[name_1] = {\"$eq\": record.get(name_1) if record else key}\n        assert mongo_filter[name_1]\n\n        if key_info.key_name_2:\n            name_2 = key_info.key_name_2  # was key_info.key_name, which re-read the first key column\n            mongo_filter[name_2] = {\"$eq\": record.get(name_2) if record else key2}  # was 'key', ignoring key2\n            assert mongo_filter[name_2]\n        return mongo_filter\n\n    async def insert_one(self, record: RecordType) -> object:\n        content = record.to_dict()\n        mongo_filter = self.__get_filter(content)\n        update = {\"$set\": content}\n        result = await self.__db.update_one(mongo_filter, update, upsert=True)\n        return result\n\n    async def delete_one(self, key: TypeID, key2: Optional[TypeID] = None) -> object:\n        mongo_filter = self.__get_filter(None, key, key2)\n        result = await self.__db.delete_many(mongo_filter)\n        return result\n\n    async def find(\n        self, key1: TypeID, key2: Optional[TypeID] = None\n    ) -> List[RecordType]:\n        result: List[RecordType] = []\n        mongo_filter = self.__get_filter(None, key1, key2)\n        cursor: AsyncIOMotorCursor = self.__db.find(mongo_filter)\n        for document in await cursor.to_list(1024):\n            obj = self.__record_type.parse_obj(document)\n            result.append(obj)\n        return result\n\n    async def find_one(self, key1: TypeID) -> Optional[RecordType]:\n        document = await self.__db.find_one(self.__get_filter(None, key1))\n        if document:\n            obj = self.__record_type.parse_obj(document)\n            return obj\n        return None\n\n\nclass MongoStorageFactory(IStorageFactory):\n    def __init__(self):\n        self.connection_string = \"\"\n        self.db_name = \"\"\n        self.mongo: Optional[AsyncIOMotorClient] = None\n        pass\n\n    def init_factory(self, *args, **kwargs):\n        self.connection_string = kwargs.get(\"connection_str\")\n        self.db_name = kwargs.get(\"db\")\n        self.mongo = AsyncIOMotorClient(self.connection_string)\n        pass\n\n    def get_storage(self, record_type: Type[RecordType]) -> RecordStorage[RecordType]:\n        meta_info = get_record_meta(record_type)\n        if not meta_info:\n            raise Exception(\"cannot find %s's meta data\" % record_type)\n        if not self.mongo:\n            raise Exception(\"mongo db not init\")\n        table = self.mongo[self.db_name][meta_info.table_name]\n        obj = RecordStorageMongo(record_type, meta_info, table)\n        return obj\n","repo_name":"egmkang/koala","sub_path":"koala/storage/storage_mongo.py","file_name":"storage_mongo.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"3"}
+{"seq_id":"73527113041","text":"\"\"\"⦁\tДано натуральное число N. Определить, является ли оно простым.\"\"\"\n\nn = int(input(\"Введите число N \"))\np = True\nfor i in range(2, n // 2 + 1):\n if n % i == 0:\n p = False\nif not p:\n print(\"Не простое\")\nelse:\n print(\"Простое\")\n","repo_name":"Zyoger/My-First-Repository","sub_path":"Python/UDEMI lesson/Exercise25.py","file_name":"Exercise25.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"38246073630","text":"from LSTMModel import *\nfrom BatchGenerator import *\nimport matplotlib.pyplot as pl\n\ndef Gain(pred, values):\n actions = np.argmax(pred, axis=1)\n order = [0,0]\n gain = 0\n history = []\n for i in range(len(actions)):\n a = 1 if actions[i] == 2 else (-1 if actions[i] == 0 else 0)\n if a and order[0] != a:\n g = order[0]*(values[i] - order[1])\n gain += g\n print('[time {}] : change possession. pos={}, gain={}'.format(i, a, g))\n order = [a, values[i]]\n history.append(gain)\n \n print('Total gain = {}'.format(gain))\n pl.subplot(311)\n pl.plot(values)\n pl.subplot(312)\n pl.plot(actions)\n pl.subplot(313)\n pl.plot(history)\n pl.show()\n \n\nmodel = LSTMModel(5, 3, 90, input_dim=5, output_dim=3, alpha=0.00005)\n\npath = input('path to the model : ')\n\nprint('loading model ...')\nmodel.restore(path)\nprint('complete')\n\ngen = BatchGenerator(30, indices=[['sma', [10]], ['sma', [30]], ['rsi', [10]], ['roc', [10]]],\n test_size=0.05,\n domains=[['GBPJPY-30', 100000]],#[['7974', 2000], ['7203', 2000], ['6752', 2000], ['6504', 2000], ['4506', 2000]],#\n threshould=0.001, use_fundamental=False,\n freq=4/365)#1/120)\nx, y = gen.get_test_data(90, cut=False)\n\nwhile True:\n symbol = input('The symbol you want to predict : ')\n if symbol == 'end':\n break\n\n pred = model.predict(x[symbol][-90:])\n pl.plot(pred)\n pl.show()\n pred = model.predict(x[symbol])\n Gain(pred, gen.get_price(symbol)[gen.splitPoint[symbol]:])\n","repo_name":"21335732529sky/capstone","sub_path":"predict_main.py","file_name":"predict_main.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30007163244","text":"import time\nimport datetime\nfrom collections import OrderedDict\nfrom itertools import groupby, ifilter, ifilterfalse\nfrom meta import *\nfrom media import Network\nfrom sqlalchemy import Index\nfrom sqlalchemy.orm import column_property, eagerload, aliased\nfrom sqlalchemy import select, func\n \nclass ExternalID(AbstractConcreteBase, Base):\n id = Column(Integer, primary_key=True)\n external_id = Column(String)\n \n @declared_attr\n def source_name(cls):\n return Column(Integer, ForeignKey('source.name'))\n \n @declared_attr\n def league_id(cls):\n return Column(Integer, ForeignKey('league.id'))\n \n @declared_attr\n def league(cls):\n return relationship('League', uselist=False)\n \nclass Season(Base):\n id = Column(Integer, primary_key=True)\n league_id = Column(Integer, ForeignKey('league.id'), index=True)\n year = Column(Integer)\n preseason_start = Column(Date)\n preseason_end = Column(Date)\n regular_start = Column(Date)\n regular_end = Column(Date)\n postseason_start = Column(Date)\n postseason_end = Column(Date)\n \n league = relationship('League')\n \n def __repr__(self):\n return '%s - %s' % (self.league, self.year)\n\nclass GameTeamScore(Base):\n id = Column(Integer, primary_key=True)\n game_id = Column(Integer, 
ForeignKey('game.id'), index=True)\n team_id = Column(Integer, ForeignKey('team.id'), index=True)\n period = Column(Integer)\n score = Column(Integer)\n \n team = relationship('Team', uselist=False)\n game = relationship('Game', uselist=False)\n \n def __repr__(self):\n return '%s - %d' % (self.team, self.score)\n \nclass Game(Base):\n id = Column(Integer, primary_key=True)\n game_time = Column(DateTime)\n game_type = Column(String)\n attendance = Column(Integer)\n duration = Column(Time)\n season_id = Column(Integer, ForeignKey('season.id'), index=True)\n home_team_id = Column(Integer, ForeignKey('team.id'), index=True)\n away_team_id = Column(Integer, ForeignKey('team.id'), index=True)\n league_id = Column(Integer, ForeignKey('league.id'), index=True)\n venue_id = Column(Integer, ForeignKey('venue.id'))\n network_abbr = Column(Integer, ForeignKey('network.abbr'))\n \n home_score = column_property(\n select([func.sum(GameTeamScore.score)])\\\n .where(and_(GameTeamScore.team_id==home_team_id, \n GameTeamScore.game_id==id))\n )\n away_score = column_property(\n select([func.sum(GameTeamScore.score)])\\\n .where(and_(GameTeamScore.team_id==away_team_id, \n GameTeamScore.game_id==id))\n )\n \n espn_id = relationship('GameExternalID', uselist=False)\n league = relationship('League', uselist=False)\n home_team = relationship('Team', \n primaryjoin='Game.home_team_id==Team.id', \n uselist=False)\n away_team = relationship('Team', \n primaryjoin='Game.away_team_id==Team.id', \n uselist=False)\n home_scores = relationship('GameTeamScore', primaryjoin='and_'\n '(Game.id==GameTeamScore.game_id,' \n 'Game.home_team_id==GameTeamScore.team_id)',\n order_by='GameTeamScore.period',\n cascade='all,delete')\n away_scores = relationship('GameTeamScore', primaryjoin='and_'\n '(Game.id==GameTeamScore.game_id,' \n 'Game.away_team_id==GameTeamScore.team_id)',\n order_by='GameTeamScore.period',\n cascade='all,delete')\n season = relationship('Season', uselist=False)\n venue = relationship('Venue', uselist=False)\n game_players = relationship('GamePlayer')\n players = relationship('Player', secondary='game_player', \n cascade='all,delete')\n officials = relationship('Official', secondary='game_official', \n cascade='all,delete')\n stats = relationship('PlayerStat', cascade='all,delete')\n \n @classmethod\n def links(self):\n game_links = ['boxscore']\n #game_link += ['plays', 'shots', 'conversation', 'media']\n return game_links\n \n @classmethod\n def game_types(self): \n game_types_dict = {\n 'pre': 'preseason',\n 'reg': 'regular',\n 'post': 'postseason',\n }\n return game_types_dict\n \n @classmethod\n def get_game(cls, game_id):\n return DBSession.query(cls)\\\n .options(eagerload('away_team'),\n eagerload('home_team'),\n eagerload('home_scores'),\n eagerload('away_scores'),\n eagerload('officials'),)\\\n .get(game_id)\n \n @classmethod\n def get_date(cls, league, date):\n day = datetime.timedelta(days=1)\n return DBSession.query(cls)\\\n .options(eagerload('home_scores'),\n eagerload('away_scores'),\n eagerload('home_team'),\n eagerload('away_team'),\n eagerload('league'))\\\n .filter(cls.league==league,\n cls.game_time.between(date, date+day))\\\n .order_by(cls.game_time)\\\n .all()\n \n @classmethod\n def get_season(cls, league, game_type, year):\n return DBSession.query(cls)\\\n .options(eagerload('away_team'),\n eagerload('home_team'),\n eagerload('home_scores'),\n eagerload('away_scores'),\n eagerload('season'),\n eagerload('league'),\n eagerload('away_team.league'),\n eagerload('home_team.league'))\\\n 
.filter(cls.league==league, \n cls.game_type==game_type,\n cls.season.has(year=year))\\\n .order_by(cls.game_time)\n \n def periods(self):\n periods = len(self.home_scores)\n periods_list = []\n for period in range(1, periods+1):\n if period > 4:\n period -= 4\n period = '%d OT' % period\n else:\n period = '%d' % period\n periods_list.append(period)\n return periods_list\n \n def num_periods(self):\n periods = len(self.home_scores)\n return periods\n \n def winner(self):\n if self.away_score > self.home_score:\n team = self.away_team\n elif self.away_score == self.home_score:\n team = None\n else:\n team = self.home_team\n return team\n \n def week(self):\n difference = self.game_time.date() - self.season.regular_start\n week = difference.days / 7 + 1\n return week\n \n def __repr__(self):\n return '%s @ %s %s' % (self.away_team.abbr, self.home_team.abbr, \n self.game_time.strftime('%m/%d/%y %I:%M %p'))\n \n def url(self):\n return '/%s/%s/games/%d/' % (self.league.sport_name, \n self.league.abbr, self.id)\n \n @classmethod\n def sorted_stats(cls, players):\n key = lambda x: x.team\n stats = groupby(sorted(list(players), key=key), key=key)\n stats = {team: list(players) for team, players in stats}\n stats = {team: {\n 'starter': [p for p in players if p.starter],\n 'bench': [p for p in players \n if not p.starter and not p.status == 'dnp'], \n 'dnp': [p for p in players if p.status == 'dnp'],\n } for team, players in stats.iteritems()}\n return stats\n \n @classmethod\n def team_schedule(cls, team, game_type, year):\n return DBSession.query(cls)\\\n .options(eagerload('away_team'),\n eagerload('home_team'),\n eagerload('home_scores'),\n eagerload('away_scores'),)\\\n .filter(or_(cls.home_team==team, \n cls.away_team==team),\n cls.game_type==game_type,\n cls.season.has(year=year))\\\n .order_by(Game.game_time)\n\nclass GameOfficial(Base):\n game_id = Column(Integer, ForeignKey('game.id'), primary_key=True)\n official_id = Column(Integer, ForeignKey('official.id'), primary_key=True)\n official_type = Column(String)\n \n official = relationship('Official', uselist=False)\n game = relationship('Game', uselist=False)\n \nclass GameExternalID(ExternalID):\n game_id = Column(Integer, ForeignKey('game.id'))\n boxscore = Column(Boolean)\n shot_cords = Column(Boolean)\n pbp = Column(Boolean)\n \n game = relationship('Game')\n \n __mapper_args__ = {'polymorphic_identity': 'game', 'concrete': True}\n \n def __repr__(self):\n return '%s' % self.external_id\n \nclass GamePlayer(Base):\n id = Column(Integer, primary_key=True)\n game_id = Column(Integer, ForeignKey('game.id'), index=True)\n team_id = Column(Integer, ForeignKey('team.id'), index=True)\n player_id = Column(Integer, ForeignKey('player.id'), index=True)\n starter = Column(Boolean)\n status = Column(String)\n \n game = relationship('Game', uselist=False)\n player = relationship('Player', uselist=False)\n team = relationship('Team', uselist=False)\n stats = relationship('PlayerStat', \n primaryjoin='and_('\n 'GamePlayer.player_id==PlayerStat.player_id,'\n 'GamePlayer.team_id==PlayerStat.team_id,'\n 'GamePlayer.game_id==PlayerStat.game_id)',\n foreign_keys=[game_id, team_id, player_id])\n \n \n @classmethod\n def sum_query(cls):\n sum_query = [\n func.count(cls.game_id).label('gp')\n ]\n return sum_query\n \n @classmethod\n def dnps(cls, game):\n return DBSession.query(cls)\\\n .options(eagerload('player'),\n eagerload('player.positions'),\n eagerload('team'),\n eagerload('team.league'))\\\n .join(GamePlayerDNP)\\\n .filter(cls.game==game)\n \n 
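# The inner join against GamePlayerDNP doubles as the did-not-play filter: only\n    # GamePlayer rows with a matching DNP record survive it, and the eager loads\n    # keep the per-player lookups from turning into N+1 queries.\n    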
@classmethod\n    def get_game(cls, game):\n        return DBSession.query(GamePlayer)\\\n                        .options(eagerload('player'),\n                                 eagerload('team'))\\\n                        .filter(GamePlayer.game==game)\n    \n    def __repr__(self):\n        return '%s %s' % (self.player, self.game)\n\nclass GamePlayerDNP(Base):\n    id = Column(Integer, ForeignKey('game_player.id'), primary_key=True)\n    reason = Column(String)\n    \n    game_player = relationship('GamePlayer', uselist=False, lazy='joined',\n                               backref=backref('dnp', lazy='joined', \n                                               uselist=False))\n\n    \nclass PlayerStat(Base):\n    id = Column(Integer, primary_key=True)\n    game_id = Column(Integer, ForeignKey('game.id'), index=True)\n    team_id = Column(Integer, ForeignKey('team.id'), index=True)\n    player_id = Column(Integer, ForeignKey('player.id'), index=True)\n    stat_type = Column(String)\n    \n    game = relationship('Game', uselist=False)\n    player = relationship('Player', uselist=False)\n    team = relationship('Team', uselist=False)\n    \n    __mapper_args__ = {'polymorphic_on': stat_type}\n    \n    @classmethod\n    def model_map(cls, sport, stat_type=None):\n        map_ = {\n            'football': OrderedDict([\n                ('passing', FootballOffensiveStat),\n                ('rushing', FootballOffensiveStat),\n                ('receiving', FootballOffensiveStat),\n                ('return', FootballOffensiveStat),\n                ('kicking', FootballKickingStat),\n                ('punting', FootballPuntingStat),\n                #('defense', FootballTeamStat),\n            ]),\n            'basketball': BasketballBoxScoreStat,\n        }\n        if stat_type:\n            return map_[sport][stat_type]\n        else:\n            return map_[sport]\n    \n    @classmethod\n    def full_map(cls):\n        return OrderedDict([\n            ('passing', FootballPassingStat),\n            ('rushing', FootballRushingStat),\n            ('receiving', FootballReceivingStat),\n            ('fumble', FootballFumbleStat),\n            ('return', FootballReturnStat),\n            ('kicking', FootballKickingStat),\n            ('punting', FootballPuntingStat),\n            ('interception', FootballInterceptionStat),\n            ('defensive', FootballDefensiveStat),\n        ])\n    \n    @classmethod\n    def queries(cls, sport, stat_type):\n        q = []\n        if sport == 'basketball':\n            # append one (model, aggregate-query) pair; list.append() takes a single argument\n            q.append((BasketballBoxScoreStat, BasketballBoxScoreStat.sum_query()))\n        return q\n    \n    @classmethod\n    def player_stats(cls, sport, league, game_type, player):\n        stats = DBSession.query(cls)\\\n                         .options(eagerload('game'), \n                                  eagerload('game.season'))\\\n                         .with_polymorphic('*')\\\n                         .join(Game)\\\n                         .filter(cls.player==player,\n                                 cls.stat_type!='offense',\n                                 Game.game_type==game_type)\n        return stats\n    \n    @classmethod\n    def get_game(cls, game):\n        return DBSession.query(cls)\\\n                        .options(eagerload('player'),\n                                 eagerload('team'))\\\n                        .with_polymorphic('*')\\\n                        .filter(cls.game_id==game.id)\\\n                        .all()\n    \n    @classmethod\n    def sorted_game_stats(cls, stats):\n        key = lambda x: x.team\n        stats = groupby(sorted(list(stats), key=key), key=key)\n        stats = {team:list(stats) for team, stats in stats}\n        game_stats = {}\n        for team, stat_list in stats.iteritems():\n            game_stats[team] = {}\n            key = lambda x: x.stat_type\n            stat_list = sorted(stat_list, key=key)\n            for stat_type, these_stats in groupby(stat_list, key=key):\n                if stat_type == 'return':\n                    game_stats[team]['kick_return'] = []\n                    game_stats[team]['punt_return'] = []\n                    for stat in list(these_stats):\n                        return_type = '%s_return' % stat.return_type\n                        game_stats[team][return_type].append(stat)\n                else:\n                    game_stats[team][stat_type] = list(these_stats)\n        return game_stats \n    \n    @classmethod\n    def player_sum(cls, sport, league, game_type, player, season):\n        return DBSession.query(*cls.sum_query())\\\n                        .join(Game, Season)\\\n                        .filter(cls.player==player, \n                                Game.game_type==game_type, \n                                Season.year==season)\\\n                        .one()\n    \n    
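# player_sum() resolves cls.sum_query() at call time, and PlayerStat itself\n    # defines no sum_query(), so this is presumably meant to be called on a\n    # concrete subclass (e.g. FootballPassingStat), never on PlayerStat directly.\n    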
@classmethod\n    def team_season(cls, team, season_year):\n        return DBSession.query(cls)\\\n                        .with_polymorphic('*')\\\n                        .join(Game, Season)\\\n                        .filter(Season.year==season_year,\n                                cls.team==team)\n    \nclass FootballPassingStat(PlayerStat):\n    id = Column(Integer, ForeignKey('player_stat.id'), primary_key=True)\n    attempts = Column(Integer)\n    completions = Column(Integer)\n    yards = Column(Integer)\n    touchdowns = Column(Integer)\n    interceptions = Column(Integer)\n    sacks = Column(Integer)\n    sack_yards = Column(Integer)\n    \n    __mapper_args__ = {'polymorphic_identity': 'passing'}\n    \n    @classmethod\n    def abbr(cls):\n        return [\n            (cls.attempts, 'att'),\n            (cls.completions, 'comp'),\n            (cls.yards, 'yd'),\n            (cls.touchdowns, 'td'),\n            (cls.interceptions, 'int'),\n            (cls.sacks,'sk'),\n            (cls.sack_yards, 'sk_yd')\n        ]\n    \n    @classmethod\n    def sum_query(cls):\n        return [func.sum(k).label(v) for k, v in cls.abbr()]\n    \n    @classmethod\n    def sum_q(cls):\n        return 'yd'\n    \n    @classmethod\n    def full_cats(cls):\n        c = [(k.name, v) for k, v in cls.abbr()]\n        c.insert(3, ('yards per attempt', 'y/a'))\n        c.insert(3, ('yards per completion', 'y/c'))\n        c.insert(2, ('completion percent', 'c%'))\n        c.append(('yards per game', 'y/g'))\n        return c\n    \n    @classmethod\n    def calc_additional(cls, players):\n        p_list = []\n        for p in players:\n            p = p._asdict()\n            stats = []\n            for cat, abbr in cls.full_cats():\n                if abbr in p:\n                    s = p[abbr]\n                elif abbr == 'y/g':\n                    s = round(float(p['yd'])/p['gp'], 1)\n                elif abbr == 'c%':\n                    s = round(float(p['comp'])/p['att']*100, 2)  # completions over attempts, not the inverse\n                elif abbr == 'y/c':\n                    s = round(float(p['yd'])/p['comp'], 1)\n                elif abbr == 'y/a':\n                    s = round(float(p['yd'])/p['att'], 1)\n                stats.append(s)\n            p['stats'] = stats\n            p_list.append(p)\n        players = sorted(p_list, key=lambda x: x['stats'][-1], reverse=True)\n        return players\n\nclass FootballRushingStat(PlayerStat):\n    id = Column(Integer, ForeignKey('player_stat.id'), primary_key=True)\n    attempts = Column(Integer)\n    yards = Column(Integer)\n    touchdowns = Column(Integer)\n    longest = Column(Integer)\n    \n    __mapper_args__ = {'polymorphic_identity': 'rushing'}\n    \n    @classmethod\n    def abbr(cls):\n        return [\n            (cls.attempts, 'att'),\n            (cls.yards, 'yd'),\n            (cls.touchdowns, 'td'),\n            (cls.longest, 'long'),\n        ]\n    \n    @classmethod\n    def sum_query(cls):\n        s_q = cls.abbr()[:-1]\n        s_q = [func.sum(k).label(v) for k, v in s_q]\n        s_q.append(func.max(cls.longest).label('long'))\n        return s_q\n    \n    @classmethod\n    def sum_q(cls):\n        return 'yd'\n    \nclass FootballReceivingStat(PlayerStat):\n    id = Column(Integer, ForeignKey('player_stat.id'), primary_key=True)\n    receptions = Column(Integer)\n    yards = Column(Integer)\n    touchdowns = Column(Integer)\n    longest = Column(Integer)\n    targets = Column(Integer)\n\n    __mapper_args__ = {'polymorphic_identity': 'receiving'}\n\n    @classmethod\n    def abbr(cls):\n        return [\n            (cls.receptions, 'rec'),\n            (cls.yards, 'yd'),\n            (cls.touchdowns, 'td'),\n            (cls.targets, 'tgt'),\n            (cls.longest, 'long'),\n        ]\n    \n    @classmethod\n    def sum_query(cls):\n        s_q = cls.abbr()[:-1]\n        s_q = [func.sum(k).label(v) for k, v in s_q]\n        s_q.append(func.max(cls.longest).label('long'))\n        return s_q\n\n    @classmethod\n    def sum_q(cls):\n        return 'yd'\n    \nclass FootballFumbleStat(PlayerStat):\n    id = Column(Integer, ForeignKey('player_stat.id'), primary_key=True)\n    fumbles = Column(Integer)\n    recovered = Column(Integer)\n    lost = Column(Integer)\n    \n    __mapper_args__ = {'polymorphic_identity': 'fumble'}\n    \n    @classmethod\n    def abbr(cls):\n        return [\n            (cls.fumbles, 'tot'),\n            (cls.recovered, 'rec'),\n            (cls.lost, 'lost')\n        ]\n\n    
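# sum_query() below turns each (column, label) pair from abbr() into a SUM\n    # aggregate; the same abbr()/sum_query() pattern repeats for the stat classes\n    # that follow (some also add a MAX for their 'longest' column).\n    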
@classmethod\n def sum_query(cls):\n return [func.sum(k).label(v) for k, v in cls.abbr()]\n \n @classmethod\n def sum_q(cls):\n return 'lost'\n\nclass FootballDefensiveStat(PlayerStat):\n id = Column(Integer, ForeignKey('player_stat.id'), primary_key=True)\n tackles = Column(Integer)\n solo = Column(Integer)\n sacks = Column(Integer)\n tackles_for_loss = Column(Integer)\n pass_deflections = Column(Integer)\n qb_hits = Column(Integer)\n touchdowns = Column(Integer)\n \n __mapper_args__ = {'polymorphic_identity': 'defensive'}\n\n @classmethod\n def abbr(cls):\n return [\n (cls.tackles, 'tkl'),\n (cls.solo, 'solo'),\n (cls.sacks, 'sk'),\n (cls.tackles_for_loss, 'tfl'),\n (cls.pass_deflections, 'pd'),\n (cls.qb_hits, 'hits'),\n (cls.touchdowns, 'td'),\n ]\n \n @classmethod\n def sum_query(cls):\n return [func.sum(k).label(v) for k, v in cls.abbr()]\n \n @classmethod\n def sum_q(cls):\n return 'tkl'\n \nclass FootballReturnStat(PlayerStat):\n id = Column(Integer, ForeignKey('player_stat.id'), primary_key=True)\n total = Column(Integer)\n yards = Column(Integer)\n longest = Column(Integer)\n touchdowns = Column(Integer)\n return_type = Column(String)\n \n __mapper_args__ = {'polymorphic_identity': 'return'}\n \n @classmethod\n def abbr(cls):\n return [\n (cls.total, 'tot'),\n (cls.yards, 'yd'),\n (cls.touchdowns, 'td'),\n (cls.longest, 'long'),\n ]\n \n @classmethod\n def sum_query(cls):\n return [func.sum(k).label(v) for k, v in cls.abbr()] \n \n @classmethod\n def sum_q(cls): \n return 'tot' \n\nclass FootballKickingStat(PlayerStat):\n id = Column(Integer, ForeignKey('player_stat.id'), primary_key=True)\n fg_made = Column(Integer)\n fg_attempts = Column(Integer)\n longest = Column(Integer)\n xp_made = Column(Integer)\n xp_attempts = Column(Integer)\n \n __mapper_args__ = {'polymorphic_identity': 'kicking'}\n \n @classmethod\n def abbr(cls):\n return [\n (cls.fg_made, 'fgm'),\n (cls.fg_attempts, 'fga'),\n (cls.xp_made, 'xpm'),\n (cls.xp_attempts, 'xpa')\n ]\n \n @classmethod\n def sum_query(cls):\n s_q = [func.sum(k).label(v) for k, v in cls.abbr()]\n s_q.append(func.max(cls.longest).label('long'))\n return s_q\n \n @classmethod\n def sum_q(cls):\n return 'xpa'\n \n @classmethod\n def full_cats(cls):\n fc = cls.abbr()\n fc.append((cls.longest, 'long'))\n return [(k.name\\\n .replace('fg', 'field goals')\\\n .replace('xp', 'extra points')\\\n .replace('_', ' '), v) for k, v in fc]\n\n @classmethod\n def calc_additional(cls, players, stat_type):\n p_list = []\n cats = cls.full_cats()\n for p in players:\n p = p._asdict()\n p['stats'] = [p[abbr] for k, abbr in cats]\n p_list.append(p)\n players = sorted(p_list, key=lambda x: x['fgm'], reverse=True)\n return players, cats\n\nclass FootballPuntingStat(PlayerStat):\n id = Column(Integer, ForeignKey('player_stat.id'), primary_key=True)\n total = Column(Integer)\n yards = Column(Integer)\n touchbacks = Column(Integer)\n inside_20 = Column(Integer)\n longest = Column(Integer)\n \n __mapper_args__ = {'polymorphic_identity': 'punting'}\n \n @classmethod\n def abbr(cls):\n return [\n (cls.total, 'tot'),\n (cls.yards, 'yd'),\n (cls.touchbacks, 'tb'),\n (cls.inside_20, 'i20')\n ]\n \n @classmethod\n def sum_query(cls):\n s_q = [func.sum(k).label(v) for k, v in cls.abbr()]\n s_q.append(func.max(cls.longest).label('long'))\n return s_q\n \n @classmethod\n def sum_q(cls):\n return 'tot'\n \n @classmethod\n def full_cats(cls):\n fc = cls.abbr()\n fc.append((cls.longest, 'long'))\n return [(k.name.replace('_', ' '), v) \n for k, v in fc]\n\n @classmethod\n def 
calc_additional(cls, players, stat_type):\n p_list = []\n cats = cls.full_cats()\n for p in players:\n p = p._asdict()\n p['stats'] = [p[abbr] for k, abbr in cats]\n p_list.append(p)\n players = sorted(p_list, key=lambda x: x['tot'], reverse=True)\n return players, cats\n\nclass FootballInterceptionStat(PlayerStat):\n id = Column(Integer, ForeignKey('player_stat.id'), primary_key=True)\n total = Column(Integer)\n yards = Column(Integer)\n touchdowns = Column(Integer)\n \n __mapper_args__ = {'polymorphic_identity': 'interception'}\n\n @classmethod\n def abbr(cls):\n return [\n (cls.total, 'tot'),\n (cls.yards, 'yd'),\n (cls.touchdowns, 'td')\n ]\n \n @classmethod\n def sum_query(cls):\n return [func.sum(k).label(v) for k, v in cls.abbr()]\n \n @classmethod\n def sum_q(cls):\n return 'tot'\n \nclass FootballOffensiveStat(PlayerStat):\n id = Column(Integer, ForeignKey('player_stat.id'), primary_key=True)\n passing_attempts = Column(Integer)\n passing_completions = Column(Integer)\n passing_yards = Column(Integer)\n passing_touchdowns = Column(Integer)\n passing_interceptions = Column(Integer)\n passing_sacks = Column(Integer)\n passing_sack_yards = Column(Integer)\n rushing_attempts = Column(Integer)\n rushing_yards = Column(Integer)\n rushing_touchdowns = Column(Integer)\n rushing_longest = Column(Integer)\n receiving_receptions = Column(Integer)\n receiving_yards = Column(Integer)\n receiving_touchdowns = Column(Integer)\n receiving_longest = Column(Integer)\n receiving_targets = Column(Integer)\n fumble_fumbles = Column(Integer)\n fumble_recovered = Column(Integer)\n fumble_lost = Column(Integer)\n punt_return_total = Column(Integer)\n punt_return_yards = Column(Integer)\n punt_return_longest = Column(Integer)\n punt_return_touchdowns = Column(Integer)\n kick_return_total = Column(Integer)\n kick_return_yards = Column(Integer)\n kick_return_longest = Column(Integer)\n kick_return_touchdowns = Column(Integer)\n \n __mapper_args__ = {'polymorphic_identity': 'offense'}\n \n @classmethod\n def abbr(cls, stat_type):\n a_dict = {\n 'passing':[\n (cls.passing_completions, 'pass_comp'),\n (cls.passing_attempts, 'pass_att'),\n (cls.passing_yards, 'pass_yd'),\n (cls.passing_sacks, 'pass_sk'),\n (cls.passing_sack_yards, 'pass_sk_yd'),\n (cls.passing_interceptions, 'pass_int'),\n (cls.passing_touchdowns, 'pass_td')\n ],\n 'rushing': [\n (cls.rushing_attempts, 'rush_att'),\n (cls.rushing_yards, 'rush_yd'),\n (cls.rushing_touchdowns, 'rush_td'),\n ],\n 'receiving': [\n (cls.receiving_receptions, 'rec_rec'),\n (cls.receiving_targets, 'rec_tgt'),\n (cls.receiving_yards, 'rec_yd'),\n (cls.receiving_touchdowns, 'rec_td')\n ],\n 'return': [\n (cls.kick_return_total, 'kick_ret_tot'),\n (cls.kick_return_yards, 'kick_ret_yd'),\n (cls.kick_return_touchdowns, 'kick_ret_td'),\n (cls.punt_return_total, 'punt_ret_tot'),\n (cls.punt_return_yards, 'punt_ret_yd'),\n (cls.punt_return_touchdowns, 'punt_ret_td'),\n ],\n 'fumbles': [\n (cls.fumble_fumbles, 'fum_tot'),\n (cls.fumble_lost, 'fum_lost'),\n ]\n }\n a = a_dict[stat_type]\n if stat_type in ['passing', 'rushing', 'receiving']:\n a += a_dict['fumbles']\n return a\n \n @classmethod\n def sum_query(cls, stat_type):\n sum_query = [func.sum(k).label(v) for k, v in cls.abbr(stat_type)]\n if stat_type == 'return':\n q = 'ret_tot'\n add = cls.kick_return_total+cls.punt_return_total\n sum_query.append(func.sum(add).label('ret_tot'))\n else:\n q = '%s_yd' % stat_type.replace('ing', '').replace('eiv', '')\n return q, sum_query\n \n @classmethod\n def full_cats(cls, 
stat_type):\n        c = [(k.name, v) for k, v in cls.abbr(stat_type)]\n        if stat_type == 'passing':\n            c.insert(3, ('yards per attempt', 'y/a'))\n            c.insert(3, ('yards per completion', 'y/c'))\n            c.insert(2, ('completion percent', 'c%'))\n            c.append(('yards per game', 'y/g'))\n        elif stat_type == 'rushing':\n            c.insert(2, ('yards per attempt', 'y/a'))\n            c.append(('yards per game', 'y/g'))\n        elif stat_type == 'receiving':\n            c.insert(2, ('targets per game', 't/g'))\n            c.insert(2, ('receptions per game', 'r/g'))\n            c.insert(2, ('yards per reception', 'y/r'))\n            c.append(('yards per game', 'y/g'))\n        return c\n    \n    @classmethod\n    def calc_additional(cls, players, stat_type):\n        full_cats = cls.full_cats(stat_type)\n        p_list = []\n        for p in players:\n            p = p._asdict()\n            stats = []\n            for cat, abbr in full_cats:\n                if abbr in p:\n                    s = p[abbr]\n                elif stat_type == 'passing':\n                    if abbr == 'y/g':\n                        s = round(float(p['pass_yd'])/p['gp'], 1)\n                    elif abbr == 'c%':\n                        s = round(float(p['pass_comp'])/p['pass_att']*100, 2)  # completions over attempts, not the inverse\n                    elif abbr == 'y/c':\n                        s = round(float(p['pass_yd'])/p['pass_comp'], 1)\n                    elif abbr == 'y/a':\n                        s = round(float(p['pass_yd'])/p['pass_att'], 1)\n                elif stat_type == 'rushing':\n                    if abbr == 'y/g':\n                        s = round(float(p['rush_yd'])/p['gp'], 1)\n                    elif abbr == 'y/a':\n                        s = round(float(p['rush_yd'])/p['rush_att'], 1)\n                elif stat_type == 'receiving':\n                    if abbr == 'y/g':\n                        s = round(float(p['rec_yd'])/p['gp'], 1)\n                    elif abbr == 'y/r':\n                        s = round(float(p['rec_yd'])/p['rec_rec'], 1)\n                    elif abbr == 't/g':\n                        s = round(float(p['rec_tgt'])/p['gp'], 1)\n                    elif abbr == 'r/g':\n                        s = round(float(p['rec_rec'])/p['gp'], 1)\n                stats.append(s)\n            p['stats'] = stats\n            p_list.append(p)\n        players = sorted(p_list, key=lambda x: x['stats'][-1], reverse=True)\n        return players, full_cats\n    \nclass FootballDefenseSpecialTeamsStat(Base):\n    id = Column(Integer, primary_key=True)\n    game_id = Column(Integer, ForeignKey('game.id'))\n    team_id = Column(Integer, ForeignKey('team.id'))\n    yards_allowed = Column(Integer)\n    sack_total = Column(Integer)\n    sack_yards = Column(Integer)\n    fumbles_forced = Column(Integer)\n    fumbles_recovered = Column(Integer)\n    interceptions = Column(Integer)\n    points_allowed = Column(Integer)\n    touchdowns = Column(Integer)\n    safeties = Column(Integer)\n    \nclass FootballTeamStat(Base):\n    id = Column(Integer, primary_key=True)\n    game_id = Column(Integer, ForeignKey('game.id'))\n    team_id = Column(Integer, ForeignKey('team.id'))\n    passing_first_downs = Column(Integer)\n    rushing_first_downs = Column(Integer)\n    penalty_first_downs = Column(Integer)\n    third_down_attempts = Column(Integer)\n    third_down_conversions = Column(Integer)\n    fourth_down_attempts = Column(Integer)\n    fourth_down_conversions = Column(Integer)\n    plays = Column(Integer)\n    yards = Column(Integer)\n    drives = Column(Integer)\n    passing_yards = Column(Integer)\n    passing_attempts = Column(Integer)\n    passing_completions = Column(Integer)\n    interceptions = Column(Integer)\n    sack_total = Column(Integer)\n    sack_yards = Column(Integer)\n    rushing_yards = Column(Integer)\n    rushing_attempts = Column(Integer)\n    red_zone_attempts = Column(Integer)\n    red_zone_conversions = Column(Integer)\n    penalty_total = Column(Integer)\n    penalty_yards = Column(Integer)\n    fumbles_lost = Column(Integer)\n    defensive_special_teams_tds = Column(Integer)\n    time_of_possession = Column(Time)\n    \n    game = relationship('Game', backref=(backref('team_stats')))\n    team = relationship('Team')\n    \n    @classmethod\n    def sum_query(cls):\n        \n        sum_query = [\n            func.count(cls.sack_total).label('sacks'),\n            
func.count(cls.defensive_special_teams_tds).label('td'),\n            func.count(cls.interceptions).label('interceptions'),\n        ]\n        return sum_query\n    \n    @classmethod\n    def headers(cls):\n        return ['passing_first_downs', 'rushing_first_downs', \n            'penalty_first_downs', 'third_down_attempts', \n            'third_down_conversions', 'fourth_down_attempts', \n            'fourth_down_conversions', 'plays', 'yards', 'drives', \n            'passing_yards', 'passing_attempts', 'passing_completions', \n            'interceptions', 'sack_total', 'sack_yards', 'rushing_yards', \n            'rushing_attempts', 'red_zone_attempts', 'red_zone_conversions', \n            'penalty_total', 'penalty_yards','fumbles_lost', \n            'defensive_special_teams_tds']\n    \nclass BasketballMixin(object): \n    minutes = Column(Integer) \n    field_goals_made = Column(Integer)\n    field_goals_attempted = Column(Integer)\n    threes_made = Column(Integer)\n    threes_attempted = Column(Integer)\n    free_throws_made = Column(Integer)\n    free_throws_attempted = Column(Integer)\n    offensive_rebounds = Column(Integer)\n    defensive_rebounds = Column(Integer)\n    rebounds = Column(Integer)\n    assists = Column(Integer)\n    steals = Column(Integer)\n    blocks = Column(Integer)\n    turnovers = Column(Integer)\n    personal_fouls = Column(Integer)\n    plus_minus = Column(Integer)\n    points = Column(Integer)\n    \n    def per_game(self, stat):\n        if stat:\n            per_game = round(float(stat)/self.games_played, 2)\n        else:\n            return 0\n        return per_game\n    \n    def per_36(self, stat):\n        if stat:\n            per_36 = round(float(stat)/self.minutes*36, 2)\n        else:\n            return 0\n        return per_36\n    \n    def free_throw_pct(self):\n        if self.free_throws_made:\n            ft_pct = round(float(self.free_throws_made)/float(self.free_throws_attempted)*100, 2)\n        else:\n            return 0\n        return ft_pct\n    \n    def field_goal_pct(self):\n        if self.field_goals_attempted:\n            fg_pct = round(float(self.field_goals_made)/float(self.field_goals_attempted)*100, 2)\n        else:\n            return 0\n        return fg_pct\n    \n    def threes_pct(self):\n        if self.threes_attempted:\n            threes_pct = round(float(self.threes_made)/float(self.threes_attempted)*100, 2)\n        else:\n            return 0\n        return threes_pct\n    \n    def two_pt_fg_made(self):\n        return self.field_goals_made - self.threes_made\n    \n    def two_pt_fg_attempted(self):\n        return self.field_goals_attempted - self.threes_attempted\n    \n    def two_pt_fg_pct(self):\n        return round(float(self.two_pt_fg_made())/float(self.two_pt_fg_attempted())*100, 2)\n    \n    def ts(self):\n        # true shooting: points over twice the true-shot attempts (0.44 weights free throws)\n        ts = round(self.points/(2*self.field_goals_attempted + 0.44*self.free_throws_attempted)*100, 2)\n        return ts\n    \n    def efg(self):\n        efg = round((self.field_goals_made + 0.5 * self.threes_made)/self.field_goals_attempted*100, 2)\n        return efg\n    \n    def credits(self):\n        # misses are derived from attempts minus makes; no *_missed columns exist\n        credits = (self.points + self.rebounds + self.steals + \n                   self.blocks - (self.field_goals_attempted - self.field_goals_made) - \n                   (self.free_throws_attempted - self.free_throws_made) - self.turnovers)\n        return credits\n    \n    def approximate_value(self):\n        av = round((float(self.credits())*3/4)/21, 2)\n        return av\n    \n    def assist_pct(self):\n        return ''\n        '''\n        assist_pct = 100*self.assists/(((self.minutes\n        100*Assists/(((Minutes Played /(Team Minutes/5)) * Team Field Goals Made) - Field Goals Made)\n        '''\n\nclass BasketballBoxScoreStat(BasketballMixin, PlayerStat):\n\n    id = Column(Integer, ForeignKey('player_stat.id'), primary_key=True)\n    \n    '''\n    player_stat = relationship('PlayerStat', \n                               backref=backref('basketball_stat', uselist=False))\n    '''\n    \n    __mapper_args__ = {'polymorphic_identity': 'box_score'}\n    \n    @classmethod\n    def abbr(cls):\n        return [\n            (cls.minutes, 'min'),\n            (cls.field_goals_made, 'fgm'),\n            (cls.field_goals_attempted, 'fga'),\n            (cls.threes_made, '3ptm'),\n            
(cls.threes_attempted, '3pta'),\n            (cls.free_throws_made, 'ftm'),\n            (cls.free_throws_attempted, 'fta'),\n            (cls.offensive_rebounds, 'oreb'),\n            (cls.defensive_rebounds, 'dreb'),\n            (cls.rebounds, 'reb'),\n            (cls.assists, 'ast'),\n            (cls.steals, 'stl'),\n            (cls.blocks, 'blk'),\n            (cls.turnovers, 'tos'),\n            (cls.personal_fouls, 'pf'),\n            (cls.plus_minus, '+/-'),\n            (cls.points, 'pts')\n        ]\n    \n    @classmethod\n    def full_cats(cls):\n        c = [(k.name, v) for k, v in cls.abbr()]\n        c.insert(3, ('field goal percent', 'fg%'))\n        c.insert(6, ('three point percent', '3pt%'))\n        c.insert(9, ('free throw percent', 'ft%'))\n        return c\n    \n    @classmethod\n    def sum_query(cls):\n        return [func.sum(k).label(v) for k, v in cls.abbr()]\n    \n    @classmethod\n    def calc_add(cls, players, stat_type):\n        p_list = []\n        for p in players:\n            p = p._asdict()\n            stats = []\n            for cat, abbr in cls.full_cats():\n                if abbr in ['fg%', 'ft%', '3pt%']:\n                    base = abbr[:-1]\n                    try:\n                        s = round(float(p[base+'m'])/p[base+'a'] * 100, 2)\n                    except ZeroDivisionError:\n                        s = 0\n                elif abbr == 'gp':\n                    s = p['gp']\n                elif stat_type == 'game':\n                    s = round(float(p[abbr])/p['gp'], 1)\n                elif stat_type == '36':\n                    if abbr == 'min':\n                        s = p['min']\n                    else:\n                        try:\n                            s = round(float(p[abbr])*36/p['min'], 1)\n                        except ZeroDivisionError:\n                            s = 0\n                else:\n                    s = p[abbr]\n                stats.append(s)\n            p['stats'] = stats\n            p_list.append(p)\n        players = p_list\n        return sorted(p_list, key= lambda x: x['stats'][-1], reverse=True)\n    \n    def __repr__(self):\n        return '%s' % self.player.person\n    \nclass BasketballSeasonStat(BasketballMixin, Base):\n    id = Column(Integer, primary_key=True)\n    player_id = Column(Integer, ForeignKey('player.id'))\n    team_id = Column(Integer, ForeignKey('team.id'))\n    season_id = Column(Integer, ForeignKey('season.id'))\n    games_played = Column(Integer)\n    game_type = Column(String)\n    \n    __mapper_args__ = {'polymorphic_identity': 'box_score'}\n    \n    player = relationship('Player')\n    team = relationship('Team')\n    season = relationship('Season')\n    \n    def __repr__(self):\n        return '%s' % self.player.person\n    \n    \nclass BasketballTeamStat(Base):\n    id = Column(Integer, primary_key=True)\n    game_id = Column(Integer, ForeignKey('game.id'))\n    team_id = Column(Integer, ForeignKey('team.id'))\n    field_goals_made = Column(Integer)\n    field_goals_attempted = Column(Integer)\n    threes_made = Column(Integer)\n    threes_attempted = Column(Integer)\n    free_throws_made = Column(Integer)\n    free_throws_attempted = Column(Integer)\n    offensive_rebounds = Column(Integer)\n    defensive_rebounds = Column(Integer)\n    rebounds = Column(Integer)\n    assists = Column(Integer)\n    steals = Column(Integer)\n    blocks = Column(Integer)\n    turnovers = Column(Integer)\n    personal_fouls = Column(Integer)\n    points = Column(Integer)\n    paint_points = Column(Integer)\n    fast_break_points = Column(Integer)\n    points_off_turnover = Column(Integer)\n    \n    team = relationship('Team', uselist=False)\n    game = relationship('Game', uselist=False)\n    \n\n    \n","repo_name":"tignas/sports","sub_path":"bizarro/models/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":41238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"9752657441","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\nimport tqdm\n\nimport pypdf\nROOT_DIRECTORY = './appel'\nFILE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'pdfs')\n\nurl = ''\n# create a directory to store the downloaded files\nif not os.path.exists(FILE_DIRECTORY):\n    os.makedirs(FILE_DIRECTORY)\n\n# send a GET request to the 
URL\nresponse = requests.get(url)\n\n# parse the HTML content using BeautifulSoup\nsoup = BeautifulSoup(response.content, 'html.parser')\n\n# find all links to PDF files (guard against anchors without an href attribute)\npdf_links = soup.find_all('a', href=lambda href: href and href.endswith('.pdf'))\npdf_paths = set(os.path.join(FILE_DIRECTORY, pdf_link['href'].split('/')[-1]) for pdf_link in pdf_links)\n\n# compare to existing directory\nexisting_pdfs = set(os.path.join(FILE_DIRECTORY, f) for f in os.listdir(FILE_DIRECTORY))\nfrom copy import copy\nfor file in copy(existing_pdfs):\n    \n    try:\n        pdfFileObj = open(file, \"rb\")\n        pdfReader = pypdf.PdfReader(pdfFileObj)\n    except:\n        existing_pdfs.remove(file)\n\nremaining_files_to_download = pdf_paths.difference(existing_pdfs)\nprint(f'Files to download (missing or corrupted): {remaining_files_to_download}\\n{len(remaining_files_to_download)}')\n\n# download each PDF file\nfor pdf_path in remaining_files_to_download:\n    # print(link)\n    pdf_url = '/'.join(url.split('/')[:-1]) + '/s/' + pdf_path.split('/')[-1]\n    print(pdf_url)\n\n    response = requests.get(pdf_url)\n    with open(pdf_path, 'wb') as f:\n        f.write(response.content)\n    print(f'Downloaded {pdf_path}. ({pdf_url})')\n\n\n# compare to existing directory\nexisting_pdfs = set(os.path.join(FILE_DIRECTORY, f) for f in os.listdir(FILE_DIRECTORY))\n\nfor file in copy(existing_pdfs):\n\n    try:\n        pdfFileObj = open(file, \"rb\")\n        pdfReader = pypdf.PdfReader(pdfFileObj)\n    except:\n        existing_pdfs.remove(file)\n\nremaining_files_to_download = pdf_paths.difference(existing_pdfs)\nprint(f'Remaining files: {pdf_paths.difference(existing_pdfs)}\\n{len(pdf_paths.difference(existing_pdfs))}')","repo_name":"lopez-hector/pub_find","sub_path":"src/download_pdfs.py","file_name":"download_pdfs.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10300153219","text":"from datetime import datetime\nimport json\nimport os\n\n\ndef get_spot_v1_attribute_metadata():\n    globals = {\n        'commit'     : { 'type': 'string' },\n        'launchdate' : { 'type': 'date'   },\n        'title'      : { 'type': 'string' },\n    }\n\n    metrics = {\n        'avg#inclusive#sum#time.duration' : { 'type': 'double', 'attribute.unit': 'sec', 'attribute.alias': 'Avg time/rank' }\n    }\n\n    return (globals, metrics)\n\ndef is_spot_v1_file(filename):\n    with open(filename) as f:\n        try:\n            obj = json.load(f)\n        except:\n            return False\n    \n    return 'XTics' in obj and 'commits' in obj\n\n\ndef read_spot_v1_contents(filename):\n    \"\"\"\n    Read contents from Spot v1 json file\n    \"\"\"\n\n    with open(filename) as f:\n        obj = json.load(f)\n    \n    commits = obj.pop('commits')\n    title = obj.pop('title')\n    yAxis = obj.pop('yAxis')\n    series = obj.pop('series')\n\n    if 'show_exclusive' in obj:\n        obj.pop('show_exclusive')\n\n    dates = []\n\n    for date in obj.pop('XTics'):\n        split_date = date.split(\".\")\n        split_date_pre = split_date[0]\n        our_date = datetime.strptime(split_date_pre, '%Y-%m-%d %H:%M:%S').timestamp()\n        int_date = int(our_date)\n        str_date = str(int_date)\n        dates.append( str_date )\n\n    basename = os.path.basename(filename)\n    base_key = basename[0:basename.find('.json')]\n\n    result = {}\n\n    for i in range(len(dates)):\n        key = base_key + '-' + str(i)\n\n        globals = {\n            'launchdate' : dates[i],\n            'commit' : commits[i],\n            'title' : title,\n            'json' : '1'\n        }\n\n        records = []\n        for funcpath, values in obj.items():\n            # values are 2d arrays like: [ [ 0, 0.5 ], [ 0, 1.0, ], ... 
]\n # We assume the outer dimension is the run, and the second element\n # in the inner dimension is the value.\n if (len(values) > i and isinstance(values[i], list) and len(values[i]) >= 2):\n records.append({ 'path': funcpath, 'avg#inclusive#sum#time.duration': values[i][1] })\n \n result[key] = { 'globals': globals, 'records': records }\n \n return result\n","repo_name":"LLNL/spotbe","sub_path":"spotdb/spotdb/spotv1.py","file_name":"spotv1.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"9936833745","text":"import os\nimport requests\nimport json\n\nfrom typing import *\n\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\nfrom dateutil.parser import parse\n\nimport urllib\n\n\nclass GwanboDict:\n def __init__(self, agent: str, seq: str, ebook_no: str, publish_seq: str, publish_subject: str,\n publish_regdate: str, organization_name: str, organization_code: str, category_name: str,\n category_seq: str, law_name: str):\n self.seq = seq\n self.ebookNo = ebook_no\n self.publish = {\n 'seq': publish_seq,\n 'subject': publish_subject.strip(),\n 'regdate': publish_regdate.replace('20107280', '20100728')\n }\n self.organization = {\n 'name': organization_name,\n 'code': organization_code\n }\n self.category = {\n 'name': category_name,\n 'seq': category_seq\n }\n\n self.lawName = law_name.strip()\n\n dt = parse(self.publish[\"regdate\"])\n _cdn_prefix = f'https://cdn.dataportal.kr/data/{agent}/{dt.year}/{str(dt.month).zfill(2)}/{str(dt.day).zfill(2)}/'\n self.binaryFile = f'{_cdn_prefix}{self.seq}.pdf'\n\n def __str__(self):\n return json.dumps(vars(self), ensure_ascii=False)\n\n\nclass ParseDriver:\n def __init__(self):\n self.agent = 'gwanbo'\n\n def get_list_by_date(self, start_date: str, end_date: str) -> List[GwanboDict]:\n print(f'===== {start_date} TO {end_date} =====')\n\n session = requests.session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n url = 'https://gwanbo.go.kr/SearchRestApi.jsp'\n header = {\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:87.0) Gecko/20100101 Firefox/87.0',\n 'X-Requested-With': 'XMLHttpRequest'\n }\n request_body = {\n 'mode': 'daily',\n 'index': 'gwanbo',\n 'query': f'keyword_field_regdate:[{start_date}+TO+{end_date}]+AND+unstored_field_keyword:(관보)+AND+keyword_category_order:(@@ORDER_NUM)',\n 'pQuery_tmp': '',\n 'pageNo': '1',\n 'listSize': '10000',\n 'sort': ''\n }\n request_payload = ''\n for k, v in request_body.items():\n request_payload += f'{k}={urllib.parse.quote(v).replace(\"%2B\", \"+\")}&'\n\n response = session.post(\n url,\n headers=header,\n data=request_payload\n )\n try:\n dataset = json.loads(response.text)['data']\n except Exception as e:\n return list()\n\n gwanbo_list = list()\n for data in dataset:\n if data['count'] > 0:\n for item in data['list']:\n gwanbo_list.append(GwanboDict(\n self.agent,\n item['stored_toc_seq'], item['keyword_ebook_no'], item['search_key'].split('_')[1],\n item['stored_field_subject'], item['keyword_field_regdate'], item['stored_organ_nm'],\n item['stored_organ_code'], item['stored_category_name'], item['stored_category_seq'],\n item['stored_laword_nm']\n ))\n return gwanbo_list\n\n def download_single_gwanbo(self, gwanbo: GwanboDict, destination: str) 
-> bool:\n session = requests.session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n\n uri = 'https://gwanbo.go.kr/ezpdfwebviewer/viewer.jsp?optNoUi=true&optLang=ko&' \\\n f'contentId={gwanbo.publish[\"seq\"]}\":{gwanbo.seq}:N:&reqType=docData&reqSubType=dn'\n response = session.get(uri)\n\n print(f'download ==> {gwanbo.publish[\"regdate\"]}/{gwanbo.category[\"name\"]}/{gwanbo.publish[\"subject\"]}')\n try:\n if not (os.path.isdir(destination)):\n os.makedirs(destination)\n file = os.path.join(destination, f'{gwanbo.seq}.pdf')\n open(file, 'wb').write(response.content)\n except IOError as e:\n print('IOError', e)\n return False\n return True\n\n def download_multiple_gwanbo(self, gwanbo: List[GwanboDict], destination: str) -> bool:\n for one in gwanbo:\n if not self.download_single_gwanbo(one, destination):\n return False\n\n return True\n","repo_name":"TEAM-CLOUD-KR/datahub-ingest","sub_path":"crawler/gwanbo/parsedriver.py","file_name":"parsedriver.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"14252110225","text":"# Calculate the accuracy of a baseline that simply predicts \"London\" for every\n# example in the dev set.\n# Hint: Make use of existing code.\n# Your solution here should only be a few lines.\nfrom utils import evaluate_places\n\nif __name__ == \"__main__\":\n length = 500\n pred = ['London'] * length\n total, correct = evaluate_places(\"birth_dev.tsv\", pred)\n print(f'Accuracy of london baseline is {correct*100/total}%')\n\n","repo_name":"chenjianhuii/CS224n","sub_path":"assignments/student-new/src/london_baseline.py","file_name":"london_baseline.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36868503041","text":"import pyglet\nimport numpy, numpy as np\n\ndef image_np2pg(npImage):\n\tassert(npImage.shape[2] == 3)\n\tassert(npImage.dtype == np.float32)\n\tw = npImage.shape[1]; h = npImage.shape[0]; format_ = 'RGB'\n\tpitch = -w * len(format_)\n\tnpImage8 = (npImage * 255.0).astype(np.uint8)\n\n\timage = pyglet.image.ImageData(w, h, format_, str(npImage8.data), pitch)\n\treturn image","repo_name":"logixoul/py_include","sub_path":"lib/pyglet/interop.py","file_name":"interop.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19238183932","text":"\"\"\"\nShipping functions from SciPy to reduce dependency on having SciPy installed\n\"\"\"\n\nimport numpy as np\n\n\ndef scoreatpercentile(a, per, limit=(), interpolation_method='fraction'):\n \"\"\"\n Calculate the score at the given `per` percentile of the sequence `a`.\n\n For example, the score at `per=50` is the median. If the desired quantile\n lies between two data points, we interpolate between them, according to\n the value of `interpolation`. If the parameter `limit` is provided, it\n should be a tuple (lower, upper) of two values. Values of `a` outside\n this (closed) interval will be ignored.\n\n The `interpolation_method` parameter supports three values, namely\n `fraction` (default), `lower` and `higher`. Interpolation is done only,\n if the desired quantile lies between two data points `i` and `j`. 
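As a concrete illustration of the `fraction` rule just described, the computation can be reproduced with plain NumPy. This is a minimal sketch for illustration only; the helper name `score_at` is ours and is not part of this module:

import numpy as np

def score_at(a, per):
    # Sketch of the 'fraction' interpolation rule: idx = per/100 * (n - 1),
    # then interpolate between the two nearest sorted samples.
    values = np.sort(np.asarray(a, dtype=float))
    idx = per / 100.0 * (values.shape[0] - 1)
    lo = int(np.floor(idx))
    hi = int(np.ceil(idx))
    frac = idx % 1
    # i + (j - i) * fraction, with i = values[lo] and j = values[hi]
    return values[lo] + (values[hi] - values[lo]) * frac

print(score_at(np.arange(100), 50))  # 49.5, matching the docstring example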
For\n `fraction`, the result is an interpolated value between `i` and `j`;\n for `lower`, the result is `i`, for `higher` the result is `j`.\n\n Parameters\n ----------\n a : ndarray\n Values from which to extract score.\n per : scalar\n Percentile at which to extract score.\n limit : tuple, optional\n Tuple of two scalars, the lower and upper limits within which to\n compute the percentile.\n interpolation_method : {'fraction', 'lower', 'higher'}, optional\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n - fraction: `i + (j - i)*fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n - lower: `i`.\n - higher: `j`.\n\n Returns\n -------\n score : float\n Score at percentile.\n\n See Also\n --------\n percentileofscore\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.arange(100)\n >>> stats.scoreatpercentile(a, 50)\n 49.5\n\n \"\"\"\n # TODO: this should be a simple wrapper around a well-written quantile\n # function. GNU R provides 9 quantile algorithms (!), with differing\n # behaviour at, for example, discontinuities.\n values = np.sort(a, axis=0)\n if limit:\n values = values[(limit[0] <= values) & (values <= limit[1])]\n\n idx = per / 100. * (values.shape[0] - 1)\n if (idx % 1 == 0):\n score = values[idx]\n else:\n if interpolation_method == 'fraction':\n score = _interpolate(values[int(idx)], values[int(idx) + 1],\n idx % 1)\n elif interpolation_method == 'lower':\n score = values[np.floor(idx)]\n elif interpolation_method == 'higher':\n score = values[np.ceil(idx)]\n else:\n raise ValueError(\"interpolation_method can only be 'fraction', \"\n \"'lower' or 'higher'\")\n\n return score\n\n\ndef _interpolate(a, b, fraction):\n \"\"\"Returns the point at the given fraction between a and b, where\n 'fraction' must be between 0 and 1.\n \"\"\"\n return a + (b - a) * fraction\n\n\ndef rankdata(a):\n \"\"\"\n Ranks the data, dealing with ties appropriately.\n\n Equal values are assigned a rank that is the average of the ranks that\n would have been otherwise assigned to all of the values within that set.\n Ranks begin at 1, not 0.\n\n Parameters\n ----------\n a : array_like\n This array is first flattened.\n\n Returns\n -------\n rankdata : ndarray\n An array of length equal to the size of `a`, containing rank scores.\n\n Examples\n --------\n >>> stats.rankdata([0, 2, 2, 3])\n array([ 1. , 2.5, 2.5, 4. ])\n\n \"\"\"\n a = np.ravel(a)\n n = len(a)\n svec, ivec = fastsort(a)\n sumranks = 0\n dupcount = 0\n newarray = np.zeros(n, float)\n for i in xrange(n):\n sumranks += i\n dupcount += 1\n if i == n - 1 or svec[i] != svec[i + 1]:\n averank = sumranks / float(dupcount) + 1\n for j in xrange(i - dupcount + 1, i + 1):\n newarray[ivec[j]] = averank\n sumranks = 0\n dupcount = 0\n return newarray\n\n\ndef fastsort(a):\n \"\"\"\n Sort an array and provide the argsort.\n\n Parameters\n ----------\n a : array_like\n Input array.\n\n Returns\n -------\n fastsort : ndarray of type int\n sorted indices into the original array\n\n \"\"\"\n # TODO: the wording in the docstring is nonsense.\n it = np.argsort(a)\n as_ = a[it]\n return as_, it\n\n\ndef percentileofscore(a, score, kind='rank'):\n '''\n The percentile rank of a score relative to a list of scores.\n\n A `percentileofscore` of, for example, 80% means that 80% of the\n scores in `a` are below the given score. 
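The `strict`, `weak` and `mean` definitions described here reduce to simple counting. A minimal sketch (the helper name `pct_of_score` is illustrative, not part of the module) that reproduces the docstring examples:

import numpy as np

def pct_of_score(a, score, kind='weak'):
    # Count-based sketch of the 'strict', 'weak' and 'mean' rules only.
    a = np.asarray(a)
    n = float(len(a))
    strict = np.sum(a < score) / n * 100
    weak = np.sum(a <= score) / n * 100
    return {'strict': strict, 'weak': weak, 'mean': (strict + weak) / 2}[kind]

print(pct_of_score([1, 2, 3, 3, 4], 3, 'strict'))  # 40.0
print(pct_of_score([1, 2, 3, 3, 4], 3, 'weak'))    # 80.0
print(pct_of_score([1, 2, 3, 3, 4], 3, 'mean'))    # 60.0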
In the case of gaps or\n ties, the exact definition depends on the optional keyword, `kind`.\n\n Parameters\n ----------\n a: array like\n Array of scores to which `score` is compared.\n score: int or float\n Score that is compared to the elements in `a`.\n kind: {'rank', 'weak', 'strict', 'mean'}, optional\n This optional parameter specifies the interpretation of the\n resulting score:\n\n - \"rank\": Average percentage ranking of score. In case of\n multiple matches, average the percentage rankings of\n all matching scores.\n - \"weak\": This kind corresponds to the definition of a cumulative\n distribution function. A percentileofscore of 80%\n means that 80% of values are less than or equal\n to the provided score.\n - \"strict\": Similar to \"weak\", except that only values that are\n strictly less than the given score are counted.\n - \"mean\": The average of the \"weak\" and \"strict\" scores, often used in\n testing. See\n\n http://en.wikipedia.org/wiki/Percentile_rank\n\n Returns\n -------\n pcos : float\n Percentile-position of score (0-100) relative to `a`.\n\n Examples\n --------\n Three-quarters of the given values lie below a given score:\n\n >>> percentileofscore([1, 2, 3, 4], 3)\n 75.0\n\n With multiple matches, note how the scores of the two matches, 0.6\n and 0.8 respectively, are averaged:\n\n >>> percentileofscore([1, 2, 3, 3, 4], 3)\n 70.0\n\n Only 2/5 values are strictly less than 3:\n\n >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')\n 40.0\n\n But 4/5 values are less than or equal to 3:\n\n >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')\n 80.0\n\n The average between the weak and the strict scores is\n\n >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')\n 60.0\n\n '''\n a = np.array(a)\n n = len(a)\n\n if kind == 'rank':\n if not(np.any(a == score)):\n a = np.append(a, score)\n a_len = np.array(range(len(a)))\n else:\n a_len = np.array(range(len(a))) + 1.0\n\n a = np.sort(a)\n idx = [a == score]\n pct = (np.mean(a_len[idx]) / n) * 100.0\n return pct\n\n elif kind == 'strict':\n return sum(a < score) / float(n) * 100\n elif kind == 'weak':\n return sum(a <= score) / float(n) * 100\n elif kind == 'mean':\n return (sum(a < score) + sum(a <= score)) * 50 / float(n)\n else:\n raise ValueError(\"kind can only be 'rank', 'strict', 'weak' or 'mean'\")\n","repo_name":"miniBloq/v0.83","sub_path":"source/Bin/Minibloq/lang/PPythonWin/v2.7.5.1/App/Lib/site-packages/pandas/compat/scipy.py","file_name":"scipy.py","file_ext":"py","file_size_in_byte":7348,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"3"} +{"seq_id":"19779637820","text":"import numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.colors as colors\r\n\r\ndef truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):\r\n new_cmap = colors.LinearSegmentedColormap.from_list(\r\n 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),\r\n cmap(np.linspace(minval, maxval, n)))\r\n return new_cmap\r\n\r\n# THot (truncated hot)\r\ncmap = plt.get_cmap('hot_r')\r\nTHot = truncate_colormap(cmap, 0, 0.7)","repo_name":"AlcebiadesDalColJunior/Wavelet-based-Visual-Analysis-for-Data-Exploration","sub_path":"code/util/custom_colormaps.py","file_name":"custom_colormaps.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70332559443","text":"#!/usr/bin/python3\n\nimport re\nimport subprocess\n\n# Discover OS version\nwith 
open(\"/etc/redhat-release\", \"r\") as f:\n name = f.read()[:-1]\n\nm = re.match(r\"Red Hat Enterprise Linux Server release (\\d\\.\\d)\", name)\nif not m:\n m = re.match(r\"CentOS Linux release (\\d\\.\\d)\", name)\nif not m:\n print(\"OS detection failed!\")\n exit(-1)\n\nel = m.group(1)\n\n# Get the output\nret, out = subprocess.getstatusoutput(\"./runme > out\")\nif ret != 0:\n print(f\"Check failed: {out}\")\n exit(ret)\n\n# Check if it matches the reference\nret, out = subprocess.getstatusoutput(f\"diff -u ci/outputs/{el} out\")\nif ret != 0:\n print(\"Output didn't match expectations; diff follows...\")\n print(out)\n exit(ret)\n\nprint(\"All set!\")\n","repo_name":"frozencemetery/krb5check","sub_path":".github/workflows/doublecheck.py","file_name":"doublecheck.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33021652477","text":"#this program calculates waiting time for each packet and stores it in p4.txt\nfin = open(\"tcp-example.tr\", \"r\")\nfout = open(\"p4.txt\", \"w\")\nstr2=fin.readline()\nstr1=[]\nstr1 = str2.split()\nm=[]\nt=0\n\nwhile (str2!=\"\") :\n\tstr1 = str2.split()\n\tt=float(str1[1])\n\tif str1[0] =='+':\n\t\tm.append(float(str1[1]))\n\telif str1[0]=='-':\n\t\tw=m[0];\n\t\tdel m[0]\n\t\tfout.write(str(t) + \"\\t\" + str(t-w) +\"\\n\")\n\t\t\n\tstr2=fin.readline()\nfin.close()\nfout.close()\n","repo_name":"gimmepizza/CS224-pa2","sub_path":"p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22472023927","text":"\"\"\"\nvoxel.py\n-----------\n\nConvert meshes to a simple voxel data structure and back again.\n\"\"\"\nimport numpy as np\n\nfrom . import ops\nfrom . import transforms\nfrom . import morphology\n\nfrom .encoding import Encoding, DenseEncoding\nfrom .. import util\nfrom .. import caching\nfrom .. import bounds as bounds_module\nfrom .. 
import transformations as tr\n\nfrom ..parent import Geometry\nfrom ..constants import log\n\nfrom ..exchange.binvox import export_binvox\n\n\nclass VoxelGrid(Geometry):\n \"\"\"\n Store 3D voxels.\n \"\"\"\n\n def __init__(self, encoding, transform=None, metadata=None):\n if transform is None:\n transform = np.eye(4)\n if isinstance(encoding, np.ndarray):\n encoding = DenseEncoding(encoding.astype(bool))\n if encoding.dtype != bool:\n raise ValueError('encoding must have dtype bool')\n self._data = caching.DataStore()\n self.encoding = encoding\n self._data['transform'] = transforms.Transform(transform)\n self._cache = caching.Cache(\n id_function=self._data.__hash__)\n\n self.metadata = dict()\n # update the mesh metadata with passed metadata\n if isinstance(metadata, dict):\n self.metadata.update(metadata)\n elif metadata is not None:\n raise ValueError(\n 'metadata should be a dict or None, got %s' % str(metadata))\n\n def crc(self):\n util.log.warning(\n '`geometry.crc()` is deprecated and will ' +\n 'be removed in October 2023: replace ' +\n 'with `geometry.__hash__()` or `hash(geometry)`')\n return self.__hash__()\n\n def hash(self):\n util.log.warning(\n '`geometry.hash()` is deprecated and will ' +\n 'be removed in October 2023: replace ' +\n 'with `geometry.__hash__()` or `hash(geometry)`')\n return self.__hash__()\n\n def __hash__(self):\n \"\"\"\n Get the hash of the current transformation matrix.\n\n Returns\n ------------\n hash : str\n Hash of transformation matrix\n \"\"\"\n return self._data.__hash__()\n\n @property\n def encoding(self):\n \"\"\"\n `Encoding` object providing the occupancy grid.\n\n See `trimesh.voxel.encoding` for implementations.\n \"\"\"\n return self._data['encoding']\n\n @encoding.setter\n def encoding(self, encoding):\n if isinstance(encoding, np.ndarray):\n encoding = DenseEncoding(encoding)\n elif not isinstance(encoding, Encoding):\n raise ValueError(\n 'encoding must be an Encoding, got %s' % str(encoding))\n if len(encoding.shape) != 3:\n raise ValueError(\n 'encoding must be rank 3, got shape %s' % str(encoding.shape))\n if encoding.dtype != bool:\n raise ValueError(\n 'encoding must be binary, got %s' % encoding.dtype)\n self._data['encoding'] = encoding\n\n @property\n def _transform(self):\n return self._data['transform']\n\n @property\n def transform(self):\n \"\"\"4x4 homogeneous transformation matrix.\"\"\"\n return self._transform.matrix\n\n @transform.setter\n def transform(self, matrix):\n \"\"\"4x4 homogeneous transformation matrix.\"\"\"\n self._transform.matrix = matrix\n\n @property\n def translation(self):\n \"\"\"Location of voxel at [0, 0, 0].\"\"\"\n return self._transform.translation\n\n @property\n def origin(self):\n \"\"\"Deprecated. Use `self.translation`.\"\"\"\n # DEPRECATED. 
Use translation instead\n return self.translation\n\n @property\n def scale(self):\n \"\"\"\n 3-element float representing per-axis scale.\n\n Raises a `RuntimeError` if `self.transform` has rotation or\n shear components.\n \"\"\"\n return self._transform.scale\n\n @property\n def pitch(self):\n \"\"\"\n Uniform scaling factor representing the side length of\n each voxel.\n\n Returns\n -----------\n pitch : float\n Pitch of the voxels.\n\n Raises\n ------------\n `RuntimeError`\n If `self.transformation` has rotation or shear\n components of has non-uniform scaling.\n \"\"\"\n return self._transform.pitch\n\n @property\n def element_volume(self):\n return self._transform.unit_volume\n\n def apply_transform(self, matrix):\n self._transform.apply_transform(matrix)\n return self\n\n def strip(self):\n \"\"\"\n Mutate self by stripping leading/trailing planes of zeros.\n\n Returns\n --------\n self after mutation occurs in-place\n \"\"\"\n encoding, padding = self.encoding.stripped\n self.encoding = encoding\n self._transform.matrix[:3, 3] = self.indices_to_points(padding[:, 0])\n return self\n\n @caching.cache_decorator\n def bounds(self):\n indices = self.sparse_indices\n # get all 8 corners of the AABB\n corners = bounds_module.corners(\n [indices.min(axis=0) - 0.5,\n indices.max(axis=0) + 0.5])\n # transform these corners to a new frame\n corners = self._transform.transform_points(corners)\n # get the AABB of corners in-frame\n bounds = np.array([corners.min(axis=0), corners.max(axis=0)])\n bounds.flags.writeable = False\n return bounds\n\n @caching.cache_decorator\n def extents(self):\n bounds = self.bounds\n extents = bounds[1] - bounds[0]\n extents.flags.writeable = False\n return extents\n\n @caching.cache_decorator\n def is_empty(self):\n return self.encoding.is_empty\n\n @property\n def shape(self):\n \"\"\"3-tuple of ints denoting shape of occupancy grid.\"\"\"\n return self.encoding.shape\n\n @caching.cache_decorator\n def filled_count(self):\n \"\"\"int, number of occupied voxels in the grid.\"\"\"\n return self.encoding.sum.item()\n\n def is_filled(self, point):\n \"\"\"\n Query points to see if the voxel cells they lie in are\n filled or not.\n\n Parameters\n ----------\n point : (n, 3) float\n Points in space\n\n Returns\n ---------\n is_filled : (n,) bool\n Is cell occupied or not for each point\n \"\"\"\n point = np.asanyarray(point)\n indices = self.points_to_indices(point)\n in_range = np.logical_and(\n np.all(indices < np.array(self.shape), axis=-1),\n np.all(indices >= 0, axis=-1))\n\n is_filled = np.zeros_like(in_range)\n is_filled[in_range] = self.encoding.gather_nd(indices[in_range])\n return is_filled\n\n def fill(self, method='holes', **kwargs):\n \"\"\"\n Mutates self by filling in the encoding according\n to `morphology.fill`.\n\n Parameters\n ----------\n method : hashable\n Implementation key, one of\n `trimesh.voxel.morphology.fill.fillers` keys\n **kwargs : dict\n Additional kwargs passed through to\n the keyed implementation.\n\n Returns\n ----------\n self : VoxelGrid\n After replacing encoding with a filled version.\n \"\"\"\n self.encoding = morphology.fill(\n self.encoding, method=method, **kwargs)\n return self\n\n def hollow(self):\n \"\"\"\n Mutates self by removing internal voxels\n leaving only surface elements.\n\n Surviving elements are those in encoding that are\n adjacent to an empty voxel where adjacency is\n controlled by `structure`.\n\n Returns\n ----------\n self : VoxelGrid\n After replacing encoding with a surface version.\n \"\"\"\n 
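For intuition, a surface extraction of this kind can be approximated on a dense boolean grid with `scipy.ndimage`. This is a hedged sketch assuming 6-connected adjacency; the actual `morphology.surface` implementation is not shown here and may use a different structuring element:

import numpy as np
from scipy import ndimage

# A solid 5x5x5 cube of occupied voxels.
dense = np.ones((5, 5, 5), dtype=bool)

# Keep only cells adjacent to an empty cell: erode, then subtract.
interior = ndimage.binary_erosion(dense)
surface = dense & ~interior

print(dense.sum(), surface.sum())  # 125 occupied -> 98 surface voxels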
self.encoding = morphology.surface(self.encoding)\n return self\n\n @caching.cache_decorator\n def marching_cubes(self):\n \"\"\"\n A marching cubes Trimesh representation of the voxels.\n\n No effort was made to clean or smooth the result in any way;\n it is merely the result of applying the scikit-image\n measure.marching_cubes function to self.encoding.dense.\n\n Returns\n ---------\n meshed : trimesh.Trimesh\n Representing the current voxel\n object as returned by marching cubes algorithm.\n \"\"\"\n return ops.matrix_to_marching_cubes(matrix=self.matrix)\n\n @property\n def matrix(self):\n \"\"\"\n Return a DENSE matrix of the current voxel encoding.\n\n Returns\n -------------\n dense : (a, b, c) bool\n Numpy array of dense matrix\n Shortcut to voxel.encoding.dense\n \"\"\"\n return self.encoding.dense\n\n @caching.cache_decorator\n def volume(self):\n \"\"\"\n What is the volume of the filled cells in the current\n voxel object.\n\n Returns\n ---------\n volume : float\n Volume of filled cells.\n \"\"\"\n return self.filled_count * self.element_volume\n\n @caching.cache_decorator\n def points(self):\n \"\"\"\n The center of each filled cell as a list of points.\n\n Returns\n ----------\n points : (self.filled, 3) float\n Points in space.\n \"\"\"\n return self._transform.transform_points(\n self.sparse_indices.astype(float))\n\n @property\n def sparse_indices(self):\n \"\"\"(n, 3) int array of sparse indices of occupied voxels.\"\"\"\n return self.encoding.sparse_indices\n\n def as_boxes(self, colors=None, **kwargs):\n \"\"\"\n A rough Trimesh representation of the voxels with a box\n for each filled voxel.\n\n Parameters\n ----------\n colors : None, (3,) or (4,) float or uint8\n (X, Y, Z, 3) or (X, Y, Z, 4) float or uint8\n Where matrix.shape == (X, Y, Z)\n\n Returns\n ---------\n mesh : trimesh.Trimesh\n Mesh with one box per filled cell.\n \"\"\"\n\n if colors is not None:\n colors = np.asanyarray(colors)\n if colors.ndim == 4:\n encoding = self.encoding\n if colors.shape[:3] == encoding.shape:\n # TODO jackd: more efficient implementation?\n # encoding.as_mask?\n colors = colors[encoding.dense]\n else:\n log.warning('colors incorrect shape!')\n colors = None\n elif colors.shape not in ((3,), (4,)):\n log.warning('colors incorrect shape!')\n colors = None\n\n mesh = ops.multibox(\n centers=self.sparse_indices.astype(float), colors=colors)\n\n mesh = mesh.apply_transform(self.transform)\n return mesh\n\n def points_to_indices(self, points):\n \"\"\"\n Convert points to indices in the matrix array.\n\n Parameters\n ----------\n points: (n, 3) float, point in space\n\n Returns\n ---------\n indices: (n, 3) int array of indices into self.encoding\n \"\"\"\n points = self._transform.inverse_transform_points(points)\n return np.round(points).astype(int)\n\n def indices_to_points(self, indices):\n return self._transform.transform_points(indices.astype(float))\n\n def show(self, *args, **kwargs):\n \"\"\"\n Convert the current set of voxels into a trimesh for visualization\n and show that via its built- in preview method.\n \"\"\"\n return self.as_boxes(kwargs.pop(\n 'colors', None)).show(*args, **kwargs)\n\n def copy(self):\n return VoxelGrid(\n self.encoding.copy(),\n self._transform.matrix.copy())\n\n def export(self, file_obj=None, file_type=None, **kwargs):\n \"\"\"\n Export the current VoxelGrid.\n\n Parameters\n ------------\n file_obj : file-like or str\n File or file-name to export to.\n file_type : None or str\n Only 'binvox' currently supported.\n\n Returns\n ---------\n 
export : bytes\n Value of export.\n \"\"\"\n if isinstance(file_obj, str) and file_type is None:\n file_type = util.split_extension(file_obj).lower()\n\n if file_type != 'binvox':\n raise ValueError('only binvox exports supported!')\n\n exported = export_binvox(self, **kwargs)\n if hasattr(file_obj, 'write'):\n file_obj.write(exported)\n elif isinstance(file_obj, str):\n with open(file_obj, 'wb') as f:\n f.write(exported)\n return exported\n\n def revoxelized(self, shape):\n \"\"\"\n Create a new VoxelGrid without rotations, reflections\n or shearing.\n\n Parameters\n ----------\n shape : (3, int)\n The shape of the returned VoxelGrid.\n\n Returns\n ----------\n vox : VoxelGrid\n Of the given shape with possibly non-uniform\n scale and translation transformation matrix.\n \"\"\"\n shape = tuple(shape)\n bounds = self.bounds.copy()\n extents = self.extents\n points = util.grid_linspace(\n bounds, shape).reshape(shape + (3,))\n dense = self.is_filled(points)\n scale = extents / np.asanyarray(shape)\n translate = bounds[0]\n return VoxelGrid(\n dense,\n transform=tr.scale_and_translate(scale, translate))\n\n def __add__(self, other):\n raise NotImplementedError(\"TODO : implement voxel concatenation\")\n","repo_name":"mikedh/trimesh","sub_path":"trimesh/voxel/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":13522,"program_lang":"python","lang":"en","doc_type":"code","stars":2558,"dataset":"github-code","pt":"3"} +{"seq_id":"23492531919","text":"# search.py\n# ---------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\n\"\"\"\nIn search.py, you will implement generic search algorithms which are called by\nPacman agents (in searchAgents.py).\n\"\"\"\n\nimport util\n\n\nclass SearchProblem:\n \"\"\"\n This class outlines the structure of a search problem, but doesn't implement\n any of the methods (in object-oriented terminology: an abstract class).\n\n You do not need to change anything in this class, ever.\n \"\"\"\n\n def getStartState(self):\n \"\"\"\n Returns the start state for the search problem.\n \"\"\"\n util.raiseNotDefined()\n\n def isGoalState(self, state):\n \"\"\"\n state: Search state\n\n Returns True if and only if the state is a valid goal state.\n \"\"\"\n util.raiseNotDefined()\n\n def getSuccessors(self, state):\n \"\"\"\n state: Search state\n\n For a given state, this should return a list of triples, (successor,\n action, stepCost), where 'successor' is a successor to the current\n state, 'action' is the action required to get there, and 'stepCost' is\n the incremental cost of expanding to that successor.\n \"\"\"\n util.raiseNotDefined()\n\n def getCostOfActions(self, actions):\n \"\"\"\n actions: A list of actions to take\n\n This method returns the total cost of a particular sequence of actions.\n The sequence must be composed of legal moves.\n \"\"\"\n util.raiseNotDefined()\n\n\ndef tinyMazeSearch(problem):\n \"\"\"\n Returns a sequence of moves that solves tinyMaze. 
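The search functions below all share one graph-search skeleton in which only the frontier discipline changes. Here is a minimal, self-contained sketch of that pattern; the toy `graph` and the names `graph_search`/`frontier_pop` are illustrative, not part of the assignment code:

from collections import deque

def graph_search(start, goal, neighbors, frontier_pop):
    # Generic skeleton: only the frontier discipline (LIFO vs FIFO)
    # distinguishes depth-first from breadth-first search.
    frontier = deque([(start, [])])
    visited = set()
    while frontier:
        node, path = frontier_pop(frontier)
        if node == goal:
            return path
        if node in visited:
            continue
        visited.add(node)
        for nxt, action in neighbors(node):
            if nxt not in visited:
                frontier.append((nxt, path + [action]))
    return None

graph = {'A': [('B', 'right')], 'B': [('C', 'down')], 'C': []}
print(graph_search('A', 'C', lambda n: graph[n],
                   lambda f: f.popleft()))  # ['right', 'down'] (FIFO gives BFS; f.pop() would give DFS)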
For any other maze, the\n    sequence of moves will be incorrect, so only use this for tinyMaze.\n    \"\"\"\n    from game import Directions\n    s = Directions.SOUTH\n    w = Directions.WEST\n    return [s, s, w, s, w, w, s, w]\n\n\ndef depthFirstSearch(problem: SearchProblem):\n    \"\"\"\n    Search the deepest nodes in the search tree first.\n\n    Your search algorithm needs to return a list of actions that reaches the\n    goal. Make sure to implement a graph search algorithm.\n\n    To get started, you might want to try some of these simple commands to\n    understand the search problem that is being passed in:\n\n    print(\"Start:\", problem.getStartState())\n    print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n    print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n    \"\"\"\n    \"*** YOUR CODE HERE ***\"\n    # util.raiseNotDefined()\n\n    stack_state = util.Stack()  # holds the states waiting to be explored\n    # initially just the start node\n    stack_state.push({\"successor\": problem.getStartState(), \"action\": [], \"cost\": 0})\n    visited = {}  # records whether a node has been visited\n\n    # dfs\n    while not stack_state.isEmpty():\n        state = stack_state.pop()  # take the top of the stack as the current state\n        successor = state[\"successor\"]  # current node\n        action = state[\"action\"]  # path taken so far\n        cost = state[\"cost\"]  # cost paid so far\n        visited[successor] = True  # mark as visited\n        if problem.isGoalState(successor):  # goal reached\n            return action\n        # consider the neighbours and push new candidate states, trying cheaper edges first\n        edges = problem.getSuccessors(successor)\n        edges.sort(key=lambda x: x[2], reverse=True)\n        for edge in edges:\n            near = edge[0]  # neighbouring node\n            direct = edge[1]  # direction\n            edge_cost = edge[2]  # edge cost\n            if visited.get(near) is None:  # not visited yet\n                stack_state.push({\"successor\": near, \"action\": action + [direct],\n                                  \"cost\": cost + edge_cost})\n\n    return None  # no path found\n\n\ndef breadthFirstSearch(problem: SearchProblem):\n    \"\"\"Search the shallowest nodes in the search tree first.\"\"\"\n    \"*** YOUR CODE HERE ***\"\n    # util.raiseNotDefined()\n\n    que_state = util.PriorityQueue()  # holds the states waiting to be explored\n    # initially just the start node; the priority orders by depth first, then by total cost\n    que_state.push({\"successor\": problem.getStartState(), \"action\": [], \"depth\": 0}, (0, 0))\n    visited = {problem.getStartState(): True}  # records whether a node has been visited\n\n    # bfs\n    while not que_state.isEmpty():\n        state = que_state.pop()  # take the head of the queue as the current state\n        successor = state[\"successor\"]  # current node\n        action = state[\"action\"]  # path taken so far\n        depth = state[\"depth\"]  # current depth\n        # visited[successor] = True  # mark as visited\n        if problem.isGoalState(successor):  # goal reached\n            return action\n        # consider the neighbours and push new candidate states, trying cheaper edges first\n        for edge in problem.getSuccessors(successor):\n            near = edge[0]  # neighbouring node\n            direct = edge[1]  # direction\n            edge_cost = edge[2]  # edge cost\n            if visited.get(near) is None:  # not visited yet\n                que_state.push({\"successor\": near, \"action\": action + [direct], \"depth\": depth + 1},\n                               (depth + 1, problem.getCostOfActions(action + [direct])))\n                visited[near] = True\n\n    return None  # no path found\n\n\ndef uniformCostSearch(problem: SearchProblem):\n    \"\"\"Search the node of least total cost first.\"\"\"\n    \"*** YOUR CODE HERE ***\"\n    util.raiseNotDefined()\n\n\ndef nullHeuristic(state, problem=None):\n    \"\"\"\n    A heuristic function estimates the cost from the current state to the nearest\n    goal in the provided SearchProblem. 
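Because `nullHeuristic` always returns 0, A* run with it degenerates to uniform-cost search. A standard non-trivial admissible heuristic for 4-connected grid positions is Manhattan distance; a hedged sketch (the name `manhattanHeuristic` and the fixed goal are illustrative; the course's searchAgents.py may define its own):

def manhattanHeuristic(state, goal=(1, 1)):
    # Admissible on a 4-connected grid: never overestimates the true cost.
    x, y = state
    gx, gy = goal
    return abs(x - gx) + abs(y - gy)

print(manhattanHeuristic((5, 4)))  # 7 steps at minimum to reach (1, 1)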
This heuristic is trivial.\n    \"\"\"\n    return 0\n\n\ndef aStarSearch(problem: SearchProblem, heuristic=nullHeuristic):\n    \"\"\"Search the node that has the lowest combined cost and heuristic first.\"\"\"\n    \"*** YOUR CODE HERE ***\"\n    # util.raiseNotDefined()\n\n    # define a State class: successor is the node name, action is the path taken to reach it\n    class State:\n        def __init__(self, successor, action):\n            self.successor = successor\n            self.action = action\n\n        def __eq__(self, other):\n            return self.successor == other.successor\n\n    que_state = util.PriorityQueue()  # holds the states waiting to be explored\n    # initially just the start node; the priority is f = g + h, where g is the cost paid\n    # so far and h estimates the cost from the current node to the goal\n    que_state.push(State(problem.getStartState(), []),\n                   heuristic(problem.getStartState(), problem))\n    visited = {}  # records whether a node has been visited\n\n    # A*\n    while not que_state.isEmpty():\n        state = que_state.pop()  # take the head of the queue as the current state\n        successor = state.successor  # current node\n        action = state.action  # path taken so far\n        visited[successor] = True  # mark as visited\n        if problem.isGoalState(successor):  # goal reached\n            return action\n        # consider the neighbours and push new candidate states, trying cheaper edges first\n        for edge in problem.getSuccessors(successor):\n            near = edge[0]  # neighbouring node\n            direct = edge[1]  # direction\n            edge_cost = edge[2]  # edge cost\n            if visited.get(near) is None:  # not visited yet\n                que_state.update(State(near, action+[direct]),\n                                 problem.getCostOfActions(action + [direct])+heuristic(near, problem))\n\n    return None  # no path found\n\n\n# Abbreviations\nbfs = breadthFirstSearch\ndfs = depthFirstSearch\nastar = aStarSearch\nucs = uniformCostSearch\n","repo_name":"YnmtDJ/cs188","sub_path":"Homework1_2/search/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":8132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"762424576","text":"import feedparser\nimport xml.etree.ElementTree as ET\n\n# RSS feed URL to query\nrss_feed_url = 'https://www.bankless.com/rss/feed'\n\n# Parse the RSS feed\nfeed = feedparser.parse(rss_feed_url)\n\n# Create XML structure\nrss = ET.Element('rss')\nchannel = ET.SubElement(rss, 'channel')\n\nfor entry in feed.entries:\n    item = ET.SubElement(channel, 'item')\n    \n    title = ET.SubElement(item, 'title')\n    title.text = entry.title\n    \n    link = ET.SubElement(item, 'link')\n    link.text = entry.link\n    \n    author = ET.SubElement(item, 'author')\n    author.text = entry.author if hasattr(entry, 'author') else ''\n    \n    date = ET.SubElement(item, 'pubDate')\n    date.text = entry.published if hasattr(entry, 'published') else ''\n    \n    content = ET.SubElement(item, 'content')\n    content.text = entry.summary if hasattr(entry, 'summary') else ''\n\n# Create an ElementTree object and write to XML file\ntree = ET.ElementTree(rss)\ntree.write('rss_data.xml', encoding='utf-8', xml_declaration=True)\n\nprint(\"XML data saved to 'rss_data.xml'\")\n","repo_name":"MadBananaUnionDAO/rss-feeder","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1420589647","text":"import json\nimport requests\n\n\nclass Client:\n    def __init__(self,\n                 timeout: float = 5.0,\n                 parse_json: bool = True,\n                 endpoint: str = 'https://scoreunlocked.pythonanywhere.com',\n                 raise_errors: bool = False\n                 ):\n        self._developer = None\n        self._leaderboard = None\n        self._timeout = timeout\n        self._parse_json = parse_json\n        self.raise_errors = raise_errors\n        self._base_endpoint = endpoint\n        self._get_endpoint = f'{self._base_endpoint}/leaderboards/get'\n        self._post_endpoint = f'{self._base_endpoint}/leaderboards/post'\n\n    def connect(self, developer: str, 
leaderboard: str):\n self._developer = developer\n self._leaderboard = leaderboard\n\n def parse_response(self, response: str):\n if self._parse_json:\n try:\n return json.loads(response)\n except json.JSONDecodeError:\n return response\n else:\n return response\n\n def get_server_status(self):\n try:\n if requests.get(self._base_endpoint, timeout=self._timeout).status_code == 200:\n return True\n else:\n return False\n except requests.ReadTimeout:\n return False\n except Exception as e:\n if self.raise_errors:\n raise e\n print(f'An Error Occurred: {e}')\n return False\n\n def _get_leaderboard(self):\n params = {\n 'developer': self._developer,\n 'leaderboard': self._leaderboard\n }\n response = requests.get(self._get_endpoint,\n params=params,\n timeout=self._timeout\n )\n parsed_response = self.parse_response(response.text)\n try:\n if parsed_response.get('error'):\n return parsed_response\n # if parsed_response.get('message'):\n # print(parsed_response.get('message'), ':', parsed_response.get('error').get('message'))\n # return None\n else:\n if parsed_response.get('leaderboard') is not None:\n return parsed_response.get('leaderboard')\n except AttributeError:\n return None\n\n def get_leaderboard(self):\n if self.raise_errors:\n return self._get_leaderboard()\n else:\n try:\n return self._get_leaderboard()\n except requests.ReadTimeout:\n return None\n except Exception as e:\n print(f'An Error Occurred: {e}')\n return None\n\n def _post_score(self, name, score, validation_data=''):\n data = {\n 'developer': self._developer,\n 'leaderboard': self._leaderboard,\n 'name': name,\n 'score': score,\n 'validation_data': validation_data\n }\n try:\n response = requests.post(self._post_endpoint,\n data=data,\n timeout=self._timeout\n )\n return self.parse_response(response.text)\n except requests.ReadTimeout:\n return None\n except requests.ConnectionError:\n print('connection error')\n except Exception as e:\n print(f'An Error Occurred: {e}')\n return None\n\n def post_score(self, name, score, validation_data=''):\n if self.raise_errors:\n return self._post_score(name, score, validation_data)\n else:\n try:\n return self._post_score(name, score, validation_data)\n except requests.ReadTimeout:\n return None\n except Exception as e:\n print(f'An Error Occurred: {e}')\n return None\n","repo_name":"tank-king/scoreunlocked","sub_path":"src/scoreunlocked_client.py","file_name":"scoreunlocked_client.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"5932854751","text":"import requests\nimport json, csv\nfrom datetime import datetime\n\ndef getFinland():\n url = 'https://w3qa5ydb4l.execute-api.eu-west-1.amazonaws.com/prod/finnishCoronaHospitalData'\n response = requests.get(url)\n data = json.loads(response.content)\n finlandData = []\n finlandData.append([\"data\", \"hospital\", \"icu\", \"ventilator\"])\n for entry in data['hospitalised']:\n if entry[\"area\"] == \"Finland\":\n date = datetime.strptime(entry[\"date\"], '%Y-%m-%dT%H:%M:%S.%f%z').date()\n datum = date.strftime('%Y-%m-%d')\n finlandData.append([datum, entry['totalHospitalised'], entry['inIcu'], None])\n with open(\".\\Data\\Finland.csv\", 'w', newline='') as myfile:\n wr = csv.writer(myfile, quoting=csv.QUOTE_NONE)\n for hospData in finlandData:\n 
wr.writerow(hospData)","repo_name":"chilija92/covid-19-hospital-situation","sub_path":"countryParsers/finland.py","file_name":"finland.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10585503036","text":"# http://flask.pocoo.org/docs/1.0/appcontext/\n# Application context는 요청을 처리하는 동안 어플리케이션 레벨의 데이터를 관리하기 위해 사용한다\n# context는 같은 전역 변수지만 실제 내용은 각 요청(문맥)마다 다른 것을 가르키게 만들어둔 것이다\n# 그렇게 만든 이유 : http://flask.pocoo.org/docs/1.0/design/#thread-locals\n# Application context에 해당하는 대표적인 객체로, current_app과 g가 있다\n\n# Flask 객체는 config와 같은 속성들을 가지고 있다. 그러나, 단순히 해당 객체를 import하는 것은 circular import(서로 다른 모듈에서 서로 import) 문제를 일으킬 수 있다\n# Flask는 이러한 문제를 application context라는 개념을 이용해 극복했고, Flask 객체를 직접 참조하지 않고 current_app을 사용하도록 지원한다\nfrom flask import Flask, current_app\n\napp = Flask(__name__)\napp.config['SOME'] = True\n\n\n@app.route('/')\ndef index():\n # 요청을 처리할 때, Flask는 자동으로 application context를 push한다. view function, error handler 등 요청 중에 실행되는 것들은 current_app에 액세스할 수 있다\n return 'Config SOME is {}'.format(current_app.config['SOME'])\n\n# current_app은 view function처럼 요청 중에 실행되는 기능에서 사용하는 게 아니라면 RuntimeError가 raise되는데, 이를 해결하기 위해 수동으로 context를 push할 수도 있다\nwith app.app_context():\n # [app.Flask]\n # def app_context(self)\n # 해당 메소드는 app.ctx.AppContext 객체를 반환하며, 해당 클래스에는 __enter__와 __exit__이 정의되어 있어 비교적 깔끔한 with 문을 사용할 수 있다\n print(current_app.config['SOME'])\n\n\nfrom flask import g\n# Application context는 요청을 처리하는 동안에만 사용할 데이터를 저장하기 좋은 장소다\n# 이를 위해 Flask는 g라는 객체를 제공(global의 약자)하며, 이는 어떤 속성이든 자유롭게 정의할 수 있는 간단한 namespace이다\n# 이는 Request context에서 유용하니, 해당 순서에서 알아보도록 하자\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"JoMingyu/--Awesome-Python--","sub_path":"000. Flask/13. Application Context & current_app & g/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"ko","doc_type":"code","stars":190,"dataset":"github-code","pt":"3"} +{"seq_id":"13042075683","text":"import re\nfrom math import ceil\n\nimport numpy\n\nfrom LDAStools import frameCPP # noqa: F401\n\nfrom ....io import gwf as io_gwf\nfrom ....io import _framecpp as io_framecpp\nfrom ....io.utils import file_list\nfrom ....segments import Segment\nfrom ....time import (LIGOTimeGPS, to_gps)\nfrom ... import TimeSeries\nfrom ...core import _dynamic_scaled\n\nfrom . 
import channel_dict_kwarg\n\n__author__ = 'Duncan Macleod '\n\nFRAME_LIBRARY = 'LDAStools.frameCPP'\n\n# error regexs\nFRERR_NO_FRAME_AT_NUM = re.compile(\n r'\\ARequest for frame (?P\\d+) exceeds the range of '\n r'0 through (?P\\d+)\\Z',\n)\nFRERR_NO_CHANNEL_OF_TYPE = re.compile(\n r'\\ANo Fr(Adc|Proc|Sim)Data structures with the name ',\n)\n\n\nclass _Skip(ValueError):\n \"\"\"Error denoting that the contents of a given structure aren't required\n \"\"\"\n pass\n\n\n# -- read ---------------------------------------------------------------------\n\ndef read(source, channels, start=None, end=None, scaled=None, type=None,\n series_class=TimeSeries):\n # pylint: disable=redefined-builtin\n \"\"\"Read a dict of series from one or more GWF files\n\n Parameters\n ----------\n source : `str`, `list`\n Source of data, any of the following:\n\n - `str` path of single data file,\n - `str` path of cache file,\n - `list` of paths.\n\n channels : `~gwpy.detector.ChannelList`, `list`\n a list of channels to read from the source.\n\n start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional\n GPS start time of required data, anything parseable by\n :func:`~gwpy.time.to_gps` is fine.\n\n end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional\n GPS end time of required data, anything parseable by\n :func:`~gwpy.time.to_gps` is fine.\n\n scaled : `bool`, optional\n apply slope and bias calibration to ADC data.\n\n type : `dict`, optional\n a `dict` of ``(name, channel-type)`` pairs, where ``channel-type``\n can be one of ``'adc'``, ``'proc'``, or ``'sim'``.\n\n series_class : `type`, optional\n the `Series` sub-type to return.\n\n Returns\n -------\n data : `~gwpy.timeseries.TimeSeriesDict` or similar\n a dict of ``(channel, series)`` pairs read from the GWF source(s).\n \"\"\"\n # parse input source\n source = file_list(source)\n\n # parse type\n ctype = channel_dict_kwarg(type, channels, (str,))\n\n # read each individually and append\n out = series_class.DictClass()\n for i, file_ in enumerate(source):\n if i == 1: # force data into fresh memory so that append works\n for name in out:\n out[name] = numpy.require(out[name], requirements=['O'])\n # read frame\n out.append(read_gwf(file_, channels, start=start, end=end, ctype=ctype,\n scaled=scaled, series_class=series_class),\n copy=False)\n return out\n\n\ndef read_gwf(filename, channels, start=None, end=None, scaled=None,\n ctype=None, series_class=TimeSeries):\n \"\"\"Read a dict of series data from a single GWF file\n\n Parameters\n ----------\n filename : `str`\n the GWF path from which to read\n\n channels : `~gwpy.detector.ChannelList`, `list`\n a list of channels to read from the source.\n\n start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional\n GPS start time of required data, anything parseable by\n :func:`~gwpy.time.to_gps` is fine.\n\n end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional\n GPS end time of required data, anything parseable by\n :func:`~gwpy.time.to_gps` is fine.\n\n scaled : `bool`, optional\n apply slope and bias calibration to ADC data.\n\n type : `dict`, optional\n a `dict` of ``(name, channel-type)`` pairs, where ``channel-type``\n can be one of ``'adc'``, ``'proc'``, or ``'sim'``.\n\n series_class : `type`, optional\n the `Series` sub-type to return.\n\n Returns\n -------\n data : `~gwpy.timeseries.TimeSeriesDict` or similar\n a dict of ``(channel, series)`` pairs read from the GWF file.\n \"\"\"\n # parse kwargs\n if not start:\n start = 0\n if not end:\n end = 0\n span = Segment(start, end)\n\n # open file\n 
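A hedged usage sketch of the module-level `read()` defined above; the GWF file name and channel below are placeholders (not real data products) and this assumes such a file actually exists on disk:

# Hypothetical file and channel names, for illustration only.
data = read(
    'H-H1_TEST-1000000000-64.gwf',
    ['H1:TEST-CHANNEL'],
    start=1000000000,
    end=1000000032,
)
series = data['H1:TEST-CHANNEL']
print(series.t0, series.sample_rate)  # TimeSeries attributes populated from the FrVect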
stream = io_gwf.open_gwf(filename, 'r')\n nframes = stream.GetNumberOfFrames()\n\n # find channels\n out = series_class.DictClass()\n\n # loop over frames in GWF\n i = 0\n while True:\n this = i\n i += 1\n\n # read frame\n try:\n frame = stream.ReadFrameNSubset(this, 0)\n except IndexError:\n if this >= nframes:\n break\n raise\n\n # check whether we need this frame at all\n if not _need_frame(frame, start, end):\n continue\n\n # get epoch for this frame\n epoch = LIGOTimeGPS(*frame.GetGTime())\n\n # and read all the channels\n for channel in channels:\n _scaled = _dynamic_scaled(scaled, channel)\n try:\n new = _read_channel(stream, this, str(channel),\n ctype.get(channel, None),\n epoch, start, end, scaled=_scaled,\n series_class=series_class)\n except _Skip: # don't need this frame for this channel\n continue\n try:\n out[channel].append(new)\n except KeyError:\n out[channel] = numpy.require(new, requirements=['O'])\n\n # if we have all of the data we want, stop now\n if all(span in out[channel].span for channel in out):\n break\n\n # if any channels weren't read, something went wrong\n for channel in channels:\n if channel not in out:\n msg = \"Failed to read {0!r} from {1!r}\".format(\n str(channel), filename)\n if start or end:\n msg += ' for {0}'.format(span)\n raise ValueError(msg)\n\n return out\n\n\ndef _read_channel(stream, num, name, ctype, epoch, start, end,\n scaled=True, series_class=TimeSeries):\n \"\"\"Read a channel from a specific frame in a stream\n \"\"\"\n data = _get_frdata(stream, num, name, ctype=ctype)\n return read_frdata(data, epoch, start, end,\n scaled=scaled, series_class=series_class)\n\n\ndef _get_frdata(stream, num, name, ctype=None):\n \"\"\"Brute force-ish method to return the FrData structure for a channel\n\n This saves on pulling the channel type from the TOC\n \"\"\"\n ctypes = (ctype,) if ctype else ('adc', 'proc', 'sim')\n for ctype in ctypes:\n _reader = getattr(stream, 'ReadFr{0}Data'.format(ctype.title()))\n try:\n return _reader(num, name)\n except IndexError as exc:\n if FRERR_NO_CHANNEL_OF_TYPE.match(str(exc)):\n continue\n raise\n raise ValueError(\"no Fr{{Adc,Proc,Sim}}Data structures with the \"\n \"name {0}\".format(name))\n\n\ndef _need_frame(frame, start, end):\n frstart = LIGOTimeGPS(*frame.GetGTime())\n if end and frstart >= end:\n return False\n\n frend = frstart + frame.GetDt()\n if start and frend <= start:\n return False\n\n return True\n\n\ndef read_frdata(frdata, epoch, start, end, scaled=True,\n series_class=TimeSeries):\n \"\"\"Read a series from an `FrData` structure\n\n Parameters\n ----------\n frdata : `LDAStools.frameCPP.FrAdcData` or similar\n the data structure to read\n\n epoch : `float`\n the GPS start time of the containing frame\n (`LDAStools.frameCPP.FrameH.GTime`)\n\n start : `float`\n the GPS start time of the user request\n\n end : `float`\n the GPS end time of the user request\n\n scaled : `bool`, optional\n apply slope and bias calibration to ADC data.\n\n series_class : `type`, optional\n the `Series` sub-type to return.\n\n Returns\n -------\n series : `~gwpy.timeseries.TimeSeriesBase`\n the formatted data series\n\n Raises\n ------\n _Skip\n if this data structure doesn't overlap with the requested\n ``[start, end)`` interval.\n \"\"\"\n datastart = epoch + frdata.GetTimeOffset()\n\n # check overlap with user-requested span\n if end and datastart >= end:\n raise _Skip()\n\n # get scaling\n try:\n slope = frdata.GetSlope()\n bias = frdata.GetBias()\n except AttributeError: # not FrAdcData\n slope = None\n 
bias = None\n else:\n # workaround https://git.ligo.org/ldastools/LDAS_Tools/-/issues/114\n # by forcing the default slope to 1.\n if bias == slope == 0.:\n slope = 1.\n null_scaling = slope == 1. and bias == 0.\n\n out = None\n for j in range(frdata.data.size()):\n # we use range(frdata.data.size()) to avoid segfault\n # related to iterating directly over frdata.data\n try:\n new = read_frvect(frdata.data[j], datastart, start, end,\n name=frdata.GetName(),\n series_class=series_class)\n except _Skip:\n continue\n\n # apply scaling for ADC channels\n if scaled and slope is not None:\n rtype = numpy.result_type(new, slope, bias)\n typechange = not numpy.can_cast(\n rtype,\n new.dtype,\n casting='same_kind',\n )\n # only apply scaling if interesting _or_ if it would lead to a\n # type change, otherwise we are unnecessarily duplicating memory\n if typechange:\n new = new * slope + bias\n elif not null_scaling:\n new *= slope\n new += bias\n elif slope is not None:\n # user has deliberately disabled the ADC calibration, so\n # the stored engineering unit is not valid, revert to 'counts':\n new.override_unit('count')\n\n if out is None:\n out = new\n else:\n out.append(new)\n return out\n\n\ndef read_frvect(vect, epoch, start, end, name=None, series_class=TimeSeries):\n \"\"\"Read an array from an `FrVect` structure\n\n Parameters\n ----------\n vect : `LDASTools.frameCPP.FrVect`\n the frame vector structur to read\n\n start : `float`\n the GPS start time of the request\n\n end : `float`\n the GPS end time of the request\n\n epoch : `float`\n the GPS start time of the containing `FrData` structure\n\n name : `str`, optional\n the name of the output `series_class`; this is also used\n to ignore ``FrVect`` structures containing other information\n\n series_class : `type`, optional\n the `Series` sub-type to return.\n\n Returns\n -------\n series : `~gwpy.timeseries.TimeSeriesBase`\n the formatted data series\n\n Raises\n ------\n _Skip\n if this vect doesn't overlap with the requested\n ``[start, end)`` interval, or the name doesn't match.\n \"\"\"\n # only read FrVect with matching name (or no name set)\n # frame spec allows for arbitrary other FrVects\n # to hold other information\n if vect.GetName() and name and vect.GetName() != name:\n raise _Skip()\n\n # get array\n arr = vect.GetDataArray()\n nsamp = arr.size\n\n # and dimensions\n dim = vect.GetDim(0)\n dx = dim.dx\n x0 = dim.startX\n\n # start and end GPS times of this FrVect\n dimstart = epoch + x0\n dimend = dimstart + nsamp * dx\n\n # index of first required sample\n nxstart = int(max(0., float(start-dimstart)) / dx)\n\n # requested start time is after this frame, skip\n if nxstart >= nsamp:\n raise _Skip()\n\n # index of end sample\n if end:\n nxend = int(nsamp - ceil(max(0., float(dimend-end)) / dx))\n else:\n nxend = None\n\n if nxstart or nxend:\n arr = arr[nxstart:nxend]\n\n # -- cast as a series\n\n # get unit\n unit = vect.GetUnitY() or None\n\n # create array\n series = series_class(arr, t0=dimstart+nxstart*dx, dt=dx, name=name,\n channel=name, unit=unit, copy=False)\n\n # add information to channel\n series.channel.sample_rate = series.sample_rate.value\n series.channel.unit = unit\n series.channel.dtype = series.dtype\n\n return series\n\n\n# -- write --------------------------------------------------------------------\n\ndef write(tsdict, outfile,\n start=None, end=None,\n type=None,\n name='gwpy', run=0,\n compression='GZIP', compression_level=None):\n \"\"\"Write data to a GWF file using the frameCPP API\n\n Parameters\n 
----------\n tsdict : `TimeSeriesDict`\n dict of data to write\n\n outfile : `str`\n the file name of the target output file\n\n start : `float`, optional\n the GPS start time of the file\n\n end : `float`, optional\n the GPS end time of the file\n\n type : `str`, optional\n the type of the channel, one of 'adc', 'proc', 'sim', default\n is 'proc' unless stored in the channel structure\n\n name : `str`, optional\n the name of each frame\n\n run : `int`, optional\n the FrameH run number\n\n compression : `int`, `str`, optional\n name of compresion algorithm to use, or its endian-appropriate\n ID, choose from\n\n - ``'RAW'``\n - ``'GZIP'``\n - ``'DIFF_GZIP'``\n - ``'ZERO_SUPPRESS_WORD_2'``\n - ``'ZERO_SUPPRESS_WORD_4'``\n - ``'ZERO_SUPPRESS_WORD_8'``\n - ``'ZERO_SUPPRESS_OTHERWISE_GZIP'``\n\n compression_level : `int`, optional\n compression level for given method, default is ``6`` for GZIP-based\n methods, otherwise ``0``\n \"\"\"\n # set frame header metadata\n if not start or not end:\n starts, ends = zip(*(ts.span for ts in tsdict.values()))\n start = to_gps(start or min(starts))\n end = to_gps(end or max(ends))\n duration = end - start\n ifos = {\n ts.channel.ifo for ts in tsdict.values() if (\n ts.channel\n and ts.channel.ifo\n and ts.channel.ifo in io_framecpp.DetectorLocation.__members__\n )\n }\n\n # create frame\n frame = io_gwf.create_frame(\n time=start,\n duration=duration,\n name=name,\n run=run,\n ifos=ifos,\n )\n\n # append channels\n for i, key in enumerate(tsdict):\n ctype = (\n type\n or getattr(tsdict[key].channel, \"_ctype\", \"proc\").lower()\n or \"proc\"\n )\n if ctype == 'adc':\n kw = {\"channelid\": i}\n else:\n kw = {}\n _append_to_frame(frame, tsdict[key].crop(start, end), type=ctype, **kw)\n\n # write frame to file\n io_gwf.write_frames(\n outfile,\n [frame],\n compression=compression,\n compression_level=compression_level,\n )\n\n\ndef _append_to_frame(frame, timeseries, type='proc', **kwargs):\n # pylint: disable=redefined-builtin\n \"\"\"Append data from a `TimeSeries` to a `~frameCPP.FrameH`\n\n Parameters\n ----------\n frame : `~frameCPP.FrameH`\n frame object to append to\n\n timeseries : `TimeSeries`\n the timeseries to append\n\n type : `str`\n the type of the channel, one of 'adc', 'proc', 'sim'\n\n **kwargs\n other keyword arguments are passed to the relevant\n `create_xxx` function\n\n See also\n --------\n gwpy.io.gwf.create_fradcdata\n gwpy.io.gwf.create_frprocdata\n gwpy.io.gwf_create_frsimdata\n for details of the data structure creation, and associated available\n arguments\n \"\"\"\n epoch = LIGOTimeGPS(*frame.GetGTime())\n\n # create the data container\n if type.lower() == 'adc':\n create = io_gwf.create_fradcdata\n append = frame.AppendFrAdcData\n elif type.lower() == 'proc':\n create = io_gwf.create_frprocdata\n append = frame.AppendFrProcData\n elif type.lower() == 'sim':\n create = io_gwf.create_frsimdata\n append = frame.AppendFrSimData\n else:\n raise RuntimeError(\"Invalid channel type {!r}, please select one of \"\n \"'adc, 'proc', or 'sim'\".format(type))\n frdata = create(timeseries, frame_epoch=epoch, **kwargs)\n\n # append an FrVect\n frdata.AppendData(io_gwf.create_frvect(timeseries))\n append(frdata)\n return frdata\n","repo_name":"gwpy/gwpy","sub_path":"gwpy/timeseries/io/gwf/framecpp.py","file_name":"framecpp.py","file_ext":"py","file_size_in_byte":16329,"program_lang":"python","lang":"en","doc_type":"code","stars":358,"dataset":"github-code","pt":"3"} +{"seq_id":"17356510101","text":"from flask import 
Flask,render_template,Response,redirect,url_for,request, flash,stream_with_context\nimport cv2\nimport os\nimport time\nimport re\nimport ast\nfrom flask.json import jsonify\nfrom flask_restful import Resource, Api\nimport pandas as pd\nimport threading \nimport multiprocessing\nfrom pose_live import output_keypoints,output_keypoints_with_lines\nfrom pose_photo import output_keypoints_photo,output_keypoints_with_lines_photo\nfrom werkzeug.utils import secure_filename\nimport argparse\nimport json\napp=Flask(__name__)\napp.secret_key=\"disuo\"\napi=Api(app)\nUPLOAD_FOLDER=os.getcwd()+'\\static\\images'\nALLOWED_EXTENSIONS=set(['png','jpg','gif'])\napp.config['UPLOAD_FOLDER']=UPLOAD_FOLDER\n\nheadings=(\"과\",\"병원명\",\"도\",\"시\",\"동\")\n\ndata=None\naddress={}\n# Initialize output variables and define a multithreaded lock to prevent simultaneous access from multiple browsers or pages\n# When the video stream is output\noutputFrame = None\nresults=None\nlock = threading.Lock()\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.',1)[1] in ALLOWED_EXTENSIONS\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/search_hospital',methods=['POST','GET'])\ndef hospital():\n global data,headings\n global address\n if request.method=='POST':\n address=request.data.decode(\"UTF-8\")\n address=ast.literal_eval(address)\n\n print(address)\n han=pd.read_csv('./static/han.csv',encoding='cp949')\n jung=pd.read_csv('./static/jung.csv',encoding='cp949')\n tong=pd.read_csv('./static/tong.csv',encoding='cp949')\n\n\n han_result=han[ (han['시'].str.contains(address[1], case=False)) & (han['동']==address[2])]\n # print(han_result)\n han_result=list(han_result.itertuples(index=False, name=None))\n\n\n tong_result=tong[(tong['시'].str.contains(address[1], case=False))&(tong['동']==address[2])]\n # print(tong_result)\n tong_result=list(tong_result.itertuples(index=False, name=None))\n\n\n jung_result=jung[ (jung['시'].str.contains(address[1], case=False))&(jung['동']==address[2])]\n # print(tong_result)\n jung_result=list(jung_result.itertuples(index=False, name=None))\n\n data=tuple(han_result+tong_result+jung_result) \n\n html_text=''\n for d in data:\n tmp=\"\\n\"\n cnt=0\n for i in d:\n if cnt==1: \n temp=\"\"+i+\"\\n\"\n else: \n temp=\" \"+i+\"\\n\"\n tmp=tmp+temp\n cnt+=1\n html_text=html_text+tmp+\"\\n\"\n\n html_file = open('./static/html_file.html', 'w',encoding=\"UTF-8\")\n html_file.write(html_text)\n html_file.close()\n\n\n print(data)\n return render_template('hospital.html',headings=headings,data=data)\n\n\n@app.route('/static_ver')\ndef static_ver(): \n file_path='static/images/img-02.jpg'\n return render_template('static.html',file_path=file_path,result=None)\n\n\n@app.route('/static_ver/')\ndef static_ver2(file_path): \n file_path='/static/images/'+file_path\n result=request.args.get('results_photo')\n print(type(result))\n return render_template('static.html',file_path=file_path,result=result)\n\n\n@app.route('/uploader',methods=['POST','GET'])\ndef uploader_file():\n if request.method=='POST':\n file=request.files['file']\n if file and allowed_file(file.filename):\n filename=secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'],filename))\n results_photo=gen_photo(filename,os.path.join(app.config['UPLOAD_FOLDER'],filename))\n print(type(results_photo))\n return redirect(url_for('static_ver2',file_path=filename[0:-3]+\"_pose.jpg\",results_photo=results_photo, code=307))\n\n\n@app.route('/reload')\ndef 
reload():\n return render_template('html_file.html')\n\ndef detect_motion():\n\n global outputFrame,lock,results\n\n BODY_PARTS_BODY_25 = {0: \"Nose\", 1: \"Neck\", 2: \"RShoulder\", 3: \"RElbow\", 4: \"RWrist\",\n 5: \"LShoulder\", 6: \"LElbow\", 7: \"LWrist\", 8: \"MidHip\", 9: \"RHip\",\n 10: \"RKnee\", 11: \"RAnkle\", 12: \"LHip\", 13: \"LKnee\", 14: \"LAnkle\",\n 15: \"REye\", 16: \"LEye\", 17: \"REar\", 18: \"LEar\", 19: \"LBigToe\",\n 20: \"LSmallToe\", 21: \"LHeel\", 22: \"RBigToe\", 23: \"RSmallToe\", 24: \"RHeel\", 25: \"Background\"}\n\n POSE_PAIRS_BODY_25 = [[0, 1], [0, 15], [0, 16], [1, 2], [1, 5], [1, 8], [8, 9], [8, 12], [9, 10], [12, 13], [2, 3],\n [3, 4], [5, 6], [6, 7], [10, 11], [13, 14], [15, 17], [16, 18], [14, 21], [19, 21], [20, 21],\n [11, 24], [22, 24], [23, 24]]\n\n # prototxt file that specifies the structure of the neural network (how the various layers are arranged, etc.)\n protoFile_body_25 = \"C:\\\\openpose-master\\\\models\\\\pose\\\\body_25\\\\pose_deploy.prototxt\"\n\n # caffemodel file that stores the weights of the trained model\n weightsFile_body_25 = \"C:\\\\openpose-master\\\\models\\\\pose\\\\body_25\\\\pose_iter_584000.caffemodel\"\n\n # empty list to store the keypoints\n points = []\n\n net = cv2.dnn.readNetFromCaffe(protoFile_body_25, weightsFile_body_25)\n \n # Use the GPU\n # net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\n # net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\n\n # Read in the video\n capture = cv2.VideoCapture(1)\n capture.set(3,800)\n capture.set(4,800)\n \n if not capture.isOpened():\n capture=cv2.VideoCapture(0, cv2.CAP_DSHOW)\n if not capture.isOpened():\n raise IOError(\"Cannot open webcam\") \n\n prev_time = 0\n FPS = 10\n \n while cv2.waitKey(1) <0:\n\n current_time = time.time() - prev_time\n ret, frame_boy = capture.read()\n if not ret:\n cv2.waitKey()\n break\n \n elif ret and current_time>1./FPS: \n \n prev_time = time.time()\n \n frame_boy = output_keypoints(frame=frame_boy, net=net,proto_file=protoFile_body_25, weights_file=weightsFile_body_25, threshold=0.1, BODY_PARTS=BODY_PARTS_BODY_25)\n frame_boy,results_live = output_keypoints_with_lines(frame=frame_boy, POSE_PAIRS=POSE_PAIRS_BODY_25)\n # ret,buffer=cv2.imencode('.jpg',frame_boy)\n # frame=buffer.tobytes()\n\n with lock:\n # outputFrame=frame.copy()\n outputFrame=frame_boy.copy()\n results=results_live\n\ndef gen():\n\n global outputFrame, lock\n # Traverse the frames of the output video stream\n while True:\n # Wait until the thread lock is acquired\n with lock:\n # Check whether there is content in the output. If there is no content, skip this process\n if outputFrame is None:\n continue\n\n # Compress the output to jpeg format\n (flag, encodedImage) = cv2.imencode(\".jpg\", outputFrame)\n frame=encodedImage.tobytes()\n # Make sure the output is compressed correctly\n if not flag:\n continue\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\ndef gen2():\n global results,lock\n # Traverse the frames of the output video stream\n while True:\n # Wait until the thread lock is acquired\n with lock:\n # Check whether there is content in the output. 
If there is no content, skip this process\n if results is None:\n continue\n \n yield \"바보\"\n\n\ndef gen_photo(filename,file_path):\n\n BODY_PARTS_BODY_25 = {0: \"Nose\", 1: \"Neck\", 2: \"RShoulder\", 3: \"RElbow\", 4: \"RWrist\",\n 5: \"LShoulder\", 6: \"LElbow\", 7: \"LWrist\", 8: \"MidHip\", 9: \"RHip\",\n 10: \"RKnee\", 11: \"RAnkle\", 12: \"LHip\", 13: \"LKnee\", 14: \"LAnkle\",\n 15: \"REye\", 16: \"LEye\", 17: \"REar\", 18: \"LEar\", 19: \"LBigToe\",\n 20: \"LSmallToe\", 21: \"LHeel\", 22: \"RBigToe\", 23: \"RSmallToe\", 24: \"RHeel\", 25: \"Background\"}\n\n POSE_PAIRS_BODY_25 = [[0, 1], [0, 15], [0, 16], [1, 2], [1, 5], [1, 8], [8, 9], [8, 12], [9, 10], [12, 13], [2, 3],\n [3, 4], [5, 6], [6, 7], [10, 11], [13, 14], [15, 17], [16, 18], [14, 21], [19, 21], [20, 21],\n [11, 24], [22, 24], [23, 24]]\n\n # prototxt file that specifies the structure of the neural network (how the various layers are arranged, etc.)\n protoFile_body_25 = \"C:\\\\openpose-master\\\\models\\\\pose\\\\body_25\\\\pose_deploy.prototxt\"\n\n # caffemodel file that stores the weights of the trained model\n weightsFile_body_25 = \"C:\\\\openpose-master\\\\models\\\\pose\\\\body_25\\\\pose_iter_584000.caffemodel\"\n\n # empty list to store the keypoints\n points = []\n\n net = cv2.dnn.readNetFromCaffe(protoFile_body_25, weightsFile_body_25)\n \n # Use the GPU\n # net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\n # net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\n frame_boy = cv2.imread(file_path)\n frame_boy = output_keypoints_photo(frame=frame_boy, net=net,proto_file=protoFile_body_25, weights_file=weightsFile_body_25, threshold=0.1, BODY_PARTS=BODY_PARTS_BODY_25)\n frame_boy,results_photo = output_keypoints_with_lines_photo(frame=frame_boy, POSE_PAIRS=POSE_PAIRS_BODY_25)\n\n cv2.imwrite('./static/images/'+filename[0:-3]+'_pose.jpg',frame_boy)\n\n return results_photo\n\n\n@app.route('/live_ver')\ndef live_ver():\n return redirect(url_for('timer', num=25*60))\n\n@app.route('/get_result', methods = ['GET','POST'])\ndef get_result():\n # return jsonify(results)\n return Response(stream_with_context(gen2())) #, content_type='text/event_stream'\n # return gen2()\n\n@app.route('/video_feed')\ndef video_feed():\n return Response(gen(),mimetype='multipart/x-mixed-replace; boundary=frame')\n\n@app.route('/live_ver')\ndef live():\n return redirect(url_for('timer', num=25*60))\n\n@app.route('/live_ver/<int:num>s')\n@app.route('/live_ver/<int:num>')\ndef timer(num):\n # Start target detection thread\n t = threading.Thread(target=detect_motion)\n t.daemon = True\n t.start()\n return render_template('live.html', num=num)\n\n@app.route('/live_ver/<int:num>m')\ndef minutes(num):\n return redirect(url_for('timer', num=num*60))\n\n@app.route('/live_ver/<int:num>h')\ndef hours(num):\n return redirect(url_for('timer', num=num*3600))\n\n@app.route('/live_ver/custom', methods=['GET', 'POST'])\ndef custom():\n time = request.form.get('time', '180')\n # use re to validate input data\n m = re.match('\\d+[smh]?$', time)\n if m is None:\n flash(u'시간을 다음과 같은 형식으로 입력해주세요 34、20s、15m、2h')\n return redirect(url_for('index'))\n if time[-1] not in 'smh':\n return redirect(url_for('timer', num=int(time)))\n else:\n type = {'s': 'timer', 'm': 'minutes', 'h': 'hours'}\n return redirect(url_for(type[time[-1]], num=int(time[:-1])))\n\nif __name__ == \"__main__\":\n\n\n\n app.run(host='0.0.0.0',port=5000) #http://121.168.117.223:5000/\n\n","repo_name":"AAISSJ/DiSuo-COCO","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":11240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"47888582998","text":"from django import forms\nfrom dal import autocomplete\n\nfrom finhack_bca.transaction.models import Transaction, CustomerTopUp\nfrom finhack_bca.users.models import User\n\n\nclass TransactionConfirmationForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(TransactionConfirmationForm, self).__init__(*args, **kwargs)\n self.fields['store'].disabled = True\n self.fields['remarks'].disabled = True\n self.fields['amount'].disabled = True\n\n class Meta:\n model = Transaction\n fields = ['store', 'remarks', 'amount']\n\n\nclass CustomerTopUpForm(forms.ModelForm):\n\n class Meta:\n model = CustomerTopUp\n fields = ['customer', 'amount']\n widgets = {\n 'customer': autocomplete.ModelSelect2(url='customer_autocomplete')\n }","repo_name":"abirafdirp/finhacks_submission","sub_path":"finhack_bca/frontend/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26093977113","text":"from myStack import *\n\n\ndef checkBrackets(string):\n '''function to check if the syntax of a made-up language is correct\n Args:\n Param1: String with the made-up code\n Returns:\n void, or prints a statement saying where the error is\n '''\n stack = myStack()\n BracketsDict = {'[': ']', '(': ')', '<': '>'}\n counter = 0\n for character in string:\n counter += 1\n if character not in BracketsDict.keys() and character not in BracketsDict.values():\n errorstring = ''\n for item in BracketsDict.keys():\n errorstring += item\n errorstring += ' '\n for item in BracketsDict.values():\n errorstring += item\n errorstring += ' '\n print(\"------------------\\nError 1: invalid syntax. only \",\n errorstring[:5], \" or \", errorstring[6:], \" allowed.\")\n return\n elif character in BracketsDict.keys():\n stack.push(character)\n continue\n elif character in BracketsDict.values():\n if stack.isEmpty():\n print(\"------------------\\nError 2: invalid syntax, too many closing characters:\",\n character, \" at position\", counter)\n return\n openingChar = stack.pop()\n if BracketsDict[openingChar] == character:\n continue\n else:\n print(\"------------------\\nError 3: invalid syntax: \", openingChar, \" before\",\n character, \" at position\", counter, \"in line.\", BracketsDict[openingChar], \"needed\")\n return\n if stack.isEmpty():\n print(\"Valid syntax\")\n return\n else:\n print(\"------------------\\nError 4: Too many opening brackets\")\n\n\ncheckBrackets(\"(((<>)))\") # correct\ncheckBrackets(\"#$%(((<>)))\") # wrong, invalid characters\ncheckBrackets(\"(((<)>))\") # wrong, closing ) in the wrong place\ncheckBrackets(\"(((<>)))]\") # wrong, one closing ] too many\ncheckBrackets(\"(((<>))))\") # wrong, one closing ) too many\ncheckBrackets(\"(((<]>))\") # wrong, closing ] in the wrong place\ncheckBrackets(\"[(((<>)))\") # wrong, too many opening brackets\n","repo_name":"StpdFox/HU_ALDS_Ref","sub_path":"ALDS_Week_2/Opdracht_2/bracketsProblem.py","file_name":"bracketsProblem.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30262420035","text":"from pydrive.drive import GoogleDrive\nfrom pydrive.auth import GoogleAuth\nimport os\n# Authentication\ngauth = GoogleAuth()\n# Create a local web server and handle auth automatically\n# Google Drive authentication process\ngauth.LocalWebserverAuth()\ndrive = GoogleDrive(gauth)\n# Specify the folder path to upload\nx=\"cowrie.json.2021-01-09\"\nf = drive.CreateFile({'title' : 
x})\n# Set the local file and upload it\nf.SetContentFile(os.path.join(x))\n# Upload to Google Drive\nf.Upload()\nf = None\n# Reference site\n#","repo_name":"S-mishina/ssh-honeypot-analysis-program","sub_path":"gp.py","file_name":"gp.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36128895775","text":"\"\"\"\nA frog can jump up 1 step at a time, or 2 steps... it can also jump up n steps.\nFind the total number of distinct ways the frog can jump up a staircase of n steps.\n\"\"\"\n\n\nclass Solution:\n def jumpFloorII(self, number):\n # write code here\n # Method 1:\n # return pow(2, number-1)\n # Method 2:\n if number < 0:\n return 0\n if number == 1:\n return 1\n if number == 2:\n return 2\n a = 2\n ret = None\n for n in range(2, number):\n ret = a*2\n a = ret\n return ret","repo_name":"li2ui2/Python_Personal_DEMO","sub_path":"DATA_STRUCTURE/jianzhi_offer/其他/变태跳台阶.py","file_name":"变态跳台阶.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40626621504","text":"import sys\nfrom cryptography.hazmat.primitives import serialization, hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\n\n# Load params\nif len(sys.argv) < 3:\n print('Usage: python3 unlock.py <key_file> <enc_key>')\n sys.exit(1)\nkey_file = sys.argv[1]\nenc_key = sys.argv[2]\n\n# Load private key\nprivate = None\nwith open(key_file, 'rb') as f:\n private = serialization.load_pem_private_key(f.read(), None)\n\n# Load encrypted key\nenc_key = bytes.fromhex(enc_key)\n\n# Decrypt\nkey = private.decrypt(\n enc_key,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n)\n\nprint(str(key, 'utf-8'))\n","repo_name":"dubniczky/Ransomware","sub_path":"unlock.py","file_name":"unlock.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6742491112","text":"import googleapiclient.discovery\nimport argparse\n\ndef predict_json(project, model, instances, version=None):\n \"\"\"Send json data to a deployed model for prediction.\n\n Args:\n project (str): project where the Cloud ML Engine Model is deployed.\n model (str): model name.\n instances ([Mapping[str: Any]]): Keys should be the names of Tensors\n your deployed model expects as inputs. 
Values should be datatypes\n convertible to Tensors, or (potentially nested) lists of datatypes\n convertible to tensors.\n version: str, version of the model to target.\n Returns:\n Mapping[str: any]: dictionary of prediction results defined by the\n model.\n \"\"\"\n # Create the ML Engine service object.\n # To authenticate set the environment variable\n # GOOGLE_APPLICATION_CREDENTIALS=\n service = googleapiclient.discovery.build('ml', 'v1')\n name = 'projects/{}/models/{}'.format(project, model)\n\n if version is not None:\n name += '/versions/{}'.format(version)\n\n response = service.projects().predict(\n name=name,\n body={'instances': instances}\n ).execute()\n\n if 'error' in response:\n raise RuntimeError(response['error'])\n\n return response['predictions']\n \nparser = argparse.ArgumentParser()\nparser.add_argument(\"-p\", \"--project\", required=True,\n help=\"Project that flights service is deployed in\")\nargs = parser.parse_args()\n\ninstances = [\n {\n 'dep_delay': dep_delay,\n 'taxiout': taxiout,\n 'distance': 160.0,\n 'avg_dep_delay': 13.34,\n 'avg_arr_delay': avg_arr_delay,\n 'carrier': 'AS',\n 'dep_lat': 61.17,\n 'dep_lon': -150.00,\n 'arr_lat': 60.49,\n 'arr_lon': -145.48,\n 'origin': 'ANC',\n 'dest': 'CDV'\n }\n for dep_delay, taxiout, avg_arr_delay in \n [[16.0, 13.0, 67.0], \n [13.3, 13.0, 67.0], # if dep_delay was the airport mean \n [16.0, 16.0, 67.0], # if taxiout was the global mean \n [16.0, 13.0, 4] # if avg_arr_delay was the global mean \n ]\n]\n\nresponse=predict_json(args.project, 'flights7', instances, 'tf_20')\nprint(\"response={}\".format(response))\n\nprobs = [pred[u'pred'][0] for pred in response]\nprint(\"probs={}\".format(probs))\n\n# find the maximal impact variable\nmax_impact = 0.1 # unless impact of var > 0.1, we'll go with 'typical'\nmax_impact_factor = 0\nfor factor in range(1, len(probs)):\n impact = abs(probs[factor] - probs[0])\n if impact > max_impact:\n max_impact = impact\n max_impact_factor = factor\n\nreasons = [\"this flight appears rather typical\",\n \"the departure delay is typically 13.3 minutes\",\n \"the taxiout time is typically 16.0 minutes\",\n \"the avg_arrival_delay is typically 4 minutes\"]\n\nprint(\"\\n\\nThe ontime probability={}; the key reason is that {} {}\".format(\n probs[0],\n reasons[max_impact_factor],\n \"-- had it been typical, the ontime probability would have been {}\".format(probs[max_impact_factor]) if max_impact_factor > 0 else \"\"\n ))\n","repo_name":"cromann/data-science-on-gcp","sub_path":"09_cloudml/call_predict_updated.py","file_name":"call_predict_updated.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"23662112127","text":"import base64\r\n\r\nfrom django.core.files.base import ContentFile\r\nfrom ingredients.models import IngredientsAmount\r\nfrom ingredients.serializers import (IngredientsAmountAddSerializer,\r\n IngredientsAmountSerializer)\r\nfrom rest_framework import serializers\r\nfrom tags.models import Tags\r\nfrom tags.serializers import TagsSerializer\r\nfrom users.serializers import CustomUserSerializer\r\n\r\nfrom .models import Favorite, Recipes, ShoppingCart\r\n\r\n\r\nclass Base64ImageField(serializers.ImageField):\r\n def to_internal_value(self, data):\r\n if isinstance(data, str) and data.startswith('data:image'):\r\n format, imgstr = data.split(';base64,')\r\n ext = format.split('/')[-1]\r\n data = ContentFile(base64.b64decode(imgstr), name=f'temp.{ext}')\r\n return 
super().to_internal_value(data)\r\n\r\n\r\nclass RecipesReadSerializer(serializers.ModelSerializer):\r\n tags = TagsSerializer(read_only=True, many=True)\r\n author = CustomUserSerializer(required=False)\r\n ingredients = IngredientsAmountSerializer(many=True, required=False)\r\n image = Base64ImageField()\r\n is_favorited = serializers.SerializerMethodField()\r\n is_in_shopping_cart = serializers.SerializerMethodField()\r\n\r\n class Meta:\r\n fields = (\r\n 'id',\r\n 'tags',\r\n 'author',\r\n 'ingredients',\r\n 'is_favorited',\r\n 'is_in_shopping_cart',\r\n 'name',\r\n 'image',\r\n 'text',\r\n 'cooking_time',\r\n )\r\n model = Recipes\r\n\r\n def get_is_favorited(self, obj):\r\n \"\"\"Status: whether it is in favorites or not.\"\"\"\r\n return self._extracted_get(Favorite, obj)\r\n\r\n def get_is_in_shopping_cart(self, obj):\r\n \"\"\"Status: whether it is in the shopping cart or not.\"\"\"\r\n return self._extracted_get(ShoppingCart, obj)\r\n\r\n def _extracted_get(self, model, obj):\r\n request = self.context.get('request')\r\n return (\r\n request.user.is_authenticated\r\n and model.objects.filter(\r\n user=request.user,\r\n recipe__id=obj.id).exists()\r\n )\r\n\r\n\r\nclass RecipesCreateSerializer(serializers.ModelSerializer):\r\n tags = serializers.PrimaryKeyRelatedField(\r\n queryset=Tags.objects.all(),\r\n many=True,\r\n required=True,\r\n )\r\n ingredients = IngredientsAmountAddSerializer(many=True, required=False)\r\n image = Base64ImageField()\r\n\r\n class Meta:\r\n fields = (\r\n 'tags',\r\n 'ingredients',\r\n 'name',\r\n 'image',\r\n 'text',\r\n 'cooking_time',\r\n )\r\n model = Recipes\r\n\r\n def add_tags_and_ingredients(self, recipes, tags_data, ingredients_data):\r\n \"\"\"Adding tags and ingredients\"\"\"\r\n recipes.tags.set(tags_data)\r\n ingredient_amounts = []\r\n for item in ingredients_data:\r\n ingredient = item.get('ingredient')\r\n amount = item.get('amount')\r\n ingredient_amount, _ = IngredientsAmount.objects.get_or_create(\r\n ingredient=ingredient,\r\n amount=amount,\r\n )\r\n ingredient_amounts.append(ingredient_amount)\r\n recipes.ingredients.set(ingredient_amounts)\r\n return recipes\r\n\r\n def create(self, validated_data):\r\n \"\"\"Creating a recipe\"\"\"\r\n tags_data = validated_data.pop('tags')\r\n ingredients_data = validated_data.pop('ingredients')\r\n recipes = Recipes.objects.create(**validated_data)\r\n return self.add_tags_and_ingredients(\r\n recipes, tags_data, ingredients_data\r\n )\r\n\r\n def update(self, instance, validated_data):\r\n \"\"\"Editing a recipe\"\"\"\r\n instance.ingredients.clear()\r\n instance.tags.clear()\r\n tags_data = validated_data.pop(\"tags\")\r\n ingredients_data = validated_data.pop(\"ingredients\")\r\n for attr, value in validated_data.items():\r\n setattr(instance, attr, value)\r\n instance.save()\r\n return self.add_tags_and_ingredients(\r\n instance, tags_data, ingredients_data\r\n )\r\n\r\n\r\nclass ShortRecipeSerializer(serializers.ModelSerializer):\r\n image = Base64ImageField(\r\n max_length=None,\r\n use_url=True,\r\n )\r\n\r\n class Meta:\r\n model = Recipes\r\n fields = (\r\n \"id\",\r\n \"name\",\r\n \"image\",\r\n \"cooking_time\",\r\n )\r\n","repo_name":"aoamosova/foodgram-project-react","sub_path":"backend/recipes/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32100975685","text":"from b111017.druzyna import Druzyna\r\n\r\nteam1 = Druzyna(\"Huragan Polaszki\", \"Trener 
Zbychu\")\r\n\r\n# print(team1.budzet) # calls the function\r\n# print(team1.transfery)\r\n#\r\n# team1.__budzet = 10000\r\n# print(team1.__budzet)\r\n#\r\n#\r\n# team1.transfery = \"Kowalski\"\r\nteam1.transfery = [\"lewy\", \"ronaldo\"]\r\nprint(team1.transfery)\r\n","repo_name":"bkruszewski/Python_Courses_2017-2019","sub_path":"b111017/zaspoly.py","file_name":"zaspoly.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33377265180","text":"from dados import produtos, pessoas, lista\n\n# nova_lista = map(lambda x: x * 2, lista)\nnova_lista = [x * 2 for x in lista]\nprint(lista)\nprint(list(nova_lista))\n\n\ndef maioridade(p):\n if p['idade'] >= 18:\n return f'{p[\"nome\"]} é maior de idade'\n return f'{p[\"nome\"]} é menor de idade'\n\n\ndef aumenta_preco(p):\n p['preco'] = round(p['preco'] * 1.05, 2)\n return p\n\n\nnovos_produtos = map(aumenta_preco, produtos)\n\nfor produto in novos_produtos:\n print(produto)\n\nnomes = map(maioridade, pessoas)\n\nfor pessoa in nomes:\n print(pessoa)\n ","repo_name":"luccashiroshi/Curso-Python","sub_path":"map/mapeamento.py","file_name":"mapeamento.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"24837031577","text":"'''\n# This is a class that handles XML files, including reading, writing and processing them, as well as finding elements in an XML file.\n\nimport xml.etree.ElementTree as ET\n\n\nclass XMLProcessor:\n def __init__(self, file_name):\n \"\"\"\n Initialize the XMLProcessor object with the given file name.\n :param file_name: string, the name of the XML file to be processed.\n \"\"\"\n self.file_name = file_name\n self.root = None\n\n def read_xml(self):\n \"\"\"\n Reads the XML file and returns the root element.\n :return: Element, the root element of the XML file.\n >>> xml_processor = XMLProcessor('test.xml')\n >>> root_element = xml_processor.read_xml()\n >>> print(root_element)\n \n \"\"\"\n\n\n def write_xml(self, file_name):\n \"\"\"\n Writes the XML data to the specified file.\n :param file_name: string, the name of the file to write the XML data.\n :return: bool, True if the write operation is successful, False otherwise.\n >>> xml_processor = XMLProcessor('test.xml')\n >>> root = xml_processor.read_xml()\n >>> success = xml_processor.write_xml('output.xml')\n >>> print(success)\n True\n \"\"\"\n\n\n def process_xml_data(self, file_name):\n \"\"\"\n Modifies the data in XML elements and writes the updated XML data to a new file.\n :param file_name: string, the name of the file to write the modified XML data.\n :return: bool, True if the write operation is successful, False otherwise.\n >>> xml_processor = XMLProcessor('test.xml')\n >>> root = xml_processor.read_xml()\n >>> success = xml_processor.process_xml_data('processed.xml')\n >>> print(success)\n True\n \"\"\"\n\n\n def find_element(self, element_name):\n \"\"\"\n Finds the XML elements with the specified name.\n :param element_name: string, the name of the elements to find.\n :return: list, a list of found elements with the specified name.\n >>> xml_processor = XMLProcessor('test.xml')\n >>> root = xml_processor.read_xml()\n >>> items = xml_processor.find_element('item')\n >>> for item in items:\n >>> print(item.text)\n apple\n banana\n orange\n \"\"\"\n\n'''\n\nimport xml.etree.ElementTree as ET\n\n\nclass XMLProcessor:\n def __init__(self, file_name):\n self.file_name = file_name\n 
self.root = None\n\n def read_xml(self):\n try:\n tree = ET.parse(self.file_name)\n self.root = tree.getroot()\n return self.root\n except:\n return None\n\n def write_xml(self, file_name):\n try:\n tree = ET.ElementTree(self.root)\n tree.write(file_name)\n return True\n except:\n return False\n\n def process_xml_data(self, file_name):\n for element in self.root.iter('item'):\n text = element.text\n element.text = text.upper()\n return self.write_xml(file_name)\n\n def find_element(self, element_name):\n elements = self.root.findall(element_name)\n return elements\n\n\n","repo_name":"FudanSELab/ClassEval","sub_path":"data/benchmark_solution_code/XMLProcessor.py","file_name":"XMLProcessor.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"3"} +{"seq_id":"19764759505","text":"nbr_articles = 2\ntva = 0.2\ntotal_facture = 0\narticles = []\nquantity = []\nprix = []\n\n\nfor i in range(1,nbr_articles+1):\n articles.append(input(f\"Donner le nom du {i} article : \"))\n quantity.append(int(input(f\"Donner la quantity du {i} article : \")))\n prix.append(float(input(f\"Donner le prix unitaire du {i} article : \")))\n\nfor i in range(0,nbr_articles):\n p = prix[i]*quantity[i]\n total_facture += p+ p*tva\n print(f\"Total de l'article {articles[i]} : {p} dh (ht)\")\n\nprint(f\"Le total de votre facture est : {total_facture} (TTC)\")\n\n","repo_name":"hamzaezzine/python-exercices","sub_path":"TP 01/exercice9.py","file_name":"exercice9.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71324515922","text":"# Hand Numbers and Text Recognition\n# Last Update 20220512\n\nfrom djitellopy import Tello\nfrom PIL import Image\nimport cv2, pytesseract\n\nfrom datetime import datetime\nfrom fingerCounter import fingerCounter\n\ndef countDown(frame_read, text, seconds, showCountDown):\n start_time = datetime.now()\n diff = (datetime.now() - start_time).seconds # converting into seconds\n while True:\n frame = frame_read.frame\n #frame = cv2.flip(frame, 1)\n img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n img_copy = frame.copy()\n im_pil = Image.fromarray(img)\n \n if showCountDown == True:\n cv2.putText(img_copy, text + str(seconds - diff), (40, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)\n else: \n cv2.putText(img_copy, text, (50, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)\n\n cv2.imshow('tello hand', img_copy)\n diff = (datetime.now() - start_time).seconds\n if (diff > seconds):\n frame_read.stop\n break\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break \n\n return\n\ndef identifyTextfromJpg(img):\n text=\"--\"\n\n # Preprocessing the image starts\n # img = cv2.imread(\"sampleText.jpg\")\n\n # Convert the image to gray scale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (5, 5), 0)\n\n # Performing OTSU threshold\n ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)\n\n # Specify structure shape and kernel size.\n # Kernel size increases or decreases the area of the rectangle to be detected.\n # A smaller value like (10, 10) will detect each word instead of a sentence.\n rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (18, 18))\n\n # Applying dilation on the threshold image\n dilation = cv2.dilate(thresh1, rect_kernel, iterations=1)\n\n # Finding contours\n contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, 
cv2.CHAIN_APPROX_NONE)\n\n # Creating a copy of image\n img_copy = img.copy()\n\n # Looping through the identified contours\n # Then rectangular part is cropped and passed on to pytesseract for extracting text from it\n # Extracted text is then saved and coordinates of center returned\n max_area, max_x, max_y, max_w, max_h = 0, 0, 0, 0, 0\n centerX, centerY = 0, 0\n\n # Find biggest text\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n area = w*h\n if area > max_area and area < 300*200:\n centerX = x + w//2\n centerY = y + h//2\n max_x = x\n max_y = y\n max_w = w\n max_h = h\n # Apply OCR on the cropped image\n #cropped = img_copy[y:y + h, x:x + w]\n #text = pytesseract.image_to_string(cropped, config=\"--psm 10\")\n #cv2.putText(img_copy, text, (x + 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\n cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n if max_w * max_h > 0:\n # Drawing a rectangle on copied image\n rect = cv2.rectangle(img_copy, (max_x, max_y), (max_x + max_w, max_y + max_h), (255, 255, 0), 2)\n\n # Cropping the text block for giving input to OCR\n cropped = img_copy[max_y:max_y + max_h, max_x:max_x + max_w]\n\n # Apply OCR on the cropped image\n text = pytesseract.image_to_string(cropped, config=\"--psm 10 --oem 3 -c tessedit_char_whitelist=0123456789\")\n text = ''.join(e for e in text if e.isalnum())\n cv2.putText(img_copy, text, (max_x+5, max_y-5), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\n\n #imgStack = utils.stackImages(1, [img, img_copy, cropped])\n #imgStack = utils.stackImages(1, [img, img_copy])\n\n #cv2.imshow('tello ocr', imgStack)\n\n return text, centerX, centerY, max_w*max_h\n\n# ———————————————–\n# Main program\n# ———————————————–\n\ntello = Tello()\ntello.connect()\ntello.streamon()\n\n# point to Tesseract OCR path\npytesseract.pytesseract.tesseract_cmd = \"C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe\"\n\nm_fingerCounter = None\nm_fingerCounter = fingerCounter()\n\nmyFrame = tello.get_frame_read()\n\n#Look for \"Fist/Zero\" to start\ncountDown(myFrame, \"Give me a number, we start in \", 3, True)\n\ncv2.destroyAllWindows()\n\ncountx = 0\nlastnum = 0\n\n#Look for the hand number\nwhile True:\n #myFrame = tello.get_frame_read()\n myf = myFrame.frame\n img = cv2.resize(myf, (640, 480))\n #img = cv2.resize(myf, (320, 240))\n\n _, _, count = m_fingerCounter.countFingers(img)\n # add the fingers in both hands\n num = count['RIGHT'] + count['LEFT']\n\n if num != lastnum:\n countx = 0\n\n if str(num) == \"1\" or str(num) == \"2\" or str(num) == \"3\" or str(num) == \"4\" or str(num) == \"5\":\n countx = countx + 1\n lastnum = num\n\n if (countx >= 100):\n myFrame.stop\n break\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break \n\n#keep the last one\nnumber = str(num)\n\nprint(\"You have chosen number: \", number,) \n\ncv2.destroyAllWindows()\n\ncountDown(myFrame, \"You have chosen number \" + number, 5, False)\n\ncountDown(myFrame, \"Now find the number for the photo! 
We start in \", 3, True)\n\ncountx = 0\n\n#Look for the predicted text\nwhile True:\n frame = myFrame.frame\n\n #frame = cv2.flip(frame, 1)\n img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n img_copy = frame.copy()\n im_pil = Image.fromarray(img)\n cv2.imshow('tello hand', img_copy)\n\n text, centerX, centerY, area = identifyTextfromJpg(frame)\n\n if text == number:\n print(\"text on image detected:>\", text, \"<\")\n countx = countx + 1\n\n if (countx == 3):\n myFrame.stop\n break\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break \n\n#Take the photo\ncv2.imwrite(\"handnumberPhoto.jpg\", img_copy)\ncv2.imshow('tello photo', img_copy)\nmyFrame.stop\n\nwhile True:\n frame = myFrame.frame\n\n frame = cv2.flip(frame, 1)\n img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n img_copy = frame.copy()\n im_pil = Image.fromarray(img)\n cv2.putText(img_copy, \"Photo Taken, thanks!\", (40, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)\n cv2.imshow('tello hand', img_copy)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break \n\nprint (\"finish\")","repo_name":"jadepedro/Tello-Computer-Vision","sub_path":"HandNumbers Recognition.py","file_name":"HandNumbers Recognition.py","file_ext":"py","file_size_in_byte":6312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4199297777","text":"# a variable instance has two tkinter class variables: one for the entry fields and one for the labels\r\n# additionally, the variables have a \"valid\" attribute which is true when the variable is in a range where it can be used in calculation, false otherwise\r\n# holds all names/units to be displayed in the UI\r\nimport tkinter as tk\r\nfrom tkinter import messagebox # required for messagebox to work, even with tkinter/tk.messagebox as messagebox import isn't part of tkinter __init__\r\n\r\nclass Variable:\r\n \r\n # inits with all variables with value 0.0 (double), and the standard error message as \"enter variables\"\r\n def __init__(self, name, unit, lower_bound, upper_bound):\r\n print(\"creating variable [\" + name + \"]\")\r\n \r\n self.valid = False\r\n self.name = name\r\n self.unit = unit\r\n self.lower_bound = lower_bound\r\n self.upper_bound = upper_bound\r\n \r\n self.entry = tk.DoubleVar()\r\n self.entry.set(0.0)\r\n self.value = tk.DoubleVar()\r\n self.value.set(0.0)\r\n \r\n self.error = tk.StringVar()\r\n self.error.set(\"enter values\")\r\n \r\n # sets the value as the current value in the entry field (only call this after entry field validation using valid_entry_input)\r\n def entry_to_value(self):\r\n \r\n self.value.set(self.entry.get())\r\n \r\n # sets the entry value equal to the variable value\r\n def value_to_entry(self):\r\n \r\n self.entry.set(self.value.get())\r\n \r\n # checks the current entry value and sets the validity and possibly error message accordingly\r\n # returns the status as a boolean\r\n def valid_entry_input(self):\r\n print(\"checking entry validity for variable [\" + self.name + \"]\")\r\n \r\n # if the value of entry is not a double (also empty), calling .get() will raise a TclError\r\n try:\r\n # if the input doesn't raise a TclError it is a double\r\n entry_value = self.entry.get()\r\n \r\n # check if the value is in bounds, return True if it is otherwise raise error message and return false\r\n if self.value_in_bounds(entry_value):\r\n \r\n return True\r\n \r\n else:\r\n \r\n tk.messagebox.showerror(\"Variable error\", \"The input for [\" + self.name + \"] is out of bounds\")\r\n self.valid = False\r\n self.error.set(\"out 
of bounds\")\r\n \r\n # if the entry is not a valid double, print to the console and throw a GUI popup warning.\r\n # set valid to false, the status error message to \"invalid input\" and return False\r\n except tk.TclError:\r\n \r\n print(\"invalid input for [\" + self.name + \"]\") \r\n self.valid = False\r\n self.error.set(\"invalid input\")\r\n tk.messagebox.showerror(\"Variable error\", \"The input for [\" + self.name + \"] is invalid\")\r\n return False\r\n \r\n # takes a value, returns True if it is within bounds for the variable object, False if it is not\r\n def value_in_bounds(self, value):\r\n \r\n if value < self.lower_bound or value > self.upper_bound:\r\n return False\r\n else: \r\n return True\r\n ","repo_name":"guidocreusen/python_star_polymer_gel_porosity","sub_path":"src/calculation/variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41558628327","text":"# taking first number\r\nnum1 = int(input(\"Enter first number: \"))\r\n# taking second number \r\nnum2 = int(input(\"Enter second number: \")) \r\n\r\n# checking if num2 is greater than num1 then swap these numbers\r\nif num2 > num1: \r\n (num1,num2) = (num2,num1)\r\n# repeat these steps till num2 divides num1 with remainder zero\r\nwhile num1%num2 != 0: \r\n # swap num1 to num2 and num2 with remainder\r\n (num1,num2) = (num2,num1%num2) \r\n\r\n# printing GCD\r\nprint(\"The GCD of the numbers is\",num2) ","repo_name":"spiderlabweb/Python-examples","sub_path":"gcd_using_euclids_algo.py","file_name":"gcd_using_euclids_algo.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"32701754912","text":"from collections import deque\n\n\ndef bfs(graph, checked):\n dx = [0, 0, -1, 1]\n dy = [-1, 1, 0, 0]\n\n queue = deque([[0, 0, 0]])\n checked[0][0][0] = 1\n\n while queue:\n x, y, wall = queue.popleft()\n if x == n - 1 and y == m - 1:\n return checked[x][y][wall]\n\n for i in range(4):\n ax = x + dx[i]\n ay = y + dy[i]\n\n # 1. The coordinate stays within the grid bounds. 2. The coordinate has not been visited.\n if 0 <= ax < n and m > ay >= 0 == checked[ax][ay][wall]:\n\n # The coordinate is not a wall.\n if graph[ax][ay] == '0':\n queue.append([ax, ay, wall])\n checked[ax][ay][wall] = checked[x][y][wall] + 1\n\n # 1. No wall has been broken yet. 2. 
The next coordinate to move to is a wall.\n if wall == 0 and graph[ax][ay] == '1':\n queue.append([ax, ay, 1])\n checked[ax][ay][1] = checked[x][y][wall] + 1\n return -1\n\n\nn, m = map(int, input().split())\ngraph = [list(input()) for _ in range(n)]\nchecked = [[[0, 0] for _ in range(m)] for _ in range(n)]\n\nprint(bfs(graph, checked))","repo_name":"kaori-killer/baekjoon-summer-challenge","sub_path":"CHAPTER_03_그리디/22-06-18/벽 부수고 이동하기.py","file_name":"벽 부수고 이동하기.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"854901477","text":"from django.http.response import HttpResponse\nfrom django.shortcuts import get_object_or_404, render\n\nfrom .models import Riddle, Option, Answers\n\n\ndef toFixed(numObj, digits=0):\n return f\"{numObj:.{digits}f}\"\n\n\ndef right_answers(id):\n tmp = Option.objects.all().filter(riddle=id, correct='1')\n array_of_right_answers = []\n for i in tmp:\n array_of_right_answers.append(str(i.id))\n return array_of_right_answers\n\ndef results(request):\n #dic = {}\n lis = []\n for i in Riddle.objects.all():\n ra = right_answers(i)\n answers = Answers.objects.all().filter(riddle_id = i)\n bad_answers = 0\n for j in answers:\n if not(str(j.answers) == str(ra)):\n bad_answers += 1\n if bad_answers > 0:\n percent_wrong_answers = toFixed(100*(bad_answers/answers.count()),1)\n #dic[i.riddle_text] = percent_wrong_answers\n #else:\n #dic[i.riddle_text] = 0\n\n if (float(percent_wrong_answers) > 50):\n lis.append([i.riddle_text, percent_wrong_answers, 1])\n else:\n lis.append([i.riddle_text, percent_wrong_answers, 0])\n #print(lis)\n #print('Question - ' + i.riddle_text + ' Total answers - ' + str(answers.count()) + ' wrong answers -'+ str(bad_answers))\n return render(request, \"results.html\", {\"results\" : lis})\n\n\ndef index(request):\n return render(request, \"index.html\", {\"latest_riddles\": Riddle.objects.order_by('id')})\n\n\ndef detail(request, riddle_id):\n return render(request, \"answer.html\", {\"riddle\": get_object_or_404(Riddle, pk=riddle_id)})\n\n\ndef answer(request, riddle_id):\n riddle = get_object_or_404(Riddle, pk=riddle_id)\n answ = Answers(riddle = riddle, answers = request.POST.getlist ('option'), ip = request.META.get('REMOTE_ADDR'))\n answ.save()\n try:\n option = riddle.option_set.get(pk=request.POST['option'])\n except (KeyError, Option.DoesNotExist):\n return render(request, 'answer.html', {'riddle': riddle, 'error_message': 'Option does not exist'})\n else:\n try:\n rid = Riddle.objects.get(pk=str(int(riddle_id) + 1))\n except Riddle.DoesNotExist:\n\n return render(request, 'end.html', {'riddle': riddle, 'error_message': 'Опрос окончен'})\n return render(request, 'answer.html', {'riddle': rid, 'error_message': ''})\n","repo_name":"emashovvn2/TeachOpros","sub_path":"riddles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21549211354","text":"\"\"\"\napply_async\n\"\"\"\nfrom plugins import celery, sms\nfrom plugins.private.asynchronous.primary import db_session\n\n\n@celery.task()\n@db_session\ndef batch_sms(template_id: str, phone_list: list, params: list):\n \"\"\"Send SMS messages in batches\"\"\"\n\n for phone in phone_list:\n sms.send(template_id=template_id, phone_number=phone, sms_sign='台州海嘉粤运输有限公司', 
params=params)\n","repo_name":"Neo-python/HYcore","sub_path":"asynchronous/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11668757967","text":"import heapq\r\nimport sys\r\ninput = lambda:sys.stdin.readline().rstrip()\r\n\r\nn,m = map(int,input().split())\r\nindegree = [0 for i in range(n+1)]\r\nadj_list = [[] for _ in range(n+1)]\r\nanswer = []\r\n\r\nfor _ in range(m):\r\n a,b = map(int,input().split())\r\n indegree[b] += 1\r\n adj_list[a].append(b)\r\n\r\nmin_heap = []\r\n\r\nfor node in range(1,n+1):\r\n if indegree[node] == 0:\r\n heapq.heappush(min_heap, node)\r\n\r\nwhile min_heap:\r\n node = heapq.heappop(min_heap)\r\n answer.append(node)\r\n\r\n for adj_node in adj_list[node]:\r\n indegree[adj_node] -= 1\r\n\r\n if indegree[adj_node] == 0:\r\n heapq.heappush(min_heap, adj_node)\r\n\r\nprint(*answer)","repo_name":"mugglim/python-algo-study","sub_path":"categories/topology_sort/boj-topo-1766.py","file_name":"boj-topo-1766.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"36694532744","text":"#1\ndef exercicio_1():\n print(\"Introduza três notas:\")\n nota_1 = float(input())\n nota_2 = float(input())\n nota_3 = float(input())\n return (F\"Média do aluno é {(nota_1 + nota_2 + nota_3) / 3}\")\nexercicio_1()\n\n#2\ndef exercicio2():\n num_elementos = int(input(\"Insira o número de \"))\n lista = []\n\n for num in range (num_elementos):\n lista.append(input())\n return lista\n\n#3\ndef exercicio_3():\n entrada = input(\"Introduza 'a' para Globo, 'b' para SBT e 'z' ou 'Z' para finalizar: \")\n while entrada != 'z': \n if entrada == 'Z':\n break\n elif entrada == 'a':\n print(\"Globo\")\n elif entrada == 'b':\n print(\"SBT\")\n else:\n print(\"Inválido\")\n entrada = input()\nexercicio_3()\n\n#4\ndef exercicio_4():\n i = 0\n lista = input(\"Insira as médias dos alunos: \")\n list = lista.split()\n media_inferior = []\n while i < len(list):\n if int(list[i]) < 7:\n media_inferior.append(list[i])\n i += 1\n if len(media_inferior) < 0.25 * len(list):\n return \"Professor Coxa\"\n else:\n return \"Professor Padrão\"\nexercicio_4()\n","repo_name":"Oak1290/logicaprogramacao","sub_path":"prova.py","file_name":"prova.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30858451401","text":"\"\"\"\nSOLUTION IDEA: Since I already have the position of each entry at the moment of building the matrix, instead of building\nthe matrix and then checking which entries obey the rules, I chose to do the check right away and sum the ones that obey\nwithout necessarily having to build the matrix.\n\"\"\"\n\noperation: str = input()\nentries_sum: float = 0.0\n\nhouses: int = 0\nfor n_row in range(12):\n for n_column in range(12):\n float_entry: float = float(input())\n second_limit: int = 11 - n_row\n if (n_column < n_row) and (n_column > second_limit):\n houses += 1\n entries_sum += float_entry\n\nif operation == 'S':\n print(round(entries_sum, 1))\nelif operation == 'M':\n average = round(entries_sum / houses, 1)\n print(average)\n","repo_name":"PabloVKF/programacaoo_orientada_a_objetos_I","sub_path":"prova_3/1188.py","file_name":"1188.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"36517331770","text":"import keras\nfrom keras.models import load_model\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import load_model\nfrom keras.models import model_from_json\nimport json\n\n\nwith open('model_in_json.json','r') as f:\n model_json = json.load(f)\n\nmodel = model_from_json(model_json)\nmodel.load_weights('model_weights.h5')\n\ntest_data_dir = 'Test_dir/'\n\nimg_width, img_height = 224, 224\nbatch_size=2\n\ntest_datagen = ImageDataGenerator(rescale=1. / 255)\n\ntest_data = test_datagen.flow_from_directory(\ntest_data_dir,\ntarget_size=(img_width, img_height),\nbatch_size=batch_size,\nclass_mode='categorical')\n\ntest_data.reset()\n\n\nyhat = model.predict_generator(test_data, verbose=1)\ny_classes = yhat.argmax(axis=-1)\nval = {0: '1', 1: '2', 2: '3'}\n\nfinal = y_classes[0]\n\nprint(\"The threat classification is: \", val[final])","repo_name":"sspeedy99/Abandoned-Object-Detection","sub_path":"Predict.py","file_name":"Predict.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"43213018844","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 27 19:02:35 2020\r\n\r\n@author: fangchenfeng\r\n\"\"\"\r\n\r\n# This is a script for trying the mixture of images containing documents\r\nfrom __future__ import division\r\nimport utils\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nimport matplotlib.colors\r\nimport numpy as np\r\nfrom scipy.linalg import sqrtm\r\nimport museval.metrics as mmetrics\r\nfrom rdc import rdc\r\n\r\nfrom sklearn.decomposition import FastICA\r\n\r\nfrom skimage import data\r\nfrom skimage.color import rgb2gray\r\n\r\nn = 256\r\n\r\n# load images and convert them\r\npic_set = 8\r\n\r\nimg1=mpimg.imread('./images/set'+ str(pic_set) + '_pic1.png')\r\nimg2=mpimg.imread('./images/set'+ str(pic_set) + '_pic2.png')\r\n\r\nimg1_gray = rgb2gray(img1) # the value is between 0 and 1\r\nimg2_gray = rgb2gray(img2)\r\n\r\n# flip\r\nimg2_gray_re = np.fliplr(img2_gray)\r\n\"\"\"\r\nplt.figure()\r\nplt.imshow(img1_gray, cmap='gray')\r\nplt.title(\"Ground truth 1\")\r\nplt.show\r\n\r\nplt.figure()\r\nplt.imshow(img2_gray, cmap='gray')\r\nplt.title(\"Ground truth 2\")\r\nplt.show\r\n\"\"\"\r\n# We mix them here\r\n\r\n# create 2x(256*256) source matrix\r\n\r\nsource1 = np.matrix(img1_gray)\r\nsource1 = source1.flatten('F') #column wise\r\n\r\nsource2 = np.matrix(img2_gray)\r\nsource2 = source2.flatten('F') #column wise\r\n\r\nsource1 = source1 - np.mean(source1)\r\nsource2 = source2 - np.mean(source2)\r\n\r\n#source1 = source1/np.linalg.norm(source1)\r\n#source2 = source2/np.linalg.norm(source2)\r\n\r\nprint(\"rdc = \", rdc(source1.T,source2.T))\r\nsource = np.stack((source1, source2))\r\n\r\nprint('Covariance matrix is: ')\r\nprint(np.matmul(source,source.T))\r\n\r\n# randomly generated mixing matrix\r\nnp.random.seed(0)\r\n#mixing_matrix = np.random.rand(2,2)\r\nmixing_matrix = np.array([[1, 0.5], [0.5, 1]])\r\n\r\n# X = source * mixing_matrix - The mixed images\r\n\r\nX = np.matmul(source.T, mixing_matrix)\r\n(sdr_ref, sir, sar, perm) = mmetrics.bss_eval_sources(np.asarray(source), np.asarray(X.T))\r\n# mix = [[0.6992, 0.7275], [0.4784, 0.5548]] #or use the matrix from the paper\r\n# X = np.matmul(source.T, mix)\r\n\r\nX1 = X[:,0]\r\nmx1 = np.mean(X1)\r\nX1 = np.reshape(X1, (n,n))\r\n\r\n#print(X1.min(), X1.max(), X1.mean())\r\n\r\nX2 = X[:,1]\r\nmx2 = 
np.mean(X2)\r\nX2 = np.reshape(X2, (n,n))\r\n\r\nmx = np.array([mx1, mx2])\r\n#print(X2.min(), X2.max(), X2.mean())\r\n\r\n#Show the mixed images\r\n\"\"\"\r\nplt.figure()\r\nplt.imshow(X1.T, cmap='gray')\r\nplt.title(\"Mixed image 1\")\r\nplt.show\r\n\r\nplt.figure()\r\nplt.imshow(X2.T, cmap='gray')\r\nplt.title(\"Mixed image 2\")\r\nplt.show\r\n\"\"\"\r\n# FastICA algorithm\r\n#X[:,0] = X[:,0] - mx1\r\n#X[:,1] = X[:,1] - mx2\r\nprint(\"mean values of the observations are: \", mx1, mx2)\r\n# ICA does whitening pre-processing automatically\r\nica = FastICA(n_components=2, fun = 'cube', max_iter = 2000)\r\nsource_estimated = ica.fit_transform(X)\r\nmixing_estimated = ica.mixing_\r\n\r\n#print(mx)\r\nms = np.dot(np.linalg.inv(mixing_estimated),mx)\r\n#print(ms)\r\n\r\n#source_estimated[:,0] = source_estimated[:,0]+ms[0]\r\n#source_estimated[:,1] = source_estimated[:,1]+ms[1]\r\n\r\nprint(\"mixing matrix estimated by ICA is: \") \r\nprint(mixing_estimated)\r\n\r\n(sdr, sir, sar, perm) = mmetrics.bss_eval_sources(np.asarray(source), np.asarray(source_estimated.T))\r\n\r\nprint(\"Reference SDR is: \")\r\nprint(sdr_ref)\r\nprint(\"SDR of ICA is: \")\r\nprint(sdr)\r\n# Reshape the estimated source images (from vectors into matrices)\r\ns1 = source_estimated[:,0]\r\ns1 = np.reshape(s1, (n,n))\r\n\r\n#print(s1.min(), s1.max(), s1.mean())\r\n\r\ns2 = source_estimated[:,1]\r\ns2 = np.reshape(s2, (n,n))\r\n\r\n#print(s2.min(), s2.max(), s2.mean())\r\n\r\n# Show estimated sources\r\n\"\"\"\r\nplt.figure()\r\nplt.imshow(-s1.T, cmap='gray')\r\nplt.title(\"Estimated source 1 with ICA\")\r\nplt.show\r\n\r\nplt.figure()\r\nplt.imshow(-s2.T, cmap='gray')\r\nplt.title(\"Estimated source 2 with ICA\")\r\nplt.show()\r\n\"\"\"\r\n#print(np.matmul(source_estimated.T,source_estimated))\r\n\r\n# try the sparsity separation\r\nlambda_max = 0.002\r\nlambda_final = 0.0001\r\nmax_it = 2000\r\n\r\n# whitening pre-processing\r\n\"\"\"\r\nX = X.T\r\nR = np.dot(X, X.T)\r\nW = np.linalg.inv(sqrtm(R))\r\nX = np.dot(W, X)\r\nX = X.T\r\n\"\"\"\r\nA = np.random.rand(2,2)\r\nS = X.T\r\n\r\n(S, A) = utils.sparsity_sep(X, A, S, max_it, lambda_max, lambda_final)\r\nms = np.dot(np.linalg.inv(A),mx) \r\n \r\n#S = np.dot(np.linalg.inv(A), X.T)\r\n#S[0,:] = S[0,:]+ms[:,0]\r\n#S[1,:] = S[1,:]+ms[:,1] \r\n(sdr, sir, sar, perm) = mmetrics.bss_eval_sources(np.asarray(source), np.asarray(S)) \r\n\r\ns1 = S[0,:]\r\ns1 = np.reshape(s1, (n,n))\r\n\r\n#print(s1.min(), s1.max(), s1.mean())\r\n\r\ns2 = S[1,:]\r\ns2 = np.reshape(s2, (n,n))\r\n\r\n#print(s2.min(), s2.max(), s2.mean())\r\n\r\n# Show estimated sources\r\n\"\"\"\r\nplt.figure()\r\nplt.imshow(s1.T, cmap='gray')\r\nplt.title(\"Estimated source 1 with Sparse\")\r\nplt.show\r\n\r\nplt.figure()\r\nplt.imshow(s2.T, cmap='gray')\r\nplt.title(\"Estimated source 2 with Sparse\")\r\nplt.show()\r\n\"\"\"\r\n\r\n#print(np.matmul(S,S.T))\r\nprint(\"SDR of sparsity-based method is: \")\r\nprint(sdr)\r\n","repo_name":"ffc28/ImageSeparationL2TI","sub_path":"Code/Sparse/Mix_im_Sparse.py","file_name":"Mix_im_Sparse.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"26730291230","text":"class Guitar:\n def __init__(self, name, year, cost):\n self.name = name\n self.year = year\n self.cost = cost\n\n def __str__(self):\n return f\"{self.name} ({self.year}): ${self.cost:.2f}\"\n\nguitars = []\n\nwith open(\"guitars.csv\") as file:\n for line in file:\n name, year, cost = line.strip().split(\",\")\n 
guitars.append(Guitar(name, int(year), float(cost)))\n\nfor guitar in guitars:\n print(guitar)\n","repo_name":"Lijiayi2/cp1404practicals","sub_path":"prac_07/myguitars.py","file_name":"myguitars.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"523517684","text":"from typing import Optional, Tuple\n\nimport tensorflow as tf\nfrom gpflow.base import TensorType\n\nfrom markovflow.kalman_filter import KalmanFilter\nfrom markovflow.kernels import SDEKernel\nfrom markovflow.likelihoods import MultivariateGaussian\nfrom markovflow.mean_function import MeanFunction, ZeroMeanFunction\nfrom markovflow.models.models import MarkovFlowModel\nfrom markovflow.posterior import AnalyticPosteriorProcess, PosteriorProcess\n\n\nclass GaussianProcessRegression(MarkovFlowModel):\n \"\"\"\n Performs GP regression.\n\n The key reference is Chapter 2 of::\n\n Gaussian Processes for Machine Learning\n Carl Edward Rasmussen and Christopher K. I. Williams\n The MIT Press, 2006. ISBN 0-262-18253-X.\n\n This class uses the kernel and the time points to create a state space model.\n GP regression is then a Kalman filter on that state space model using the observations.\n \"\"\"\n\n def __init__(\n self,\n input_data: Tuple[tf.Tensor, tf.Tensor],\n kernel: SDEKernel,\n mean_function: Optional[MeanFunction] = None,\n chol_obs_covariance: Optional[TensorType] = None,\n ) -> None:\n \"\"\"\n :param kernel: A kernel defining a prior over functions.\n :param input_data: A tuple of ``(time_points, observations)`` containing the observed data:\n time points of observations, with shape ``batch_shape + [num_data]``,\n observations with shape ``batch_shape + [num_data, observation_dim]``.\n :param chol_obs_covariance: A :data:`~markovflow.base.TensorType` containing\n the Cholesky factor of the observation noise covariance,\n with shape ``[observation_dim, observation_dim]``.\n A default None value will assume an independent likelihood variance of 1.0.\n :param mean_function: The mean function for the GP. 
Defaults to no mean function.\n \"\"\"\n super().__init__(self.__class__.__name__)\n time_points, observations = input_data\n observation_dim = observations.shape[-1]\n\n if chol_obs_covariance is None:\n chol_obs_covariance = tf.eye(observation_dim)\n\n tf.ensure_shape(chol_obs_covariance, [observation_dim, observation_dim])\n\n # ensure that time_points have the shape: batch_shape + [num_data]\n tf.ensure_shape(time_points, observations.shape[:-1])\n\n # To collect kernel and mean function gpflow.Module trainable_variables\n self._kernel = kernel\n if mean_function is None:\n mean_function = ZeroMeanFunction(obs_dim=1)\n self._mean_function = mean_function\n\n self._time_points = time_points\n self._observations = observations\n\n self._chol_obs_covariance = chol_obs_covariance\n\n @property\n def time_points(self) -> tf.Tensor:\n \"\"\"\n Return the time points of observations.\n\n :return: A tensor with shape ``batch_shape + [num_data]``.\n \"\"\"\n return self._time_points\n\n @property\n def observations(self) -> tf.Tensor:\n \"\"\"\n Return the observations.\n\n :return: A tensor with shape ``batch_shape + [num_data, observation_dim]``.\n \"\"\"\n return self._observations\n\n @property\n def kernel(self) -> SDEKernel:\n \"\"\"\n Return the kernel of the GP.\n \"\"\"\n return self._kernel\n\n @property\n def mean_function(self) -> MeanFunction:\n \"\"\"\n Return the mean function of the GP.\n \"\"\"\n return self._mean_function\n\n @property\n def _kalman(self) -> KalmanFilter:\n # subtract the mean function from the observations, if it exists\n residuals = self._observations\n if self._mean_function is not None:\n residuals -= self._mean_function(self._time_points)\n return KalmanFilter(\n state_space_model=self._kernel.state_space_model(self._time_points),\n emission_model=self._kernel.generate_emission_model(self._time_points),\n observations=residuals,\n chol_obs_covariance=self._chol_obs_covariance,\n )\n\n def loss(self) -> tf.Tensor:\n \"\"\"\n Return the loss, which is the negative log likelihood.\n \"\"\"\n return -self.log_likelihood()\n\n @property\n def posterior(self) -> PosteriorProcess:\n \"\"\"\n Obtain a posterior process for inference.\n\n For this class, this is the :class:`~markovflow.posterior.AnalyticPosteriorProcess`\n built from the Kalman filter.\n \"\"\"\n return AnalyticPosteriorProcess(\n posterior_dist=self._kalman.posterior_state_space_model(),\n kernel=self._kernel,\n conditioning_time_points=self._time_points,\n likelihood=MultivariateGaussian(self._chol_obs_covariance),\n mean_function=self._mean_function,\n )\n\n def log_likelihood(self) -> tf.Tensor:\n \"\"\"\n Calculate the log likelihood of the observations given the kernel parameters.\n\n In other words, :math:`log p(y_{1...T} | ϑ)` for some parameters :math:`ϑ`.\n\n :return: A scalar tensor (summed over the batch shape and the whole trajectory).\n \"\"\"\n return self._kalman.log_likelihood()\n","repo_name":"secondmind-labs/markovflow","sub_path":"markovflow/models/gaussian_process_regression.py","file_name":"gaussian_process_regression.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"3"} +{"seq_id":"5124286669","text":"import config\r\n\r\nfrom aiogram import Bot\r\nfrom aiogram.dispatcher import Dispatcher\r\n\r\nfrom cogs.funCollection import loader, unloader\r\n\r\nimport motor.motor_asyncio\r\n\r\nimport datetime\r\n\r\nbot = Bot(token=config.TOKEN)\r\ndp = Dispatcher(bot)\r\n\r\n# Casino - 
Add-ons\r\ndb1 = motor.motor_asyncio.AsyncIOMotorClient(config.TOKEN_BD1).Royal\r\n\r\n# Casino - User database\r\ndb2 = motor.motor_asyncio.AsyncIOMotorClient(config.TOKEN_BD2).main\r\ndb2Backup = motor.motor_asyncio.AsyncIOMotorClient(config.TOKEN_BD2).mainBackup\r\n\r\n# Casino - Bot stop\r\ndb3 = motor.motor_asyncio.AsyncIOMotorClient(config.TOKEN_BD3).main\r\n\r\n\r\nasync def on_startup(dp):\r\n\tprint(\"[{}]: ЗАПУЩЕНО!\".format(datetime.datetime.now()))\r\n\t\r\n\tfor numerationCommands in config.COMMANDS:\r\n\t\tnewCommandsList = []\r\n\r\n\t\tfor commandSynonyms in config.COMMANDS[numerationCommands][\"usage\"]:\r\n\t\t\tif commandSynonyms[0] != \"^\":\r\n\t\t\t\tnewCommandsList.append(commandSynonyms)\r\n\t\t\telse:\r\n\t\t\t\tcommandSynonyms = commandSynonyms[1:]\r\n\t\t\t\r\n\t\t\tfor prefix in config.PREFIXES:\r\n\t\t\t\tnewCommandsList.append(\"{}{}\".format(prefix, commandSynonyms))\r\n\r\n\t\tconfig.COMMANDS[numerationCommands][\"usage\"] = newCommandsList\r\n\r\n\tawait loader.on_loader()\r\n\r\n\r\nasync def on_shutdown(dp):\r\n\tprint(\"[{}]: СОХРАНЕНИЕ...\".format(datetime.datetime.now()))\r\n\tawait unloader.on_unloader(1)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tfrom aiogram.utils import executor\r\n\tfrom handlers import dp\r\n\t\r\n\texecutor.start_polling(dp, on_startup=on_startup, on_shutdown=on_shutdown)\r\n","repo_name":"aygumov-g/RoyalKavkazBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17194335327","text":"from pathlib import Path\n\nfrom unittest import TestCase\n\nfrom pydantic import BaseSettings\nimport tomllib\nfrom mock import patch, mock_open\n\nfrom nuvlaedge.common.settings_parser import NuvlaConfig\n\n\nclass TestNuvlaConfig(TestCase):\n\n    @patch.object(BaseSettings, 'parse_obj')\n    @patch.object(tomllib, 'loads')\n    @patch.object(Path, 'exists')\n    @patch.object(Path, 'is_file')\n    def test_from_toml(self, mock_is_file, mock_exists, mock_loads, mock_parse):\n        with self.assertRaises(FileNotFoundError):\n            mock_is_file.return_value = False\n            mock_exists.return_value = False\n            NuvlaConfig.from_toml(Path('testPath'))\n\n        mock_is_file.return_value = True\n        mock_exists.return_value = True\n        opener = mock_open(read_data='FILEDATA')\n        mock_loads.return_value = 'TOMLDATA'\n        def mocked_open(self, *args, **kwargs):\n            return opener(self, *args, **kwargs)\n\n        with patch.object(Path, 'open', mocked_open):\n            NuvlaConfig.from_toml(Path('test_path'))\n            mock_loads.assert_called_with('FILEDATA')\n            mock_parse.assert_called_with('TOMLDATA')\n\n","repo_name":"nuvlaedge/nuvlaedge","sub_path":"tests/common/test_settings_parser.py","file_name":"test_settings_parser.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"73954983449","text":"import requests\nimport re\nimport sys\n\nlanguage=sys.argv[1]\nfilename=sys.argv[2]\n\nurl='https://m.runoob.com/api/compile.php'\nfilepath='othercode/'+filename\n\nfile=open(filepath,'r')\ncode=file.read()\nfile.close()\n#print(code)\n\nif language=='php' :\n    referer='https://c.runoob.com/compile/1'\n    langu='3'\n    fileext='php'\nelif language=='python2' :\n    referer='https://c.runoob.com/compile/6'\n    langu='0'\n    fileext='py'\nelif language=='java' :\n    referer='https://c.runoob.com/compile/10'\n    langu='8'\n    fileext='java'\nelif language=='cpp' :\n    
referer='https://c.runoob.com/compile/12'\n langu='7'\n fileext='cpp'\nelif language=='ruby' :\n referer='https://c.runoob.com/compile/13'\n langu='1'\n fileext='rb'\nelif language=='csharp' :\n referer='https://c.runoob.com/compile/14'\n langu='10'\n fileext='cs'\nelif language=='scala' :\n referer='https://c.runoob.com/compile/15'\n langu='5'\n fileext='scala'\nelif language=='objectivec' :\n referer='https://c.runoob.com/compile/16'\n langu='12'\n fileext='oc'\nelif language=='perl' :\n referer='https://c.runoob.com/compile/17'\n langu='14'\n fileext='pl'\nelif language=='bash' :\n referer='https://c.runoob.com/compile/18'\n langu='11'\n fileext='sh'\nelif language=='vb' :\n referer='https://c.runoob.com/compile/19'\n langu='9'\n fileext='vb'\nelif language=='swift' :\n referer='https://c.runoob.com/compile/20'\n langu='16'\n fileext='swift'\nelif language=='go' :\n referer='https://c.runoob.com/compile/21'\n langu='6'\n fileext='go'\nelif language=='node.js' :\n referer='https://c.runoob.com/compile/22'\n langu='4'\n fileext='node.js'\nelif language=='lua' :\n referer='https://c.runoob.com/compile/66'\n langu='17'\n fileext='lua'\nelif language=='pascal' :\n referer='https://c.runoob.com/compile/73'\n langu='18'\n fileext='pas'\nelif language=='kotlin' :\n referer='https://c.runoob.com/compile/2960'\n langu='19'\n fileext='kt'\n\nheaders={\n'Accept': '*/*',\n'Accept-Encoding': 'gzip, deflate, br',\n'Accept-Language': 'zh-CN,zh;q=0.9',\n'Connection': 'keep-alive',\n'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n'Host': 'm.runoob.com',\n'Origin': 'https://c.runoob.com',\n'Referer': referer,\n'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',\n}\n\nformdata={\n'code': code,\n'stdin': '',\n'language': langu,\n'fileext': fileext,\n}\n\nr=requests.post(url,headers=headers,data=formdata)\n#print(r.text)\nresult=re.findall('\"output\":\"(.*?)\"',r.text,0)\nerrors=re.findall('\"errors\":\"(.*?)\"',r.text,0)\n#print(len(errors))\nif len(result[0]) :\n print(result[0])\nelse :\n print(errors[0])","repo_name":"fyr233/liqibot","sub_path":"AppRoot/dear_developer_your_cloud_machine_is_vulnerable_to_code_injection_and_all_your_data_may_be_accessed_by_people_using_the_qq_robot.py","file_name":"dear_developer_your_cloud_machine_is_vulnerable_to_code_injection_and_all_your_data_may_be_accessed_by_people_using_the_qq_robot.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"37577799650","text":"import json\nimport os\n\nimport pytest\nimport requests\nfrom datatable import Frame, dt\n\nfrom sdmx_dt import sdmx_json\nfrom tests import DATA_DIR\n\n# SDMX-JSON v1.0 aligns to SDMX v2.1\nsdmx_json_v1_commit = \"d2bf3f7\"\nsdmx_json_samples_url = (\n f\"https://raw.githubusercontent.com/sdmx-twg/sdmx-json/{sdmx_json_v1_commit}/\"\n \"data-message/samples/\"\n)\nexpected_all = {\n \"agri.json\": {\n \"observations\": Frame(\n {\n \"Reference area\": [\n \"Banteay Meanchey\",\n \"Banteay Meanchey\",\n \"Banteay Meanchey\",\n \"Banteay Meanchey\",\n \"Battambang\",\n \"Battambang\",\n \"Battambang\",\n \"Battambang\",\n ],\n \"Time Period\": [\n \"2014\",\n \"2015\",\n \"2016\",\n \"2017\",\n \"2014\",\n \"2015\",\n \"2016\",\n \"2017\",\n ],\n \"Value\": [\n 350.154,\n 389.385,\n 395.729,\n 433.638,\n 442.996,\n 426.588,\n 479.686,\n 522.296,\n ],\n \"Source\": [\n \"MAFF_Agricultural Statistics_2014\",\n 
\"MAFF_Agricultural Statistics_2015\",\n \"MAFF_Agricultural Statistics_2016\",\n \"MAFF_Agricultural Statistics_2017\",\n \"MAFF_Agricultural Statistics_2014\",\n \"MAFF_Agricultural Statistics_2015\",\n \"MAFF_Agricultural Statistics_2016\",\n \"MAFF_Agricultural Statistics_2017\",\n ],\n \"Observation status\": 8 * [\"Normal value\"],\n }\n ),\n },\n \"exr/exr-action-delete.json\": {\n \"observations\": [\n Frame(\n {\n \"Time period or range\": [\"2013-01-18\", \"2013-01-21\"],\n \"Currency\": [\"Russian rouble\", \"Russian rouble\"],\n \"Value\": [40.3426, 40.3],\n \"Series title\": [\"Russian rouble (RUB)\", \"Russian rouble (RUB)\"],\n \"Observation status\": [\"Normal value\", \"Normal value\"],\n }\n ),\n Frame(),\n ],\n },\n \"exr/exr-cross-section.json\": {\n \"observations\": Frame(\n {\n \"Time period or range\": [\n \"2013-01-18\",\n \"2013-01-18\",\n \"2013-01-21\",\n \"2013-01-21\",\n ],\n \"Currency\": [\n \"New Zealand dollar\",\n \"Russian rouble\",\n \"New Zealand dollar\",\n \"Russian rouble\",\n ],\n \"Value\": [1.5931, 40.3426, 1.5925, 40.3],\n \"Observation status\": 4 * [\"Normal value\"],\n \"Series title\": [\n \"New Zealand dollar (NZD)\",\n \"Russian rouble (RUB)\",\n \"New Zealand dollar (NZD)\",\n \"Russian rouble (RUB)\",\n ],\n }\n ),\n },\n}\n\npytestmark = pytest.mark.parametrize(\"name\", expected_all.keys())\n\n\n@pytest.fixture\ndef sdmx_json_msg_remote(name):\n return sdmx_json.fread_json(sdmx_json_samples_url + name)\n\n\n@pytest.fixture\ndef sdmx_json_msg_local(name):\n path = os.path.join(DATA_DIR, name.split(\"/\")[-1])\n r = requests.get(sdmx_json_samples_url + name)\n raw_msg = json.loads(r.content.decode())\n\n # Fix typos in agri.json. This doesn't preserve original ordering\n if name == \"agri.json\":\n for section_name in [\"attributes\", \"dimensions\"]:\n section = raw_msg[\"data\"][\"structure\"][section_name]\n section[\"dataSet\"] = section.pop(\"dataset\")\n # Fix typos in exr/exr-action-delete.json\n elif name == \"exr/exr-action-delete.json\":\n obs_1 = raw_msg[\"data\"][\"dataSets\"][0][\"series\"][\"0\"][\"observations\"][\"1\"]\n obs_1[1], obs_1[2] = obs_1[2], obs_1[1]\n obs_2 = raw_msg[\"data\"][\"dataSets\"][0][\"series\"][\"1\"][\"observations\"][\"1\"]\n obs_2[1], obs_2[2] = obs_2[2], obs_2[1]\n\n with open(path, \"w\") as f:\n json.dump(raw_msg, f, indent=4)\n return sdmx_json.fread_json(path, is_url=False)\n\n\n@pytest.fixture\ndef expected_dts(name):\n suffix = \".csv\"\n\n tidy_name = name.split(\"/\")[-1]\n name_dir = os.path.join(\"tests\", \"expected_data\", tidy_name)\n expected_dts = {\n f[: -len(suffix)]: dt.fread(os.path.join(name_dir, f))\n for f in os.listdir(name_dir)\n if f.endswith(suffix)\n }\n return expected_dts\n\n\ndef test_fread_json_local_and_remote_eq(\n name, sdmx_json_msg_remote, sdmx_json_msg_local\n):\n # FIXME: Is it possible to fix typos when retrieving from remote??\n if name in [\"agri.json\", \"exr/exr-action-delete.json\"]:\n return NotImplemented\n\n assert sdmx_json_msg_remote == sdmx_json_msg_local\n\n\ndef test_fread_json_types(name, sdmx_json_msg_local):\n msg = sdmx_json_msg_local # shorter alias\n assert isinstance(msg, sdmx_json.SdmxJsonDataMessage)\n assert isinstance(msg.meta, sdmx_json.SdmxJsonMeta) or msg.meta is None\n assert isinstance(msg.data, sdmx_json.SdmxJsonData) or msg.data is None\n assert isinstance(msg.errors, list) and (\n len(msg.errors) == 0 or isinstance(msg.errors[0], sdmx_json.SdmxJsonError)\n )\n\n\ndef test_get_observations(name, sdmx_json_msg_local, helpers):\n 
actual = sdmx_json_msg_local.data.get_observations()\n    expected = expected_all[name][\"observations\"]\n\n    # Should return a list of datatables when there are multiple dataSets\n    if isinstance(expected, list):\n        assert isinstance(actual, list)\n        for actual_i, expected_i in zip(actual, expected):\n            helpers.check_dt_Frames_eq(actual_i, expected_i)\n    else:\n        helpers.check_dt_Frames_eq(actual, expected)\n\n\ndef test_get_dimensions(name, sdmx_json_msg_local, expected_dts, helpers):\n    # get_dimensions() should be accessible from SdmxJsonData and DSD\n    # Check for full datatable\n    actual = sdmx_json_msg_local.data.structure.get_dimensions(include_values=True)\n    actual2 = sdmx_json_msg_local.data.get_dimensions(include_values=True)\n    expected = expected_dts[\"dimensions\"]\n\n    helpers.check_dt_Frames_eq(actual, expected)\n    helpers.check_dt_Frames_eq(actual2, expected)\n\n    # Check for partial datatable\n    partial_columns = (\"keyPosition\", \"id\", \"name\", \"level\")\n    actual_p = sdmx_json_msg_local.data.structure.get_dimensions()\n    actual2_p = sdmx_json_msg_local.data.get_dimensions()\n    expected_p = helpers.dt_unique(expected, partial_columns)\n\n    assert expected_p.keys() == partial_columns\n    helpers.check_dt_Frames_eq(actual_p, expected_p)\n    helpers.check_dt_Frames_eq(actual2_p, expected_p)\n\n\ndef test_get_attributes(name, sdmx_json_msg_local, expected_dts, helpers):\n    # get_attributes() should be accessible from SdmxJsonData and DSD\n    # Check for full datatable\n    actual = sdmx_json_msg_local.data.structure.get_attributes(include_values=True)\n    actual2 = sdmx_json_msg_local.data.get_attributes(include_values=True)\n    expected = expected_dts[\"attributes\"]\n\n    helpers.check_dt_Frames_eq(actual, expected)\n    helpers.check_dt_Frames_eq(actual2, expected)\n\n    # Check for partial datatable\n    partial_columns = (\"id\", \"name\", \"level\")\n    actual_p = sdmx_json_msg_local.data.structure.get_attributes()\n    actual2_p = sdmx_json_msg_local.data.get_attributes()\n    expected_p = helpers.dt_unique(expected, partial_columns)\n\n    helpers.check_dt_Frames_eq(actual_p, expected_p)\n    helpers.check_dt_Frames_eq(actual2_p, expected_p)\n","repo_name":"xaviermiles/sdmx-dt","sub_path":"tests/test_sdmx_json.py","file_name":"test_sdmx_json.py","file_ext":"py","file_size_in_byte":7852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"1687362267","text":"import tkinter as t\r\nimport sys\r\nimport mysql.connector\r\nsys.setrecursionlimit(999999999)\r\ngrandmacount=0\r\ngrandmaprice=10\r\ncookiecount=0\r\n\r\ndef increase():\r\n    global cookiecount\r\n    global grandmacount\r\n    global grandmaprice\r\n    cookiecount += 1\r\n    countL.config(text = \"cookies : \" + str(cookiecount)+'\\ngrandma price : '+str(grandmaprice)+'\\ngrandma count : '+str(grandmacount))\r\n\r\ndef grandm():\r\n    global cookiecount\r\n    \r\n    cookiecount += 1\r\n    countL.config(text = \"cookies : \" + str(cookiecount)+'\\ngrandma price : '+str(grandmaprice)+'\\ngrandma count : '+str(grandmacount))\r\n    countL.after(1000,grandm)\r\n    \r\ndef ab():\r\n    a=0\r\n    \r\ndef price():\r\n    global cookiecount,grandmacount\r\n    global grandmaprice\r\n    cookiecount -=grandmaprice\r\n    grandmaprice+=10\r\n    grandmacount +=1\r\n\r\n\r\ntk1=t.Tk()\r\ncountL = t.Label(tk1, text = \"cookies : \" + str(cookiecount)+'\\ngrandma price : '+str(grandmaprice)+'\\ngrandma count : '+str(grandmacount))\r\ncountL.grid(row = 0, column = 0)\r\nCmain = t.Button(tk1,text = \"substitute\\ncookie\",padx = 50,pady=50,command = increase)\r\n
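# lay out the cookie button; each click calls increase() once\r\n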
Cmain.grid(row = 1,column = 0)\r\nGrandma = t.Button(tk1,text = \"substitute\\ngrandma\",padx = 50,pady=50,command = lambda:[grandm(),price()] if cookiecount>=grandmaprice else ab())\r\nGrandma.grid(row = 1,column = 1)\r\n","repo_name":"BobTheBuilder8536/CookieClicker","sub_path":"CookieClicker.py","file_name":"CookieClicker.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"41129635390","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 1 18:11:01 2022\n\n@author: chaojunma\n\"\"\"\n\n# The algorithm from lecture 3 \n\nimport numpy as np\n\nfirst_item = []\nsecond_item = []\n\nface1 = ['face1.png'] * 5 + ['face2.png'] * 5 + ['face3.png'] * 5 + ['face4.png'] * 5 + ['face5.png'] * 5\nhouse1 = ['houses1.png'] * 5 + ['houses2.png'] * 5 + ['houses3.png'] * 5 + ['houses4.png'] * 5 + ['houses5.png'] * 5\n\nfirst_item.extend(face1)\nfirst_item.extend(house1)\nfirst_item.extend(face1)\nfirst_item.extend(house1)\n\nface2 = ['faces1.png', 'faces2.png','faces3.png','faces4.png','faces5.png'] * 5\nhouse2 = ['houses1.png', 'houses2.png','houses3.png','houses4.png','houses5.png'] * 5\n\nsecond_item.extend(house2)\nsecond_item.extend(face2)\nsecond_item.extend(house2)\nsecond_item.extend(face2)\n\ncues = ['cue1'] * 50 + ['cue2'] * 50\nresult = list(zip(first_item,second_item,cues))\n\nprint(result)\nlen(result)\n\nprint('---')\n\nnp.random.shuffle(result)\nprint(result)\n\n\n\n \n","repo_name":"SakuraChaojun/selected-courses","sub_path":"Computer Programming for Psychology/Assignment_3/Zipping exercises.py","file_name":"Zipping exercises.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"25625440931","text":"from tensorflow import keras\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.model_selection import StratifiedKFold\n\n# Method used to calculate different performance measures of a model. \n# It returns confusion_matrices, histories\ndef performance(location_model, x, y, learning_rate, epoch=15, batch_size=8):\n    # Set values \n    epoch = epoch\n    batch = batch_size\n    splits = 10\n    location = location_model\n    lr = learning_rate\n    loss = \"BinaryCrossentropy\"\n\n    kfold = StratifiedKFold(n_splits=splits, shuffle=True, random_state=123)\n    acc_scores = [] # of test data \n    histories = []\n    confusion_matrices = []\n\n    for train, test in kfold.split(x, y):\n        # 1. Load model \n        model = keras.models.load_model(location)\n\n        # 2. Add precision, recall as metrics\n        model.compile(loss= loss, optimizer=keras.optimizers.legacy.SGD(learning_rate = lr), metrics = ['accuracy', 'Recall', 'Precision']) \n        \n        # 3. Fit the model\n        x_train = np.array(x)[train.astype(int)]\n        y_train = np.array(y)[train.astype(int)]\n        x_test = np.array(x)[test.astype(int)]\n        y_test = np.array(y)[test.astype(int)]\n        \n        hist = model.fit(x_train, y_train, epochs=epoch, batch_size=batch, verbose=0, validation_data = (x_test, y_test))\n        \n        # Save information about model \n        histories.append(hist)\n        \n        # Display accuracy of validation set \n        # hist.history returns all the metrics. By adding ['val_accuracy'][-1] we get only the accuracy of the testing set at the last epoch.\n        
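# note: metrics_names[1] is typically 'accuracy'; the value printed below is the fold's validation accuracy\n        # (index epoch-1 is the last recorded epoch, equivalent to [-1])\n        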
print(\"%s: %.2f%%\" % (model.metrics_names[1], hist.history['val_accuracy'][epoch-1] *100))\n        acc_scores.append(hist.history['val_accuracy'][epoch-1] * 100)\n\n        # Store confusion matrix \n        y_pred = model.predict(x_test)\n        y_pred = [1 if prediction > 0.5 else 0 for prediction in y_pred]\n        confusion_mtx = tf.math.confusion_matrix(y_test, y_pred)\n        confusion_matrices.append(confusion_mtx)\n    \n    print(\"%.2f%% (+/- %.2f%%)\" % (np.mean(acc_scores), np.std(acc_scores)))\n    return confusion_matrices, histories\n\n\ndef get_metrics(epoch, histories):\n\n    # Initialize the lists that will be used and returned \n    list_loss, list_val_loss = [], []\n    list_precision, list_val_precision = [], []\n    list_recall, list_val_recall = [], []\n    list_accuracy, list_val_accuracy = [], []\n\n    for i in range(epoch):\n        temp_loss = [ hist.history['loss'][i] for hist in histories ]\n        list_loss.append(np.mean(temp_loss))\n        temp_val_loss = [ hist.history['val_loss'][i] for hist in histories ]\n        list_val_loss.append(np.mean(temp_val_loss))\n\n        temp_precision = [ hist.history['precision'][i] for hist in histories ]\n        list_precision.append(np.mean(temp_precision))\n        temp_val_precision = [ hist.history['val_precision'][i] for hist in histories ]\n        list_val_precision.append(np.mean(temp_val_precision))\n\n        temp_recall = [ hist.history['recall'][i] for hist in histories ]\n        list_recall.append(np.mean(temp_recall))\n        temp_val_recall = [ hist.history['val_recall'][i] for hist in histories ]\n        list_val_recall.append(np.mean(temp_val_recall))\n\n        temp_accuracy = [ hist.history['accuracy'][i] for hist in histories ]\n        list_accuracy.append(np.mean(temp_accuracy))\n        temp_val_accuracy = [ hist.history['val_accuracy'][i] for hist in histories ]\n        list_val_accuracy.append(np.mean(temp_val_accuracy))\n    return list_loss, list_val_loss, list_precision, list_val_precision, list_recall, list_val_recall, list_accuracy, list_val_accuracy\n\n","repo_name":"RosameliaCarioni/gunshot_detection","sub_path":"methods_audio/model_performance.py","file_name":"model_performance.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"70237242649","text":"# @author lucasmiranda42\n# encoding: utf-8\n# module deepof\n\n\"\"\"Functions and general utilities for the deepof package.\"\"\"\nimport copy\nfrom copy import deepcopy\nfrom dask_image.imread import imread\nfrom difflib import get_close_matches\nfrom itertools import combinations, product\nfrom joblib import Parallel, delayed\nfrom math import atan2, dist\nfrom scipy.signal import savgol_filter\nfrom scipy.spatial.distance import cdist\nfrom segment_anything import sam_model_registry, SamPredictor\nfrom shapely.geometry import Polygon\nfrom sklearn import mixture\nfrom sklearn.feature_selection import VarianceThreshold\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler\nfrom sktime.transformations.series.impute import Imputer\nfrom tqdm import tqdm\nfrom typing import Tuple, Any, List, Union, NewType\nimport argparse\nimport cv2\nimport h5py\nimport math\nimport matplotlib.pyplot as plt\nimport multiprocessing\nimport networkx as nx\nimport numpy as np\nimport os\nimport pandas as pd\nimport regex as re\nimport requests\nimport ruptures as rpt\nimport sleap_io as sio\nimport torch\nimport warnings\n\nimport deepof.data\n\n# DEFINE CUSTOM ANNOTATED TYPES #\nproject = 
NewType(\"deepof_project\", Any)\ncoordinates = NewType(\"deepof_coordinates\", Any)\ntable_dict = NewType(\"deepof_table_dict\", Any)\n\n\n# CONNECTIVITY AND GRAPH REPRESENTATIONS\n\n\ndef connect_mouse(\n animal_ids=None, exclude_bodyparts: list = None, graph_preset: str = \"deepof_14\"\n) -> nx.Graph:\n \"\"\"Create a nx.Graph object with the connectivity of the bodyparts in the DLC topview model for a single mouse.\n\n Used later for angle computing, among others.\n\n Args:\n animal_ids (str): if more than one animal is tagged, specify the animal identyfier as a string.\n exclude_bodyparts (list): Remove the specified nodes from the graph.\n graph_preset (str): Connectivity preset to use. Currently supported: \"deepof_14\" and \"deepof_8\".\n\n Returns:\n connectivity (nx.Graph)\n\n \"\"\"\n if animal_ids is None:\n animal_ids = [\"\"]\n if not isinstance(animal_ids, list):\n animal_ids = [animal_ids]\n\n connectivities = []\n\n for animal_id in animal_ids:\n try:\n connectivity_dict = {\n \"deepof_14\": {\n \"Nose\": [\"Left_ear\", \"Right_ear\"],\n \"Spine_1\": [\"Center\", \"Left_ear\", \"Right_ear\"],\n \"Center\": [\"Left_fhip\", \"Right_fhip\", \"Spine_2\"],\n \"Spine_2\": [\"Left_bhip\", \"Right_bhip\", \"Tail_base\"],\n \"Tail_base\": [\"Tail_1\"],\n \"Tail_1\": [\"Tail_2\"],\n \"Tail_2\": [\"Tail_tip\"],\n },\n \"deepof_8\": {\n \"Nose\": [\"Left_ear\", \"Right_ear\"],\n \"Center\": [\n \"Left_fhip\",\n \"Right_fhip\",\n \"Tail_base\",\n \"Left_ear\",\n \"Right_ear\",\n ],\n \"Tail_base\": [\"Tail_tip\"],\n },\n }\n connectivity = nx.Graph(connectivity_dict[graph_preset])\n except TypeError:\n connectivity = nx.Graph(graph_preset)\n\n if animal_id:\n mapping = {\n node: \"{}_{}\".format(animal_id, node) for node in connectivity.nodes()\n }\n if exclude_bodyparts is not None:\n exclude = [\"{}_{}\".format(animal_id, exc) for exc in exclude_bodyparts]\n nx.relabel_nodes(connectivity, mapping, copy=False)\n else:\n exclude = exclude_bodyparts\n\n if exclude_bodyparts is not None:\n connectivity.remove_nodes_from(exclude)\n\n connectivities.append(connectivity)\n\n if len(connectivities) > 1:\n pass\n\n final_graph = connectivities[0]\n for g in range(1, len(connectivities)):\n final_graph = nx.compose(final_graph, connectivities[g])\n final_graph.add_edge(\n \"{}_Nose\".format(animal_ids[g - 1]), \"{}_Nose\".format(animal_ids[g])\n )\n final_graph.add_edge(\n \"{}_Tail_base\".format(animal_ids[g - 1]),\n \"{}_Tail_base\".format(animal_ids[g]),\n )\n final_graph.add_edge(\n \"{}_Nose\".format(animal_ids[g]), \"{}_Tail_base\".format(animal_ids[g - 1])\n )\n final_graph.add_edge(\n \"{}_Nose\".format(animal_ids[g - 1]), \"{}_Tail_base\".format(animal_ids[g])\n )\n\n return final_graph\n\n\ndef edges_to_weighted_adj(adj: np.ndarray, edges: np.ndarray):\n \"\"\"Convert an edge feature matrix to a weighted adjacency matrix.\n\n Args:\n - adj (np.ndarray): binary adjacency matrix of the current graph.\n - edges (np.ndarray): edge feature matrix. 
Last two axes should be of shape nodes x features.\n\n \"\"\"\n adj = np.repeat(np.expand_dims(adj.astype(float), axis=0), edges.shape[0], axis=0)\n if len(edges.shape) == 3:\n adj = np.repeat(np.expand_dims(adj, axis=1), edges.shape[1], axis=1)\n\n adj[np.where(adj)] = np.concatenate([edges, edges[:, ::-1]], axis=-2).flatten()\n\n return adj\n\n\ndef enumerate_all_bridges(G: nx.graph) -> list:\n \"\"\"Enumerate all 3-node connected sequences in the given graph.\n\n Args:\n - G (nx.graph): Animal connectivity graph.\n\n Returns:\n bridges (list): List with all 3-node connected sequences in the provided graph.\n\n \"\"\"\n degrees = dict(nx.degree(G))\n centers = [node for node in degrees.keys() if degrees[node] >= 2]\n\n bridges = []\n for center in centers:\n for comb in list(combinations(list(G[center].keys()), 2)):\n bridges.append([comb[0], center, comb[1]])\n\n return bridges\n\n\n# QUALITY CONTROL AND PREPROCESSING #\n\n\ndef str2bool(v: str) -> bool:\n \"\"\"\n\n Return the passed string as a boolean.\n\n Args:\n v (str): String to transform to boolean value.\n\n Returns:\n bool. If conversion is not possible, it raises an error\n\n \"\"\"\n if isinstance(v, bool):\n return v # pragma: no cover\n elif v.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n raise argparse.ArgumentTypeError(\"Boolean compatible value expected.\")\n\n\ndef compute_animal_presence_mask(\n quality: table_dict, threshold: float = 0.5\n) -> table_dict:\n \"\"\"Compute a mask of the animal presence in the video.\n\n Args:\n quality (table_dict): Dictionary with the quality of the tracking for each body part and animal.\n threshold (float): Threshold for the quality of the tracking. If the quality is below this threshold, the animal is considered to be absent.\n\n Returns:\n animal_presence_mask (table_dict): Dictionary with the animal presence mask for each bodypart and animal.\n\n \"\"\"\n animal_presence_mask = {}\n\n for exp in quality.keys():\n animal_presence_mask[exp] = {}\n for animal_id in quality._animal_ids:\n animal_presence_mask[exp][animal_id] = (\n quality.filter_id(animal_id)[exp].median(axis=1) > threshold\n ).astype(int)\n\n animal_presence_mask[exp] = pd.DataFrame(animal_presence_mask[exp])\n\n return deepof.data.TableDict(\n animal_presence_mask, typ=\"animal_presence_mask\", animal_ids=quality._animal_ids\n )\n\n\ndef iterative_imputation(project: project, tab_dict: dict, lik_dict: dict):\n \"\"\"Perform iterative imputation on occluded body parts. 
Run per animal and experiment.\n\n Args:\n project (project): Project object.\n tab_dict (dict): Dictionary with the coordinates of the body parts.\n lik_dict (dict): Dictionary with the likelihood of the tracking for each body part and animal.\n\n Returns:\n tab_dict (dict): Dictionary with the coordinates of the body parts after imputation.\n\n \"\"\"\n presence_masks = compute_animal_presence_mask(lik_dict)\n tab_dict = deepof.data.TableDict(\n tab_dict, typ=\"coords\", animal_ids=project.animal_ids\n )\n imputed_tabs = copy.deepcopy(tab_dict)\n\n for animal_id in project.animal_ids:\n\n for k, tab in tab_dict.filter_id(animal_id).items():\n\n try:\n scaler = StandardScaler()\n imputed = Imputer(method=\"drift\",).fit_transform(\n scaler.fit_transform(\n tab.iloc[np.where(presence_masks[k][animal_id].values)[0]]\n )\n )\n\n imputed = pd.DataFrame(\n scaler.inverse_transform(imputed),\n index=tab.index[np.where(presence_masks[k][animal_id].values)[0]],\n columns=tab.loc[:, tab.isnull().mean(axis=0) != 1.0].columns,\n )\n\n imputed_tabs[k].update(imputed)\n\n if tab.shape[1] != imputed.shape[1]:\n warnings.warn(\n \"Some of the body parts have zero measurements. Iterative imputation skips these,\"\n \" which could bring problems downstream. A possible solution could be to refine \"\n \"DLC tracklets.\"\n )\n\n except ValueError:\n warnings.warn(\n f\"Animal {animal_id} in experiment {k} has not enough data. Skipping imputation.\"\n )\n\n return imputed_tabs\n\n\ndef set_missing_animals(\n coordinates: project, tab_dict: dict, lik_dict: dict, animal_ids: list = None\n):\n \"\"\"Set the coordinates of the missing animals to NaN.\n\n Args:\n coordinates (project): Project object.\n tab_dict (dict): Dictionary with the coordinates of the body parts.\n lik_dict (dict): Dictionary with the likelihood of the tracking for each body part and animal.\n animal_ids (list): List with the animal ids to remove. 
If None, all the animals with missing data are processed.\n\n Returns:\n tab_dict (dict): Dictionary with the coordinates of the body parts after removing missing animals.\n\n \"\"\"\n if animal_ids is None:\n try:\n animal_ids = coordinates.animal_ids\n except AttributeError:\n animal_ids = coordinates._animal_ids\n\n presence_masks = compute_animal_presence_mask(lik_dict)\n tab_dict = deepof.data.TableDict(tab_dict, typ=\"qc\", animal_ids=animal_ids)\n\n for animal_id in animal_ids:\n for k, tab in tab_dict.filter_id(animal_id).items():\n try:\n missing_times = tab[presence_masks[k][animal_id] == 0]\n except KeyError:\n missing_times = tab[\n presence_masks[k].sum(axis=1) < (len(animal_ids) - 1)\n ]\n\n tab_dict[k].loc[missing_times.index, missing_times.columns] = np.nan\n\n return tab_dict\n\n\ndef bp2polar(tab: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Return the DataFrame in polar coordinates.\n\n Args:\n tab (pandas.DataFrame): Table with cartesian coordinates.\n\n Returns:\n polar (pandas.DataFrame): Equivalent to input, but with values in polar coordinates.\n\n \"\"\"\n tab_ = np.array(tab)\n complex_ = tab_[:, 0] + 1j * tab_[:, 1]\n polar = pd.DataFrame(np.array([abs(complex_), np.angle(complex_)]).T)\n polar.rename(columns={0: \"rho\", 1: \"phi\"}, inplace=True)\n return polar\n\n\ndef tab2polar(cartesian_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Return a pandas.DataFrame in which all the coordinates are polar.\n\n Args:\n cartesian_df (pandas.DataFrame): DataFrame containing tables with cartesian coordinates.\n\n Returns:\n result (pandas.DataFrame): Equivalent to input, but with values in polar coordinates.\n\n \"\"\"\n result = []\n for df in list(cartesian_df.columns.levels[0]):\n result.append(bp2polar(cartesian_df[df]))\n result = pd.concat(result, axis=1)\n idx = pd.MultiIndex.from_product(\n [list(cartesian_df.columns.levels[0]), [\"rho\", \"phi\"]]\n )\n result.columns = idx\n result.index = cartesian_df.index\n return result\n\n\ndef compute_dist(\n pair_array: np.array, arena_abs: int = 1, arena_rel: int = 1\n) -> pd.DataFrame:\n \"\"\"Return a pandas.DataFrame with the scaled distances between a pair of body parts.\n\n Args:\n pair_array (numpy.array): np.array of shape N * 4 containing X, y positions over time for a given pair of body parts.\n arena_abs (int): Diameter of the real arena in cm.\n arena_rel (int): Diameter of the captured arena in pixels.\n\n Returns:\n result (pd.DataFrame): pandas.DataFrame with the absolute distances between a pair of body parts.\n\n \"\"\"\n lim = 2 if pair_array.shape[1] == 4 else 1\n a, b = pair_array[:, :lim], pair_array[:, lim:]\n ab = a - b\n\n dist = np.sqrt(np.einsum(\"...i,...i\", ab, ab))\n return pd.DataFrame(dist * arena_abs / arena_rel)\n\n\ndef bpart_distance(\n dataframe: pd.DataFrame, arena_abs: int = 1, arena_rel: int = 1\n) -> pd.DataFrame:\n \"\"\"Return a pandas.DataFrame with the scaled distances between all pairs of body parts.\n\n Args:\n dataframe (pandas.DataFrame): pd.DataFrame of shape N*(2*bp) containing X,y positions over time for a given set of bp body parts.\n arena_abs (int): Diameter of the real arena in cm.\n arena_rel (int): Diameter of the captured arena in pixels.\n\n Returns:\n result (pd.DataFrame): pandas.DataFrame with the absolute distances between all pairs of body parts.\n\n \"\"\"\n indexes = combinations(dataframe.columns.levels[0], 2)\n dists = []\n for idx in indexes:\n dist = compute_dist(np.array(dataframe.loc[:, list(idx)]), arena_abs, arena_rel)\n dist.columns = [idx]\n 
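# collect one distance column per body-part pair; all pairs are joined below\n        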
dists.append(dist)\n\n return pd.concat(dists, axis=1)\n\n\ndef angle(bpart_array: np.array) -> np.array:\n \"\"\"Return a numpy.ndarray with the angles between the provided instances.\n\n Args:\n bpart_array (numpy.array): 2D positions over time for a bodypart.\n\n Returns:\n ang (np.array): 1D angles between the three-point-instances.\n\n \"\"\"\n a, b, c = bpart_array\n\n ba = a - b\n bc = c - b\n\n cosine_angle = np.einsum(\"...i,...i\", ba, bc) / (\n np.linalg.norm(ba, axis=1) * np.linalg.norm(bc, axis=1)\n )\n ang = np.arccos(cosine_angle)\n\n return ang\n\n\ndef compute_areas(coords, animal_id=None):\n \"\"\"Compute relevant areas (head, torso, back, full) for the provided coordinates.\n\n Args:\n coords: coordinates of the body parts for a single time point.\n animal_id: animal id for the provided coordinates, if any.\n\n Returns:\n areas: list including head, torso, back, and full areas for the provided coordinates.\n\n \"\"\"\n area_bps = {\n \"head_area\": [\"Nose\", \"Left_ear\", \"Left_fhip\", \"Spine_1\"],\n \"torso_area\": [\"Spine_1\", \"Right_fhip\", \"Spine_2\", \"Left_fhip\"],\n \"back_area\": [\"Spine_1\", \"Right_bhip\", \"Spine_2\", \"Left_bhip\"],\n \"full_area\": [\n \"Nose\",\n \"Left_ear\",\n \"Left_fhip\",\n \"Left_bhip\",\n \"Tail_base\",\n \"Right_bhip\",\n \"Right_fhip\",\n \"Right_ear\",\n ],\n }\n\n areas = {}\n\n for name, bps in area_bps.items():\n\n try:\n if animal_id is not None:\n bps = [\"_\".join([animal_id, bp]) for bp in bps]\n\n x = coords.xs(key=\"x\", level=1)[bps]\n y = coords.xs(key=\"y\", level=1)[bps]\n\n if np.isnan(x).any() or np.isnan(y).any():\n areas[name] = np.nan\n else:\n areas[name] = Polygon(zip(x, y)).area\n\n except KeyError:\n continue\n\n return areas\n\n\ndef rotate(\n p: np.array, angles: np.array, origin: np.array = np.array([0, 0])\n) -> np.array:\n \"\"\"Return a 2D numpy.ndarray with the initial values rotated by angles radians.\n\n Args:\n p (numpy.ndarray): 2D Array containing positions of bodyparts over time.\n angles (numpy.ndarray): Set of angles (in radians) to rotate p with.\n origin (numpy.ndarray): Rotation axis (zero vector by default).\n\n Returns:\n - rotated (numpy.ndarray): rotated positions over time\n\n \"\"\"\n R = np.array([[np.cos(angles), -np.sin(angles)], [np.sin(angles), np.cos(angles)]])\n\n o = np.atleast_2d(origin)\n p = np.atleast_2d(p)\n\n rotated = np.squeeze((R @ (p.T - o.T) + o.T).T)\n\n return rotated\n\n\n# noinspection PyArgumentList\ndef align_trajectories(data: np.array, mode: str = \"all\") -> np.array:\n \"\"\"Remove rotational variance on the trajectories.\n\n Returns a numpy.array with the positions rotated in a way that the center (0 vector), and body part in the first\n column of data are aligned with the y-axis.\n\n Args:\n data (numpy.ndarray): 3D array containing positions of body parts over time, where shape is N (sliding window instances) * m (sliding window size) * l (features)\n mode (string): Specifies if *all* instances of each sliding window get aligned, or only the *center*\n\n Returns:\n aligned_trajs (np.ndarray): 2D aligned positions over time.\n\n \"\"\"\n angles = np.zeros(data.shape[0])\n data = deepcopy(data)\n dshape = data.shape\n\n if mode == \"center\":\n center_time = (data.shape[1] - 1) // 2\n angles = np.arctan2(data[:, center_time, 0], data[:, center_time, 1])\n elif mode == \"all\":\n data = data.reshape(-1, dshape[-1], order=\"C\")\n angles = np.arctan2(data[:, 0], data[:, 1])\n elif mode == \"none\":\n data = data.reshape(-1, dshape[-1], order=\"C\")\n 
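# 'none' applies no rotation, so all alignment angles stay at zero\n        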
angles = np.zeros(data.shape[0])\n\n aligned_trajs = np.zeros(data.shape)\n\n for frame in range(data.shape[0]):\n aligned_trajs[frame] = rotate(\n data[frame].reshape([-1, 2], order=\"C\"), angles[frame]\n ).reshape(data.shape[1:], order=\"C\")\n\n if mode == \"all\" or mode == \"none\":\n aligned_trajs = aligned_trajs.reshape(dshape, order=\"C\")\n\n return aligned_trajs\n\n\ndef load_table(\n tab: str,\n table_path: str,\n table_format: str,\n rename_bodyparts: list = None,\n animal_ids: list = None,\n):\n \"\"\"Loads a table into a structured pandas data frame.\n\n Supports inputs from both DeepLabCut and (S)LEAP.\n\n Args:\n tab (str): Name of the file containing the tracks.\n table_path (string): Full path to the file containing the tracks.\n table_format (str): type of the files to load, coming from either DeepLabCut (CSV and H5) and (S)LEAP (NPY).\n rename_bodyparts (list): list of names to use for the body parts in the provided tracking files. The order should match that of the columns in your DLC tables or the node dimensions on your (S)LEAP .npy files.\n\n Returns:\n loaded_tab (pd.DataFrame): Data frame containing the loaded tracks. Likelihood for (S)LEAP files is imputed as 1.0 (tracked values) or 0.0 (missing values).\n\n \"\"\"\n\n if table_format == \"h5\":\n\n loaded_tab = pd.read_hdf(os.path.join(table_path, tab), dtype=float)\n\n # Adapt index to be compatible with downstream processing\n loaded_tab = loaded_tab.T.reset_index(drop=False).T\n loaded_tab.columns = loaded_tab.loc[\"scorer\", :]\n loaded_tab = loaded_tab.iloc[1:]\n\n elif table_format == \"csv\":\n\n loaded_tab = pd.read_csv(\n os.path.join(table_path, tab),\n index_col=0,\n low_memory=False,\n )\n\n elif table_format in [\"npy\", \"slp\", \"analysis.h5\"]:\n\n if table_format == \"analysis.h5\":\n # Load sleap .h5 file from disk\n with h5py.File(os.path.join(table_path, tab), \"r\") as f:\n loaded_tab = np.stack(np.transpose(f[\"tracks\"][:], [3, 0, 2, 1]))\n slp_bodyparts = [n.decode() for n in f[\"node_names\"][:]]\n slp_animal_ids = [n.decode() for n in f[\"track_names\"][:]]\n\n elif table_format == \"slp\":\n # Use sleap-io to convert .slp files into numpy arrays\n loaded_tab = sio.load_slp(os.path.join(table_path, tab))\n slp_bodyparts = [i.name for i in loaded_tab.skeletons[0].nodes]\n slp_animal_ids = [i.name for i in loaded_tab.tracks]\n loaded_tab = loaded_tab.numpy()\n\n else:\n # Load numpy array from disk\n loaded_tab = np.load(os.path.join(table_path, tab), \"r\")\n\n # Check that body part names are provided\n slp_bodyparts = rename_bodyparts\n if not animal_ids[0]:\n slp_animal_ids = [str(i) for i in range(loaded_tab.shape[1])]\n else:\n slp_animal_ids = animal_ids\n assert (\n len(rename_bodyparts) == loaded_tab.shape[2]\n ), \"Some body part names seem to be missing. 
Did you set the rename_bodyparts argument correctly?\"\n\n # Create the header as a multi index, using animals, body parts and coordinates\n if not animal_ids[0]:\n animal_ids = slp_animal_ids\n\n # Impute likelihood as a third dimension in the last axis,\n # with 1.0 if xy values are present and 0.0 otherwise\n likelihoods = np.expand_dims(\n np.all(np.isfinite(loaded_tab), axis=-1), axis=-1\n ).astype(float)\n loaded_tab = np.concatenate([loaded_tab, likelihoods], axis=-1)\n\n # Collapse nodes and animals to the desired shape\n loaded_tab = pd.DataFrame(loaded_tab.reshape(loaded_tab.shape[0], -1))\n\n multi_index = pd.MultiIndex.from_product(\n [[\"sleap_scorer\"], slp_animal_ids, slp_bodyparts, [\"x\", \"y\", \"likelihood\"]],\n names=[\"scorer\", \"individuals\", \"bodyparts\", \"coords\"],\n )\n multi_index = pd.DataFrame(\n pd.DataFrame(multi_index).explode(0).values.reshape([-1, 4]).T,\n index=[\"scorer\", \"individuals\", \"bodyparts\", \"coords\"],\n )\n\n loaded_tab = pd.concat([multi_index.iloc[1:], loaded_tab], axis=0)\n loaded_tab.columns = multi_index.loc[\"scorer\"]\n\n if rename_bodyparts is not None:\n loaded_tab = rename_track_bps(\n loaded_tab,\n rename_bodyparts,\n (animal_ids if table_format in [\"h5\", \"csv\"] else [\"\"]),\n )\n\n return loaded_tab\n\n\ndef rename_track_bps(\n loaded_tab: pd.DataFrame, rename_bodyparts: list, animal_ids: list\n):\n \"\"\"Renames all body parts in the provided dataframe.\n\n Args:\n loaded_tab (pd.DataFrame): Data frame containing the loaded tracks. Likelihood for (S)LEAP files is imputed as 1.0 (tracked values) or 0.0 (missing values).\n rename_bodyparts (list): list of names to use for the body parts in the provided tracking files. The order should match that of the columns in your DLC tables or the node dimensions on your (S)LEAP files.\n animal_ids (list): list of IDs to use for the animals present in the provided tracking files.\n\n Returns:\n renamed_tab (pd.DataFrame): Data frame with renamed body parts\n\n \"\"\"\n renamed_tab = copy.deepcopy(loaded_tab)\n\n if not animal_ids[0]:\n current_bparts = loaded_tab.loc[\"bodyparts\", :].unique()\n else:\n current_bparts = list(\n map(\n lambda x: \"_\".join(x.split(\"_\")[1:]),\n loaded_tab.loc[\"bodyparts\", :].unique(),\n )\n )\n\n for old, new in zip(current_bparts, rename_bodyparts):\n renamed_tab.replace(old, new, inplace=True, regex=True)\n\n return renamed_tab\n\n\ndef scale_table(\n coordinates: coordinates,\n feature_array: np.ndarray,\n scale: str,\n global_scaler: Any = None,\n):\n \"\"\"Scales features in a table controlling for both individual body size and interanimal variability.\n\n Args:\n coordinates (coordinates): a deepof coordinates object.\n feature_array (np.ndarray): array to scale. Should be shape (instances x features).\n scale (str): Data scaling method. 
Must be one of 'standard', 'robust' (default; recommended) and 'minmax'.\n global_scaler (Any): global scaler, fit in the whole dataset.\n\n \"\"\"\n exp_temp = feature_array.to_numpy()\n\n annot_length = 0\n if coordinates._propagate_labels:\n exp_temp = exp_temp[:, :-1]\n annot_length += 1\n\n if coordinates._propagate_annotations:\n exp_temp = exp_temp[\n :, : -list(coordinates._propagate_annotations.values())[0].shape[1]\n ]\n annot_length += list(coordinates._propagate_annotations.values())[0].shape[1]\n\n if global_scaler is None:\n # Scale each modality separately using a custom function\n exp_temp = scale_animal(exp_temp, scale)\n else:\n # Scale all experiments together, to control for differential stats\n exp_temp = global_scaler.transform(exp_temp)\n\n current_tab = np.concatenate(\n [\n exp_temp,\n feature_array.copy().to_numpy()[:, feature_array.shape[1] - annot_length :],\n ],\n axis=1,\n )\n\n return current_tab\n\n\ndef scale_animal(feature_array: np.ndarray, scale: str):\n \"\"\"Scales features in the provided array.\n\n Args:\n feature_array (np.ndarray): array to scale. Should be shape (instances x features).\n graph (nx.Graph): connectivity graph for the current animals.\n scale (str): Data scaling method. Must be one of 'standard', 'robust' (default; recommended) and 'minmax'.\n\n Returns:\n Scaled version of the input array, with features normalized by modality.\n List of scalers per modality.\n\n \"\"\"\n scalers = []\n\n # number of body part sets to use for coords (x, y), speeds, and distances\n if scale == \"standard\":\n cur_scaler = StandardScaler()\n elif scale == \"minmax\":\n cur_scaler = MinMaxScaler()\n else:\n cur_scaler = RobustScaler()\n\n normalized_array = cur_scaler.fit_transform(feature_array)\n scalers.append(cur_scaler)\n\n return normalized_array\n\n\ndef kleinberg(\n offsets: list, s: float = np.e, gamma: float = 1.0, n=None, T=None, k=None\n):\n \"\"\"Apply Kleinberg's algorithm (described in 'Bursty and Hierarchical Structure in Streams').\n\n The algorithm models activity bursts in a time series as an\n infinite hidden Markov model.\n\n Taken from pybursts (https://github.com/romain-fontugne/pybursts/blob/master/pybursts/pybursts.py)\n and adapted for dependency compatibility reasons.\n\n Args:\n offsets (list): a list of time offsets (numeric)\n s (float): the base of the exponential distribution that is used for modeling the event frequencies\n gamma (float): coefficient for the transition costs between states\n n, T: to have a fixed cost function (not dependent of the given offsets). 
Which is needed if you want to compare bursts for different inputs.\n k: maximum burst level\n\n \"\"\"\n if s <= 1:\n raise ValueError(\"s must be greater than 1!\")\n if gamma <= 0:\n raise ValueError(\"gamma must be positive!\")\n if not n is None and n <= 0:\n raise ValueError(\"n must be positive!\")\n if not T is None and T <= 0:\n raise ValueError(\"T must be positive!\")\n if len(offsets) < 1:\n raise ValueError(\"offsets must be non-empty!\")\n\n offsets = np.array(offsets, dtype=object)\n\n if offsets.size == 1:\n bursts = np.array([0, offsets[0], offsets[0]], ndmin=2, dtype=object)\n return bursts\n\n offsets = np.sort(offsets)\n gaps = np.diff(offsets)\n\n if not np.all(gaps):\n raise ValueError(\"Input cannot contain events with zero time between!\")\n\n if T is None:\n T = np.sum(gaps)\n\n if n is None:\n n = np.size(gaps)\n\n g_hat = T / n\n gamma_log_n = gamma * math.log(n)\n\n if k is None:\n k = int(math.ceil(float(1 + math.log(T, s) + math.log(1 / np.amin(gaps), s))))\n\n def tau(i, j):\n if i >= j:\n return 0\n else:\n return (j - i) * gamma_log_n\n\n alpha_function = np.vectorize(lambda x: s**x / g_hat)\n alpha = alpha_function(np.arange(k))\n\n def f(j, x):\n return alpha[j] * math.exp(-alpha[j] * x)\n\n C = np.repeat(float(\"inf\"), k)\n C[0] = 0\n\n q = np.empty((k, 0))\n for t in range(np.size(gaps)):\n C_prime = np.repeat(float(\"inf\"), k)\n q_prime = np.empty((k, t + 1))\n q_prime.fill(np.nan)\n\n for j in range(k):\n cost_function = np.vectorize(lambda x: C[x] + tau(x, j))\n cost = cost_function(np.arange(0, k))\n\n el = np.argmin(cost)\n\n if f(j, gaps[t]) > 0:\n C_prime[j] = cost[el] - math.log(f(j, gaps[t]))\n\n if t > 0:\n q_prime[j, :t] = q[el, :]\n\n q_prime[j, t] = j + 1\n\n C = C_prime\n q = q_prime\n\n j = np.argmin(C)\n q = q[j, :]\n\n prev_q = 0\n\n N = 0\n for t in range(np.size(gaps)):\n if q[t] > prev_q:\n N = N + q[t] - prev_q\n prev_q = q[t]\n\n bursts = np.array(\n [np.repeat(np.nan, N), np.repeat(offsets[0], N), np.repeat(offsets[0], N)],\n ndmin=2,\n dtype=object,\n ).transpose()\n\n burst_counter = -1\n prev_q = 0\n stack = np.zeros(int(N), dtype=int)\n stack_counter = -1\n for t in range(np.size(gaps)):\n if q[t] > prev_q:\n num_levels_opened = q[t] - prev_q\n for i in range(int(num_levels_opened)):\n burst_counter += 1\n bursts[burst_counter, 0] = prev_q + i\n bursts[burst_counter, 1] = offsets[t]\n stack_counter += 1\n stack[stack_counter] = int(burst_counter)\n elif q[t] < prev_q:\n num_levels_closed = prev_q - q[t]\n for i in range(int(num_levels_closed)):\n bursts[stack[stack_counter], 2] = offsets[t]\n stack_counter -= 1\n prev_q = q[t]\n\n while stack_counter >= 0:\n bursts[stack[stack_counter], 2] = offsets[np.size(gaps)]\n stack_counter -= 1\n\n return bursts\n\n\ndef smooth_boolean_array(a: np.array, scale: int = 1) -> np.array:\n \"\"\"Return a boolean array in which isolated appearances of a feature are smoothed.\n\n Args:\n a (numpy.ndarray): Boolean instances.\n scale (int): Kleinberg scale parameter. 
Higher values result in stricter smoothing.\n\n Returns:\n a (numpy.ndarray): Smoothened boolean instances.\n\n \"\"\"\n offsets = np.where(a)[0]\n if len(offsets) == 0:\n return a # no detected activity\n\n bursts = kleinberg(offsets, gamma=0.01)\n a = np.zeros(np.size(a), dtype=bool)\n for i in bursts:\n if i[0] == scale:\n a[int(i[1]) : int(i[2])] = True\n\n return a\n\n\ndef split_with_breakpoints(a: np.ndarray, breakpoints: list) -> np.ndarray:\n \"\"\"\n\n Split a numpy.ndarray at the given breakpoints.\n\n Args:\n a (np.ndarray): N (instances) * m (features) shape\n breakpoints (list): list of breakpoints obtained with ruptures\n\n Returns:\n split_a (np.ndarray): padded array of shape N (instances) * l (maximum break length) * m (features)\n\n \"\"\"\n rpt_lengths = list(np.array(breakpoints)[1:] - np.array(breakpoints)[:-1])\n\n try:\n max_rpt_length = np.max([breakpoints[0], np.max(rpt_lengths)])\n except ValueError:\n max_rpt_length = breakpoints[0]\n\n # Reshape experiment data according to extracted ruptures\n split_a = np.split(np.expand_dims(a, axis=0), breakpoints[:-1], axis=1)\n\n split_a = [\n np.pad(\n i, ((0, 0), (0, max_rpt_length - i.shape[1]), (0, 0)), constant_values=0.0\n )\n for i in split_a\n ]\n split_a = np.concatenate(split_a, axis=0)\n\n return split_a\n\n\ndef rolling_window(\n a: np.ndarray,\n window_size: int,\n window_step: int,\n automatic_changepoints: str = False,\n precomputed_breaks: np.ndarray = None,\n) -> np.ndarray:\n \"\"\"Return a 3D numpy.array with a sliding-window extra dimension.\n\n Args:\n a (np.ndarray): N (instances) * m (features) shape\n window_size (int): Size of the window to apply\n window_step (int): Step of the window to apply\n automatic_changepoints (str): Changepoint detection algorithm to apply. If False, applies a fixed sliding window.\n precomputed_breaks (np.ndarray): Precomputed breaks to use, bypassing the changepoint detection algorithm. 
None by default (break points are computed).\n\n Returns:\n rolled_a (np.ndarray): N (sliding window instances) * l (sliding window size) * m (features)\n\n \"\"\"\n breakpoints = None\n\n if automatic_changepoints:\n # Define change point detection model using ruptures\n # Remove dimensions with low variance (occurring when aligning the animals with the y axis)\n if precomputed_breaks is None:\n rpt_model = rpt.KernelCPD(\n kernel=automatic_changepoints, min_size=window_size, jump=window_step\n ).fit(VarianceThreshold(threshold=1e-3).fit_transform(a))\n\n # Extract change points from current experiment\n breakpoints = rpt_model.predict(pen=4.0)\n\n else:\n breakpoints = np.cumsum(precomputed_breaks)\n\n rolled_a = split_with_breakpoints(a, breakpoints)\n\n else:\n shape = (a.shape[0] - window_size + 1, window_size) + a.shape[1:]\n strides = (a.strides[0],) + a.strides\n rolled_a = np.lib.stride_tricks.as_strided(\n a, shape=shape, strides=strides, writeable=True\n )[::window_step]\n\n return rolled_a, breakpoints\n\n\ndef rupture_per_experiment(\n table_dict: table_dict,\n to_rupture: np.ndarray,\n rupture_indices: list,\n automatic_changepoints: str,\n window_size: int,\n window_step: int,\n precomputed_breaks: dict = None,\n) -> np.ndarray:\n \"\"\"Apply the rupture method independently to each experiment, and concatenate into a single dataset at the end.\n\n Returns a dataset and the rupture indices, adapted to be used in a concatenated version\n of the labels.\n\n Args:\n table_dict (deepof.data.table_dict): table_dict with all experiments.\n to_rupture (np.ndarray): Array with dataset to rupture.\n rupture_indices (list): Indices of tables to rupture. Useful to select training and test sets.\n automatic_changepoints (str): Rupture method to apply. If false, a sliding window of window_length * window_size is obtained. If one of \"l1\", \"l2\" or \"rbf\", different automatic change point detection algorithms are applied on each independent experiment.\n window_size (int): If automatic_changepoints is False, specifies the length of the sliding window. If not, it determines the minimum size of the obtained time series breaks.\n window_step (int): If automatic_changepoints is False, specifies the stride of the sliding window. 
If not, it determines the minimum step size of the obtained time series breaks.\n precomputed_breaks (dict): If provided, changepoint detection is prevented, and provided breaks are used instead.\n\n Returns:\n ruptured_dataset (np.ndarray): Dataset with all ruptures concatenated across the first axis.\n rupture_indices (list): Indices of ruptures.\n\n \"\"\"\n # Generate a base ruptured training set and a set of breaks\n ruptured_dataset, break_indices = None, None\n cumulative_shape = 0\n # Iterate over all experiments and populate them\n for i, (key, tab) in enumerate(table_dict.items()):\n if i in rupture_indices:\n current_size = tab.shape[0]\n current_train, current_breaks = rolling_window(\n to_rupture[cumulative_shape : cumulative_shape + current_size],\n window_size,\n window_step,\n automatic_changepoints,\n (None if not precomputed_breaks else precomputed_breaks[key]),\n )\n # Add shape of the current tab as the last breakpoint,\n # to avoid skipping breakpoints between experiments\n if current_breaks is not None:\n current_breaks = np.array(current_breaks) + cumulative_shape\n\n cumulative_shape += current_size\n\n try: # pragma: no cover\n # To concatenate the current ruptures with the ones obtained\n # until now, pad the smallest to the length of the largest\n # alongside axis 1 (temporal dimension) with zeros.\n if ruptured_dataset.shape[1] >= current_train.shape[1]:\n current_train = np.pad(\n current_train,\n (\n (0, 0),\n (0, ruptured_dataset.shape[1] - current_train.shape[1]),\n (0, 0),\n ),\n )\n elif ruptured_dataset.shape[1] < current_train.shape[1]:\n ruptured_dataset = np.pad(\n ruptured_dataset,\n (\n (0, 0),\n (0, current_train.shape[1] - ruptured_dataset.shape[1]),\n (0, 0),\n ),\n )\n\n # Once that's taken care of, concatenate ruptures alongside axis 0\n ruptured_dataset = np.concatenate([ruptured_dataset, current_train])\n if current_breaks is not None:\n break_indices = np.concatenate([break_indices, current_breaks])\n except (ValueError, AttributeError):\n ruptured_dataset = current_train\n if current_breaks is not None:\n break_indices = current_breaks\n\n return ruptured_dataset, break_indices\n\n\ndef smooth_mult_trajectory(\n series: np.array, alpha: int = 0, w_length: int = 11\n) -> np.ndarray:\n \"\"\"Return a smoothed a trajectory using a Savitzky-Golay 1D filter.\n\n Args:\n series (numpy.ndarray): 1D trajectory array with N (instances)\n alpha (int): 0 <= alpha < w_length; indicates the difference between the degree of the polynomial and the window length for the Savitzky-Golay filter used for smoothing. Higher values produce a worse fit, hence more smoothing.\n w_length (int): Length of the sliding window to which the filter fit. 
Higher values yield a coarser fit, hence more smoothing.\n\n Returns:\n smoothed_series (np.ndarray): smoothed version of the input, with equal shape\n\n \"\"\"\n if alpha is None:\n return series\n\n smoothed_series = savgol_filter(\n series, polyorder=(w_length - alpha), window_length=w_length, axis=0\n )\n\n assert smoothed_series.shape == series.shape\n\n return smoothed_series\n\n\ndef moving_average(time_series: pd.Series, lag: int = 5) -> pd.Series:\n \"\"\"Fast implementation of a moving average function.\n\n Args:\n time_series (pd.Series): Uni-variate time series to take the moving average of.\n lag (int): size of the convolution window used to compute the moving average.\n\n Returns:\n moving_avg (pd.Series): Uni-variate moving average over time_series.\n\n \"\"\"\n moving_avg = np.convolve(time_series, np.ones(lag) / lag, mode=\"same\")\n\n return moving_avg\n\n\ndef mask_outliers(\n time_series: pd.DataFrame,\n likelihood: pd.DataFrame,\n likelihood_tolerance: float,\n lag: int,\n n_std: int,\n mode: str,\n) -> pd.DataFrame:\n \"\"\"Return a mask over the bivariate trajectory of a body part, identifying as True all detected outliers.\n\n An outlier can be marked with one of two criteria: 1) the likelihood reported by DLC is below likelihood_tolerance,\n and/or 2) the deviation from a moving average model is greater than n_std.\n\n Args:\n time_series (pd.DataFrame): Bi-variate time series representing the x, y positions of a single body part\n likelihood (pd.DataFrame): Data frame with likelihood data per body part as extracted from deeplabcut\n likelihood_tolerance (float): Minimum tolerated likelihood, below which an outlier is called\n lag (int): Size of the convolution window used to compute the moving average\n n_std (int): Number of standard deviations over the moving average to be considered an outlier\n mode (str): If \"and\" (default) both x and y have to be marked in order to call an outlier. If \"or\", one is enough.\n\n Returns\n mask (pd.DataFrame): Bi-variate mask over time_series. 
True indicates an outlier.\n\n \"\"\"\n moving_avg_x = moving_average(time_series[\"x\"], lag)\n moving_avg_y = moving_average(time_series[\"y\"], lag)\n\n residuals_x = time_series[\"x\"] - moving_avg_x\n residuals_y = time_series[\"y\"] - moving_avg_y\n\n outlier_mask_x = np.abs(residuals_x) > np.mean(\n residuals_x[lag:-lag]\n ) + n_std * np.std(residuals_x[lag:-lag])\n outlier_mask_y = np.abs(residuals_y) > np.mean(\n residuals_y[lag:-lag]\n ) + n_std * np.std(residuals_y[lag:-lag])\n outlier_mask_l = likelihood < likelihood_tolerance\n mask = None\n\n if mode == \"and\":\n mask = (outlier_mask_x & outlier_mask_y) | outlier_mask_l\n elif mode == \"or\":\n mask = (outlier_mask_x | outlier_mask_y) | outlier_mask_l\n\n return mask\n\n\ndef full_outlier_mask(\n experiment: pd.DataFrame,\n likelihood: pd.DataFrame,\n likelihood_tolerance: float,\n exclude: str,\n lag: int,\n n_std: int,\n mode: str,\n) -> pd.DataFrame:\n \"\"\"Iterate over all body parts of experiment, and outputs a dataframe where all x, y positions are replaced by a boolean mask, where True indicates an outlier.\n\n Args:\n experiment (pd.DataFrame): Data frame with time series representing the x, y positions of every body part\n likelihood (pd.DataFrame): Data frame with likelihood data per body part as extracted from deeplabcut\n likelihood_tolerance (float): Minimum tolerated likelihood, below which an outlier is called\n exclude (str): Body part to exclude from the analysis (to concatenate with bpart alignment)\n lag (int): Size of the convolution window used to compute the moving average\n n_std (int): Number of standard deviations over the moving average to be considered an outlier\n mode (str): If \"and\" (default) both x and y have to be marked in order to call an outlier. If \"or\", one is enough.\n\n Returns:\n full_mask (pd.DataFrame): Mask over all body parts in experiment. True indicates an outlier\n\n \"\"\"\n body_parts = experiment.columns.levels[0]\n full_mask = experiment.copy()\n\n if exclude:\n full_mask.drop(exclude, axis=1, inplace=True)\n\n for bpart in body_parts:\n if bpart != exclude:\n mask = mask_outliers(\n experiment[bpart],\n likelihood[bpart],\n likelihood_tolerance,\n lag,\n n_std,\n mode,\n )\n\n full_mask.loc[:, (bpart, \"x\")] = mask\n full_mask.loc[:, (bpart, \"y\")] = mask\n continue\n\n return full_mask\n\n\ndef interpolate_outliers(\n experiment: pd.DataFrame,\n likelihood: pd.DataFrame,\n likelihood_tolerance: float,\n exclude: str = \"\",\n lag: int = 5,\n n_std: int = 3,\n mode: str = \"or\",\n limit: int = 10,\n) -> pd.DataFrame:\n \"\"\"Mark all outliers in experiment and replaces them using a uni-variate linear interpolation approach.\n\n Note that this approach only works for equally spaced data (constant camera acquisition rates).\n\n Args:\n experiment (pd.DataFrame): Data frame with time series representing the x, y positions of every body part.\n likelihood (pd.DataFrame): Data frame with likelihood data per body part as extracted from deeplabcut.\n likelihood_tolerance (float): Minimum tolerated likelihood, below which an outlier is called.\n exclude (str): Body part to exclude from the analysis (to concatenate with bpart alignment).\n lag (int): Size of the convolution window used to compute the moving average.\n n_std (int): Number of standard deviations over the moving average to be considered an outlier.\n mode (str): If \"and\" both x and y have to be marked in order to call an outlier. 
If \"or\" (default), one is enough.\n limit (int): Maximum of consecutive outliers to interpolate. Defaults to 10.\n\n Returns:\n interpolated_exp (pd.DataFrame): Interpolated version of experiment.\n\n \"\"\"\n interpolated_exp = experiment.copy()\n\n # Creates a mask marking all outliers\n mask = full_outlier_mask(\n experiment, likelihood, likelihood_tolerance, exclude, lag, n_std, mode\n )\n\n interpolated_exp[mask] = np.nan\n interpolated_exp.interpolate(\n method=\"linear\", limit=limit, limit_direction=\"both\", inplace=True\n )\n # Add original frames to what happens before lag\n interpolated_exp = pd.concat(\n [experiment.iloc[:lag, :], interpolated_exp.iloc[lag:, :]]\n )\n\n return interpolated_exp\n\n\ndef filter_columns(columns: list, selected_id: str) -> list:\n \"\"\"Given a set of TableDict columns, returns those that correspond to a given animal, specified in selected_id.\n\n Args:\n columns (list): List of columns to filter.\n selected_id (str): Animal ID to filter for.\n\n Returns:\n filtered_columns (list): List of filtered columns.\n\n \"\"\"\n if selected_id is None:\n return columns\n\n columns_to_keep = []\n for column in columns:\n # Speed transformed columns\n if selected_id == \"supervised\" and column in [\n \"nose2nose\",\n \"sidebyside\",\n \"sidereside\",\n ]:\n columns_to_keep.append(column)\n if type(column) == str and column.startswith(selected_id):\n columns_to_keep.append(column)\n # Raw coordinate columns\n if column[0].startswith(selected_id) and column[1] in [\"x\", \"y\", \"rho\", \"phi\"]:\n columns_to_keep.append(column)\n # Raw distance and angle columns\n elif len(column) in [2, 3] and all([i.startswith(selected_id) for i in column]):\n columns_to_keep.append(column)\n elif column[0].lower().startswith(\"pheno\"):\n columns_to_keep.append(column)\n\n return columns_to_keep\n\n\ndef load_segmentation_model(path):\n model_url = \"https://datashare.mpcdf.mpg.de/s/GccLGXXZmw34f8o/download\"\n\n if path is None:\n installation_path = os.path.dirname(os.path.abspath(__file__))\n path = os.path.join(\n installation_path,\n \"trained_models\",\n \"arena_segmentation\",\n \"sam_vit_h_4b8939.pth\",\n )\n\n if not os.path.exists(path):\n # Creating directory if it does not exist\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n print(\"Arena segmentation model not found. 
Downloading...\")\n\n response = requests.get(model_url, stream=True)\n response.raise_for_status()\n\n with open(path, \"wb\") as file:\n total_length = int(response.headers.get(\"content-length\"))\n for chunk in tqdm(\n response.iter_content(chunk_size=1024),\n total=total_length // 1024,\n unit=\"KB\",\n ):\n if chunk:\n file.write(chunk)\n\n # Load the model using PyTorch\n sam = sam_model_registry[\"vit_h\"](checkpoint=path)\n sam.to(device=\"cpu\")\n predictor = SamPredictor(sam)\n\n return predictor\n\n\ndef get_arenas(\n coordinates: coordinates,\n tables: table_dict,\n arena: str,\n arena_dims: int,\n project_path: str,\n project_name: str,\n segmentation_model_path: str,\n videos: list = None,\n debug: bool = False,\n test: bool = False,\n):\n \"\"\"Extract arena parameters from a project or coordinates object.\n\n Args:\n coordinates (coordinates): Coordinates object.\n tables (table_dict): TableDict object containing tracklets per animal.\n arena (str): Arena type (must be either \"polygonal-manual\", \"circular-manual\", \"polygonal-autodetect\", or \"circular-autodetect\").\n arena_dims (int): Arena dimensions.\n project_path (str): Path to project.\n project_name (str): Name of project.\n segmentation_model_path (str): Path to segmentation model used for automatic arena detection.\n videos (list): List of videos to extract arena parameters from. Defaults to None (all videos are used).\n debug (bool): If True, a frame per video with the detected arena is saved. Defaults to False.\n test (bool): If True, the function is run in test mode. Defaults to False.\n\n Returns:\n arena_params (list): List of arena parameters.\n\n \"\"\"\n scales = []\n arena_params = []\n video_resolution = []\n\n def get_first_length(arena_corners):\n return math.dist(arena_corners[0], arena_corners[1])\n\n if arena in [\"polygonal-manual\", \"circular-manual\"]: # pragma: no cover\n\n propagate_last = False\n for i, video_path in enumerate(videos):\n\n if not propagate_last:\n arena_corners, h, w = extract_polygonal_arena_coordinates(\n os.path.join(project_path, project_name, \"Videos\", video_path),\n arena,\n i,\n videos,\n )\n\n if arena_corners is None:\n propagate_last = True\n\n else:\n cur_scales = [\n *np.mean(arena_corners, axis=0).astype(int),\n get_first_length(arena_corners),\n arena_dims,\n ]\n\n if propagate_last:\n cur_arena_params = arena_params[-1]\n cur_scales = scales[-1]\n else:\n cur_arena_params = arena_corners\n\n if arena == \"circular-manual\":\n\n if not propagate_last:\n cur_arena_params = fit_ellipse_to_polygon(cur_arena_params)\n\n scales.append(\n list(\n np.array(\n [\n cur_arena_params[0][0],\n cur_arena_params[0][1],\n np.mean(\n [cur_arena_params[1][0], cur_arena_params[1][1]]\n )\n * 2,\n ]\n )\n )\n + [arena_dims]\n )\n else:\n scales.append(cur_scales)\n\n arena_params.append(cur_arena_params)\n video_resolution.append((h, w))\n\n elif arena in [\"polygonal-autodetect\", \"circular-autodetect\"]:\n\n # Open GUI for manual labelling of two scaling points in the first video\n arena_reference = None\n if arena == \"polygonal-autodetect\": # pragma: no cover\n\n if test:\n arena_reference = np.zeros((4, 2))\n else:\n arena_reference = extract_polygonal_arena_coordinates(\n os.path.join(project_path, project_name, \"Videos\", videos[0]),\n arena,\n 0,\n [videos[0]],\n )[0]\n\n # Load SAM\n segmentation_model = load_segmentation_model(segmentation_model_path)\n\n for vid_index, _ in enumerate(videos):\n arena_parameters, h, w = automatically_recognize_arena(\n 
coordinates=coordinates,\n tables=tables,\n videos=videos,\n vid_index=vid_index,\n path=os.path.join(project_path, project_name, \"Videos\"),\n arena_type=arena,\n segmentation_model=segmentation_model,\n arena_reference=arena_reference,\n debug=debug,\n )\n\n if \"polygonal\" in arena:\n\n closest_side_points = closest_side(\n simplify_polygon(arena_parameters), arena_reference[:2]\n )\n\n scales.append(\n [\n *np.mean(arena_parameters, axis=0).astype(int),\n dist(*closest_side_points),\n arena_dims,\n ]\n )\n\n elif \"circular\" in arena:\n # scales contains the coordinates of the center of the arena,\n # the absolute diameter measured from the video in pixels, and\n # the provided diameter in mm (1 -default- equals not provided)\n scales.append(\n list(\n np.array(\n [\n arena_parameters[0][0],\n arena_parameters[0][1],\n np.mean(\n [arena_parameters[1][0], arena_parameters[1][1]]\n )\n * 2,\n ]\n )\n )\n + [arena_dims]\n )\n\n arena_params.append(arena_parameters)\n video_resolution.append((h, w))\n\n elif not arena:\n return None, None, None\n\n else: # pragma: no cover\n raise NotImplementedError(\n \"arenas must be set to one of: 'polygonal-manual', 'polygonal-autodetect', 'circular-manual', 'circular-autodetect'\"\n )\n\n return np.array(scales), arena_params, video_resolution\n\n\ndef simplify_polygon(polygon: list, relative_tolerance: float = 0.05):\n \"\"\"Simplify a polygon using the Ramer-Douglas-Peucker algorithm.\n\n Args:\n polygon (list): List of polygon coordinates.\n relative_tolerance (float): Relative tolerance for simplification. Defaults to 0.05.\n\n Returns:\n simplified_poly (list): List of simplified polygon coordinates.\n\n \"\"\"\n poly = Polygon(polygon)\n perimeter = poly.length\n tolerance = perimeter * relative_tolerance\n\n simplified_poly = poly.simplify(tolerance, preserve_topology=False)\n return list(simplified_poly.exterior.coords)[\n :-1\n ] # Exclude last point (same as first)\n\n\ndef closest_side(polygon: list, reference_side: list):\n \"\"\"Find the closest side in other polygons to a reference side in the first polygon.\n\n Args:\n polygon (list): List of polygons.\n reference_side (list): List of coordinates of the reference side.\n\n Returns:\n closest_side_points (list): List of coordinates of the closest side.\n\n \"\"\"\n\n def angle(p1, p2):\n return atan2(p2[1] - p1[1], p2[0] - p1[0])\n\n ref_length = dist(*reference_side)\n ref_angle = angle(*reference_side)\n\n min_difference = float(\"inf\")\n closest_side_points = None\n\n for i in range(len(polygon)):\n side_points = (polygon[i], polygon[(i + 1) % len(polygon)])\n side_length = dist(*side_points)\n side_angle = angle(*side_points)\n total_difference = abs(side_length - ref_length) + abs(side_angle - ref_angle)\n\n if total_difference < min_difference:\n min_difference = total_difference\n closest_side_points = list(side_points)\n\n return closest_side_points\n\n\n# noinspection PyUnboundLocalVariable\ndef automatically_recognize_arena(\n coordinates: coordinates,\n tables: table_dict,\n videos: list,\n vid_index: int,\n path: str = \".\",\n arena_type: str = \"circular-autodetect\",\n arena_reference: list = None,\n segmentation_model: torch.nn.Module = None,\n debug: bool = False,\n) -> Tuple[np.array, int, int]:\n \"\"\"Return numpy.ndarray with information about the arena recognised from the first frames of the video.\n\n WARNING: estimates won't be reliable if the camera moves along the video.\n\n Args:\n coordinates (coordinates): Coordinates object.\n tables (table_dict): 
Dictionary of tables per experiment.\n        videos (list): Relative paths of the videos to analyse.\n        vid_index (int): Element of videos list to use.\n        path (str): Full path of the directory where the videos are.\n        arena_type (string): Arena type; must be one of ['circular-autodetect', 'circular-manual', 'polygon-manual'].\n        arena_reference (list): List of coordinates defining the reference arena annotated by the user.\n        segmentation_model (torch.nn.Module): Model used for automatic arena detection.\n        debug (bool): If True, save a video frame with the arena detected.\n\n    Returns:\n        arena (np.ndarray): 1D-array containing information about the arena. If the arena is circular, returns a 3-element array -> center, radius, and angle. If arena is polygonal, returns a list with the x-y position of each of the n vertices of the polygon.\n        h (int): Height of the video in pixels.\n        w (int): Width of the video in pixels.\n\n    \"\"\"\n    # Read video as a 3D array\n    current_video = imread(os.path.join(path, videos[vid_index]))\n    h, w = current_video[0].shape[:2]\n\n    # Select the corresponding tracklets\n    current_tab = tables[\n        get_close_matches(\n            videos[vid_index].split(\".\")[0],\n            [\n                vid\n                for vid in tables.keys()\n                if (\n                    vid.startswith(videos[vid_index].split(\".\")[0])\n                    or videos[vid_index].startswith(vid)\n                )\n            ],\n            cutoff=0.01,\n            n=1,\n        )[0]\n    ]\n\n    # Get distances of all body parts and timepoints to both center and periphery\n    distances_to_center = cdist(\n        current_tab.values.reshape(-1, 2), np.array([[w // 2, h // 2]])\n    ).reshape(current_tab.shape[0], -1)\n\n    possible_frames = np.nanmin(distances_to_center, axis=1) > np.nanpercentile(\n        distances_to_center, 5.0\n    )\n    possible_distances_to_center = distances_to_center[possible_frames]\n    current_video = current_video[: possible_frames.shape[0]][possible_frames]\n\n    if arena_reference is not None:\n        # If a reference is provided manually, avoid frames where the mouse is too close to the edges, which can\n        # hinder segmentation\n        min_distance_to_arena = cdist(\n            current_tab.values.reshape(-1, 2), arena_reference\n        ).reshape([distances_to_center.shape[0], -1, len(arena_reference)])\n\n        min_distance_to_arena = min_distance_to_arena[possible_frames]\n        current_frame = np.argmax(\n            np.nanmin(np.nanmin(min_distance_to_arena, axis=1), axis=1)\n        )\n\n    else:\n        # If not, use the maximum distance to the center as a proxy\n        current_frame = np.argmin(np.nanmax(possible_distances_to_center, axis=1))\n\n    frame = current_video[current_frame].compute()\n\n    # Get mask using the segmentation model\n    segmentation_model.set_image(frame)\n\n    frame_masks, score, logits = segmentation_model.predict(\n        point_coords=np.array([[w // 2, h // 2]]),\n        point_labels=np.array([1]),\n        multimask_output=True,\n    )\n\n    # Get arenas for all retrieved masks, and select the one whose area is closest to the reference\n    if arena_reference is not None:\n        arenas = [\n            arena_parameter_extraction(frame_mask, arena_type)\n            for frame_mask in frame_masks\n        ]\n        arena = arenas[\n            np.argmin(\n                np.abs(\n                    [Polygon(arena_reference).area - Polygon(a).area for a in arenas]\n                )\n            )\n        ]\n    else:\n        arena = arena_parameter_extraction(frame_masks[np.argmax(score)], arena_type)\n\n    if debug:\n\n        # Save frame with mask and arena detected\n        frame_with_arena = np.ascontiguousarray(frame.copy(), dtype=np.uint8)\n\n        if \"circular\" in arena_type:\n            cv2.ellipse(\n                img=frame_with_arena,\n                center=arena[0],\n                axes=arena[1],\n                angle=arena[2],\n                startAngle=0.0,\n                
endAngle=360.0,\n                color=(40, 86, 236),\n                thickness=3,\n            )\n\n        elif \"polygonal\" in arena_type:\n\n            cv2.polylines(\n                img=frame_with_arena,\n                pts=[arena],\n                isClosed=True,\n                color=(40, 86, 236),\n                thickness=3,\n            )\n\n            # Plot scale references\n            closest_side_points = closest_side(\n                simplify_polygon(arena), arena_reference[:2]\n            )\n\n            for point in closest_side_points:\n                cv2.circle(\n                    frame_with_arena,\n                    list(map(int, point)),\n                    radius=10,\n                    color=(40, 86, 236),\n                    thickness=2,\n                )\n\n        cv2.imwrite(\n            os.path.join(\n                coordinates.project_path,\n                coordinates.project_name,\n                \"Arena_detection\",\n                f\"{videos[vid_index][:-4]}_arena_detection.png\",\n            ),\n            frame_with_arena,\n        )\n\n    return arena, h, w\n\n\ndef retrieve_corners_from_image(\n    frame: np.ndarray, arena_type: str, cur_vid: int, videos: list\n):  # pragma: no cover\n    \"\"\"Open a window and wait for the user to click on all corners of the polygonal arena.\n\n    The user should click on the corners in sequential order.\n\n    Args:\n        frame (np.ndarray): Frame to display.\n        arena_type (str): Type of arena to be used. Must be one of the following: \"circular-manual\", \"polygon-manual\".\n        cur_vid (int): Index of the current video in the list of videos.\n        videos (list): List of videos to be processed.\n\n    Returns:\n        corners (np.ndarray): nx2 array containing the x-y coordinates of all n corners.\n\n    \"\"\"\n    corners = []\n\n    def click_on_corners(event, x, y, flags, param):\n        # Callback function to store the coordinates of the clicked points\n        nonlocal corners, frame\n\n        if event == cv2.EVENT_LBUTTONDOWN:\n            corners.append((x, y))\n\n    # Resize frame to a standard size\n    frame = frame.copy()\n\n    # Create a window and display the image\n    cv2.startWindowThread()\n\n    while True:\n        frame_copy = frame.copy()\n\n        cv2.imshow(\n            \"deepof - Select polygonal arena corners - (q: exit / d: delete{}) - {}/{} processed\".format(\n                (\" / p: propagate last to all remaining videos\" if cur_vid > 0 else \"\"),\n                cur_vid,\n                len(videos),\n            ),\n            frame_copy,\n        )\n\n        cv2.setMouseCallback(\n            \"deepof - Select polygonal arena corners - (q: exit / d: delete{}) - {}/{} processed\".format(\n                (\" / p: propagate last to all remaining videos\" if cur_vid > 0 else \"\"),\n                cur_vid,\n                len(videos),\n            ),\n            click_on_corners,\n        )\n\n        # Display already selected corners\n        if len(corners) > 0:\n            for c, corner in enumerate(corners):\n                cv2.circle(frame_copy, (corner[0], corner[1]), 4, (40, 86, 236), -1)\n                # Display lines between the corners\n                if len(corners) > 1 and c > 0:\n                    if \"polygonal\" in arena_type or len(corners) < 5:\n                        cv2.line(\n                            frame_copy,\n                            (corners[c - 1][0], corners[c - 1][1]),\n                            (corners[c][0], corners[c][1]),\n                            (40, 86, 236),\n                            2,\n                        )\n\n        # Close the polygon\n        if len(corners) > 2:\n            if \"polygonal\" in arena_type or len(corners) < 5:\n                cv2.line(\n                    frame_copy,\n                    (corners[0][0], corners[0][1]),\n                    (corners[-1][0], corners[-1][1]),\n                    (40, 86, 236),\n                    2,\n                )\n        if len(corners) >= 5 and \"circular\" in arena_type:\n            cv2.ellipse(\n                frame_copy,\n                *fit_ellipse_to_polygon(corners),\n                startAngle=0,\n                endAngle=360,\n                color=(40, 86, 236),\n                thickness=3,\n            )\n\n        cv2.imshow(\n            \"deepof - Select polygonal arena corners - (q: exit / d: delete{}) - {}/{} processed\".format(\n                (\" / p: propagate last to all remaining videos\" if cur_vid > 0 else \"\"),\n                cur_vid,\n                len(videos),\n            ),\n            frame_copy,\n        )\n\n        # Remove last added coordinate if user presses 'd'\n        if cv2.waitKey(1) & 0xFF == ord(\"d\"):\n            corners = corners[:-1]\n\n        # Exit if user presses 'q'\n        if len(corners) > 2:\n            if cv2.waitKey(1) & 
0xFF == ord(\"q\"):\n                break\n\n        # Exit and propagate the last coordinates to all remaining videos if user presses 'p'\n        if cur_vid > 0 and cv2.waitKey(1) & 0xFF == ord(\"p\"):\n            corners = None\n            break\n\n    cv2.destroyAllWindows()\n    cv2.waitKey(1)\n\n    # Return the corners\n    return corners\n\n\ndef extract_polygonal_arena_coordinates(\n    video_path: str, arena_type: str, video_index: int, videos: list\n):  # pragma: no cover\n    \"\"\"Read a random frame from the selected video, and open an interactive GUI to let the user delineate the arena manually.\n\n    Args:\n        video_path (str): Path to the video file.\n        arena_type (str): Type of arena to be used. Must be one of the following: \"circular-manual\", \"polygonal-manual\".\n        video_index (int): Index of the current video in the list of videos.\n        videos (list): List of videos to be processed.\n\n    Returns:\n        np.ndarray: nx2 array containing the x-y coordinates of all n corners of the polygonal arena.\n        int: Height of the video.\n        int: Width of the video.\n\n    \"\"\"\n    current_video = imread(video_path)\n    current_frame = np.random.choice(current_video.shape[0])\n\n    # Get and return the corners of the arena\n    try:\n        import google.colab\n\n        arena_corners = retrieve_corners_from_colab(\n            current_video[current_frame].compute(),\n            arena_type,\n            video_index,\n            videos,\n        )\n\n    except ImportError:\n        arena_corners = retrieve_corners_from_image(\n            current_video[current_frame].compute(),\n            arena_type,\n            video_index,\n            videos,\n        )\n    return arena_corners, current_video.shape[2], current_video.shape[1]\n\n\ndef fit_ellipse_to_polygon(polygon: list):  # pragma: no cover\n    \"\"\"Fit an ellipse to the provided polygon.\n\n    Args:\n        polygon (list): List of (x,y) coordinates of the corners of the polygon.\n\n    Returns:\n        tuple: (x,y) coordinates of the center of the ellipse.\n        tuple: (a,b) semi-major and semi-minor axes of the ellipse.\n        float: Angle of the ellipse.\n\n    \"\"\"\n    # Detect the main ellipse containing the arena\n    ellipse_params = cv2.fitEllipse(np.array(polygon))\n\n    # Parameters to return\n    center_coordinates = tuple([int(i) for i in ellipse_params[0]])\n    axes_length = tuple([int(i) // 2 for i in ellipse_params[1]])\n    ellipse_angle = ellipse_params[2]\n\n    return center_coordinates, axes_length, ellipse_angle\n\n\ndef arena_parameter_extraction(\n    frame: np.ndarray,\n    arena_type: str,\n) -> np.array:\n    \"\"\"Return x,y position of the center, the lengths of the major and minor axes, and the angle of the recognised arena.\n\n    Args:\n        frame (np.ndarray): numpy.ndarray representing an individual frame of a video\n        arena_type (str): Type of arena to be used. 
Must be either \"circular\" or \"polygonal\".\n\n \"\"\"\n # Obtain contours from the image, and retain the largest one\n cnts, _ = cv2.findContours(\n frame.astype(np.int64), cv2.RETR_FLOODFILL, cv2.CHAIN_APPROX_TC89_KCOS\n )\n main_cnt = np.argmax([len(c) for c in cnts])\n\n if \"circular\" in arena_type:\n center_coordinates, axes_length, ellipse_angle = fit_ellipse_to_polygon(\n cnts[main_cnt]\n )\n return center_coordinates, axes_length, ellipse_angle\n\n elif \"polygonal\" in arena_type:\n return np.squeeze(cnts[main_cnt])\n\n\ndef rolling_speed(\n dframe: pd.DatetimeIndex,\n window: int = 3,\n rounds: int = 3,\n deriv: int = 1,\n center: str = None,\n shift: int = 2,\n typ: str = \"coords\",\n) -> pd.DataFrame:\n \"\"\"Return the average speed over n frames in pixels per frame.\n\n Args:\n dframe (pandas.DataFrame): Position over time dataframe.\n window (int): Number of frames to average over.\n rounds (int): Float rounding decimals.\n deriv (int): Position derivative order; 1 for speed, 2 for acceleration, 3 for jerk, etc.\n center (str): For internal usage only; solves an issue with pandas.MultiIndex that arises when centering frames to a specific body part.\n shift (int): Window shift for rolling speed calculation.\n typ (str): Type of dataset. Intended for internal usage only.\n\n Returns:\n speeds (pd.DataFrame): Data frame containing 2D speeds for each body part in the original data or their\n consequent derivatives.\n\n \"\"\"\n original_shape = dframe.shape\n try:\n body_parts = dframe.columns.levels[0]\n except AttributeError:\n body_parts = dframe.columns\n\n speeds = pd.DataFrame\n\n for der in range(deriv):\n features = 2 if der == 0 and typ == \"coords\" else 1\n\n distances = (\n np.concatenate(\n [\n np.array(dframe).reshape([-1, features], order=\"C\"),\n np.array(dframe.shift(shift)).reshape([-1, features], order=\"C\"),\n ],\n axis=1,\n )\n / shift\n )\n\n distances = np.array(compute_dist(distances))\n distances = distances.reshape(\n [\n original_shape[0],\n (original_shape[1] // 2 if typ == \"coords\" else original_shape[1]),\n ],\n order=\"C\",\n )\n distances = pd.DataFrame(distances, index=dframe.index)\n speeds = np.round(distances.rolling(window).mean(), rounds)\n dframe = speeds\n\n speeds.columns = body_parts\n\n return speeds.fillna(0.0)\n\n\ndef filter_short_bouts(\n cluster_assignments: np.ndarray,\n cluster_confidence: np.ndarray,\n confidence_indices: np.ndarray,\n min_confidence: float = 0.0,\n min_bout_duration: int = None,\n): # pragma: no cover\n \"\"\"Filter out cluster assignment bouts shorter than min_bout_duration.\n\n Args:\n cluster_assignments (np.ndarray): Array of cluster assignments.\n cluster_confidence (np.ndarray): Array of cluster confidence values.\n confidence_indices (np.ndarray): Array of confidence indices.\n min_confidence (float): Minimum confidence value.\n min_bout_duration (int): Minimum bout duration in frames.\n\n Returns:\n np.ndarray: Mask of confidence indices to keep.\n\n \"\"\"\n # Compute bout lengths, and filter out bouts shorter than min_bout_duration\n bout_lengths = np.diff(\n np.where(\n np.diff(np.concatenate([[np.inf], cluster_assignments, [np.inf]])) != 0\n )[0]\n )\n\n if min_bout_duration is None:\n min_bout_duration = np.mean(bout_lengths)\n\n confidence_indices[\n np.repeat(bout_lengths, bout_lengths) < min_bout_duration\n ] = False\n\n # Compute average confidence per bout\n cum_bout_lengths = np.concatenate([[0], np.cumsum(bout_lengths)])\n bout_average_confidence = np.array(\n [\n 
cluster_confidence[confidence_indices][\n cum_bout_lengths[i] : cum_bout_lengths[i + 1]\n ].mean()\n for i in range(len(bout_lengths))\n ]\n )\n\n return (np.repeat(bout_average_confidence, bout_lengths) >= min_confidence) & (\n confidence_indices\n )\n\n\n# MACHINE LEARNING FUNCTIONS #\n\n\ndef gmm_compute(x: np.array, n_components: int, cv_type: str) -> list:\n \"\"\"Fit a Gaussian Mixture Model to the provided data and returns evaluation metrics.\n\n Args:\n x (numpy.ndarray): Data matrix to train the model\n n_components (int): Number of Gaussian components to use\n cv_type (str): Covariance matrix type to use. Must be one of \"spherical\", \"tied\", \"diag\", \"full\".\n\n Returns:\n - gmm_eval (list): model and associated BIC for downstream selection.\n\n \"\"\"\n gmm = mixture.GaussianMixture(\n n_components=n_components,\n covariance_type=cv_type,\n max_iter=100000,\n init_params=\"kmeans\",\n )\n gmm.fit(x)\n gmm_eval = [gmm, gmm.bic(x)]\n\n return gmm_eval\n\n\ndef gmm_model_selection(\n x: pd.DataFrame,\n n_components_range: range,\n part_size: int,\n n_runs: int = 100,\n n_cores: int = False,\n cv_types: Tuple = (\"spherical\", \"tied\", \"diag\", \"full\"),\n) -> Tuple[List[list], List[np.ndarray], Union[int, Any]]:\n \"\"\"Run GMM clustering model selection on the specified X dataframe.\n\n Outputs the bic distribution per model, a vector with the median BICs and an object with the overall best model.\n\n Args:\n x (pandas.DataFrame): Data matrix to train the models\n n_components_range (range): Generator with numbers of components to evaluate\n n_runs (int): Number of bootstraps for each model\n part_size (int): Size of bootstrap samples for each model\n n_cores (int): Number of cores to use for computation\n cv_types (tuple): Covariance Matrices to try. 
All four available by default\n\n    Returns:\n        - bic (list): All recorded BIC values for all attempted parameter combinations (useful for plotting).\n        - m_bic (list): All minimum BIC values recorded throughout the process (useful for plotting).\n        - best_bic_gmm (sklearn.GMM): Unfitted version of the best found model.\n\n    \"\"\"\n    # Set the default of n_cores to the most efficient value\n    if not n_cores:\n        n_cores = min(multiprocessing.cpu_count(), n_runs)\n\n    bic = []\n    m_bic = []\n    lowest_bic = np.inf\n    best_bic_gmm = 0\n\n    pbar = tqdm(total=len(cv_types) * len(n_components_range))\n\n    for cv_type in cv_types:\n\n        for n_components in n_components_range:\n\n            res = Parallel(n_jobs=n_cores, prefer=\"threads\")(\n                delayed(gmm_compute)(\n                    x.sample(part_size, replace=True), n_components, cv_type\n                )\n                for _ in range(n_runs)\n            )\n            bic.append([i[1] for i in res])\n\n            pbar.update(1)\n            m_bic.append(np.median([i[1] for i in res]))\n            if m_bic[-1] < lowest_bic:\n                lowest_bic = m_bic[-1]\n                best_bic_gmm = res[0][0]\n\n    return bic, m_bic, best_bic_gmm\n\n\n# RESULT ANALYSIS FUNCTIONS #\n\n\ndef cluster_transition_matrix(\n    cluster_sequence: np.array,\n    nclusts: int,\n    autocorrelation: bool = True,\n    return_graph: bool = False,\n) -> Tuple[Union[nx.Graph, Any], np.ndarray]:\n    \"\"\"Compute the transition matrix between clusters and the autocorrelation in the sequence.\n\n    Args:\n        cluster_sequence (numpy.array): Sequence of cluster assignments.\n        nclusts (int): Number of clusters in the sequence.\n        autocorrelation (bool): Whether to compute the autocorrelation of the sequence.\n        return_graph (bool): Whether to return the transition matrix as a networkx.DiGraph object.\n\n    Returns:\n        trans_normed (numpy.ndarray / networkx.Graph): Transition matrix as numpy.ndarray or networkx.DiGraph.\n        autocorr (numpy.array): If autocorrelation is True, returns a numpy.ndarray with all autocorrelation values on cluster assignment.\n    \"\"\"\n    # Stores all possible transitions between clusters\n    clusters = [str(i) for i in range(nclusts)]\n    cluster_sequence = cluster_sequence.astype(str)\n\n    trans = {t: 0 for t in product(clusters, clusters)}\n    k = len(clusters)\n\n    # Stores the cluster sequence as a string\n    transtr = \"\".join(list(cluster_sequence))\n\n    # Assigns to each transition the number of times it occurs in the sequence\n    for t in trans.keys():\n        trans[t] = len(re.findall(\"\".join(t), transtr, overlapped=True))\n\n    # Normalizes the counts to add up to 1 for each departing cluster\n    trans_normed = np.zeros([k, k]) + 1e-5\n    for t in trans.keys():\n        trans_normed[int(t[0]), int(t[1])] = np.round(\n            trans[t]\n            / (sum({i: j for i, j in trans.items() if i[0] == t[0]}.values()) + 1e-5),\n            3,\n        )\n\n    # If specified, returns the transition matrix as an nx.Graph object\n    if return_graph:\n        trans_normed = nx.Graph(trans_normed)\n\n    if autocorrelation:\n        cluster_sequence = list(map(int, cluster_sequence))\n        autocorr = np.corrcoef(cluster_sequence[:-1], cluster_sequence[1:])\n        return trans_normed, autocorr\n\n    return trans_normed\n","repo_name":"mlfpm/deepof","sub_path":"deepof/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":76742,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"31"} +{"seq_id":"13657055356","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input().rstrip())\r\narray = []\r\n\r\nfor _ in range(n):\r\n    array.append(list(input().rstrip().split()))\r\n\r\nresult = sorted(array, key=lambda x: (int(x[3]), int(x[2]), 
int(x[1])))\r\n\r\nprint(result[-1][0])\r\nprint(result[0][0])","repo_name":"jwshin0908/Algorithm","sub_path":"백준/Silver/5635. 생일/생일.py","file_name":"생일.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43388145848","text":"from weighted_graph import Graph\nimport string\nimport dijkstra\n\nglobal infinity\ninfinity = float('inf')\n\nclass A_star(Graph):\n def __init__(self) -> None:\n super().__init__()\n \n def traverse(self, start: str, goal: str) -> list:\n \n # Until the goal node has been visited:\n while not self.graph[goal].visited:\n # Find the node with the lowest \"f\" value that has not been visited\n lowestF = infinity\n lowestNode = None\n \n for node in self.graph:\n if self.graph[node].f < lowestF and not self.graph[node].visited:\n lowestNode = node\n lowestF = self.graph[node].f\n \n # For each connected node that has not been visited: \n for child in self.graph[lowestNode].children:\n # i. Calculate the relative distance from the start by adding the edge value and the heuristic\n relative_distance = self.graph[child.node].h + child.weight\n \n #ii. If the distance from the start plus the heuristic is lower than the currently recorded value for \"f\":\n if relative_distance < self.graph[child.node].f:\n # 1. Set the \"f\" value of the connected node to the newly calculated distance\n self.graph[child.node].f = relative_distance\n \n\t\t\t\t\t# 2. Set the previous node to the current node\n self.graph[child.node].previous = lowestNode\n \n # set the current node as visited\n self.graph[lowestNode].visited = True\n \n # Start from the goal node\n path = []\n path.append(self.graph[goal].node)\n previous = None\n # Repeat until start node is reached \n while previous is not start:\n # Add the previous node to the start of a list\n previous = self.graph[path[len(path)-1]].previous\n path.append(self.graph[previous].node)\n \n # Output the list \n return path\n \n \n \nif __name__ == \"__main__\":\n graph = A_star()\n graph_dijkstra = dijkstra.Dijkstra() # to compare paths for validation\n \n graph.create_graph('A','B','C','D','E','F','Z')\n \n graph.add_children('A',['B',9],['C',4],['D',7])\n graph.add_children('B',['E',11])\n graph.add_children('C',['E',17],['F',12])\n graph.add_children('D',['F',14])\n graph.add_children('E',['Z',5])\n graph.add_children('F',['Z',9])\n \n graph_dijkstra.graph = graph.graph\n \n graph.graph['A'].g = 0\n graph.graph['A'].h = 21\n graph.graph['B'].h = 14\n graph.graph['C'].h = 18\n graph.graph['D'].h = 18\n graph.graph['E'].h = 5\n graph.graph['F'].h = 8\n \n \n for letter in list(string.ascii_uppercase)[:5]:\n graph.graph[letter].updateF()\n \n path = graph.traverse('A','Z')\n print(path)\n \n path = graph_dijkstra.traverse('A','Z')\n print(path)\n \n \n ","repo_name":"panji-pana/a-level-algorithms","sub_path":"python/A_star.py","file_name":"A_star.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22787625426","text":"import imageio\nimport os\nfrom musket2.datasets import DataSet,PredictionItem,DataOwner\nfrom musket2 import utils\nimport numpy as np\nimport imgaug as iaa\nimport torch\nimport traceback\n\n\nclass SimpleDirBasedDataSet(DataSet):\n\n def __init__(self, path, in_ext=\"png\", out_ext=\"png\"):\n path = utils.normalize_path(path)\n self.path = path\n ldir = os.listdir(path)\n if \".DS_Store\" in ldir:\n 
ldir.remove(\".DS_Store\")\n self.ids = [x[0:x.index('.')] for x in ldir if \"mask_floor\" in x]\n self.in_ext = in_ext\n self.out_ext = out_ext\n pass\n\n def __len__(self):\n return len(self.ids)\n\n def __getitem__(self, item):\n in_ext = self.in_ext\n image = imageio.imread(os.path.join(self.path, self.ids[item] + \".\" + in_ext))\n mask = imageio.imread(os.path.join(self.path, self.ids[item] + \".\" + in_ext + \"_mask_floor.png\"))\n image = image.astype(np.uint8)\n p = PredictionItem(self.ids[item] + str(), image[:, :, 0:3], mask[:, :, 0].astype(np.bool))\n return p\n\n\n\nclass SegmentationWrapper:\n\n def __init__(self, ds, shape,aug=None,nc=1) -> None:\n self.ds = ds\n self.nc=nc\n rs=iaa.augmenters.Resize({\"height\": shape[0], \"width\": shape[1]})\n self.shape=shape\n if aug is not None:\n self.aug:iaa.augmenters.Augmenter=iaa.augmenters.Sequential([aug,rs])\n else:\n self.aug =rs\n\n def __getitem__(self, ind: int):\n try:\n row = self.ds[ind]\n image_aug, segmap_aug= self.aug(image=row.x,segmentation_maps=iaa.SegmentationMapsOnImage(row.y,shape=(row.y.shape[0],row.y.shape[1],1)))\n\n image = torch.from_numpy(image_aug.astype(np.float32)).permute((2, 0, 1))\n mask = torch.from_numpy(segmap_aug.arr.astype(np.float32)).permute((2, 0, 1))\n return {\"image\": image, \"label\": mask}\n except:\n traceback.print_exc()\n image = torch.zeros(( self.shape[2], self.shape[0], self.shape[1]))\n mask = torch.zeros(( self.nc, self.shape[0], self.shape[1]))\n return {\"image\": image, \"label\": mask}\n\n def __len__(self) -> int:\n return len(self.ds)\n\n\n\n\ndef create_segmentation_loader(ds,shape,batch_size,num_data_workers=0,shuffle=True,drop_last=True,augmentation=None,nc=1):\n sw=SegmentationWrapper(ds,shape,aug=augmentation,nc=nc)\n return DataOwner(sw,batch_size,num_data_workers,shuffle,drop_last).new_loader()\n","repo_name":"petrochenko-pavel-a/musket2","sub_path":"musket2/musket2/imaging/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19022809113","text":"# Author: Gabriel De Jesus\r\n# Date: August 17, 2019\r\n# Purpose: Filter data from Excel spreadsheets\r\n# Simple GUI included for ease of use, but can be run from the command line if needed\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter.filedialog import askopenfilename\r\nimport pandas as pd\r\nfrom pandas import DataFrame\r\nimport tkinter as tk\r\n\r\nroot = Tk( )\r\n\r\n# Process Grant Section\r\ndef GrantFilter():\r\n name = None\r\n name = askopenfilename(filetypes =((\"Excel 97/2003\", \"*.xls\"),(\"Excel\",\"*.xlsx\"),(\"All Files\",\"*.*\")))\r\n if name != None:\r\n df_initial = pd.read_excel(name)\r\n df_final = pd.DataFrame()\r\n total_rows = len(df_initial.index)\r\n root.destroy()\r\n df_initial.columns = df_initial.columns.str.strip()\r\n # We iterate through rows to match values in a specified column\r\n # Here, we're looking to match either CA/AZ in State data\r\n # We can also further specify regions in the state, using zip code\r\n for row in range(total_rows):\r\n if ( str(df_initial.loc[row]['State']) == 'CA'):\r\n if ( int(str(df_initial.loc[row]['Zip'])[:5]) <= 93108 ):\r\n df_final = df_final.append(df_initial.loc[row])\r\n for row in range(total_rows):\r\n if ( str(df_initial.loc[row]['State']) == 'AZ'):\r\n df_final = df_final.append(df_initial.loc[row])\r\n # export data to excel after sorting by any specific column if wanted\r\n df_final 
= df_final.sort_values(by=['Dollars Remaining'], ascending=False)\r\n df_final.to_excel('Grants-Filtered.xlsx', index=False)\r\n elif name == '':\r\n root.destroy()\r\n return\r\n else:\r\n root.destroy()\r\n return\r\n\r\n# This function is essentially the same as above, except we can further\r\n# limit any filtering of excel data by implementing an \"array\" of values\r\n# we want to match, converting it to a set, and then filtering\r\n# the set to an appendable dataframe which we concatenate\r\n# before exporting to a new Excel file.\r\ndef EvoFilter():\r\n evo_name = None\r\n evo_name = askopenfilename(filetypes =((\"Excel 97/2003\", \"*.xls\"),(\"Excel\",\"*.xlsx\"),(\"All Files\",\"*.*\")))\r\n if evo_name != None:\r\n #evo_core_df = pd.DataFrame()\r\n df_evo_initial = pd.read_excel(evo_name, sheet_name=\"Product Line\", skiprows=16)\r\n evo_core_df = pd.DataFrame()\r\n det_core_df = pd.DataFrame()\r\n combined_df = pd.DataFrame()\r\n total_evo_rows = len(df_evo_initial.index)\r\n root.destroy()\r\n evo_core_part_list = ['part1','part2','etc']\r\n evo_core_part_list_set = set(evo_core_part_list)\r\n det_core_part_list = ['secondary_part1','etc']\r\n det_core_part_list_set = set(det_core_part_list)\r\n #print(df_evo_initial)\r\n for evo_row in range(total_evo_rows):\r\n if ( str(df_evo_initial.loc[evo_row]['Product Line'])[:3] in evo_core_part_list_set ):\r\n evo_core_df = evo_core_df.append(df_evo_initial.loc[evo_row])\r\n evo_core_df = evo_core_df.sort_values(by=[' YTD 2019 Net Sales'], ascending=False)\r\n for det_row in range(total_evo_rows):\r\n if ( str(df_evo_initial.loc[det_row]['Product Line'])[:3] in det_core_part_list_set ):\r\n det_core_df = det_core_df.append(df_evo_initial.loc[det_row])\r\n # We can sort data extracted by various attributes that may be in the spreadsheet\r\n # Total sales, Revenue, Profit, etc.\r\n det_core_df = det_core_df.sort_values(by=[' YTD 2019 Net Sales'], ascending=False)\r\n combined_df = combined_df.append(evo_core_df)\r\n combined_df = combined_df.append(pd.Series(), ignore_index=True)\r\n combined_df = combined_df.append(det_core_df)\r\n\r\n combined_df.to_excel('Evo-Core-Filtered.xlsx', index=False)\r\n elif evo_name == '':\r\n root.destroy()\r\n return\r\n else:\r\n root.destroy()\r\n return\r\n######\r\nTitle = root.title( \"Excel Filtering - Gabriel De Jesus\")\r\nlabel = ttk.Label(root, text =\"Program by: Gabriel De Jesus. 
\\nNot for distribution.\",foreground=\"red\",font=(\"Times New Roman\", 12))\r\nlabel.pack()\r\n\r\n# Menu Bar\r\n#menu = Menu(root)\r\n#root.config(menu=menu)\r\n#file = Menu(menu, tearoff=0)\r\n#file.add_command(label = 'Grant Filtering', command = GrantFilter)\r\n#file.add_command(label = 'EVO Core Filter', command = EvoFilter)\r\n#file.add_command(label = 'Exit', command = lambda:exit())\r\n#menu.add_cascade(label = 'File', menu = file)\r\n# Convenience Buttons\r\ngrantbutton = tk.Button(root, text=\"Grant Filtering (CA/AZ)\", command=GrantFilter)\r\ngrantbutton.pack()\r\nevobutton = tk.Button(root, text=\"EVO Filtering\", command=EvoFilter)\r\nevobutton.pack()\r\nquitbutton = tk.Button(root, text=\"Exit\", fg=\"red\", bg=\"black\", command=lambda:exit())\r\nquitbutton.pack()\r\nroot.mainloop()\r\n","repo_name":"sammwang/excel-filter-python","sub_path":"excelfilter.py","file_name":"excelfilter.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16982557090","text":"import pika\nimport sqlCommands\n\ncredentials = pika.PlainCredentials('gabe','gabe')\n\nparameters = pika.ConnectionParameters('192.168.1.160',5672,'/',credentials)\n\n\nconnection = pika.BlockingConnection(parameters)\n\nchannel = connection.channel()\n\nchannel.queue_declare(queue='SQL')\n\n\nchannel.basic_publish(exchange='',\n routing_key='SQL',\n body='Authentication Failed')\n\n\nprint(\"Authentication Failed\")\n\nconnection.close()\n\n","repo_name":"Gabe397/Tales","sub_path":"talesDB/failSend.py","file_name":"failSend.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12904371836","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.template.loader import render_to_string\nfrom django.core.files.storage import FileSystemStorage\nfrom django.conf import settings\nfrom django.core.files.base import ContentFile\nimport io\nimport PyPDF2\n\nfrom pdf_parser import summarize_helper, get_completion\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef upload_book(request):\n if request.method == 'POST':\n # Get the uploaded file from the request\n uploaded_file = request.FILES['file']\n\n # Save the uploaded file to a temporary file on the server\n fs = FileSystemStorage()\n filename = fs.save(uploaded_file.name, uploaded_file)\n uploaded_file_url = fs.url(filename)\n \n # Render a response with the URL of the uploaded file\n return render(request, 'upload_success.html', {'uploaded_file_url': uploaded_file_url})\n \n else:\n # Render a response with an upload form\n return render(request, 'upload.html')\n\n\ndef summarize(request):\n if request.method == 'POST':\n # Get the uploaded file and page range from the form\n uploaded_file = request.FILES['pdf-file']\n start_page = request.POST.get('start-page')\n end_page = request.POST.get('end-page')\n\n # Save the uploaded file to a temporary file on the server\n fs = FileSystemStorage()\n filename = fs.save(uploaded_file.name, uploaded_file)\n uploaded_file_url = fs.url(filename)\n filepath = fs.path(filename)\n\n # Parse the PDF and get the summary for the selected page range\n with open(filepath, 'rb') as pdf_file:\n \n # pdf_reader = PyPDF2.PdfFileReader(pdf_file)\n # if page_range == 'all':\n # text = ''\n # for page in range(pdf_reader.numPages):\n # 
page_obj = pdf_reader.getPage(page)\n # text += page_obj.extractText()\n # else:\n # start, end = page_range.split('-')\n # start = int(start) - 1\n # end = int(end)\n # text = ''\n # for page in range(start, end):\n # page_obj = pdf_reader.getPage(page)\n # text += page_obj.extractText()\n summary = summarize_helper(pdf_file, start_page, end_page)\n\n # Render the summary as a string and return it as an HTTP response\n # summary = render_to_string('summary.html', {'text': text})\n response = HttpResponse(summary, content_type='text/html')\n response['Content-Disposition'] = 'attachment; filename=\"summary.html\"'\n return response\n\n else:\n return HttpResponse('Invalid request method')\n\n","repo_name":"devanshi-jain/openai_api_project","sub_path":"guide_backend/BookBrief/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32863609625","text":"from qiskit.chemistry.drivers import BaseDriver, UnitsType\nfrom qiskit.chemistry import QiskitChemistryError\nfrom qiskit.chemistry.drivers.pyquanted.integrals import compute_integrals\nimport importlib\nfrom enum import Enum\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass BasisType(Enum):\n BSTO3G = 'sto3g'\n B631G = '6-31g'\n B631GSS = '6-31g**'\n\n\nclass PyQuanteDriver(BaseDriver):\n \"\"\"Python implementation of a PyQuante driver.\"\"\"\n\n KEY_UNITS = 'units'\n KEY_BASIS = 'basis'\n\n CONFIGURATION = {\n \"name\": \"PYQUANTE\",\n \"description\": \"PyQuante Driver\",\n \"input_schema\": {\n \"$schema\": \"http://json-schema.org/schema#\",\n \"id\": \"pyquante_schema\",\n \"type\": \"object\",\n \"properties\": {\n \"atoms\": {\n \"type\": \"string\",\n \"default\": \"H 0.0 0.0 0.0; H 0.0 0.0 0.735\"\n },\n KEY_UNITS: {\n \"type\": \"string\",\n \"default\": UnitsType.ANGSTROM.value,\n \"oneOf\": [\n {\"enum\": [\n UnitsType.ANGSTROM.value,\n UnitsType.BOHR.value,\n ]}\n ]\n },\n \"charge\": {\n \"type\": \"integer\",\n \"default\": 0\n },\n \"multiplicity\": {\n \"type\": \"integer\",\n \"default\": 1\n },\n KEY_BASIS: {\n \"type\": \"string\",\n \"default\": BasisType.BSTO3G.value,\n \"oneOf\": [\n {\"enum\": [\n BasisType.BSTO3G.value,\n BasisType.B631G.value,\n BasisType.B631GSS.value,\n ]}\n ]\n }\n },\n \"additionalProperties\": False\n }\n }\n\n def __init__(self,\n atoms,\n units=UnitsType.ANGSTROM,\n charge=0,\n multiplicity=1,\n basis=BasisType.BSTO3G):\n \"\"\"\n Initializer\n Args:\n atoms (str or list): atoms list or string separated by semicolons or line breaks\n units (UnitsType): angstrom or bohr\n charge (int): charge\n multiplicity (int): spin multiplicity\n basis (BasisType): sto3g or 6-31g or 6-31g**\n \"\"\"\n if not isinstance(atoms, list) and not isinstance(atoms, str):\n raise QiskitChemistryError(\"Invalid atom input for PYQUANTE Driver '{}'\".format(atoms))\n\n if isinstance(atoms, list):\n atoms = ';'.join(atoms)\n else:\n atoms = atoms.replace('\\n', ';')\n\n units = units.value\n basis = basis.value\n\n self.validate(locals())\n super().__init__()\n self._atoms = atoms\n self._units = units\n self._charge = charge\n self._multiplicity = multiplicity\n self._basis = basis\n\n @staticmethod\n def check_driver_valid():\n err_msg = 'PyQuante2 is not installed. 
See https://github.com/rpmuller/pyquante2'\n try:\n spec = importlib.util.find_spec('pyquante2')\n if spec is not None:\n return\n except Exception as e:\n logger.debug('PyQuante2 check error {}'.format(str(e)))\n raise QiskitChemistryError(err_msg) from e\n\n raise QiskitChemistryError(err_msg)\n\n @classmethod\n def init_from_input(cls, section):\n \"\"\"\n Initialize via section dictionary.\n\n Args:\n params (dict): section dictionary\n\n Returns:\n Driver: Driver object\n \"\"\"\n if section is None or not isinstance(section, dict):\n raise QiskitChemistryError('Invalid or missing section {}'.format(section))\n\n params = section\n kwargs = {}\n for k, v in params.items():\n if k == PyQuanteDriver.KEY_UNITS:\n v = UnitsType(v)\n elif k == PyQuanteDriver.KEY_BASIS:\n v = BasisType(v)\n\n kwargs[k] = v\n\n logger.debug('init_from_input: {}'.format(kwargs))\n return cls(**kwargs)\n\n def run(self):\n return compute_integrals(atoms=self._atoms,\n units=self._units,\n charge=self._charge,\n multiplicity=self._multiplicity,\n basis=self._basis)\n","repo_name":"epiqc/PartialCompilation","sub_path":"qiskit-chemistry/qiskit/chemistry/drivers/pyquanted/pyquantedriver.py","file_name":"pyquantedriver.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"72579577688","text":"import socket\nimport select\nimport time\n\ndef checkStatus(sock):\n \"\"\" Return the number of bytes ready to be read from a socket\n\n Args:\n sock (socket): the socket to test for data\n\n Returns:\n int: the number of bytes ready to be read.\n \"\"\"\n ready_to_read, ready_to_write, in_error = select.select([sock,], [sock,], [], 5)\n return len(ready_to_read)\n\ndef testPort(host:str,portnum:int, poke=None):\n \"\"\"Given a host (IP or name) and a port number, connect if possible and return any data transmitted.\n \n Args:\n host (string): the host to scan. Can be an IP address or hostname\n portnum (int): the port number, between 0 and 65535\n poke (string): if given, the string to send to the server upon connection.\n\n Returns:\n list of strings or None: the data returned by the connection, or None if the connection failed. If a list is returned, it represents the sequence of responses. 
The first element is the response received immediately, the second is the response after sending any given data.\n    \"\"\"\n    response=[]\n\n    try:\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        result = sock.connect_ex((host, portnum))\n        if result == 0: #0 means no error\n            time.sleep(0.1) #Give the server time to send\n            if checkStatus(sock)>0:\n                rcv = sock.recv(1024)\n                response.append(rcv.decode(\"utf-8\", \"ignore\"))\n            if poke!=None:\n                sock.sendall(str.encode(poke))\n                time.sleep(1) #Give the server time to send\n                ready_to_read, ready_to_write, in_error = select.select([sock,], [sock,], [], 5)\n                if checkStatus(sock)>0:\n                    rcv = sock.recv(1024)\n                    response.append(rcv.decode(\"utf-8\", \"ignore\"))\n\n            sock.shutdown(socket.SHUT_RDWR)\n            sock.close()\n\n            return response\n        else:\n            return None\n    except socket.error:\n        return None if response == [] else response\n    return None\n","repo_name":"Nikelandjelo/CU","sub_path":"harbourmaster/src/h_sockets.py","file_name":"h_sockets.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2501170744","text":"import math\r\n\r\nwhile True:\r\n    N = list(input())\r\n    if N == ['0']:\r\n        break\r\n\r\n    if len(N) % 2 == 0: # if the length is even, insert a filler digit in the middle\r\n        N.insert(len(N)//2, '0')\r\n    \r\n    median = math.ceil(len(N) / 2) - 1\r\n    if N[0:median] == list(reversed(N[median+1:])):\r\n        print(\"yes\")\r\n    else:\r\n        print(\"no\")","repo_name":"Sensol2/DailyBOJ","sub_path":"백준/Bronze/1259. 팰린드롬수/팰린드롬수.py","file_name":"팰린드롬수.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41966526667","text":"FILE_PATH = \"./input.txt\"\nMARK_LEN = 14\n\ndef main():\n    input_file = open(FILE_PATH, \"r\")\n    data = input_file.readlines()\n    print(\"Marker position: {}\".format(find_marker(data)))\n\n\ndef find_marker(signal):\n    signal_len = len(signal[0])\n    i = 0\n    while i < signal_len - MARK_LEN:\n        if check_packet(signal[0], i):\n            return i + MARK_LEN\n        i += 1\n\n\ndef check_packet(signal, start):\n    i = 0\n    while i < MARK_LEN - 1:\n        j = 1\n        while j <= MARK_LEN - 1 - i:\n            if signal[start + i] == signal[start + j + i]:\n                return 0\n            j += 1\n        i += 1\n    return 1\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"sszahinos/Advent_Of_Code_2022","sub_path":"6_tuning_trouble/solution_2.py","file_name":"solution_2.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39553039665","text":"import pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# Korean font configuration\nmatplotlib.rcParams['font.family'] = 'Malgun Gothic' # set the font to 'Malgun Gothic'\nmatplotlib.rcParams['axes.unicode_minus'] = False\n\ndf = pd.read_csv('./auto-mpg.csv')\ndf.columns = ['mpg', 'cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model year', 'origin', 'name']\n\npd.set_option('display.max_columns', len(df.columns)) # maximum number of columns to display\npd.set_option('display.max_colwidth', 30) # display width per column\npd.set_option('display.width', 1000) # total display width\n\nprint(\"auto-mpg\", df.head(), sep='\\n', end='\\n\\n')\n\ndf.plot(x='weight', y='mpg', kind='scatter', title=\"무게(weight)와 연비(mpg)의 관계\")\nplt.show()\n","repo_name":"mywns123/pandas_study","sub_path":"part3/df_plot_scatter.py","file_name":"df_plot_scatter.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} 
+{"seq_id":"38869677451","text":"from math import factorial \n\nclass DirNum:\n def __init__(self, num):\n self.num = num\n self.dir = -1 # right to left \n \n def changeDir(self):\n self.dir = -self.dir \n\n def __gt__(self, other):\n return self.num > other.num\n\nn = int(input(\"Enter n: \"))\nnums = [DirNum(i) for i in range(1, n + 1)]\n\n# print 1st perumtation\nfor dirNum in nums:\n print(dirNum.num, end=' ')\nprint()\n\n\ndef getMobile(nums):\n mobile = DirNum(0) \n\n for i in range(len(nums)):\n dirNum = nums[i]\n if (dirNum.dir == -1 and i != 0) or (dirNum.dir == 1 and i < len(nums) - 1): # (right to left) or (left to right) \n if dirNum > nums[i + dirNum.dir] and dirNum > mobile:\n mobile = dirNum\n \n return mobile\n\n\ndef printOnePerm(nums):\n mobile = getMobile(nums)\n index = nums.index(mobile)\n\n # swap number in mobile dir\n nums[index], nums[index + mobile.dir] = nums[index + mobile.dir], nums[index]\n\n # change dir and print at the same time\n for dirNum in nums:\n if dirNum > mobile:\n dirNum.changeDir()\n print(dirNum.num, end=' ')\n print()\n\n\nfor i in range(factorial(n) - 1):\n printOnePerm(nums)\n","repo_name":"ajaycs18/ADA-4TH-SEM","sub_path":"johnsontrotter.py","file_name":"johnsontrotter.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"4615470313","text":"def isAnagram(s, t):\n if len(s) != len(t): return False\n\n seenS, seenT = {}, {}\n\n for i in range(len(s)):\n seenS[s[i]] = 1 + seenS.get(s[i], 0)\n seenT[t[i]] = 1 + seenT.get(t[i], 0)\n\n return seenS == seenT\n\n\nif __name__ == '__main__':\n\n print(isAnagram('anagram', 'nagaram'))","repo_name":"desi-123/leetcode","sub_path":"valid_anagram.py","file_name":"valid_anagram.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73353688409","text":"import os, requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport urllib.request as ur\n\n\ndestFolder = os.path.dirname(os.path.abspath(__file__))\ndict = {}\n\n\ndef convert_float(text):\n zeros = 0\n if text == \"N/A\":\n floated = text\n else:\n unit = text[-1]\n if \".\" in text:\n number = text.split('.')\n minus = len(number[1]) - 1\n else:\n minus = 0\n if unit == 'T':\n zeros = '0' * (12 - minus)\n if unit == 'B':\n zeros = '0' * (9 - minus)\n if unit == 'M':\n zeros = '0' * (6 - minus)\n if unit == 'k':\n zeros = '0' * (3 - minus)\n else:\n pass\n floated = float(\n text.replace('T', zeros).replace('B', zeros).replace('M', zeros).replace('k', zeros).replace('.', ''))\n return (floated)\n\n\nclass Scraper:\n\n # Initializing the scraper with the urls that will be used in the different methods\n def __init__(self, index):\n self.index = index\n self.url_is = 'https://finance.yahoo.com/quote/' + self.index + '/financials?p=' + self.index\n self.url_bs = 'https://finance.yahoo.com/quote/' + self.index + '/balance-sheet?p=' + self.index\n self.url_cf = 'https://finance.yahoo.com/quote/' + self.index + '/cash-flow?p=' + self.index\n self.url_stats = 'https://finance.yahoo.com/quote/' + self.index + '/key-statistics?p=' + self.index\n self.url_pr = 'https://finance.yahoo.com/quote/' + self.index + '/profile?p=' + self.index\n print(self.index)\n\n # Finding the number of columns in the table\n def nb_cols(self, ls):\n columns = ['ttm', '12/31/2019', '12/31/2018', '12/31/2017', '12/31/2016']\n cols = 1\n for x in columns:\n if x in ls:\n cols 
+= 1\n return cols\n\n # Getting the soup as a Dataframe\n def get_dataframe(self, url):\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n soup = soup.find('div', class_=\"W(100%) Whs(nw) Ovx(a) BdT Bdtc($seperatorColor)\")\n ls = ['Label']\n for element in soup.find_all('div'):\n ls.append(element.get('title'))\n ls.append(element.string)\n ls = list(filter(None, ls))\n new_ls = []\n for x in ls:\n try:\n x = float(x.replace(',', ''))\n except Exception as e:\n pass\n finally:\n if x != '-' and x in new_ls and isinstance(x, str):\n pass\n else:\n new_ls.append(x)\n print(new_ls)\n cols = self.nb_cols(new_ls)\n data = list(zip(*[iter(new_ls)] * cols))\n result = pd.DataFrame(data[0:])\n result.columns = result.iloc[0] # Set first row as column names\n result = result.set_index('Label') # Change 0 index column to Labels\n result.columns = result.iloc[0] # Name columns to first row of dataframe\n result.drop(result.index[0], inplace=True) # Drop first index row\n result.index.name = '' # Remove the index name\n print(result)\n return result\n\n # Getting the statistics as a Dataframe\n def get_nb(self):\n ls_main = []\n ls_stats = []\n ls_fins = []\n ls_trads = []\n read_data = ur.urlopen(self.url_stats).read()\n soup_stats = BeautifulSoup(read_data, 'html.parser')\n soup_financials = soup_stats.find('div', class_=\"Fl(start) W(50%) smartphone_W(100%)\").find_all('tr')\n soup_trading = soup_stats.find('div', class_=\"Fl(end) W(50%) smartphone_W(100%)\").find_all('tr')\n\n soup_stats = soup_stats.find('table')\n for stats in soup_stats:\n stats = stats.find_all('td')\n for stat in stats:\n ls_stats.append(stat.text)\n ls_main.append(stat.text)\n # print(ls_stats)\n\n for fins in soup_financials:\n fins = fins.find_all('td')\n for fin in fins:\n ls_fins.append(fin.text)\n ls_main.append(fin.text)\n # print(ls_fins)\n\n for trads in soup_trading:\n trads = trads.find_all('td')\n for trad in trads:\n ls_trads.append(trad.text)\n ls_main.append(trad.text)\n # print(ls_trads)\n\n # print(ls_main)\n stats_data = list(zip(*[iter(ls_main)] * 2))\n all_stats = pd.DataFrame(stats_data[0:])\n all_stats = all_stats.set_index(0)\n all_stats.index.name = ''\n print(all_stats)\n return all_stats\n\n # Getting profile information as a list\n def get_sect(self):\n countries = ['Spain', 'France', 'United States', 'Germany', 'Belgium']\n ls_sect = []\n read_data = ur.urlopen(self.url_pr).read()\n soup_prof = BeautifulSoup(read_data, 'html.parser')\n soup_prof1 = soup_prof.find('div', class_=\"asset-profile-container\").find_all('span', class_=\"Fw(600)\")\n soup_prof2 = soup_prof.find('div', class_=\"asset-profile-container\").find('p')\n for x in soup_prof1:\n ls_sect.append(x.text)\n for country in countries:\n if country in soup_prof2:\n ls_sect.append(country)\n if len(ls_sect) == 3:\n ls_sect.append('Undefined')\n # print(ls_sect)\n return ls_sect\n\n # Getting more profile information as a list\n def get_pro(self):\n ls_pro = []\n read_data = ur.urlopen(self.url_pr).read()\n soup_prof = BeautifulSoup(read_data, 'html.parser')\n soup_prof2 = soup_prof.find('table').find_all('tr')\n soup_prof2 = soup_prof2[1].find_all('td')\n for y in soup_prof2:\n ls_pro.append(y.text)\n print(ls_pro)\n return ls_pro\n\n # Getting all the information into a dictionary\n def get_ratios(self):\n # get datatables\n dtis = self.get_dataframe(self.url_is)\n dtbs = self.get_dataframe(self.url_bs)\n dtcf = self.get_dataframe(self.url_cf)\n dtnb = Scraper(self.index).get_nb()\n dtpro = 
Scraper(self.index).get_pro()\n dtsect = Scraper(self.index).get_sect()\n\n # standardize column names (to avoid different dates which are dependent on the company)\n if len(dtis.columns) == 4:\n dtis.columns = ['TTM', 'N-1', 'N-2', 'N-3']\n dtbs.columns = ['N-1', 'N-2', 'N-3']\n dtcf.columns = ['TTM', 'N-1', 'N-2', 'N-3']\n else:\n dtis.columns = ['TTM', 'N-1', 'N-2', 'N-3', 'N-4']\n dtbs.columns = ['N-1', 'N-2', 'N-3', 'N-4']\n dtcf.columns = ['TTM', 'N-1', 'N-2', 'N-3', 'N-4']\n\n dtnb.columns = ['data']\n\n ### Valuation Ratios in progress ###\n\n ## Market Cap: ##\n try:\n Market_Cap = dtnb.at['Market Cap (intraday) 5', 'data']\n dict['Market Cap'] = convert_float(Market_Cap)\n\n print('Market Cap : ' + str(dict['Market Cap']))\n except KeyError as e:\n dict['Market Cap'] = \"NA\"\n print('Market Cap : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## Entreprise Value: ##\n try:\n EV = dtnb.at['Enterprise Value 3', 'data']\n dict['Enterprise Value'] = convert_float(EV)\n print('Enterprise Value : ' + str(EV))\n except KeyError as e:\n dict['Enterprise Value'] = \"NA\"\n print('Enterprise Value : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## P/E ratio: ##\n try:\n PE_ratio = dtnb.at['Trailing P/E ', 'data']\n dict['P/E ratio'] = PE_ratio.replace('.', ',')\n print('P/E ratio : ' + str(PE_ratio))\n except KeyError as e:\n dict['P/E ratio'] = \"NA\"\n print('P/E ratio : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## PEG ratio: ##\n try:\n PEG_ratio = dtnb.at['PEG Ratio (5 yr expected) 1', 'data']\n dict['PEG ratio'] = PEG_ratio.replace('.', ',')\n print('PEG ratio : ' + str(PEG_ratio))\n except KeyError as e:\n dict['PEG ratio'] = \"NA\"\n print('PEG ratio : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## P/B ratio: ##\n try:\n PB_ratio = dtnb.at['Price/Book (mrq)', 'data']\n dict['P/B ratio'] = PB_ratio.replace('.', ',')\n print('P/B ratio : ' + str(PB_ratio))\n except KeyError as e:\n dict['P/B ratio'] = \"NA\"\n print('P/B ratio : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## P/S ratio: ##\n try:\n PS_ratio = dtnb.at['Price/Sales (ttm)', 'data']\n dict['P/S ratio'] = PS_ratio.replace('.', ',')\n print('P/S ratio : ' + str(PS_ratio))\n except KeyError as e:\n dict['P/S ratio'] = \"NA\"\n print('P/S ratio : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## EV_EBITDA ratio: ##\n try:\n EV_EBITDA = dtnb.at['Enterprise Value/EBITDA 6', 'data']\n dict['EV/EBITDA'] = EV_EBITDA.replace('.', ',')\n print('EV/EBITDA : ' + str(EV_EBITDA))\n except KeyError as e:\n dict['EV/EBITDA'] = \"NA\"\n print('EV/EBITDA : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## Dividend Yield trailing ratio: ##\n try:\n TA_Dividend_Yield = dtnb.at['Trailing Annual Dividend Yield 3', 'data']\n dict['TA Dividend Yield'] = TA_Dividend_Yield.replace('.', ',')\n print('Trailing Annual Dividend Yield : ' + str(TA_Dividend_Yield))\n except KeyError as e:\n dict['TA Dividend Yield'] = \"NA\"\n print('Trailing Annual Dividend Yield : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## Dividend Yield over 5 Years: ##\n try:\n _5Y_Dividend_Yield = dtnb.at['5 Year Average Dividend Yield 4', 'data']\n dict['5Y Dividend Yield'] = _5Y_Dividend_Yield.replace('.', ',')\n print('5 Year Average Dividend Yield : ' + str(_5Y_Dividend_Yield))\n except KeyError as e:\n dict['5Y Dividend Yield'] = \"NA\"\n print('5 Year Average Dividend Yield : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## Dividends Payout : ##\n\n try:\n Dividend_payout = dtnb.at['Payout Ratio 4', 'data']\n dict['Dividend payout'] = Dividend_payout.replace('.', ',')\n 
print('Dividend payout : ' + str(Dividend_payout))\n except KeyError as e:\n dict['Dividend payout'] = \"NA\"\n print('Dividend payout : ' + 'KeyError - reason \"%s\"' % str(e))\n except ValueError as ve:\n dict['Dividend payout'] = \"NA\"\n print('Dividend payout : ' + 'ValueError - reason \"%s\"' % str(ve))\n\n ### Profitability ratio in progress ###\n\n ## ROA N-1: ##\n try:\n dict['ROA'] = dtnb.at['Return on Assets (ttm)', 'data']\n print('ROA : ' + str(dict['ROA']) + '%')\n except KeyError as e:\n print('ROA : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## ROE N-1: ##\n try:\n dict['ROE'] = dtnb.at['Return on Equity (ttm)', 'data']\n print('ROE : ' + str(dict['ROE']) + '%')\n except KeyError as e:\n dict['ROE'] = \"NA\"\n print('ROE : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## Profit Margin: ##\n try:\n Profit_Margin = dtnb.at['Profit Margin ', 'data']\n dict['Profit Margin'] = Profit_Margin.replace('.', ',')\n print('Profit Margin : ' + str(Profit_Margin))\n except KeyError as e:\n dict['Profit Margin'] = \"NA\"\n print('Profit Margin : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## EPS: ##\n try:\n EPS = dtnb.at['Diluted EPS (ttm)', 'data']\n dict['EPS'] = EPS.replace('.', ',')\n print('EPS : ' + str(EPS))\n except KeyError as e:\n dict['EPS'] = \"NA\"\n print('EPS : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ### Liquidity ratio OK ###\n\n # Current Ratio N-1:\n try:\n Current_Ratio = float(dtbs.at['Total Current Assets', 'N-1']) / float(\n dtbs.at['Total Current Liabilities', 'N-1'])\n dict['Current Ratio'] = round(Current_Ratio, 2)\n print('Current Ratio : ' + str(dict['Current Ratio']) + '%')\n except KeyError as e:\n dict['Current Ratio'] = \"NA\"\n print('Current Ratio : ' + 'KeyError - reason \"%s\"' % str(e))\n\n # Current Ratio #\n try:\n Current_Ratio = dtnb.at['Current Ratio (mrq)', 'data']\n dict['Current Ratio mrq'] = Current_Ratio.replace('.', ',')\n print('Current Ratio mrq: ' + str(dict['Current Ratio mrq']))\n except KeyError as e:\n dict['Current Ratio mrq'] = \"\"\n print('Current Ratio mrq: ' + 'KeyError - reason \"%s\"' % str(e))\n\n # Quick Ratio N-1:\n try:\n Quick_Ratio = (float(dtbs.at['Total Current Assets', 'N-1']) - float(dtcf.at['Inventory', 'N-1'])) / float(\n dtbs.at['Total Current Liabilities', 'N-1'])\n dict['Quick Ratio'] = round(Quick_Ratio, 2)\n print('Quick Ratio : ' + str(dict['Quick Ratio']) + '%')\n except ValueError as ve:\n dict['Quick Ratio'] = \"NA\"\n print('Quick Ratio : ' + 'ValueError - reason \"%s\"' % str(ve))\n except KeyError as e:\n dict['Quick Ratio'] = \"NA\"\n print('Quick Ratio : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ### Efficiency ratio NO ###\n\n ### Debt Ratio ###\n\n ## Total Debt: ##\n try:\n Total_Debt = dtnb.at['Total Debt (mrq)', 'data']\n dict['Total Debt'] = convert_float(Total_Debt)\n print('Total Debt : ' + str(Total_Debt))\n except KeyError as e:\n dict['Total Debt'] = \"NA\"\n print('Total Debt : ' + 'KeyError - reason \"%s\"' % str(e))\n except ValueError as ve:\n dict['Total Debt'] = \"NA\"\n print('Total Debt : ' + 'ValueError - reason \"%s\"' % str(ve))\n\n ## Debt/Equity: ##\n try:\n Debt_Equity = dtnb.at['Total Debt/Equity (mrq)', 'data']\n dict['Debt/Equity'] = Debt_Equity.replace('.', ',')\n print('Debt/Equity : ' + str(Debt_Equity))\n except KeyError as e:\n dict['Debt/Equity'] = \"NA\"\n print('Debt/Equity : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ### OTHER ###\n\n ## Revenue ttm : ##\n try:\n Revenue = dtnb.at['Revenue (ttm)', 'data']\n dict['Revenue'] = convert_float(Revenue)\n 
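# The ratio blocks above all repeat one pattern: look a label up in dtnb,
# normalise the decimal separator, and fall back to "NA" when the label is
# missing. A hypothetical helper (not part of the original file) capturing it:
def safe_stat(df, label, out, key):
    try:
        out[key] = df.at[label, 'data'].replace('.', ',')
        print(key + ' : ' + str(out[key]))
    except (KeyError, AttributeError) as err:
        out[key] = 'NA'
        print(key + ' : lookup failed - "%s"' % err)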
print('Revenue : ' + str(Revenue))\n except KeyError as e:\n dict['Revenue'] = \"NA\"\n print('Revenue : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## Revenue per share : ##\n try:\n RPS = dtnb.at['Revenue Per Share (ttm)', 'data']\n dict['RPS'] = RPS.replace('.', ',')\n print('RPS : ' + str(RPS))\n except KeyError as e:\n dict['RPS'] = \"NA\"\n print('RPS : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## Gross Profit (ttm) : ##\n try:\n Gross_Profit = dtnb.at['Gross Profit (ttm)', 'data']\n dict['Gross Profit'] = convert_float(Gross_Profit)\n print('Gross Profit : ' + str(Gross_Profit))\n except KeyError as e:\n dict['Gross Profit'] = \"NA\"\n print('Gross Profit : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## EBITDA : ##\n try:\n EBITDAttm = dtnb.at['EBITDA ', 'data']\n dict['EBITDA'] = convert_float(EBITDAttm)\n print('EBITDA : ' + str(EBITDAttm))\n except KeyError as e:\n dict['EBITDA'] = \"NA\"\n print('EBITDA : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## Beta : ##\n try:\n Beta = dtnb.at['Beta (5Y Monthly) ', 'data']\n dict['Beta'] = Beta.replace('.', ',')\n print('Beta : ' + str(Beta))\n except KeyError as e:\n dict['Beta'] = \"NA\"\n print('Beta : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## Shares Outstanding : ##\n try:\n Shares_Outstanding = dtnb.at['Shares Outstanding 5', 'data']\n dict['Shares Outstanding'] = convert_float(Shares_Outstanding)\n print('Shares Outstanding : ' + str(Shares_Outstanding))\n except KeyError as e:\n dict['Shares Outstanding'] = \"NA\"\n print('Shares Outstanding : ' + 'KeyError - reason \"%s\"' % str(e))\n\n ## Float : ##\n try:\n Float = dtnb.at['Float ', 'data']\n dict['Float'] = convert_float(Float)\n print('Float : ' + str(Float))\n except KeyError as e:\n dict['Float'] = \"NA\"\n print('Float : ' + 'KeyError - reason \"%s\"' % str(e))\n\n for x in dtis.columns:\n try:\n Net_Income = dtis.at['Normalized Income', x]\n dict['Net Income {}'.format(x)] = Net_Income\n print('Net Income {}'.format(x) + ': ' + str(Net_Income))\n except KeyError as e:\n dict['Net Income {}'.format(x)] = \"NA\"\n print('Net Income {}'.format(x) + ': ' + 'KeyError - reason \"%s\"' % str(e))\n\n for z in dtis.columns:\n try:\n Total_Revenue = dtis.at['Total Revenue', z]\n dict['Total Revenue {}'.format(z)] = Total_Revenue\n print('Total Revenue {}'.format(z) + ': ' + str(Total_Revenue))\n except KeyError as e:\n dict['Total Revenue {}'.format(z)] = \"NA\"\n print('Total Revenue {}'.format(z) + ': ' + 'KeyError - reason \"%s\"' % str(e))\n\n for w in dtis.columns:\n try:\n Gross_Profit = dtis.at['Gross Profit', w]\n dict['Gross Profit {}'.format(w)] = Gross_Profit\n print('Gross Profit {}'.format(w) + ': ' + str(Gross_Profit))\n except KeyError as e:\n dict['Gross Profit {}'.format(w)] = \"NA\"\n print('Gross Profit {}'.format(w) + ': ' + 'KeyError - reason \"%s\"' % str(e))\n\n for v in dtis.columns:\n try:\n Operating_Income = dtis.at['Operating Income', v]\n dict['Operating Income {}'.format(v)] = Operating_Income\n print('Operating Income {}'.format(v) + ': ' + str(Operating_Income))\n except KeyError as e:\n dict['Operating Income {}'.format(v)] = \"NA\"\n print('Operating Income {}'.format(v) + ': ' + 'KeyError - reason \"%s\"' % str(e))\n\n for u in dtis.columns:\n try:\n Income_BefTax = dtis.at['Pretax Income', u]\n dict['Income Before Tax {}'.format(u)] = Income_BefTax\n print('Income Before Tax {}'.format(u) + ': ' + str(Income_BefTax))\n except KeyError as e:\n dict['Income Before Tax {}'.format(u)] = \"NA\"\n print('Income Before Tax {}'.format(u) + ': 
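# Each of the per-column loops in this stretch differs only in the row label,
# the source frame, and the output prefix. A hypothetical refactoring sketch
# (names assumed, not in the original) that keeps the same "NA" fallback:
def copy_rows(df, label, prefix, out):
    for col in df.columns:
        try:
            out['{} {}'.format(prefix, col)] = df.at[label, col]
        except KeyError as err:
            out['{} {}'.format(prefix, col)] = 'NA'
            print('{} {}: KeyError - reason "{}"'.format(prefix, col, err))
# e.g. copy_rows(dtis, 'Total Revenue', 'Total Revenue', dict)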
' + 'KeyError - reason \"%s\"' % str(e))\n\n        for t in dtis.columns:\n            try:\n                EBITDA = dtis.at['EBITDA', t]\n                dict['EBITDA {}'.format(t)] = EBITDA\n                print('EBITDA {}'.format(t) + ': ' + str(EBITDA))\n            except KeyError as e:\n                dict['EBITDA {}'.format(t)] = \"NA\"\n                print('EBITDA {}'.format(t) + ': ' + 'KeyError - reason \"%s\"' % str(e))\n\n        for y in dtbs.columns:\n            try:\n                Total_Assets = dtbs.at['Total Assets', y]\n                dict['Total Assets {}'.format(y)] = Total_Assets\n                print('Total Assets {}'.format(y) + ': ' + str(Total_Assets))\n            except KeyError as e:\n                dict['Total Assets {}'.format(y)] = \"NA\"\n                print('Total Assets {}'.format(y) + ': ' + 'KeyError - reason \"%s\"' % str(e))\n\n        for s in dtbs.columns:\n            try:\n                Total_Liabilities = dtbs.at['Total Liabilities', s]\n                dict['Total Liabilities {}'.format(s)] = Total_Liabilities\n                print('Total Liabilities {}'.format(s) + ': ' + str(Total_Liabilities))\n            except KeyError as e:\n                dict['Total Liabilities {}'.format(s)] = \"NA\"\n                print('Total Liabilities {}'.format(s) + ': ' + 'KeyError - reason \"%s\"' % str(e))\n\n        for r in dtcf.columns:\n            try:\n                OpCashFlow = dtcf.at['Operating Cash Flow', r]\n                dict['Operating Cash Flow {}'.format(r)] = OpCashFlow\n                print('Operating Cash Flow {}'.format(r) + ': ' + str(OpCashFlow))\n            except KeyError as e:\n                dict['Operating Cash Flow {}'.format(r)] = \"NA\"\n                print('Operating Cash Flow {}'.format(r) + ': ' + 'KeyError - reason \"%s\"' % str(e))\n\n        for q in dtcf.columns:\n            try:\n                CapEx = dtcf.at['Capital Expenditure', q]\n                dict['Capital Expenditure {}'.format(q)] = CapEx\n                print('Capital Expenditure {}'.format(q) + ': ' + str(CapEx))\n            except KeyError as e:\n                dict['Capital Expenditure {}'.format(q)] = \"NA\"\n                print('Capital Expenditure {}'.format(q) + ': ' + 'KeyError - reason \"%s\"' % str(e))\n\n        for p in dtcf.columns:\n            try:\n                FreeCashFlow = dtcf.at['Free Cash Flow', p]\n                dict['Free Cash Flow {}'.format(p)] = FreeCashFlow\n                print('Free Cash Flow {}'.format(p) + ': ' + str(FreeCashFlow))\n            except KeyError as e:\n                dict['Free Cash Flow {}'.format(p)] = \"NA\"\n                print('Free Cash Flow {}'.format(p) + ': ' + 'KeyError - reason \"%s\"' % str(e))\n\n        dict['Sector'] = dtsect[0]\n        dict['Subsector'] = dtsect[1]\n        dict['Empleados'] = dtsect[2].replace(',', '')\n        dict['Pais'] = dtsect[3]\n\n        dict['CEO'] = dtpro[0]\n        dict['Salario'] = dtpro[2]\n\n        print(dict)\n        return dict\n\n\n# RUN :\nif __name__ == '__main__':\n    Scraper('T').get_ratios()\n","repo_name":"diexel64/stock_scraper","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":22097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"15081738444","text":"instructions = list()\nwith open(\"input.txt\", \"r\") as f:\n\tfor line in f:\n\t\tinstructions.append((line[0], int(line[1:])))\n\nx_cord = 0\ny_cord = 0\nfacing = 0 #E-0, S-1, W-2, N-3\nwaypoint = [10,1]\n\nfor x in instructions:\n\tif x[0] == \"N\":\n\t\twaypoint[1] += x[1]\n\telif x[0] == \"S\":\n\t\twaypoint[1] -= x[1]\n\telif x[0] == \"E\":\n\t\twaypoint[0] += x[1]\n\telif x[0] == \"W\":\n\t\twaypoint[0] -= x[1]\n\telif x[0] == \"L\":\n\t\tfor a in range(0,x[1]//90):\n\t\t\tt = waypoint[0]\n\t\t\twaypoint[0] = -1 * waypoint[1]\n\t\t\twaypoint[1] = t\n\telif x[0] == \"R\":\n\t\tfor a in range(0,x[1]//90):\n\t\t\tt = waypoint[0]\n\t\t\twaypoint[0] = waypoint[1]\n\t\t\twaypoint[1] = -1 * t\n\telif x[0] == \"F\":\n\t\tx_cord += x[1]*waypoint[0]\n\t\ty_cord += x[1]*waypoint[1]\n\nprint(f\"Manhattan distance: 
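# The L/R branches above rotate the waypoint 90 degrees at a time around the
# ship. The same rotation can be sanity-checked with complex multiplication,
# where multiplying by 1j is one counter-clockwise ("L") quarter turn:
w = complex(10, 1)   # the starting waypoint (10, 1)
left = w * 1j        # L 90 -> (-1, 10), matching waypoint = [-y, x]
right = w * -1j      # R 90 -> (1, -10), matching waypoint = [y, -x]
assert (left.real, left.imag) == (-1, 10)
assert (right.real, right.imag) == (1, -10)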
{abs(x_cord)+abs(y_cord)}\")","repo_name":"Kliszek/AdventOfCode-2020","sub_path":"Day12/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70473282969","text":"def solution(k, score):\n answer = []\n score_ls = []\n for i in range(1,len(score)+1):\n score_ls.append(score[i-1])\n score_ls.sort(reverse=True)\n answer.append((i,score_ls[:k]))\n\n return [answer[i][1][-1] for i in range(len(answer))]\n\n#다른풀이\ndef solution(k, score):\n q = []\n\n answer = []\n for s in score:\n\n q.append(s)\n if (len(q) > k):\n q.remove(min(q))\n answer.append(min(q))\n\n return answer\n","repo_name":"jwh7027/problem-solving","sub_path":"Programmers/Level1/명예의전당.py","file_name":"명예의전당.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15963765326","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains utilities added by billiard, to keep\n\"non-core\" functionality out of ``.util``.\"\"\"\nfrom __future__ import absolute_import\n\nimport signal\nimport sys\n\nfrom time import time\nimport pickle as pypickle\ntry:\n import cPickle as cpickle\nexcept ImportError: # pragma: no cover\n cpickle = None # noqa\n\nfrom .exceptions import RestartFreqExceeded\n\nif sys.version_info < (2, 6): # pragma: no cover\n # cPickle does not use absolute_imports\n pickle = pypickle\n pickle_load = pypickle.load\n pickle_loads = pypickle.loads\nelse:\n pickle = cpickle or pypickle\n pickle_load = pickle.load\n pickle_loads = pickle.loads\n\n# cPickle.loads does not support buffer() objects,\n# but we can just create a StringIO and use load.\nif sys.version_info[0] == 3:\n from io import BytesIO\nelse:\n try:\n from cStringIO import StringIO as BytesIO # noqa\n except ImportError:\n from StringIO import StringIO as BytesIO # noqa\n\nTERMSIGS = (\n 'SIGHUP',\n 'SIGQUIT',\n 'SIGILL',\n 'SIGTRAP',\n 'SIGABRT',\n 'SIGEMT',\n 'SIGFPE',\n 'SIGBUS',\n 'SIGSEGV',\n 'SIGSYS',\n 'SIGPIPE',\n 'SIGALRM',\n 'SIGTERM',\n 'SIGXCPU',\n 'SIGXFSZ',\n 'SIGVTALRM',\n 'SIGPROF',\n 'SIGUSR1',\n 'SIGUSR2',\n)\n\n\ndef pickle_loads(s, load=pickle_load):\n # used to support buffer objects\n return load(BytesIO(s))\n\n\ndef _shutdown_cleanup(signum, frame):\n sys.exit(-(256 - signum))\n\n\ndef reset_signals(handler=_shutdown_cleanup):\n for sig in TERMSIGS:\n try:\n signum = getattr(signal, sig)\n current = signal.getsignal(signum)\n if current is not None and current != signal.SIG_IGN:\n signal.signal(signum, handler)\n except (OSError, AttributeError, ValueError, RuntimeError):\n pass\n\n\nclass restart_state(object):\n RestartFreqExceeded = RestartFreqExceeded\n\n def __init__(self, maxR, maxT):\n self.maxR, self.maxT = maxR, maxT\n self.R, self.T = 0, None\n\n def step(self, now=None):\n now = time() if now is None else now\n R = self.R\n if self.T and now - self.T >= self.maxT:\n # maxT passed, reset counter and time passed.\n self.T, self.R = now, 0\n elif self.maxR and self.R >= self.maxR:\n # verify that R has a value as the result handler\n # resets this when a job is accepted. 
If a job is accepted\n            # the startup probably went fine (startup restart burst\n            # protection)\n            if self.R:  # pragma: no cover\n                pass\n            self.R = 0  # reset in case someone catches the error\n            raise self.RestartFreqExceeded(\"%r in %rs\" % (R, self.maxT))\n        # first run sets T\n        if self.T is None:\n            self.T = now\n        self.R += 1\n","repo_name":"jiangningCX/cpython_forum","sub_path":"build/billiard/billiard/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"}
{"seq_id":"37000937226","text":"import os\n\n# walk through all files in path and return path and content for each file.\n# os.walk already descends into sub-directories, so no manual recursion is needed.\ndef read_files(path):\n\tfor root, dir_names, file_names in os.walk(path):\n\t\tfor file_name in file_names:\n\t\t\tfile_path = os.path.join(root, file_name)\n\t\t\tif os.path.isfile(file_path):\n\t\t\t\t# content in raw mails starts after whitespace, so only store content when past first header.\n\t\t\t\tpast_header, lines = False, []\n\t\t\t\tf = open(file_path)\n\t\t\t\tfor line in f:\n\t\t\t\t\tif past_header:\n\t\t\t\t\t\tlines.append(line)\n\t\t\t\t\telif line == '\\n':\n\t\t\t\t\t\tpast_header = True\n\t\t\t\tcontent = '\\n'.join(lines)\n\t\t\t\tf.close()\n\t\t\t\tyield file_path, content\n\n# Initialize list with all raw email content\ndef init_emaillist(path):\n\temail_list = []\n\tfor file_path, content in read_files(path):\n\t\temail_list.append(content)\n\treturn email_list\n\n","repo_name":"Vermeij/Spamfilter","sub_path":"reademails.py","file_name":"reademails.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"29601990002","text":"import config\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.layers import AveragePooling2D\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.models import Model\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\n\ndef get_data_set_from_directory(data_dir):\n    BATCH_SIZE = 128\n    IMG_SIZE = (224, 224)\n    BUFFER_SIZE = BATCH_SIZE*5\n\n    train_dataset = tf.keras.preprocessing.image_dataset_from_directory(\n        data_dir,\n        validation_split=0.2,\n        subset=\"training\",\n        seed=123,\n        image_size=IMG_SIZE,\n        batch_size=BATCH_SIZE)\n    validation_dataset = tf.keras.preprocessing.image_dataset_from_directory(\n        data_dir,\n        validation_split=0.2,\n        subset=\"validation\",\n        seed=123,\n        image_size=IMG_SIZE,\n        batch_size=BATCH_SIZE)\n    return train_dataset, validation_dataset\n\n\ndef model(base=\"MobileNet\"):\n\n    # Data augmentation layer\n    data_augmentation = tf.keras.Sequential([\n        tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),\n        tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),\n        tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1)\n    ])\n\n    # Base model for transfer learning\n    base_model = None\n    if base == \"MobileNet\":\n        base_model = tf.keras.applications.MobileNetV2(input_shape=(224,224,3)\n                                                    , include_top=False\n                                                    , weights=\"imagenet\" )\n    else:\n        base_model = 
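# One point the model() function above leaves implicit: for transfer learning
# the pretrained backbone is commonly frozen so that only the new head trains.
# A hedged sketch of that step (not present in the original code):
import tensorflow as tf
frozen_base = tf.keras.applications.MobileNetV2(
    input_shape=(224, 224, 3), include_top=False, weights="imagenet")
frozen_base.trainable = False  # keep the ImageNet weights fixed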
tf.keras.applications.InceptionV3(input_shape=(224,224,3)\n , include_top=False\n , weights=\"imagenet\" )\n\n\n # Flattening\n global_average = tf.keras.layers.GlobalAveragePooling2D()\n\n # final layer\n dropout_layer = tf.keras.layers.Dropout(0.2)\n prediction_layer = tf.keras.layers.Dense(2)\n\n inputs = tf.keras.Input(shape=(224, 224, 3))\n x = data_augmentation(inputs)\n x = base_model(x)\n x = global_average(x)\n x = dropout_layer(x)\n outputs = prediction_layer(x)\n model = tf.keras.Model(inputs, outputs)\n\n return model\n\nclass InferenceModel(object):\n\n model = None\n\n def __init__(self, model_dir, base, num_class):\n # Data augmentation layer\n data_augmentation = tf.keras.Sequential([\n tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),\n tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),\n tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1)\n ])\n\n # Base model for transfer learning\n base_model = None\n if base == \"MobileNet\":\n base_model = tf.keras.applications.MobileNetV2(input_shape=(224,224,3)\n , include_top=False\n , weights=\"imagenet\" )\n else:\n base_model = tf.keras.applications.InceptionV3(input_shape=(224,224,3)\n , include_top=False\n , weights=\"imagenet\" )\n # Flattening\n global_average = tf.keras.layers.GlobalAveragePooling2D()\n # final layer\n dropout_layer = tf.keras.layers.Dropout(0.2)\n prediction_layer = tf.keras.layers.Dense(num_class, activation=\"softmax\")\n\n inputs = tf.keras.Input(shape=(224, 224, 3))\n x = data_augmentation(inputs)\n x = base_model(x)\n x = global_average(x)\n x = dropout_layer(x)\n outputs = prediction_layer(x)\n self.model = tf.keras.Model(inputs, outputs)\n\n self.model.load_weights(model_dir)\n\n def predict(self, image):\n return self.model.predict(image)\n\n\nif __name__ == '__main__':\n # train_dataset, validation_dataset = get_data_set_from_directory(\"rcnn\\dataset\")\n # mymodel = model()\n # print(mymodel.summary())\n\n my_parser = argparse.ArgumentParser(description='')\n my_parser.add_argument(\"-d\", \"--data_dir\", type=str, default=\"\",\n help=\"Folder contains your training dataset\")\n my_parser.add_argument(\"-m\", \"--model\", type=str, default=\"MobileNet\",\n help=\"The backbone of model. 
MobileNet of InceptionNet\")\n my_parser.add_argument(\"-ckpt\", \"--checkpoint\", type=str, default=\"\",\n help=\"Folder to save your model in .h5 type\")\n\n args = vars(my_parser.parse_args())\n print(args)\n","repo_name":"PhamHoangBao0310/Object_Detection","sub_path":"rcnn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33383231541","text":"\"\"\"Adds the 'translationparty' command, allowing users to connect to google translator to translate a phrase back and forth from English to Japanese, using translationparty.com's phrases after an equilibrium is found.\"\"\"\n\nimport json\nimport urllib.request, urllib.parse\nimport random\n\n# Various UAs from Safari's Develop menu.\nUSER_AGENTS = (\n 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_7; en-us) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Safari/530.17',\n 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16',\n 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',\n 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.0.10) Gecko/2009042315 Firefox/3.0.10',\n 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',\n 'Opera/9.64 (Macintosh; Intel Mac OS X; U; en) Presto/2.1.1',\n 'Opera/9.64 (Windows NT 6.0; U; en) Presto/2.1.1',\n)\n\ndef load_url(url):\n handle = urllib.request.urlopen(urllib.request.Request(url, headers={'User-Agent': random.choice(USER_AGENTS), 'Referrer': 'http://darkdna.net/'}))\n data = handle.read().decode('utf-8')\n handle.close()\n return data\n\ndef escapeurl(part):\n return urllib.parse.quote(part)\n\nsuccessstrings = {\n'uppercase': [\"But it's not nice to shout in Japanese, either.\"],\n'lowercase': [\"But would it kill you to capitalize properly?\"],\n'dirty': [\"Shame on you, by the way.\",\n \"With language like that, you're a real goodwill ambassador!\"],\n'twitter': [\"This is better than Twitter, admit it.\"],\n'meme': [\"You're sure are up on your Internet jokes!\",\n \"That Internet joke is funny in any language!\"],\n'tp': [\"Translation Party was made by Will Carlough and Richard Boenigk.
Send us an email at translationparty@gmail.com\"],\n'darkdna': [\"So, you like DarkDNA?\",\n    \"Don't forget to visit us at irc.darkdna.net/#lobby.\",\n    \"Schongo originated on DarkDNA, you know.\"],\n'multipurpose': [\"This is a real translation party!\",\n    \"You should move to Japan!\",\n    \"You've done this before, haven't you.\",\n    \"Okay, I get it, you like Translation Party.\",\n    \"That's deep, man.\",\n    \"Come on, you can do better than that.\",\n    \"That didn't even make that much sense in English.\",\n    \"You've heard about Question Party right?\",\n    \"Translation Party is hiring, you know.\"],\n}\n\ndef successstring(eq):\n    if eq.lower() == eq:\n        return successstrings['lowercase'][random.randint(0,len(successstrings['lowercase'])-1)]\n    elif eq.upper() == eq:\n        return successstrings['uppercase'][random.randint(0,len(successstrings['uppercase'])-1)]\n\n    profanities = ['fuck', 'shit', 'ass', 'tit', 'bitch', 'screw', 'dick', 'pussy', 'nuts', 'balls']\n    for profanity in profanities:\n        if profanity in eq:\n            return successstrings['dirty'][random.randint(0,len(successstrings['dirty'])-1)]\n\n    memes = ['i can has', 'i can haz', 'all your base', 'never gonna']\n    for meme in memes:\n        if meme in eq:\n            return successstrings['meme'][random.randint(0,len(successstrings['meme'])-1)]\n\n    if 'twitter' in eq or 'tweet' in eq:\n        return successstrings['twitter'][random.randint(0,len(successstrings['twitter'])-1)]\n\n    tp = ['translation', 'party', 'who made', 'who built', 'will', 'carlough', 'rick', 'richard', 'boenigk']\n    for t in tp:\n        if t in eq:\n            return successstrings['tp'][random.randint(0,len(successstrings['tp'])-1)]\n\n    if 'darkdna' in eq.lower():\n        return successstrings['darkdna'][random.randint(0,len(successstrings['darkdna'])-1)]\n\n    return successstrings['multipurpose'][random.randint(0,len(successstrings['multipurpose'])-1)]\n\n\ndef onLoad():\n    @command('translationparty', 1)\n    def translationparty_cmd(ctx, cmd, message, *args):\n        lengres = 'null'\n        lres = 'null'\n        res = message\n        lang = 'en'\n        ctx.reply(\"`BLet's Go!:`B %s\" % message, 'translationparty')\n        i = 0\n        while lengres != res and i < 15:\n            i += 1\n            if lang == 'en':\n                data = load_url(\"http://ajax.googleapis.com/ajax/services/language/translate?v=1.0&q=\" + escapeurl(res) + \"&langpair=en%7Cja\")\n                parsed = json.loads(data)\n                response = parsed['responseData']['translatedText']\n                if lengres == 'null':\n                    ctx.reply(\"`BInto Japanese:`B %s\" % response, 'translationparty')\n                else:\n                    ctx.reply(\"`BBack into Japanese:`B %s\" % response, 'translationparty')\n                lengres = res\n                lres = res\n                res = response\n                lang = 'ja'\n            else:\n                data = load_url(\"http://ajax.googleapis.com/ajax/services/language/translate?v=1.0&q=\" + escapeurl(res) + \"&langpair=ja%7Cen\")\n                parsed = json.loads(data)\n                response = parsed['responseData']['translatedText']\n                ctx.reply(\"`BBack into English:`B %s\" % response, 'translationparty')\n                lres = res\n                res = response\n                lang = 'en'\n        if lengres == res:\n            ctx.reply(\"`BEquilibrium found! 
%s`B\" % successstring(lengres), 'translationparty')\n else:\n ctx.error(\"Couldn't find Equilibrium in 15 turns, Sorry!\")\n \n \n\n \n","repo_name":"DarkDNA/Schongo-Modules","sub_path":"broken/translationparty.py","file_name":"translationparty.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4366857819","text":"from ray import tune\nfrom ray.rllib.agents.ppo import PPOTrainer\nfrom ray.rllib.evaluation import MultiAgentEpisode\nfrom ray.rllib.evaluation.metrics import collect_episodes, summarize_episodes\n\nclass MyCallback(tune.Callback):\n def on_train_result(self, *args, **kwargs):\n result = kwargs[\"result\"]\n episodes, _ = collect_episodes(\n self.trainer,\n self.trainer.workers.local_worker(),\n num_episodes=result[\"episodes_this_iter\"])\n summary = summarize_episodes(episodes)\n print(f\"Mean episode reward: {summary['episode_reward_mean']}\")\n def on_episode_end(self, *, worker, base_env, policies, episode, **kwargs):\n # Access the episode rewards and print them out\n rewards = episode[\"rewards\"]\n mean_reward = sum(rewards) / len(rewards)\n print(\"Mean episode reward:\", mean_reward) \n","repo_name":"boettiger-lab/approx-model-or-approx-soln","sub_path":"src/OldCode/callback_fn.py","file_name":"callback_fn.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"74444530647","text":"import re\nfrom collections import defaultdict\n\ndef convert_to_int(val):\n return [int(x) for x in val]\n\n\ndef get_manhat_dist(sensor, beacon):\n return abs(sensor[0] - beacon[0]) + abs(sensor[1] - beacon[1])\n\n\ndef cover_full_manhat_dist(cavern, center, dist):\n y_dist = dist\n while y_dist >= -dist:\n new_y = center[1] + y_dist\n if new_y != 2000000:\n y_dist -= 1\n continue\n x_dist = dist - abs(y_dist)\n x_start = center[0] - x_dist\n x_end = center[0] + x_dist\n for x in range(x_start, x_end + 1, 1):\n new_coord = [x, new_y]\n cavern[new_coord[1]].add(new_coord[0])\n y_dist -= 1\n\n\ndef print_cavern(cavern):\n keys = list(cavern.keys())\n keys.sort()\n\n for key in keys:\n values = list(cavern[key])\n values.sort()\n print(\"y\", key, \" x:\", values)\n\n\ndef main(lines):\n cavern = defaultdict(lambda: set())\n beacons = []\n for line in lines:\n reg = re.compile(\"x=([-]?[0-9]+), y=([-]?[0-9]+)\")\n sensor_raw, beacon_raw = reg.findall(line)\n sensor = convert_to_int(sensor_raw)\n beacon = convert_to_int(beacon_raw)\n beacons.append(beacon)\n print(line, sensor, beacon)\n\n manhat_dist = get_manhat_dist(sensor, beacon)\n print(\"man dist:\", manhat_dist)\n cover_full_manhat_dist(cavern, sensor, manhat_dist)\n\n # print_cavern(cavern)\n # remove beacons from cavern since they apparently don't count :(\n for beacon in beacons:\n row = cavern[beacon[1]]\n if beacon[0] in row:\n row.remove(beacon[0])\n\n # print_cavern(cavern)\n print(len(cavern[2000000]))\n","repo_name":"kvosbur/AdventOfCode","sub_path":"2022/Python/day15/part1Solution.py","file_name":"part1Solution.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24779540835","text":"import os\n\n\nclass BaseConfig(object):\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n QUEUES = ['default']\n GOOGLE_ISS = 'https://accounts.google.com'\n APP_TOKEN = os.getenv('MKM_APP_TOKEN')\n APP_SECRET = os.getenv('MKM_APP_SECRET')\n 
ACCESS_TOKEN = os.getenv('MKM_ACCESS_TOKEN')\n ACCESS_TOKEN_SECRET = os.getenv('MKM_ACCESS_TOKEN_SECRET')\n GOOGLE_CLIENT_SECRET = os.getenv('GOOGLE_CLIENT_SECRET')\n GOOGLE_CLIENT_ID = os.getenv('GOOGLE_CLIENT_ID')\n LOG_FORMAT = '%(asctime)s %(name)s %(lineno)d %(levelname)s %(message)s'\n\n\nclass TestingConfig(BaseConfig):\n LOG_LEVEL = 'DEBUG'\n SQLALCHEMY_DATABASE_URI = \"postgresql://postgres@localhost:5432/test\"\n GOOGLE_ISS = 'GOOGLE_ISS'\n APP_TOKEN = 'MKM_APP_TOKEN'\n APP_SECRET = 'MKM_APP_SECRET'\n ACCESS_TOKEN = 'MKM_ACCESS_TOKEN'\n ACCESS_TOKEN_SECRET = 'MKM_ACCESS_TOKEN_SECRET'\n GOOGLE_CLIENT_SECRET = 'GOOGLE_CLIENT_SECRET'\n GOOGLE_CLIENT_ID = 'GOOGLE_CLIENT_ID'\n MKM_URL = \"http://dummy.mkm.url\"\n MKM_USER_URL = \"http://dummy.user.url\"\n GOOGLE_REDIRECT_URL = 'http://localhost:5000/oauth'\n\n\nclass DevelopmentConfig(BaseConfig):\n LOG_LEVEL = 'DEBUG'\n SQLALCHEMY_DATABASE_URI = 'postgresql://localhost:5432/wishlists'\n REDIS_URL = 'redis://localhost:6379/0'\n DEBUG = True\n MKM_URL = \"https://sandbox.cardmarket.com/ws/v2.0/output.json\"\n MKM_USER_URL = \"https://sandbox.cardmarket.com/en/Magic/Users\"\n GOOGLE_REDIRECT_URL = 'http://localhost:5000/oauth'\n\n\nclass ProductionConfig(BaseConfig):\n LOG_LEVEL = 'INFO'\n DEBUG = False\n SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL')\n REDIS_URL = os.getenv('REDIS_URL')\n MKM_URL = \"https://api.cardmarket.com/ws/v2.0/output.json\"\n MKM_USER_URL = \"https://www.cardmarket.com/en/Magic/Users\"\n GOOGLE_REDIRECT_URL = 'http://www.vampirictutor.com/oauth'\n\n\nPRODUCTS_CACHE_TTL = os.getenv('PRODUCTS_CACHE_TTL', 60 * 60) # in seconds\nARTICLES_CACHE_TTL = os.getenv('ARTICLES_CACHE_TTL', 60 * 10) # in seconds\n","repo_name":"tetianakh/wishlist-optimizer","sub_path":"wishlist_optimizer/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"1954891011","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the pairs function below.\ndef pairs(k, a):\n a.sort()\n j = 1\n c = 0\n i = 0\n while(i < len(a) and j < len(a)):\n print(i,j)\n diff = abs(a[j] - a[i])\n if diff == k:\n c += 1\n j += 1\n #j = len(a)-1\n #i += 1\n elif diff > k:\n #j -= 1\n i += 1\n elif diff < k:\n #i += 1\n j += 1\n return c\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n nk = input().split()\n n = int(nk[0])\n k = int(nk[1])\n arr = list(map(int, input().split()))\n\n result = pairs(k, arr)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"Rinki8890/PythonTrial","sub_path":"Search/pairUsingPointer.py","file_name":"pairUsingPointer.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74187469209","text":"\"\"\"setup exp2 package\"\"\"\n\nfrom pathlib import Path\nfrom setuptools import find_packages, setup\n\nHERE = Path(__file__).parent.resolve()\nREADME = (HERE / \"README.md\").read_text(encoding=\"utf-8\")\n\nsetup(\n name=\"exp2\",\n version=\"1.0\",\n description=\"Explosion explorer\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n 
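# The two-pointer scan in pairUsingPointer.py above can be cross-checked with
# the usual set-based formulation of the pairs problem (values are assumed
# distinct, as in the original exercise):
def pairs_via_set(k, arr):
    seen = set(arr)
    return sum(1 for x in seen if x + k in seen)

assert pairs_via_set(2, [1, 5, 3, 4, 2]) == 3  # pairs (1,3), (3,5), (2,4)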
packages=find_packages(),\n)\n","repo_name":"calebbuffa/explosion-explorer","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69874951768","text":"from MultVAE_Dataset import *\nfrom MultVAE_model import *\nfrom MultVAE_training_helper import *\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nimport datetime as dt\nimport argparse\nimport mlflow\nimport mlflow.pytorch\n\nparser = argparse.ArgumentParser(description='File Paths for training, validating, and testing')\nparser.add_argument('-tr', \n '--train_path', \n nargs = '?',\n type = str, \n help = 'training data path',\n default = '/scratch/work/js11133/sad_data/processed/full/train/user_to_queries.pkl')\nparser.add_argument('-v',\n '--val_path', \n nargs = '?',\n type = str,\n help = 'validation data path',\n default ='/scratch/work/js11133/sad_data/processed/full/val/user_to_queries.pkl' )\nparser.add_argument('-d', \n '--dict_path', \n nargs = '?',\n type = str,\n help = 'Dictionary path',\n default = '/scratch/work/js11133/sad_data/processed/hotel_hash.json')\nparser.add_argument('-s', \n '--save_path', \n nargs = '?',\n type = str,\n help = 'models save path',\n default = '/scratch/work/js11133/sad_data/models/multVAE/')\nparser.add_argument('-l', \n '--num_layers', \n nargs = '?',\n type = int,\n help = 'Number of hidden layers in MultVAE encoder and Decoder',\n default = 1)\nparser.add_argument('-lr', \n '--learning_rate', \n nargs = '?',\n type = float,\n help = 'Learning Rate for model',\n default = 1e-3)\nparser.add_argument('-hd', \n '--hidden_dim', \n nargs = '?',\n type = int,\n help = 'Size of Hidden Dimension',\n default = 600)\nparser.add_argument('-lt', \n '--latent_dim', \n nargs = '?',\n type = int,\n help = 'Size of Latent Dimension',\n default = 200)\nparser.add_argument('-n',\n '--model_run_id', \n type = str,\n required=True,\n help='model_run_id. should be the run_id of all the models in the model_folder',\n )\nparser.add_argument('-e',\n '--epoch', \n type = int,\n required=True,\n help='max epoch, the last epoch that you want to validate towards',\n default = 0\n )\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n \n #Define loaders\n train_loader, hotel_length = make_dataloader(data_path = args.train_path,\n hotel_path=args.dict_path,\n batch_size = 256)\n\n val_loader, _ = make_dataloader(data_path = args.val_path,\n hotel_path=args.dict_path,\n batch_size = 256)\n with mlflow.start_run(run_name = 'MultVAE'): \n \n run_id = mlflow.active_run().info.run_id\n print('MLFlow Run ID is :{}'.format(run_id))\n mlflow.log_param('dataset', 'full')\n mlflow.log_param('train_split', 'train')\n mlflow.log_param('model_name', 'MultVAE')\n mlflow.log_param('run_id', run_id)\n\n if torch.cuda.is_available():\n device = torch.device(\"cuda\")\n print('There are %d GPU(s) available.' 
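# The device-selection block above follows the standard torch idiom; the same
# check reduces to one line (torch assumed importable, as in this script):
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("using device:", device)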
% torch.cuda.device_count())\n print('We will use the GPU:', torch.cuda.get_device_name(0))\n else:\n print('No GPU available, using the CPU instead.')\n device = torch.device(\"cpu\") \n\n \n mlflow.log_param('device', device)\n mlflow.log_param('hotel_dim', hotel_length)\n mlflow.log_param('hidden_dim', args.hidden_dim)\n mlflow.log_param('latent_dim', args.latent_dim)\n mlflow.log_param('dropout', 0.5)\n mlflow.log_param('beta', 1.0)\n mlflow.log_param('learning_rate', args.learning_rate)\n mlflow.log_param('n_enc_hidden_layers', args.num_layers)\n mlflow.log_param('n_dec_hidden_layers', args.num_layers)\n\n model_name = 'multvae_{}_annealed_epoch_{}.uri'.format(args.model_run_id,args.epoch)\n model_path = os.path.join(args.save_path,model_name)\n\n model = mlflow.pytorch.load_model(model_path)\n model.to(device)\n time_start = dt.datetime.now()\n\n metrics, final_epoch =train_and_validate(\n model=model,\n train_loader=train_loader,\n valid_loader=val_loader,\n device = device,\n start_beta = 1.0,\n max_beta=1.0,\n num_epoch=450,\n learning_rate=args.learning_rate,\n max_patience=5,\n run_id = run_id,\n save_path = args.save_path,\n )\n time_end = dt.datetime.now()\n train_time = (time_end - time_start).total_seconds()\n\n\n with open('checkpoints/metrics_{}.pkl'.format(run_id), \"wb\" ) as f:\n pickle.dump(metrics,f)\n\n #mlflow.log_artifacts('/scratch/work/js11133/sad_data/models/multVAE', artifact_path = 'models_per_epoch')\n mlflow.log_artifact('checkpoints/metrics_{}.pkl'.format(run_id))\n \n mlflow.log_metric('Num_epochs', final_epoch + 1)\n mlflow.log_metric('training_time', train_time)\n print('Model trained in {} seconds'.format(train_time))\n\n mlflow.pytorch.save_model(pytorch_model = model, path = args.save_path + 'multvae_anneal_{}.uri'.format(run_id))\n\n\n\n \n\n \n \n","repo_name":"EricHe98/sad_final_project","sub_path":"src/models/MultVAE/train_preloaded.py","file_name":"train_preloaded.py","file_ext":"py","file_size_in_byte":6282,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"25647983295","text":"from nonebot import on_command\nfrom nonebot.rule import Rule\nfrom nonebot.adapters.onebot.v11.adapter import Message, MessageSegment\nfrom nonebot.adapters.onebot.v11 import Message, MessageSegment, Bot, MessageEvent\nfrom nonebot.matcher import Matcher\nfrom nonebot.params import CommandArg\n\nfrom pathlib import Path\nimport random\nimport os\n\nfabing = on_command(\"fabing\", rule=Rule(), aliases={'发病', '发病文学'}, priority=5)\nPACKAGE_PATH = os.path.dirname(os.path.abspath(__file__))\nTEMPLATE_PATH = str(Path(PACKAGE_PATH) / \"templates.txt\")\nfabing_tmpl = []\nrandom_index = 0\n\n\ndef load_fabing_tmpl():\n global fabing_tmpl\n global random_index\n\n fabing_tmpl = []\n with open(TEMPLATE_PATH, encoding='utf-8') as fp:\n contents = fp.readlines()\n for line in contents:\n fabing_tmpl.append(line.strip().replace('\\\\n', '\\n\\n'))\n\n random.shuffle(fabing_tmpl)\n random_index = 0\n\nload_fabing_tmpl()\n\n@fabing.handle()\nasync def handle_fabing(matcher: Matcher, event: MessageEvent, arg: Message = CommandArg()):\n global random_index\n \n if len(arg) == 0:\n await matcher.finish('缺少必要的参数:必须指定一个对象')\n \n args = arg.extract_plain_text().rsplit()\n if args[0] == '重载' or args[0] == 'reload':\n load_fabing_tmpl()\n await matcher.finish('重载成功')\n \n target = args[0]\n if random_index >= len(fabing_tmpl):\n random_index = 0\n tmpl = fabing_tmpl[random_index]\n random_index += 1\n await 
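# load_fabing_tmpl() above shuffles the templates once and then serves them
# with a wrapping index. itertools.cycle expresses the same rotation without
# manual wrap-around (a sketch with made-up templates, not the plugin's data):
import random
from itertools import cycle
templates = ['template a: {}', 'template b: {}', 'template c: {}']
random.shuffle(templates)
rotation = cycle(templates)
for _ in range(5):
    print(next(rotation).format('target'))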
matcher.finish(tmpl.format(target))\n","repo_name":"eigeen/nonebot_plugin_fabing","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"11905093235","text":"import requests\nimport sys\n\nCLIENT_ID = \"b38ca4ba21994e978786f16b70bce288\"\nCLIENT_SECRET = \"4fb5268d4aa7466ebf78f2ea9fca867f\"\nUSER_ID = \"22yxrpnqslh2ch2s2ls32irwy\"\n\nAUTH_URL = \"https://accounts.spotify.com/api/token\"\nBASE_URL = \"https://api.spotify.com/v1/\"\n\n# retrieving access token\nauth_response = requests.post(AUTH_URL, {\n    \"grant_type\": \"client_credentials\",\n    \"client_id\": CLIENT_ID,\n    \"client_secret\": CLIENT_SECRET,\n})\nauth_response_data = auth_response.json()\naccess_token = auth_response_data['access_token']\n\nheaders = {\n    \"Authorization\": \"Bearer {token}\".format(token=access_token)\n}\n\n# EXAMPLE CODE: searching data for a song \ntrack_id = \"5CalS8Gn69OOrR9aiw0ZO9\"\nr = requests.get(BASE_URL + \"audio-features/\" + track_id, headers=headers)\nr = r.json()\n# print(r)\n\n# get playlists: https://developer.spotify.com/console/get-playlists/\n# need to implement offset to get more than 50 playlists\nr = requests.get(BASE_URL + \"users/\" + USER_ID + \"/playlists?limit=50\", headers=headers)\nr = r.json()\n# print(r)\nprint(\"Number of playlists: \" + str(len(r[\"items\"])))\nfor playlist in r[\"items\"]:\n    print(playlist[\"name\"])","repo_name":"jonathanzhang53/spotify-toolkit","sub_path":"archive/apitest.py","file_name":"apitest.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"12390406982","text":"def config_system(self, section):\n    install_mgmt_primary = section.get(\"install_mgmt_primary\", False)\n    install_mgmt_secondary = section.get(\"install_mgmt_secondary\", False)\n    if install_mgmt_primary and install_mgmt_secondary:\n        self._raise_error(\"Only one parameter from ['install_mgmt_primary', 'install_mgmt_secondary'] \"\n                          \"can be set to true.\")\n\n    install_mds_primary = section.get(\"install_mds_primary\", False)\n    install_mds_secondary = section.get(\"install_mds_secondary\", False)\n    if install_mds_primary and install_mds_secondary:\n        self._raise_error(\"Only one parameter from ['install_mds_primary', 'install_mds_secondary'] \"\n                          \"can be set to true.\")\n\n    install_mds_interface = section.get(\"install_mds_interface\", None)\n    if install_mds_interface is not None and install_mds_interface not in self._seen_interfaces:\n        self._raise_error(\"Invalid value for 'install_mds_interface': Interface '%s' does not exist\"\n                          % install_mds_interface)\n\n    iface = section.get(\"iface\", None)\n    if iface is not None and iface not in self._seen_interfaces:\n        self._raise_error(\"Invalid value for 'iface': Interface '%s' does not exist\" % iface)\n\n    gateway_daip = section.get(\"gateway_daip\", False)\n    install_security_managment = section.get(\"install_security_managment\", False)\n    gateway_cluster_member = section.get(\"gateway_cluster_member\", False)\n    if gateway_daip and (install_security_managment or gateway_cluster_member):\n        self._raise_error(\"The parameter 'gateway_daip' must be set to false if one of the parameters \"\n                          \"[install_security_managment, gateway_cluster_member] is set to 
ture.\")\n","repo_name":"chkp-stuartgreen/cp-cloudinit-create","sub_path":"cloudyamlvalidator/_config_system.py","file_name":"_config_system.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10367693762","text":"import json\nimport logging\nfrom typing import Any, Dict, List, Optional, Union\n\nimport numpy as np\nimport pandas as pd\nimport yaml\nfrom pandas import DataFrame\nfrom pydantic import (\n Field,\n field_validator,\n model_validator,\n SerializeAsAny,\n ValidationInfo,\n)\n\nfrom xopt import _version\nfrom xopt.evaluator import Evaluator, validate_outputs\nfrom xopt.generator import Generator\nfrom xopt.generators import get_generator\nfrom xopt.pydantic import XoptBaseModel\nfrom xopt.utils import explode_all_columns\nfrom xopt.vocs import VOCS\n\n__version__ = _version.get_versions()[\"version\"]\n\nlogger = logging.getLogger(__name__)\n\n\nclass Xopt(XoptBaseModel):\n \"\"\"\n Object to handle a single optimization problem.\n\n Xopt is designed for managing a single optimization problem by unifying the\n definition, configuration, and execution of optimization tasks. It combines the\n Variables, Objective, Constraints, Statics (VOCS) definition with a generator for\n candidate generation and an evaluator for objective function evaluations.\n\n Parameters\n ----------\n vocs : VOCS\n VOCS object for defining the problem's variables, objectives, constraints, and\n statics.\n generator : SerializeAsAny[Generator]\n An object responsible for generating candidates for optimization.\n evaluator : SerializeAsAny[Evaluator]\n An object used for evaluating candidates generated by the generator.\n strict : bool, optional\n A flag indicating whether exceptions raised during evaluation should stop the\n optimization process.\n dump_file : str, optional\n An optional file path for dumping attributes of the xopt object and the\n results of evaluations.\n max_evaluations : int, optional\n An optional maximum number of evaluations to perform. 
If set, the optimization\n process will stop after reaching this limit.\n data : DataFrame, optional\n An optional DataFrame object for storing internal data related to the optimization\n process.\n serialize_torch : bool\n A flag indicating whether Torch (PyTorch) models should be serialized when\n saving them.\n serialize_inline : bool\n A flag indicating whether Torch models should be stored via binary string\n directly inside the main configuration file.\n\n Methods\n -------\n step()\n Executes one optimization cycle, generating candidates, submitting them for\n evaluation, waiting for evaluation results, and updating data storage.\n run()\n Runs the optimization process until the specified stopping criteria are met,\n such as reaching the maximum number of evaluations.\n evaluate(input_dict: Dict)\n Evaluates a candidate without storing data.\n evaluate_data(input_data)\n Evaluates a set of candidates, adding the results to the internal DataFrame.\n add_data(new_data)\n Adds new data to the internal DataFrame and the generator's data.\n reset_data()\n Resets the internal data by clearing the DataFrame.\n random_evaluate(n_samples=1, seed=None, **kwargs)\n Generates random inputs using the VOCS and evaluates them, adding the data to\n Xopt.\n yaml(**kwargs)\n Serializes the Xopt configuration to a YAML string.\n dump(file: str = None, **kwargs)\n Dumps the Xopt configuration to a specified file.\n dict(**kwargs) -> Dict\n Provides a custom dictionary representation of the Xopt configuration.\n json(**kwargs) -> str\n Serializes the Xopt configuration to a JSON string.\n \"\"\"\n\n vocs: VOCS = Field(description=\"VOCS object for Xopt\")\n generator: SerializeAsAny[Generator] = Field(\n description=\"generator object for Xopt\"\n )\n evaluator: SerializeAsAny[Evaluator] = Field(\n description=\"evaluator object for Xopt\"\n )\n strict: bool = Field(\n True,\n description=\"flag to indicate if exceptions raised during evaluation \"\n \"should stop Xopt\",\n )\n dump_file: Optional[str] = Field(\n None, description=\"file to dump the results of the evaluations\"\n )\n max_evaluations: Optional[int] = Field(\n None, description=\"maximum number of evaluations to perform\"\n )\n data: Optional[DataFrame] = Field(None, description=\"internal DataFrame object\")\n serialize_torch: bool = Field(\n False,\n description=\"flag to indicate that torch models should be serialized \"\n \"when dumping\",\n )\n serialize_inline: bool = Field(\n False,\n description=\"flag to indicate if torch models\"\n \" should be stored inside main config file\",\n )\n\n @model_validator(mode=\"before\")\n @classmethod\n def validate_model(cls, data: Any):\n if isinstance(data, dict):\n # validate vocs\n if isinstance(data[\"vocs\"], dict):\n data[\"vocs\"] = VOCS(**data[\"vocs\"])\n\n # validate generator\n if isinstance(data[\"generator\"], dict):\n name = data[\"generator\"].pop(\"name\")\n generator_class = get_generator(name)\n data[\"generator\"] = generator_class.model_validate(\n {**data[\"generator\"], \"vocs\": data[\"vocs\"]}\n )\n elif isinstance(data[\"generator\"], str):\n generator_class = get_generator(data[\"generator\"])\n\n data[\"generator\"] = generator_class.model_validate(\n {\"vocs\": data[\"vocs\"]}\n )\n\n return data\n\n @field_validator(\"evaluator\", mode=\"before\")\n def validate_evaluator(cls, value):\n if isinstance(value, dict):\n value = Evaluator(**value)\n\n return value\n\n @field_validator(\"data\", mode=\"before\")\n def validate_data(cls, v, info: ValidationInfo):\n if 
isinstance(v, dict):\n try:\n v = pd.DataFrame(v)\n except IndexError:\n v = pd.DataFrame(v, index=[0])\n\n # also add data to generator\n # TODO: find a more robust way of doing this\n info.data[\"generator\"].add_data(v)\n\n return v\n\n @property\n def n_data(self):\n if self.data is None:\n return 0\n else:\n return len(self.data)\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initialize Xopt.\n\n Parameters\n ----------\n args : tuple\n Positional arguments; a single YAML string can be passed as the only argument\n to initialize Xopt.\n kwargs : dict\n Keyword arguments for initializing Xopt.\n\n Raises\n ------\n ValueError\n If both a YAML string and keyword arguments are specified during\n initialization.\n If more than one positional argument is provided.\n\n Notes\n -----\n - If a single YAML string is provided in the `args` argument, it is deserialized\n into keyword arguments using `yaml.safe_load`.\n - When using the YAML string for initialization, no additional keyword arguments\n are allowed.\n\n \"\"\"\n if len(args) == 1:\n if len(kwargs) > 0:\n raise ValueError(\"cannot specify yaml string and kwargs for Xopt init\")\n super().__init__(**yaml.safe_load(args[0]))\n elif len(args) > 1:\n raise ValueError(\n \"arguments to Xopt must be either a single yaml string \"\n \"or a keyword arguments passed directly to pydantic\"\n )\n else:\n super().__init__(**kwargs)\n\n def step(self):\n \"\"\"\n Run one optimization cycle.\n\n This method performs the following steps:\n - Determines the number of candidates to request from the generator.\n - Passes the candidate request to the generator.\n - Submits candidates to the evaluator.\n - Waits until all evaluations are finished\n - Updates data storage and generator data storage (if applicable).\n\n \"\"\"\n logger.info(\"Running Xopt step\")\n\n # get number of candidates to generate\n n_generate = self.evaluator.max_workers\n\n # generate samples and submit to evaluator\n logger.debug(f\"Generating {n_generate} candidates\")\n new_samples = self.generator.generate(n_generate)\n\n # Evaluate data\n self.evaluate_data(new_samples)\n\n def run(self):\n \"\"\"\n Run until the maximum number of evaluations is reached or the generator is done.\n\n \"\"\"\n while not self.generator.is_done:\n # Stopping criteria\n if self.max_evaluations is not None:\n if self.n_data >= self.max_evaluations:\n logger.info(\n \"Xopt is done. 
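# add_data() below reindexes incoming rows so the internal frame keeps one
# contiguous integer index across appends. The same pattern in isolation:
import numpy as np
import pandas as pd
old = pd.DataFrame({'x': [0.1, 0.2]})
new = pd.DataFrame({'x': [0.3]})
new.index = np.arange(len(old), len(old) + len(new))  # new row gets index 2
combined = pd.concat([old, new], axis=0)
print(combined.index.tolist())  # [0, 1, 2]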
\"\n f\"Max evaluations {self.max_evaluations} reached.\"\n )\n break\n\n self.step()\n\n def evaluate(self, input_dict: Dict):\n \"\"\"\n Evaluate a candidate without storing data.\n\n Parameters\n ----------\n input_dict : Dict\n A dictionary representing the input data for candidate evaluation.\n\n Returns\n -------\n Any\n The result of the evaluation.\n\n \"\"\"\n self.vocs.validate_input_data(DataFrame(input_dict, index=[0]))\n return self.evaluator.evaluate(input_dict)\n\n def evaluate_data(\n self,\n input_data: Union[\n pd.DataFrame,\n List[Dict[str, float]],\n Dict[str, List[float]],\n Dict[str, float],\n ],\n ) -> pd.DataFrame:\n \"\"\"\n Evaluate data using the evaluator and wait for results.\n\n This method evaluates a set of candidates and adds the results to the internal\n DataFrame.\n\n Parameters\n ----------\n input_data : Union[pd.DataFrame, List[Dict[str, float], Dict[str, List[float],\n Dict[str, float]]]\n The input data for evaluation, which can be provided as a DataFrame, a list of\n dictionaries, or a single dictionary.\n\n Returns\n -------\n pd.DataFrame\n The results of the evaluations added to the internal DataFrame.\n\n \"\"\"\n # translate input data into pandas dataframes\n if not isinstance(input_data, DataFrame):\n try:\n input_data = DataFrame(input_data)\n except ValueError:\n input_data = DataFrame(input_data, index=[0])\n\n logger.debug(f\"Evaluating {len(input_data)} inputs\")\n self.vocs.validate_input_data(input_data)\n output_data = self.evaluator.evaluate_data(input_data)\n\n if self.strict:\n validate_outputs(output_data)\n new_data = pd.concat([input_data, output_data], axis=1)\n\n # explode any list like results if all the output names exist\n new_data = explode_all_columns(new_data)\n\n self.add_data(new_data)\n\n # dump data to file if specified\n if self.dump_file is not None:\n self.dump()\n\n return new_data\n\n def add_data(self, new_data: pd.DataFrame):\n \"\"\"\n Concatenate new data to the internal DataFrame and add it to the generator's\n data.\n\n Parameters\n ----------\n new_data : pd.DataFrame\n New data to be added to the internal DataFrame.\n\n \"\"\"\n logger.debug(f\"Adding {len(new_data)} new data to internal dataframes\")\n\n # Set internal dataframe.\n if self.data is not None:\n new_data = pd.DataFrame(new_data, copy=True) # copy for reindexing\n new_data.index = np.arange(len(self.data), len(self.data) + len(new_data))\n\n self.data = pd.concat([self.data, new_data], axis=0)\n else:\n self.data = new_data\n self.generator.add_data(new_data)\n\n def reset_data(self):\n \"\"\"\n Reset the internal data by clearing the DataFrame.\n\n \"\"\"\n self.data = pd.DataFrame()\n self.generator.data = pd.DataFrame()\n\n def random_evaluate(self, n_samples=1, seed=None, **kwargs):\n \"\"\"\n Convenience method to generate random inputs using VOCs and evaluate them.\n\n This method generates random inputs using the Variables, Objectives,\n Constraints, and Statics (VOCS) and evaluates them, adding the data to the\n Xopt object and generator.\n\n Parameters\n ----------\n n_samples : int, optional\n The number of random samples to generate.\n seed : int, optional\n The random seed for reproducibility.\n **kwargs\n Additional keyword arguments for generating random inputs.\n\n Returns\n -------\n pd.DataFrame\n The results of the evaluations added to the internal DataFrame.\n\n \"\"\"\n random_inputs = self.vocs.random_inputs(n_samples, seed=seed, **kwargs)\n result = self.evaluate_data(random_inputs)\n return result\n\n def yaml(self, 
**kwargs):\n \"\"\"\n Serialize the Xopt configuration to a YAML string.\n\n Parameters\n ----------\n **kwargs\n Additional keyword arguments for customizing serialization.\n\n Returns\n -------\n str\n The Xopt configuration serialized as a YAML string.\n\n \"\"\"\n output = json.loads(\n self.json(\n serialize_torch=self.serialize_torch,\n serialize_inline=self.serialize_inline,\n **kwargs,\n )\n )\n return yaml.dump(output)\n\n def dump(self, file: str = None, **kwargs):\n \"\"\"\n Dump data to a file.\n\n Parameters\n ----------\n file : str, optional\n The path to the file where the Xopt configuration will be dumped.\n **kwargs\n Additional keyword arguments for customizing the dump.\n\n Raises\n ------\n ValueError\n If no dump file is specified via argument or in the `dump_file` attribute.\n\n \"\"\"\n fname = file if file is not None else self.dump_file\n\n if fname is None:\n raise ValueError(\n \"no dump file specified via argument or in `dump_file` attribute\"\n )\n else:\n with open(fname, \"w\") as f:\n f.write(self.yaml(**kwargs))\n logger.debug(f\"Dumped state to YAML file: {fname}\")\n\n def dict(self, **kwargs) -> Dict:\n \"\"\"\n Handle custom dictionary generation.\n\n Parameters\n ----------\n **kwargs\n Additional keyword arguments for customizing the dictionary generation.\n\n Returns\n -------\n Dict\n A dictionary representation of the Xopt configuration.\n\n \"\"\"\n result = super().model_dump(**kwargs)\n result[\"generator\"] = {\"name\": self.generator.name} | result[\"generator\"]\n return result\n\n def json(self, **kwargs) -> str:\n \"\"\"\n Handle custom serialization of generators and DataFrames.\n\n Parameters\n ----------\n **kwargs\n Additional keyword arguments for customizing serialization.\n\n Returns\n -------\n str\n The Xopt configuration serialized as a JSON string.\n\n \"\"\"\n result = super().to_json(**kwargs)\n dict_result = json.loads(result)\n dict_result[\"generator\"] = {\"name\": self.generator.name} | dict_result[\n \"generator\"\n ]\n dict_result[\"data\"] = (\n json.loads(self.data.to_json()) if self.data is not None else None\n )\n\n # TODO: implement version checking\n # dict_result[\"xopt_version\"] = __version__\n\n return json.dumps(dict_result)\n\n def __repr__(self):\n \"\"\"\n Return information about the Xopt object, including the YAML representation\n without data.\n\n Returns\n -------\n str\n A string representation of the Xopt object.\n\n \"\"\"\n\n # get dict minus data\n config = json.loads(self.json())\n config.pop(\"data\")\n return f\"\"\"\n Xopt\n________________________________\nVersion: {__version__}\nData size: {self.n_data}\nConfig as YAML:\n{yaml.dump(config)}\n\"\"\"\n\n def __str__(self):\n \"\"\"\n Return a string representation of the Xopt object.\n\n Returns\n -------\n str\n A string representation of the Xopt object.\n\n \"\"\"\n return self.__repr__()\n","repo_name":"ChristopherMayes/Xopt","sub_path":"xopt/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":16564,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"31"} +{"seq_id":"12296843315","text":"rock = '''\n _______\n---' ____)\n (_____)\n (_____)\n (____)\n---.__(___)\n'''\n\npaper = '''\n _______\n---' ____)____\n ______)\n _______)\n _______)\n---.__________)\n'''\n\nscissors = '''\n _______\n---' ____)____\n ______)\n __________)\n (____)\n---.__(___)\n'''\noptions = [rock, paper, scissors]\noptionstxt = [\"Rock\", \"Paper\", \"Scissors\"]\nimport random\n\n# Player Choice\nchoice = 
int(input(\"What do you choose? 0 for Rock, 1 for Paper, 2 for Scissors?\\n\"))\n\n# Computer Choice\ncomp_sel = random.randint(0, 2)\nprint(comp_sel)\n\nif choice >= 3 or choice < 0:\n print(\"You typed an invalid number, you lose!\")\nelse:\n print(options[choice])\n print(\"Computer selects:\")\n print(optionstxt[comp_sel])\n print(options[comp_sel])\n\n if choice == 0 and comp_sel == 2:\n print(\"You Win!\")\n elif comp_sel == 0 and choice == 2:\n print(\"You Lose!\")\n elif comp_sel > choice:\n print(\"You Lose!\")\n elif choice > comp_sel:\n print(\"You Win!\")\n elif choice == comp_sel:\n print(\"It's A Draw\")\n\n# This code is a long way of doing it but still works\n# if (choice == 0):\n# if (comp_sel == 0):\n# print(options[choice] + \"\\n\" + \"Computer choose:\\n\" + optionstxt[comp_sel] + \"\\n\" + options[comp_sel] + \"\\n\" + \\\n# \"It's A Draw\")\n# elif (comp_sel == 1):\n# print(options[choice] + \"\\n\" + \"Computer choose:\\n\" + optionstxt[comp_sel] + \"\\n\" + options[comp_sel] + \"\\n\" + \\\n# \"Computer Wins!\")\n# elif (comp_sel == 2):\n# print(options[choice] + \"\\n\" + \"Computer choose:\\n\" + optionstxt[comp_sel] + \"\\n\" + options[comp_sel] + \"\\n\" + \\\n# \"You Win!\")\n# elif (choice == 1):\n# if (comp_sel == 0):\n# print(options[choice] + \"\\n\" + \"Computer choose:\\n\" + optionstxt[comp_sel] + \"\\n\" + options[comp_sel] + \"\\n\" + \\\n# \"You Win!\")\n# elif (comp_sel == 1):\n# print(options[choice] + \"\\n\" + \"Computer choose:\\n\" + optionstxt[comp_sel] + \"\\n\" + options[comp_sel] + \"\\n\" + \\\n# \"It's A Draw\")\n# elif (comp_sel == 2):\n# print(options[choice] + \"\\n\" + \"Computer choose:\\n\" + optionstxt[comp_sel] + \"\\n\" + options[comp_sel] + \"\\n\" + \\\n# \"Computer Wins!\")\n# elif (choice == 2):\n# if (comp_sel == 0):\n# print(options[choice] + \"\\n\" + \"Computer choose:\\n\" + optionstxt[comp_sel] + \"\\n\" + options[comp_sel] + \"\\n\" + \\\n# \"Computer Wins!\")\n# elif (comp_sel == 1):\n# print(options[choice] + \"\\n\" + \"Computer choose:\\n\" + optionstxt[comp_sel] + \"\\n\" + options[comp_sel] + \"\\n\" + \\\n# \"You Win!\")\n# elif (comp_sel == 2):\n# print(options[choice] + \"\\n\" + \"Computer choose:\\n\" + optionstxt[comp_sel] + \"\\n\" + options[comp_sel] + \"\\n\" + \\\n# \"It's A Draw\")\n# else:\n# print (f\"{choice} is an invalid option. 
Please try again!\")\n","repo_name":"santoshjani31/100-Days-Of-Code","sub_path":"Day 04 - Rock Paper Scissors/Rock_Paper_Scissors.py","file_name":"Rock_Paper_Scissors.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40103018320","text":"#!/usr/bin/env python2\n\nimport os\nimport sys\nimport shutil\nimport argparse\n\n# DDL modules/packages\nimport binary_conversion\n\nPROJECT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))\nASSETS_DIR = os.path.abspath(os.path.join(PROJECT_DIR, 'assets'))\nBUILT_ASSETS_DIR = os.path.abspath(os.path.join(PROJECT_DIR, '_built_assets'))\nBINARY_CONVERSION_FOLDERS = ['animations']\n\ndef parse_args(argv=[]):\n \"\"\"\n Parse the arguments provided by the user\n :param argv: List of arguments\n :return: Parsed user provided arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Place animation assets')\n parser.add_argument('--verbose',\n action='store_true',\n help='Prints extra output')\n parser.add_argument('--replace-existing-assets',\n action='store_true',\n default=False,\n help='Replaces existing assets')\n parser.add_argument('--asset-relocation-dir',\n action='store',\n default=BUILT_ASSETS_DIR,\n help='Absolute path to the dir you want to export the assets')\n parser.add_argument('--flatc',\n action='store',\n default='',\n help='Absolute path from the root of your system to the dir containing flatc executables')\n\n options, _ = parser.parse_known_args(argv)\n return options\n\ndef listdir_nohidden(path):\n \"\"\"\n Get a non-hidden files from a directory\n :param path: Absolute path to directory\n :return: List of items at the top level of the directory\n \"\"\"\n dir_list = []\n for dir in os.listdir(path):\n if not dir.startswith('.'):\n dir_list.append(dir)\n return dir_list\n\ndef convert_json_to_binary(json_files, bin_name, dest_dir, flatc_loc=''):\n \"\"\"\n Convert given JSON files into a single binary\n :param json_files: List of JSON file to convert (list contains absolute path to each file)\n :param bin_name: Name of the binary to be generated\n :param dest_dir: Absolute path to the directory where binary is to be stored\n :param flatc_loc: Absolute path to the flatc binaries\n :return: Nothing\n \"\"\"\n bin_name = bin_name.lower()\n try:\n bin_file = binary_conversion.main(json_files, bin_name, flatc_loc)\n except Exception as e:\n print(\"%s: %s\" % (type(e).__name__, e.message))\n # If binary conversion failed, use the json files...\n for json_file in json_files:\n json_dest = os.path.join(dest_dir, os.path.basename(json_file))\n shutil.copy(json_file, json_dest)\n print(\"Restored %s\" % json_dest)\n else:\n bin_dest = os.path.join(dest_dir, os.path.split(bin_file)[1])\n shutil.move(bin_file, bin_dest)\n \ndef place_animation_assets(location, flatc_loc='', replace_existing_assets=False):\n \"\"\"\n Place required animation assets into a desired directory\n :param location: Absolute path to the directory where the assets need to be placed\n :param flatc_loc: Absolute path to the flatc binaries\n :param replace_existing_assets: Replaces existing assets; Set to true if you want to replace existing assets\n :return: Nothing\n \"\"\"\n if replace_existing_assets:\n print(\"[WARN] Existing assets will be replaced!\")\n # Go through each type of assets\n for dir in listdir_nohidden(ASSETS_DIR):\n dst_path = os.path.join(location, dir)\n if (os.path.isdir(dst_path) and replace_existing_assets) or 
(os.path.isdir(dst_path) and not listdir_nohidden(dst_path)):\n shutil.rmtree(dst_path)\n if not os.path.isdir(dst_path):\n print('Working on ' + dir + '...')\n if dir not in BINARY_CONVERSION_FOLDERS:\n shutil.copytree(os.path.join(ASSETS_DIR, dir), dst_path)\n else:\n os.makedirs(dst_path)\n conv_dir_path = os.path.join(ASSETS_DIR, dir)\n for sub_dir in listdir_nohidden(conv_dir_path):\n src_path = os.path.join(conv_dir_path, sub_dir)\n files = list(listdir_nohidden(src_path))\n files = map(lambda x: os.path.join(src_path, x), files)\n convert_json_to_binary(files, sub_dir, dst_path, flatc_loc)\n else:\n print(dir + ' already exists!')\n \n \nif __name__ == '__main__':\n options = parse_args(sys.argv[1:])\n relocation_dir = os.path.abspath(options.asset_relocation_dir)\n if options.verbose:\n print(\"asset-relocation-dir: {}\".format(relocation_dir))\n if not options.flatc:\n print(\"[WARN] Path to flatc binaries not provided!\\nChecking if flatc is installed on the system...\")\n if not binary_conversion.is_flatc_installed():\n sys.exit(\"[ERROR] Flatc not found! Try installing it using the package manager of your choice.\")\n place_animation_assets(relocation_dir, options.flatc, options.replace_existing_assets)\n","repo_name":"digital-dream-labs/vector-animations-build","sub_path":"scripts/buildScripts/animation_assets.py","file_name":"animation_assets.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"31"} +{"seq_id":"11034130500","text":"from typing import Any, Dict, List, Type, TypeVar, Union, cast\n\nfrom attrs import define as _attrs_define\nfrom attrs import field as _attrs_field\n\nfrom ..types import UNSET, Unset\n\nT = TypeVar(\"T\", bound=\"MainSpellInformation\")\n\n\n@_attrs_define\nclass MainSpellInformation:\n \"\"\"\n Attributes:\n amount (Union[Unset, int]):\n city (Union[Unset, List[str]]):\n cooldown_alone (Union[Unset, int]):\n cooldown_group (Union[Unset, int]):\n damage_type (Union[Unset, str]):\n formula (Union[Unset, str]):\n group_attack (Union[Unset, bool]):\n group_healing (Union[Unset, bool]):\n group_support (Union[Unset, bool]):\n level (Union[Unset, int]):\n mana (Union[Unset, int]):\n premium_only (Union[Unset, bool]):\n price (Union[Unset, int]):\n soul_points (Union[Unset, int]):\n type_instant (Union[Unset, bool]):\n type_rune (Union[Unset, bool]):\n vocation (Union[Unset, List[str]]):\n \"\"\"\n\n amount: Union[Unset, int] = UNSET\n city: Union[Unset, List[str]] = UNSET\n cooldown_alone: Union[Unset, int] = UNSET\n cooldown_group: Union[Unset, int] = UNSET\n damage_type: Union[Unset, str] = UNSET\n formula: Union[Unset, str] = UNSET\n group_attack: Union[Unset, bool] = UNSET\n group_healing: Union[Unset, bool] = UNSET\n group_support: Union[Unset, bool] = UNSET\n level: Union[Unset, int] = UNSET\n mana: Union[Unset, int] = UNSET\n premium_only: Union[Unset, bool] = UNSET\n price: Union[Unset, int] = UNSET\n soul_points: Union[Unset, int] = UNSET\n type_instant: Union[Unset, bool] = UNSET\n type_rune: Union[Unset, bool] = UNSET\n vocation: Union[Unset, List[str]] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n amount = self.amount\n city: Union[Unset, List[str]] = UNSET\n if not isinstance(self.city, Unset):\n city = self.city\n\n cooldown_alone = self.cooldown_alone\n cooldown_group = self.cooldown_group\n damage_type = self.damage_type\n formula = self.formula\n group_attack = 
self.group_attack\n group_healing = self.group_healing\n group_support = self.group_support\n level = self.level\n mana = self.mana\n premium_only = self.premium_only\n price = self.price\n soul_points = self.soul_points\n type_instant = self.type_instant\n type_rune = self.type_rune\n vocation: Union[Unset, List[str]] = UNSET\n if not isinstance(self.vocation, Unset):\n vocation = self.vocation\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({})\n if amount is not UNSET:\n field_dict[\"amount\"] = amount\n if city is not UNSET:\n field_dict[\"city\"] = city\n if cooldown_alone is not UNSET:\n field_dict[\"cooldown_alone\"] = cooldown_alone\n if cooldown_group is not UNSET:\n field_dict[\"cooldown_group\"] = cooldown_group\n if damage_type is not UNSET:\n field_dict[\"damage_type\"] = damage_type\n if formula is not UNSET:\n field_dict[\"formula\"] = formula\n if group_attack is not UNSET:\n field_dict[\"group_attack\"] = group_attack\n if group_healing is not UNSET:\n field_dict[\"group_healing\"] = group_healing\n if group_support is not UNSET:\n field_dict[\"group_support\"] = group_support\n if level is not UNSET:\n field_dict[\"level\"] = level\n if mana is not UNSET:\n field_dict[\"mana\"] = mana\n if premium_only is not UNSET:\n field_dict[\"premium_only\"] = premium_only\n if price is not UNSET:\n field_dict[\"price\"] = price\n if soul_points is not UNSET:\n field_dict[\"soul_points\"] = soul_points\n if type_instant is not UNSET:\n field_dict[\"type_instant\"] = type_instant\n if type_rune is not UNSET:\n field_dict[\"type_rune\"] = type_rune\n if vocation is not UNSET:\n field_dict[\"vocation\"] = vocation\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n amount = d.pop(\"amount\", UNSET)\n\n city = cast(List[str], d.pop(\"city\", UNSET))\n\n cooldown_alone = d.pop(\"cooldown_alone\", UNSET)\n\n cooldown_group = d.pop(\"cooldown_group\", UNSET)\n\n damage_type = d.pop(\"damage_type\", UNSET)\n\n formula = d.pop(\"formula\", UNSET)\n\n group_attack = d.pop(\"group_attack\", UNSET)\n\n group_healing = d.pop(\"group_healing\", UNSET)\n\n group_support = d.pop(\"group_support\", UNSET)\n\n level = d.pop(\"level\", UNSET)\n\n mana = d.pop(\"mana\", UNSET)\n\n premium_only = d.pop(\"premium_only\", UNSET)\n\n price = d.pop(\"price\", UNSET)\n\n soul_points = d.pop(\"soul_points\", UNSET)\n\n type_instant = d.pop(\"type_instant\", UNSET)\n\n type_rune = d.pop(\"type_rune\", UNSET)\n\n vocation = cast(List[str], d.pop(\"vocation\", UNSET))\n\n main_spell_information = cls(\n amount=amount,\n city=city,\n cooldown_alone=cooldown_alone,\n cooldown_group=cooldown_group,\n damage_type=damage_type,\n formula=formula,\n group_attack=group_attack,\n group_healing=group_healing,\n group_support=group_support,\n level=level,\n mana=mana,\n premium_only=premium_only,\n price=price,\n soul_points=soul_points,\n type_instant=type_instant,\n type_rune=type_rune,\n vocation=vocation,\n )\n\n main_spell_information.additional_properties = d\n return main_spell_information\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) 
-> bool:\n return key in self.additional_properties\n","repo_name":"wiese-m/tibia-data-api-client","sub_path":"tibia_data_api_client/models/main_spell_information.py","file_name":"main_spell_information.py","file_ext":"py","file_size_in_byte":6407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33050631784","text":"\"\"\"Testing data for query.\"\"\"\n\nfrom pathlib import Path\n\nimport yaml\n\nROOT = Path(__file__).parent\n\nUUID = \"df735f02-efe5-4b07-b212-583bb99ba241\"\nSUBMISSION_DATE = \"2020-10-01\"\nAPP_BUILD_ID = \"2020100100\"\n\n# NOTE: what happens when channel = \"*\"?\nHISTOGRAM_BUCKET_COUNTS = [\n {\n \"agg_type\": \"summed_histogram\",\n \"app_build_id\": \"*\",\n \"app_version\": 84,\n \"channel\": \"*\",\n \"key\": \"\",\n \"metric\": \"network_tcp_connection\",\n \"metric_type\": \"timing_distribution\",\n \"os\": \"*\",\n \"ping_type\": \"*\",\n \"range_max\": 3,\n \"record\": {\"key\": \"1\", \"value\": 1.0},\n },\n {\n \"agg_type\": \"summed_histogram\",\n \"app_build_id\": \"*\",\n \"app_version\": 84,\n \"channel\": \"*\",\n \"key\": \"\",\n \"metric\": \"network_tcp_connection\",\n \"metric_type\": \"timing_distribution\",\n \"os\": \"*\",\n \"ping_type\": \"*\",\n \"range_max\": 3,\n \"record\": {\"key\": \"2\", \"value\": 0.0},\n },\n]\n\nEXPECT = [\n {\n \"agg_type\": \"histogram\",\n \"aggregates\": [\n {\"key\": \"0\", \"value\": 0.125},\n {\"key\": \"1\", \"value\": 0.625},\n {\"key\": \"2\", \"value\": 0.125},\n {\"key\": \"3\", \"value\": 0.125},\n ],\n \"app_build_id\": \"*\",\n \"app_version\": 84,\n \"channel\": \"*\",\n \"client_agg_type\": \"summed_histogram\",\n \"key\": \"\",\n \"metric\": \"network_tcp_connection\",\n \"metric_type\": \"timing_distribution\",\n \"os\": \"*\",\n \"ping_type\": \"*\",\n \"total_users\": 1,\n }\n]\n\nprefix = \"glam_etl\"\ntables = [\n (\n f\"{prefix}.org_mozilla_fenix_glam_nightly__histogram_bucket_counts_v1.yaml\",\n HISTOGRAM_BUCKET_COUNTS,\n ),\n (\"expect.yaml\", EXPECT),\n]\nfor name, data in tables:\n with (ROOT / name).open(\"w\") as fp:\n yaml.dump(data, fp)\n","repo_name":"mozilla/bigquery-etl","sub_path":"tests/sql/glam-fenix-dev/glam_etl/org_mozilla_fenix_glam_nightly__histogram_probe_counts_v1/test_minimal/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":210,"dataset":"github-code","pt":"31"} +{"seq_id":"31364838546","text":"import random \r\n\r\ndef joga():\r\n\tprint(\"\\n*******************\")\r\n\tprint(\"Jogo da adivinhação\")\r\n\tprint(\"*******************\\n\")\r\n\r\n\tprint(\"Escolha o nível de dificuldade:\")\r\n\r\n\tpontos = 1000\r\n\trange_poss = 10\r\n\r\n\twhile(True):\r\n\t\tdif = input(\"1- Fácil \\n2- Médio \\n3- Difícil\\n\")\r\n\r\n\t\tif (dif == \"1\"):\r\n\t\t\tbreak\r\n\t\telif(dif == \"2\"):\r\n\t\t\trange_poss = 50\r\n\t\t\tpontos = 1200\r\n\t\t\tbreak\r\n\t\telif(dif == \"3\"):\r\n\t\t\trange_poss = 100\r\n\t\t\tpontos = 1300\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tprint(\"Opção inválida!\")\r\n\r\n\tnumero_secreto = random.randrange(1,range_poss + 1)\r\n\tchances = 5\r\n\r\n\tfor tentativas in range(1, chances + 1):\r\n\r\n\t\tprint(f\"Tentativa {tentativas} de {chances} \\n\")\r\n\t\tchute = input(f\"Digite o seu número entre 1 e {range_poss}: \")\r\n\t\t\r\n\t\tif(chute.isnumeric()):\r\n\t\t\tchute = int(chute)\r\n\t\telse:\r\n\t\t\tprint(\"\\ninválido\\n\")\r\n\t\t\tcontinue\r\n\t\tif (chute < 1) or (chute > 
range_poss):\r\n\t\t\tprint(f\"O número está entre 1 e {range_poss}.\")\r\n\t\t\tcontinue\r\n\t\telif (numero_secreto == chute):\r\n\t\t\tprint(\"\\n**Você acertou!\")\r\n\t\t\tprint(f\"Pontuação: {pontos}\")\r\n\t\t\tbreak\r\n\t\telif (tentativas==chances):\r\n\t\t\tprint(\"\\n Errou! \\n\")\r\n\t\t\tprint(f\"O número era {numero_secreto}.\")\r\n\t\t\tbreak\r\n\t\telif(chute > numero_secreto):\r\n\t\t\tprint(\"\\n¬¬Errou! O número é menor. \\n\")\r\n\t\telse:\r\n\t\t\tprint(\"\\n¬¬Errou! O número é maior. \\n\")\r\n\r\n\t\tpontos = pontos - abs(numero_secreto - chute)\r\n\r\n\r\nif(__name__==\"__main__\"):\r\n\tjoga()\r\n\r\n\r\n\r\n\r\n","repo_name":"Aquiles-b/mini-games-py","sub_path":"games/adivinhacao.py","file_name":"adivinhacao.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9402212411","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Given training and testing data, learning rate epsilon, batch size, and regularization strength alpha,\n# conduct stochastic gradient descent (SGD) to optimize the weight matrix Wtilde (785x10).\n# Then return Wtilde.\ndef softmaxRegression (trainingImages, trainingLabels, testingImages, testingLabels, epsilon, batchSize, alpha):\n classes = 10 #we will have 10 w vectors in w\n epochCount = 5\n\n #the following code initializes Wtilde\n Wtilde = np.random.randn(np.shape(trainingImages)[1], classes) * 1e-5 #creates a 785 x 10 array, each column refers to the weights of a class, each row is the weight of a pixel in a class, the last row is the bias\n #the following code shuffles the data\n rng_state = np.random.get_state() #lets me shuffle both the images and labels so they still match up\n np.random.shuffle(trainingImages) #randomly shuffles the rows (images)\n np.random.set_state(rng_state) #resets the state to what it was for Xtilde\n np.random.shuffle(trainingLabels) #shuffles trainingLabels in the same way\n\n trainingLosses = [] #keeps track of the training loss throughout training\n last20Batches = [] #keeps track of the training loss for the final 20 batches\n for epoch in range(epochCount): #for every epoch out of epochCount do:\n startPoint = 0 #the starting index of images we parse in a batch\n\n for j in range(np.int32(np.ceil(np.shape(trainingImages)[0]/batchSize))): #for every mini-batch j out of 60000/batchSize = 60 batches do:\n subsetRange = None #the images in a batch\n\n #if the final batch is not >= batchSize, this batch contains whatever is left\n if(np.shape(trainingImages[startPoint:-1, :])[0] < batchSize):\n subsetRange = range(startPoint, j*batchSize + np.shape(trainingImages[startPoint:-1, :])[0])\n #else, we know that we have enough images for a full batch\n else:\n subsetRange = range(startPoint, j*batchSize + batchSize)\n\n xSubset = trainingImages[subsetRange, :] # select rows [subsetRange, +batchSize)\n ySubset = trainingLabels[subsetRange, :] #gets the subset of ground truth values\n yhat = softmax(np.asarray(xSubset).dot(Wtilde)) #computes yhat for subset\n gradient = computeGradient(xSubset, yhat, ySubset, Wtilde, batchSize, alpha) #computes the regularized gradient\n Wtilde = Wtilde - (gradient*epsilon) #applies changes to Wtilde\n\n startPoint += batchSize #increases the startpoint by batchSize so we don't start from 0 again\n\n #keeps track of the final 20 batches\n if((epoch == epochCount - 1) and (j >= (np.shape(trainingImages)[0]/batchSize) - 20)):\n last20Batches.append(fCE(trainingImages, 
trainingLabels, Wtilde, alpha))\n \n trainingLosses.append(fCE(trainingImages, trainingLabels, Wtilde, alpha))\n print(\"Epoch\", epoch + 1, \"completed...\")\n print(\"training loss: \", trainingLosses[epoch])\n\n print(\"\\n==================\\nTraining fCE for final 20 batches:\")\n count = 1\n for v in last20Batches:\n print(str(count) + ':', v)\n count += 1\n return Wtilde\n\n#returns a design matrix Xtilde for SGD\n#trainingImages: a 2D array where each row is an image, and each column is a pixel\n#returns: a 2D array, equal to trainingImages but with an extra row of ones to compute the bias\ndef getDesignMatrix(trainingImages):\n xShape = np.shape(trainingImages) #shape = (#rows, #col); each row is an image, each col is a pixel in the image\n vectorOf1s = np.ones(shape = (xShape[0], 1)) #creates a 1D column vector of size (60000, 1)\n return(np.append(arr = trainingImages, values = vectorOf1s, axis = 1)) #added a vector of 1s for bias \n\n#returns a normalized yhat\n#yhat: a 2D array where each row is the probability vector of an image, and each column is the probability that an image is of a specific classification\n#returns: a 2D array that is a normalized yhat\ndef softmax(yhat):\n yhat = np.exp(yhat) #enforce non negativity\n\n summation = np.sum(yhat, axis = 1) #sums each row together\n for row in range(np.shape(yhat)[0]):\n yhat[row] = yhat[row] / summation[row] #enforces the sum of each prediction for an image = 1\n return yhat\n\n\n#computes the regularized gradient of fCE\n#x: a 2D array, where each row is an image, and each column is a pixel in an image\n#yhat: a 2D array, where each row is a set of normalized probabilities that an image is of a classification\n#y: a 2D array, where each row is the ground-truth classifications of an image\n#Wtilde: a 2D array, where each row is the weights for each classification applied to a pixel in an image, and each column is an image\n#n: an int; the batch size\n#returns: a float64 of the gradient of fCE\ndef computeGradient(x, yhat, y, Wtilde, n, alpha):\n gradFCE = np.transpose(x).dot(yhat - y) / n #computes unregularized gradient\n regularize = alpha * np.mean(a = Wtilde[:-1], axis = 0) #gets the derivative of the L2 regularization without bias\n regularize = np.repeat(a = [regularize], repeats = np.shape(gradFCE)[0], axis = 0)\n return(gradFCE + regularize)\n\n#1-hot encodes the labels\n#labels: a 1D array of classifications\n#classes: the number of labels\n#returns: a 2D array where each column represents a classification\ndef oneHotEncode(labels, classes):\n y = np.zeros((len(labels), classes)) #creates a 2D array of the proper size\n for i in range(len(labels)): #sets the index of the label to 1\n y[i, labels[i]] = 1\n return y\n\n\ndef fCE(x, y, Wtilde, alpha):\n yhat = np.asarray(x).dot(Wtilde)\n yhat = softmax(yhat)\n sum = np.sum(y * np.log(yhat), axis = 0)\n unreg = np.mean(sum) * -1\n reg = alpha * 0.5 * np.mean(np.sum(Wtilde * Wtilde, axis = 0))\n return unreg + reg\n\ndef fPC(yhat, y):\n predictions = np.argmax(yhat, axis = 1) #produces a vector of the indeces of the maximum value in each row\n print(str(np.mean(np.equal(predictions, y)) * 100) + '% Correct')\n\nif __name__ == \"__main__\":\n # Load data\n trainingImages = np.load(\"fashion_mnist_train_images.npy\") / 255.0 # Normalizing by 255 helps accelerate training\n trainingLabels = np.load(\"fashion_mnist_train_labels.npy\")\n testingImages = np.load(\"fashion_mnist_test_images.npy\") / 255.0 # Normalizing by 255 helps accelerate training\n testingLabels = 
np.load(\"fashion_mnist_test_labels.npy\")\n\n # Append a constant 1 term to each example to correspond to the bias terms\n trainingImages = getDesignMatrix(trainingImages)\n # Change from 0-9 labels to \"one-hot\" binary vector labels. For instance, \n # if the label of some example is 3, then its y should be [ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0 ]\n trainingLabels = oneHotEncode(trainingLabels, 10)\n\n # Train the model\n Wtilde = softmaxRegression(trainingImages, trainingLabels, testingImages, testingLabels, epsilon=0.1, batchSize=100, alpha=.1)\n\n #c) computes PC accuracy on test set\n testingImages = getDesignMatrix(testingImages)\n yhatTest = np.asarray(testingImages).dot(Wtilde)\n yhatTest = softmax(yhatTest)\n \n print(\"\\n==================\\nfPC:\")\n fPC(yhatTest, testingLabels)\n\n # Visualize the vectors\n for Wvector in np.transpose(Wtilde[:-1]):\n vector = np.reshape(a = Wvector, newshape = (28, 28))\n plt.imshow(vector)\n plt.show()","repo_name":"HoneyJar-0/Machine-Learning-Homework","sub_path":"HW3/homework3_jtreiss1.py","file_name":"homework3_jtreiss1.py","file_ext":"py","file_size_in_byte":7435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2053667639","text":"def arcor2_setup_py(**kwargs):\n\n if \"license\" not in kwargs:\n kwargs[\"license\"] = \"LGPL\"\n\n if \"author\" not in kwargs:\n kwargs[\"author\"] = \"Robo@FIT\"\n\n if \"author_email\" not in kwargs:\n kwargs[\"author_email\"] = \"imaterna@fit.vut.cz\"\n\n if \"classifiers\" not in kwargs:\n kwargs[\"classifiers\"] = [\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development :: Build Tools\",\n \"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)\",\n \"Programming Language :: Python :: 3.8\",\n \"Natural Language :: English\",\n \"Topic :: Scientific/Engineering\"\n ]\n\n kwargs[\"python_requires\"] = \"==3.8.*\" # we support only Python 3.8\n\n return setup_py(**kwargs)\n\n\ndef arcor2_python_distribution(name: str, description: str, binaries=None, **kwargs):\n\n python_library(\n name=name,\n dependencies=[\n \":VERSION\"\n ]\n )\n\n resources(\n name=\"py.typed\",\n sources=[\"py.typed\"],\n )\n\n resources(\n name=\"VERSION\",\n sources=[\"VERSION\"],\n )\n\n if \"setup_py_commands\" not in kwargs:\n kwargs[\"setup_py_commands\"] = [\"sdist\", \"bdist_wheel\", \"--python-tag\", \"py38\"]\n\n kwargs[\"name\"] = f\"{name}_dist\"\n\n if \"dependencies\" not in kwargs:\n kwargs[\"dependencies\"] = []\n\n kwargs[\"dependencies\"].append(\":py.typed\")\n kwargs[\"dependencies\"].append(f\":{name}\")\n\n if binaries is None:\n binaries={}\n\n kwargs[\"provides\"] = arcor2_setup_py(\n name=name,\n description=description\n )\n\n if binaries:\n kwargs[\"entry_points\"] = {\"console_scripts\": binaries}\n\n return python_distribution(**kwargs)\n\n\ndef arcor2_pex_binary(**kwargs):\n\n if \"zip_safe\" not in kwargs:\n kwargs[\"zip_safe\"] = False\n\n if \"entry_point\" not in kwargs:\n kwargs[\"entry_point\"] = f\"{kwargs['name']}.py:main\"\n\n return pex_binary(**kwargs)","repo_name":"xBambusekD/arcor2","sub_path":"pants-plugins/macros.py","file_name":"macros.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"72554467289","text":"#===============================================================================\n# Multiplier 
Test\n#===============================================================================\n\nfrom pymtl3 import *\nfrom pymtl3.stdlib.test_utils import run_test_vector_sim\nfrom sim.multiplier import Multiplier\n\ndef test_positive_multiplication(cmdline_opts):\n multiplier = Multiplier(WIDTH=32)\n run_test_vector_sim(multiplier, [\n ('A B Result*'),\n (0x5, 0x2, 0xa),\n (0x7, 0x3, 0x15),\n (0xa, 0x0, 0x0),\n (0x1, 0xffffffff, 0xffffffff)\n ], cmdline_opts)\n\ndef test_zero_multiplication(cmdline_opts):\n multiplier = Multiplier(WIDTH=32)\n run_test_vector_sim(multiplier, [\n ('A B Result*'),\n (0x0, 0x2, 0x0),\n (0x0, 0x3, 0x0),\n (0x0, 0x0, 0x0)\n ], cmdline_opts)\n\ndef test_overflow(cmdline_opts):\n multiplier = Multiplier(WIDTH=32)\n run_test_vector_sim(multiplier, [\n ('A B Result*'),\n (0xffffffff, 0x2, 0xfffffffe), # Maximum value multiplied by 2 (overflow ignored)\n (0x80000000, 0x2, 0x00000000), # Maximum positive signed integer multiplied by 2 (overflow ignored)\n (0xffffffff, 0xffffffff, 0x1) # Maximum value squared (overflow ignored)\n ], cmdline_opts)\n","repo_name":"wrs225/Caravel-Vector-Coprocessor-AI","sub_path":"src/FunctionalUnits/sim/block_test/multiplier_test.py","file_name":"multiplier_test.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"43799272004","text":"from .admin_views import AddCourseChooseCourseInfoView, CourseDateInspectView\nfrom .filters import CoursesDateFilter, CoursesOrgaFilter\nfrom .helpers import AttendeeButtonHelper\nfrom .models import (\n Course, CourseInformationPage, ListOfCoursesPage, SskStudentAttendee,\n SskRubMemberAttendee, SskHospitalAttendee, SskExternalAttendee\n)\n\n\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext as _\nfrom django.utils.translation import ugettext_lazy as _l\n\nfrom rubionadmin.admin import HiddenModelAdmin\n\n\nfrom wagtail.contrib.modeladmin.options import (\n ModelAdmin, ModelAdminGroup, modeladmin_register\n)\n\nfrom wagtail.wagtailadmin.menu import MenuItem\nfrom wagtail.wagtailcore.models import PageViewRestriction\n\n\n\n\nclass CourseModelAdminMenuItem(MenuItem):\n # It is annoying to create a new class just for adding a (hardcoded!) 
parameter to the URL.\n # But it works.\n def __init__(self, model_admin, order):\n\n self.model_admin = model_admin\n url = model_admin.url_helper.index_url + '?status=upcoming'\n classnames = 'icon icon-%s' % model_admin.get_menu_icon()\n super(CourseModelAdminMenuItem, self).__init__(\n label=model_admin.get_menu_label(), url=url,\n classnames=classnames, order=order)\n\n def is_shown(self, request):\n return self.model_admin.permission_helper.user_can_list(request.user)\n\n\nclass ListOfCoursesPageMA( HiddenModelAdmin ):\n exclude_from_explorer = True\n model = ListOfCoursesPage\n\nmodeladmin_register(ListOfCoursesPageMA)\n\nclass CourseModelAdmin(ModelAdmin):\n model = Course\n menu_label = _l('Course dates')\n menu_icon = ' icon-fa-graduation-cap' \n menu_order = 200 \n add_to_settings_menu = False \n exclude_from_explorer = True\n list_display = ('course_info', 'start', 'end', 'attendees', 'free_slots', 'data_sharing')\n list_filter = (CoursesDateFilter, CoursesOrgaFilter, )\n inspect_view_enabled = True\n inspect_view_class = CourseDateInspectView\n \n choose_parent_view_class = AddCourseChooseCourseInfoView\n choose_parent_template_name = 'courses/admin/choose_course.html'\n \n def course_info( self, obj ):\n return obj.get_parent().specific.title_trans\n course_info.short_description = _l('Course')\n\n def attendees( self, obj ):\n attendees = []\n for at, num in obj.registered_attendees_stats.items():\n attendees.append(\"{}: {}\".format(at, num) )\n return mark_safe(\"
\".join( attendees ))\n \n attendees.short_description = _l('Attendees')\n \n def free_slots( self, obj ):\n pass\n \n def get_menu_item(self, order=None):\n return CourseModelAdminMenuItem(self, order or self.get_menu_order()) \n\n def data_sharing(self, obj):\n dsp = obj.get_data_sharing_page()\n if dsp:\n pvr = PageViewRestriction.objects.filter(page = dsp).first()\n if pvr:\n return pvr.password\n else:\n return None\n else:\n return mark_safe('{}'.format(_l('No data sharing')))\n \n \n\nclass CourseInformationPageModelAdmin( ModelAdmin ):\n model = CourseInformationPage\n menu_label = _l('courses')\n menu_icon = 'fa-calendar'\n exclude_from_explorer = True\n list_display = ('_title',)\n\n \n def _title( self, obj):\n return obj.title_trans\n _title.short_description = _('Title')\n\nclass SskStudentAttendeeMA( ModelAdmin ):\n model = SskStudentAttendee\n button_helper_class = AttendeeButtonHelper\n \nclass SskExternalAttendeeMA( ModelAdmin ):\n model = SskExternalAttendee\n button_helper_class = AttendeeButtonHelper\n\nclass SskHospitalAttendeeMA( ModelAdmin ):\n model = SskHospitalAttendee\n button_helper_class = AttendeeButtonHelper\n\nclass SskRubMemberAttendeeMA( ModelAdmin ):\n model = SskRubMemberAttendee\n button_helper_class = AttendeeButtonHelper\n\n\nclass CoursesModelAttendeesGroup ( ModelAdminGroup ):\n menu_label = _l('Attendees')\n menu_order = 40\n menu_icon = 'fa-graduation-cap' \n items = (SskStudentAttendeeMA, ) \n\n\nclass CoursesModelAdminGroup ( ModelAdminGroup ):\n menu_label = _l('Teaching')\n menu_order = 40\n menu_icon = 'fa-graduation-cap' \n items = (\n CourseInformationPageModelAdmin, CourseModelAdmin, \n SskStudentAttendeeMA, SskExternalAttendeeMA,\n SskHospitalAttendeeMA, SskRubMemberAttendeeMA\n )\n\n\n# Now you just need to register your customised ModelAdmin class with Wagtail\nmodeladmin_register(CoursesModelAdminGroup)\n\n","repo_name":"varvarafo/website","sub_path":"courses/wagtail_hooks.py","file_name":"wagtail_hooks.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29191525588","text":"# 1. Do level order traversal to find the element to be deleted\n# 2. Replace to be deleted node with deepest right most node\n# 3. 
Delete deepest right most node\n\n# This is different from BST deletion\n\n\nclass Node:\n def __init__(self, value):\n self.right = None\n self.left = None\n self.value = value\n\n\ndef inorder(temp):\n if temp is None:\n return\n inorder(temp.left)\n print(temp.value, end=\" \")\n inorder(temp.right)\n\n\ndef delete_deepest(temp, d_node):\n queue = [temp]\n while len(queue) != 0:\n x = queue[0]\n queue.pop(0)\n if x.right is not None:\n if x.right is d_node:\n x.right = None\n return\n else:\n queue.append(temp.right)\n if x.left is not None:\n if x.left is d_node:\n x.left = None\n else:\n queue.append(temp.left)\n\n\ndef deletion(root, key):\n if root is None:\n return\n if root.left is None and root.right is None:\n if root.value == key:\n return None\n else:\n return root\n\n queue = [root]\n key_node = None\n temp = None\n while len(queue) != 0:\n temp = queue[0]\n queue.pop(0)\n if temp.value == key:\n key_node = temp\n if temp.left is not None:\n queue.append(temp.left)\n if temp.right is not None:\n queue.append(temp.right)\n if key_node:\n x = temp.value\n delete_deepest(root, temp)\n key_node.value = temp.value\n return root\n\n\nif __name__ == \"__main__\":\n root = Node(10)\n root.left = Node(11)\n root.left.left = Node(7)\n root.left.right = Node(12)\n root.right = Node(9)\n root.right.left = Node(15)\n root.right.right = Node(8)\n\n inorder(root)\n print(\"\\n\")\n root = deletion(root, 12)\n inorder(root)\n","repo_name":"cspandit/Python-DS-and-Algo","sub_path":"Binary Tree/problem/deletion_tree.py","file_name":"deletion_tree.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23540862","text":"import os\nimport shutil\n\nif __name__ == '__main__':\n prev_path = '/media/data2/eric/voxceleb2/eyes'\n new_path = '/media/data2/eric/ssldf-voxceleb2'\n\n left_eyes_path = os.path.join(new_path, 'left-eye-frames')\n right_eyes_path = os.path.join(new_path, 'right-eye-frames')\n if not os.path.exists(left_eyes_path):\n os.makedirs(left_eyes_path)\n if not os.path.exists(right_eyes_path):\n os.makedirs(right_eyes_path)\n\n people = os.listdir(prev_path)\n for person in people:\n events = os.path.join(prev_path, person)\n for event in os.listdir(events):\n videos = os.path.join(prev_path, person, event)\n for video in videos:\n images = os.path.join(prev_path, person, event, video)\n new_video_name = \"_\".join((person, event, video))\n left_eye_video_path = os.path.join(left_eyes_path, new_video_name)\n right_eye_video_path = os.path.join(right_eyes_path, new_video_name)\n if not os.path.exists(left_eye_video_path):\n os.makedirs(left_eye_video_path)\n if not os.path.exists(right_eye_video_path):\n os.makedirs(right_eye_video_path)\n for image in images:\n image_file = os.path.join(prev_path, person, event, video, image)\n if image[:9] == 'left_eye_':\n left_eye_image_file = os.path.join(left_eye_video_path, image[9:])\n shutil.copyfile(image_file, left_eye_image_file)\n elif image[:10] == 'right_eye_':\n right_eye_image_file = os.path.join(right_eye_video_path, image[10:])\n shutil.copyfile(image_file, right_eye_image_file)\n else:\n print('There is some critical error')","repo_name":"erkinboy-botirov/ssl-df","sub_path":"scripts/split_cropped_eyes.py","file_name":"split_cropped_eyes.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10424006227","text":"#!/usr/bin/env python\n# 
-*- coding: utf-8 -*-\n\nimport lib.vector as Vector\n\n\ndef nbSommets(G):\n \"\"\"\n Retourne le nombre de sommets du graphe.\n\n :param G: List - Representation du graphe par liste d'adjacence\n\n :return: Integer\n \"\"\"\n return len(G) - 1\n\n\ndef nbArcs(G):\n \"\"\"\n Retourne le nombre d'arcs du graphe.\n\n :param G: List - Representation du graphe par liste d'adjacence\n\n :return: Integer\n \"\"\"\n return len(list(sum(G, [])))\n\ndef ajoutArc(G, i, j):\n \"\"\"\n Ajoute un arc au graphe.\n\n :param G: List - Representation du graphe par liste d'adjacence\n :param i: Integer - Premier sommet\n :param j: Integer - Second sommet\n \"\"\"\n G[i].append(j)\n\n\ndef enleveArc(G, i, j):\n \"\"\"\n Enleve une arete au graphe.\n\n :param G: List - Representation du graphe par liste d'adjacence\n :param i: Integer - Premier sommet\n :param j: Integer - Second sommet\n \"\"\"\n if j in G[i]:\n G[i].remove(j)\n\n\ndef degS(G, i):\n \"\"\"\n Calculte le degre sortant d'un sommet.\n\n :param G: List - Representation du graphe par liste d'adjacence\n :param i: Integer - Sommet\n\n :return: Integer\n \"\"\"\n return len(G[i])\n\n\ndef degreS(G):\n \"\"\"\n Calcule le degre sortant de chacun des sommets.\n\n :param G: List - Representation du graphe par liste d'adjacence\n\n :return: List - Degre sortant du sommet x à la position x\n \"\"\"\n return [degS(G, i) for i in range(1, len(G))]\n\n\ndef degE(G, i):\n \"\"\"\n Calculte le degre entrant d'un sommet.\n\n :param G: List - Representation du graphe par liste d'adjacence\n :param i: Integer - Sommet\n\n :return: Integer\n \"\"\"\n return len(voisinageE(G, i))\n\n\ndef degreE(G):\n \"\"\"\n Calcule le degre entrant de chacun des sommets.\n\n :param G: List - Representation du graphe par liste d'adjacence\n\n :return: List - Degre entrant du sommet x à la position x\n \"\"\"\n return [degE(G, i) for i in range(1, len(G))]\n\ndef voisinageE(G, i):\n \"\"\"\n Calcule le voisinage entrant d'un sommet.\n\n :param G: List - Representation du graphe par liste d'adjacence\n :param i: Integer - Sommet\n\n :return:\n \"\"\"\n return [j for j in range(1, len(G)) if i in G[j]]\n\ndef listeToMatrice(G):\n \"\"\"\n Generation de la matrice d'adjacence.\n\n :param G: List - Representation du graphe par liste d'adjacence\n\n :return: List(List)\n \"\"\"\n size = len(G)\n\n M = Vector.initMat(size - 1, 0)\n for i in range(1, size):\n for j in G[i]:\n M[i - 1][j - 1] += 1\n\n return M\n\n\ndef arcsToListe(n, L):\n \"\"\"\n Generation du vecteur des listes d'adjacences.\n\n :param n: Integer - Nombre de sommets\n :param L: List - Liste d'arcs\n\n :return: List(List)\n \"\"\"\n G = Vector.initVectList(n + 1)\n for arc in L:\n G[arc[0]].append(arc[1])\n\n return G\n\n\ndef matToListe(M):\n \"\"\"\n Generation du vecteur des listes d'adjacences.\n\n :param M: List(List) - Matrice d'adjacence\n\n :return: List(List)\n \"\"\"\n size = len(M)\n\n G = Vector.initVectList(size + 1)\n for i in range(size):\n for j in range(size):\n for x in range(M[i][j]):\n G[i + 1].append(j + 1)\n\n return G\n\n\nif __name__ == \"__main__\":\n L = [[1, 5], [2, 1], [2, 4], [3, 2], [4, 3], [5, 2], [5, 4]]\n G = [[], [5], [1, 4], [2], [3], [2, 4]]\n M = [[0, 0, 0, 0, 1],\n [1, 0, 0, 1, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 1, 0, 1, 0]]\n\n print(\"Sommets :\", nbSommets(G))\n print(\"Arcs :\", nbArcs(G))\n print()\n ajoutArc(G, 1, 3)\n print(\"Ajout arc (1,3) :\", G)\n enleveArc(G, 1, 3)\n print(\"Enleve arc (1,3) :\", G)\n print()\n print(\"Degre sortant du sommet 5 :\", 
degS(G, 5))\n print(\"Vecteur des degres sortant :\", degreS(G))\n print()\n print(\"Degre entrant du sommet 5 :\", degE(G, 5))\n print(\"Vecteur des degres entrant :\", degreE(G))\n print()\n print(\"Voisinage entrant du sommet 3 :\", voisinageE(G, 3))\n print()\n print(\"Liste -> Matrice :\", listeToMatrice(G))\n print(\"Arcs -> Liste :\", arcsToListe(5, L))\n print(\"Matrice -> Liste :\", matToListe(M))\n","repo_name":"KuroBayashi/School-University","sub_path":"Python/License 3/Graphe/src/TP1/oriente.py","file_name":"oriente.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18089970827","text":"\"\"\" Patching Products.PluggableAuthService not to stuck when listing local\n groups and local roles in ZMI\n\"\"\"\n\nfrom Products.PageTemplates.PageTemplateFile import PageTemplateFile\n\n\npatched_manage_groups = PageTemplateFile('www/zgGroups'\n , globals()\n , __name__='manage_groups'\n )\n\npatched_manage_roles = PageTemplateFile('www/zrRoles'\n , globals()\n , __name__='manage_roles'\n )\n","repo_name":"eea/Products.EEAPloneAdmin","sub_path":"Products/EEAPloneAdmin/patches/patch_pluggableauthservice.py","file_name":"patch_pluggableauthservice.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"5331029138","text":"'''\n @name: ros_env_raw_scan_prep_wo.py\n @brief: This class is a simulation environment wrapper for\n the Polar Representation.\n @author: Ronja Gueldenring\n @version: 3.5\n @date: 2019/04/05\n'''\n\n# python relevant\nimport numpy as np\nimport math\n# ros-relevant\nimport rospy\n# custom classes\nfrom rl_agent.env_wrapper.ros_env import RosEnvAbs\nfrom sensor_msgs.msg import LaserScan\n\nclass RosEnvRawScanPrepWp(RosEnvAbs):\n '''\n This class is a simulation environment wrapper for\n the Polar Representation.\n '''\n def __init__(self, ns, state_collector, execution_mode, task_mode, state_size, observation_space, stack_offset, action_size, action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc):\n state_collector.set_state_mode(2)\n super(RosEnvRawScanPrepWp, self).__init__(ns, state_collector, execution_mode, task_mode, state_size, observation_space, stack_offset, action_size, action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc)\n self.__res = rospy.get_param(\"%s/rl_agent/resolution\"%ns)\n\n def get_observation_(self):\n \"\"\"\n Function returns state that will be fed to the rl-agent\n It includes\n the raw laser scan data,\n the waypoint data in with the same format as the laser scan data.\n The distance of the waypoint is saved\n at the appropriate angle position in the vector.\n :return: state\n \"\"\"\n waypoint = self.wp_\n num_of_wps = len(waypoint.points)\n\n state = np.ones(self.STATE_SIZE, dtype=np.float)\n\n # add laserscan\n state[ :, 0, 0] = self.merged_scan_.ranges\n\n # generate wp-vector\n wp_vector = np.zeros(self.STATE_SIZE[0])\n for i in range(num_of_wps):\n dist = math.sqrt(math.pow(waypoint.points[i].x, 2) + math.pow(waypoint.points[i].y, 2))\n angle = math.atan2(waypoint.points[i].y, waypoint.points[i].x) + math.pi\n wp_vector[math.floor(angle/self.merged_scan_.angle_increment)] = dist\n state[:,1,0] = wp_vector\n\n # Discretize to a resolution of 5cm.\n state = np.round(np.divide(state, self.__res))*self.__res\n if self.debug_:\n debug_scan = LaserScan()\n # debug_scan.header.frame_id = 
self.merged_scan_.header.frame_id\n debug_scan.header = self.merged_scan_.header\n debug_scan.angle_min = self.merged_scan_.angle_min\n debug_scan.angle_max = self.merged_scan_.angle_max\n debug_scan.angle_increment = self.merged_scan_.angle_increment\n debug_scan.range_max = 7.0\n debug_scan.ranges = state[:, 0, 0]\n self.debugger_.show_scan_stack(debug_scan)\n return state\n","repo_name":"RGring/drl_local_planner_ros_stable_baselines","sub_path":"rl_agent/src/rl_agent/env_wrapper/ros_env_raw_scan_prep_wp.py","file_name":"ros_env_raw_scan_prep_wp.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"31"} +{"seq_id":"74832374167","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"sqlalchemy-mutablesoup\",\n version=\"0.0.9\",\n author=\"Dillon Bowen\",\n author_email=\"dsbowen@wharton.upenn.edu\",\n description=\"Mutable BeautifulSoup database type\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://dsbowen.github.io/sqlalchemy-mutablesoup\",\n packages=setuptools.find_packages(),\n install_requires=[\n 'bs4>=0.0.1',\n 'flask>=1.1.1',\n 'sqlalchemy>=1.3.12',\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)","repo_name":"dsbowen/sqlalchemy-mutablesoup","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26693723491","text":"import requests\nfrom datetime import datetime\n\nUSERNAME = \"\"\nTOKEN = \"\"\nGRAPH_ID = \"\"\n\n\npixela_endpoint = \"https://pixe.la/v1/users\"\npixela_parameters = {\n \"token\": \"\",\n \"username\": \"\",\n \"agreeTermsOfService\": \"yes\",\n \"notMinor\": \"yes\"\n\n}\n\n# response = requests.post(url=pixela_endpoint, json=pixela_parameters)\n# print(response.text)\n\ngraph_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs\"\n\ngraph_config = {\n \"id\": \"graph1\",\n \"name\": \"Personal_Growth_Graph\",\n \"unit\": \"Km\",\n \"type\": \"float\",\n \"color\": \"shibafu\"\n}\nheaders = {\n \"X-USER-TOKEN\": TOKEN\n}\n\n\ntoday = datetime.now()\n# today = datetime(year=2020, month=2, day=15)\nprint(today.strftime(\"%Y%m%d\"))\n#response = requests.post(url=graph_endpoint, json=graph_config, headers=headers)\n#print(response.text)\n\n\nadd_pixel_for_today = f\"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}\"\nadd_pixel_for_today_config = {\n \"date\": today.strftime(\"%Y%m%d\"),\n \"quantity\": str(input(\"How many days? 
\")),\n}\n\n# edit_graph = f\"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}\"\n# edit_graph_config = {\n# \"unit\": \"days\"\n# }\n\n# response = requests.put(url=edit_graph, json=edit_graph_config, headers=headers)\n# print(response.text)\n\nresponse = requests.post(url=add_pixel_for_today, json=add_pixel_for_today_config, headers=headers)\nprint(response.text)\n\nedit_pixel = f\"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}/{today.strftime('%Y%m%d')}\"\nedit_pixel_config = {\n \"quantity\": \"45\"\n}\n\n# response = requests.put(url=edit_pixel, json=edit_pixel_config, headers=headers)\n# print(response.text)\n\ndelete_pixel = f\"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}/{today.strftime('%Y%m%d')}\"\n\n# response = requests.delete(url=delete_pixel, headers=headers)\n# print(response.text)\n","repo_name":"nthnelliott857/100DaysOfCodeV2","sub_path":"Day37/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24036336958","text":"import os, sys, tempfile, tarfile, shutil, subprocess\n\n# Params\n\ndeb_dir = sys.argv[1] # Where the deb config files are\nfiles_dir = sys.argv[2] # Where the syntensity build is (output of package_for_release.py)\n\n# Prepare\n\ntemp_dir = tempfile.mkdtemp()\n\nos.makedirs(os.path.join(temp_dir, 'DEBIAN'))\nos.makedirs(os.path.join(temp_dir, 'usr', 'bin'))\nos.makedirs(os.path.join(temp_dir, 'usr', 'share', 'applications'))\nos.makedirs(os.path.join(temp_dir, 'usr', 'share', 'pixmaps'))\n\n# Create control\n\nshutil.copyfile(os.path.join(deb_dir, 'control', 'control'), os.path.join(temp_dir, 'DEBIAN', 'control'))\nshutil.copyfile(os.path.join(deb_dir, 'control', 'postinst'), os.path.join(temp_dir, 'DEBIAN', 'postinst'))\nsubprocess.call(['chmod 0555 %s' % os.path.join(temp_dir, 'DEBIAN', 'postinst')], shell=True)\n\n# Create data\n\nfiles = [ # general files\n ['usr', 'bin', 'syntensity'],\n ['usr', 'share', 'applications', 'syntensity.desktop'],\n ['usr', 'share', 'pixmaps', 'syntensity.png'],\n]\nfor _file in files:\n joined = os.path.join(*_file)\n shutil.copyfile(os.path.join(deb_dir, 'data', joined), os.path.join(temp_dir, joined))\n\ndef add_files(location):\n full = os.path.join(files_dir, location)\n for _file in os.listdir(full): # build files\n if _file[-1] == '~': continue\n joined = os.path.join(location, _file)\n full_joined = os.path.join(full, _file)\n if os.path.isdir(full_joined):\n add_files(joined)\n else:\n dest = os.path.join(temp_dir, 'usr', 'share', 'games', 'syntensity', joined)\n if not os.path.exists(os.path.dirname(dest)):\n os.makedirs(os.path.dirname(dest))\n shutil.copyfile(full_joined, dest)\nadd_files('')\n\n# Combine into deb\n\ncommand = 'dpkg -b %s new_deb.deb' % temp_dir\nsubprocess.call([command], shell=True)\n\n# Clean up\n\nshutil.rmtree(temp_dir)\n\n","repo_name":"kripken/intensityengine","sub_path":"tools/make_deb.py","file_name":"make_deb.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"31"} +{"seq_id":"34334464050","text":"import logging\nfrom datetime import datetime\n\nfrom src.data.complaint_data import ComplaintDocument\nfrom src.data.review_data import ReviewDocument\n\n\ndef delete_all_and_seed_database():\n complaints = [\n ComplaintDocument(\n mobile=666666003, barcode=\"8400000000017\", description=\"123456\", registration_date=datetime.now(),\n opened=True),\n ComplaintDocument(\n 
mobile=666666003, barcode=\"8400000000024\", description=\"123456\", registration_date=datetime.now(),\n opened=True),\n ComplaintDocument(\n mobile=666666003, barcode=\"8400000000031\", description=\"123456\", registration_date=datetime.now(),\n opened=True),\n ComplaintDocument(\n mobile=666666004, barcode=\"8400000000024\", description=\"123456\", registration_date=datetime.now(),\n opened=True),\n ComplaintDocument(\n mobile=666666005, barcode=\"8400000000048\", description=\"123456\", registration_date=datetime.now(),\n opened=True),\n ComplaintDocument(\n mobile=66, barcode=\"8400000000048\", description=\"123456\", registration_date=datetime.now(),\n opened=True),\n ]\n logging.info(\"Delete all and seed database... ++Complaint\")\n ComplaintDocument.drop_collection()\n ComplaintDocument.objects.insert(complaints)\n\n reviews = [\n ReviewDocument(mobile=66, barcode=\"8400000000017\", score=2.5, opinion=\"Is ok but not that much\"),\n ReviewDocument(mobile=66, barcode=\"8400000000024\", score=5, opinion=\"Best product\"),\n ReviewDocument(mobile=66, barcode=\"8400000000031\", score=0.5, opinion=\"Really bad\"),\n ReviewDocument(mobile=666666003, barcode=\"8400000000017\", score=4, opinion=\"I like the product\"),\n ReviewDocument(mobile=666666003, barcode=\"8400000000031\", score=1.5, opinion=\"Bad, really bad\"),\n ReviewDocument(mobile=666666003, barcode=\"8400000000048\", score=5, opinion=\"Best product ever\"),\n ReviewDocument(mobile=666666004, barcode=\"8400000000017\", score=3.5, opinion=\"Is ok but I don't like it\"),\n ReviewDocument(mobile=666666004, barcode=\"8400000000031\", score=2, opinion=\"Not what I expected\"),\n ReviewDocument(mobile=666666004, barcode=\"8400000000024\", score=5, opinion=\"I enjoyed it a lot!\"),\n ReviewDocument(mobile=666666005, barcode=\"8400000000048\", score=4.5,\n opinion=\"Just few things to make it perfect\"),\n ReviewDocument(mobile=666666005, barcode=\"8400000000017\", score=3,\n opinion=\"Fits what I expected but bad quality\"),\n ReviewDocument(mobile=666666005, barcode=\"8400000000031\", score=0.5, opinion=\"I don't recommend buying it\"),\n ReviewDocument(mobile=666666005, barcode=\"8400000000017\", score=4, opinion=\"Really good\"),\n ]\n logging.info(\"Delete all and seed database... 
++Review\")\n ReviewDocument.drop_collection()\n ReviewDocument.objects.insert(reviews)\n","repo_name":"miw-upm/betca-tpv-customer-support","sub_path":"src/data/seeder_dev.py","file_name":"seeder_dev.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"32506756909","text":"import gym\nfrom environments.FixedCartPoleEnv import FixedCartPoleEnv\nfrom environments.RoverEnv import RoverEnv\nfrom environments.AtariEnv import AtariEnv\nfrom models.AgentSafeDQN import AgentSafeDQN\nfrom models.AgentDQN import AgentDQN\nfrom models.AgentIterativeSafetyGraph import AgentIterativeSafetyGraph\nfrom experiment.nn_config import *\nfrom matplotlib import pyplot as plt\nfrom typing import Dict, Tuple, List\nimport time\n\n\nMAX_EPISODE = 200\nVISUALISATION = False\nPLOT_INTERVAL = 20\nARTIFICIAL_DELAY = -0.1\nplot_values: Dict[str, Dict[int, Tuple[list, gym.Env, float]]] = {} # values, env and accuracy (tuple) of each episode (second dict) of each experiment (first dict).\nepisode_info: Dict[str, list] = {}\n\ndef experiment_rover_base(predefined_actions = None):\n env = RoverEnv(seed=100)\n i_dim, o_dim, DQN_nn = SimplifiedCartPole_SafetyMonitor_NN(2,4)\n _, _, MON_nn = SimplifiedCartPole_SafetyMonitor_NN(2,4)\n agent = AgentSafeDQN(i_dim, o_dim, DQN_nn, MON_nn)\n actions, rewards = run_experiment(\"base\", predefined_actions, agent, env)\n return actions\n\ndef experiment_rover_vanilla(predefined_actions = None):\n env = RoverEnv(seed=100)\n i_dim, o_dim, DQN_nn = SimplifiedCartPole_SafetyMonitor_NN(2,4)\n agent = AgentDQN(i_dim, o_dim, DQN_nn)\n actions, rewards = run_experiment(\"vanilla\", predefined_actions, agent, env)\n return actions\n\ndef experiment_rover_refined(predefined_actions = None):\n env = RoverEnv(seed=100)\n i_dim, o_dim, DQN_nn = SimplifiedCartPole_SafetyMonitor_NN(2,4)\n agent = AgentIterativeSafetyGraph(i_dim, o_dim, DQN_nn, (10, 10))\n actions, rewards = run_experiment(\"refined\", predefined_actions, agent, env)\n return actions\n\ndef experiment_rover_refined_no_feedback(predefined_actions = None):\n env = RoverEnv(seed=100)\n i_dim, o_dim, DQN_nn = SimplifiedCartPole_SafetyMonitor_NN(2,4)\n agent = AgentIterativeSafetyGraph(i_dim, o_dim, DQN_nn, (10, 10), feedback=False)\n actions, rewards = run_experiment(\"no-feedback\", predefined_actions, agent, env)\n return actions\n\ndef experiment_pole_refined(predefined_actions = None):\n env = FixedCartPoleEnv(seed=100)\n i_dim, o_dim, DQN_nn = SimplifiedCartPole_SafetyMonitor_NN(2,2)\n agent = AgentIterativeSafetyGraph(i_dim, o_dim, DQN_nn, (16, 16))\n actions, rewards = run_experiment(\"refined\", predefined_actions, agent, env)\n return actions\n\ndef experiment_pole_base(predefined_actions = None):\n env = FixedCartPoleEnv(seed=100)\n i_dim, o_dim, DQN_nn = SimplifiedCartPole_SafetyMonitor_NN(2,2)\n _, _, MON_nn = SimplifiedCartPole_SafetyMonitor_NN(2,2)\n agent = AgentSafeDQN(i_dim, o_dim, DQN_nn, MON_nn)\n actions, rewards = run_experiment(\"base\", predefined_actions, agent, env)\n return actions\n\ndef experiment_pole_vanilla(predefined_actions = None):\n env = FixedCartPoleEnv(seed=100)\n i_dim, o_dim, DQN_nn = SimplifiedCartPole_SafetyMonitor_NN(2,2)\n agent = AgentDQN(i_dim, o_dim, DQN_nn)\n actions, rewards = run_experiment(\"vanilla\", predefined_actions, agent, env)\n return actions\n\ndef experiment_refined_experiences_atari(predefined_actions = None):\n env = AtariEnv(seed=100)\n i_dim, o_dim, DQN_nn = 
SimplifiedCartPole_SafetyMonitor_NN(2,3)\n agent = AgentIterativeSafetyGraph(i_dim, o_dim, DQN_nn, (20, 20))\n actions, rewards = run_experiment(\"refined\", predefined_actions, agent, env)\n return actions\n\ndef run_experiment(experiment_name, predefined_actions, agent, env):\n print(f\"Running Experiment {experiment_name}\") \n action_index = 0\n actions = []\n rewards = []\n for i in range(0, MAX_EPISODE):\n state = env.reset()\n episode_reward = 0\n\n e = 0\n maxE = 100\n while True:\n e += 1\n action = None\n if predefined_actions != None and action_index < len(predefined_actions):\n action = predefined_actions[action_index]\n else:\n action = agent.get_action(state)\n actions.append(action)\n action_index += 1\n next_state, reward, violation, _ = env.step(action)\n experience = [state, action, reward, next_state, violation]\n agent.add_experience(experience)\n episode_reward += reward\n state = next_state\n if ARTIFICIAL_DELAY >= 0: time.sleep(ARTIFICIAL_DELAY)\n if VISUALISATION: env.render()\n if violation: \n agent.train()\n env.reset()\n break\n if e >= maxE:\n env.reset()\n break\n\n if experiment_name not in episode_info: episode_info[experiment_name] = {}\n episode_info[experiment_name][i] = e\n\n if i % PLOT_INTERVAL == 0: \n record(experiment_name, i, agent, env)\n #if hasattr(agent, \"safety_graph\"):\n #agent.safety_graph.visualize()\n #plot(only_updates=True, only_accuracy=False)\n\n rewards.append(episode_reward) \n print(\"Episode {0}/{1} -- reward {2}\".format(i+1, MAX_EPISODE, episode_reward)) \n return actions, rewards\n\ndef record(experiment, episode, agent, env):\n reso = (15, 15)\n c = agent.output_dim\n values = agent.dqn.get_snapshot(reso)\n accuracy = env.test_agent_accuracy(agent)\n if experiment not in plot_values: plot_values[experiment] = {}\n\n rvs = []\n for _ in range(c):\n rvs.append([[0]*reso[0] for _ in range(reso[1])])\n\n for y in range(reso[1]):\n for x in range(reso[0]):\n for i in range(c):\n rvs[i][reso[1]-y-1][x] = values[reso[1]-y-1][x][i]\n\n for i in range(c):\n rvs[i] = np.interp(rvs[i], [np.min(rvs[i]), np.max(rvs[i])], [-1, +1])\n \n\n plot_values[experiment][episode] = (rvs, env, accuracy)\n\ndef plot_output(values, accuracy, only_accuracy=False):\n if only_accuracy:\n print(f\"accuracy: {accuracy}\")\n return\n\n print(f\"accuracy: {accuracy}\")\n plt.imshow(values, cmap='hot', interpolation='bicubic')\n plt.legend()\n plt.colorbar()\n plt.show()\n\ndef plot_output_4(experiment_name, values, accuracy, only_accuracy, env, cmap='hot', interpolation='bicubic'):\n if only_accuracy:\n print(f\"accuracy: {accuracy}\")\n return\n\n print(f\"accuracy: {accuracy}\")\n _, ax = plt.subplots(len(env.action_names))\n\n for i, name in enumerate(env.action_names):\n ax[i].imshow(values[i], cmap='hot')\n ax[i].set_title(name)\n\n plt.show() \n\ndef plot_episode_info():\n for experiment in episode_info.keys():\n plt.plot(episode_info[experiment].keys(), episode_info[experiment].values(), label = experiment, linestyle=\"-.\")\n plt.legend()\n plt.show()\n\ndef plot_episode_info_accuracy():\n for experiment in episode_info.keys():\n accuracies = []\n ap = {}\n for c in episode_info[experiment].values():\n accuracies.append(c/100)\n for i in range(0, len(accuracies), PLOT_INTERVAL):\n l = accuracies[i:i+PLOT_INTERVAL]\n a = sum(l)/len(l)\n ap[i] = a\n\n plt.plot(ap.keys(), ap.values(), label = experiment, linestyle=\"-.\")\n plt.legend()\n plt.show()\n\n\ndef plot(only_updates=False, only_accuracy=False, only_comparision=False): \n if only_updates:\n 
last_experiment = list(plot_values.values())[-1]\n        values, env, accuracy = list(last_experiment.values())[-1]\n        plot_output_4(list(plot_values.keys())[-1], values, accuracy, only_accuracy, env)\n    else:\n        episodes = {}\n        accuracies = {}\n        for experiment in plot_values.keys():\n            episodes[experiment] = []\n            accuracies[experiment] = []\n            for episode, (values, env, accuracy) in plot_values[experiment].items():\n                episodes[experiment].append(episode)\n                accuracies[experiment].append(accuracy)\n                if not only_comparision: plot_output_4(experiment, values, accuracy, only_accuracy, env)\n        for experiment in plot_values.keys():\n            plt.plot(episodes[experiment], accuracies[experiment], label = experiment, linestyle=\"-.\")\n        plt.legend()\n        plt.show()\n\nif __name__ == \"__main__\":\n\n    experiment_rover_refined()\n    experiment_rover_refined_no_feedback()\n\n    # experiment_pole_refined()\n    # experiment_pole_base()\n    # experiment_pole_vanilla()\n\n    plot(only_comparision=True)\n    plot_episode_info()\n    plot_episode_info_accuracy()","repo_name":"alizandian/safe-split-dqn","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8460,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"27991317529","text":"dna = 'ATGGCCTTT'\nw = 0\nlist1= 'ACDEFGHIKLMNPQRSTVWY'\nlist2 = 'ACDEFGHIKLMNPQRSTVWY'\ncount = 0\nfor i in range(len(list1)):\n    for j in range(len(list2)):\n        if list1[i] != list2[j]:  # count each unordered pair of distinct amino acids once\n            print(list1[i],list2[j])\n            count += 1\n    list2= list2[1:]\nprint(count)\n","repo_name":"gglez660/homework","sub_path":"28aapair.py","file_name":"28aapair.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"929836920","text":"import os\n\ndef findAllFile(base): # walk the directory tree and yield every file name\n    for root, ds, fs in os.walk(base):\n        for f in fs:\n            yield f\n\ndef subcrypt(test,ka,kb):\n    key = \"\"\n    for j in range(0, len(test)):\n        o = test[j]\n        if (o.isupper()):\n            key += chr(ord(\"A\") + ((ord(test[j]) - ord(\"A\")) * ka + kb) % 26)\n        elif (o.islower()):\n            key += chr(ord(\"a\") + ((ord(test[j]) - ord(\"a\")) * ka + kb) % 26)\n        else:\n            key += o\n\n    return key\n\nif __name__ == '__main__':\n    base = './files/'\n    fileslist = []\n    num = 0\n    for i in findAllFile(base):\n        fileslist.append(i)\n        num += 1\n\n    key = str(input(\"Please enter the encryption key (ka,kb): \")).split(\",\")\n    ka = int(key[0])\n    kb = int(key[1])\n\n    for filename in fileslist:\n        # read the file contents\n        print(\"Encrypting: \" + filename)\n        f_crypt = \"\"\n        with open(\"./files/\" + filename, encoding=\"utf8\", errors='ignore') as f:\n            f_str = f.read()\n\n        crypt_file = open('files/' + 'crypt_' + filename, 'w')\n        crypt_file.write(subcrypt(f_str, ka, kb))\n\n        crypt_file.close()\n\n","repo_name":"element119th/XDUCS-ASG","sub_path":"网络方向/密码学实验/Affine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"1057765252","text":"# NOTE: rewritten sketch against kraken's classic API (binarization/pageseg/rpred);\n# exact entry points may differ between kraken versions.\nfrom PIL import Image\nfrom kraken import binarization, pageseg, rpred\nfrom kraken.lib import models\n\n# Load the trained model\nmodel_path = 'model.mlmodel'\nmodel = models.load_any(model_path)\n\n# Read the image file\nimage_path = 'Images/h.jpg'\nimage = Image.open(image_path)\n\n# Recognize the text in the image: binarize, segment into lines, then predict\nbw = binarization.nlbin(image)\nseg = pageseg.segment(bw)\ntext = '\\n'.join(record.prediction for record in rpred.rpred(model, bw, seg))\n\n# Print the OCR 
output\nprint(text)","repo_name":"MhamedSanaa/pfa-ai","sub_path":"testKraken.py","file_name":"testKraken.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36294354840","text":"#!C:\\python38\\python\n\n\nimport pyodbc\n\nconn = pyodbc.connect('DRIVER={sql server};' 'SERVER=DESKTOP-1BOUP1O;' 'DATABASE=comIT;' 'UID=sa;' 'PWD=jiaxl51238')\ncursor = conn.cursor()\ncursor.execute('SELECT * FROM persons')\nfor row in cursor:\n print(row)\n\nconn.close()\n","repo_name":"xiaolongjia/techTrees","sub_path":"Python/06_DB/pyodbcTest.py","file_name":"pyodbcTest.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25010934634","text":"import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream, RTMPStream\n\nRUURL = \"b=chrome&p=win&v=56&f=0&d=1\"\n\n_url_re = re.compile(r\"https?://www.rtvs.sk/televizia/live-[\\w-]+\")\n_playlist_url_re = re.compile(r'\"playlist\": \"([^\"]+)\"')\n\n_playlist_schema = validate.Schema(\n [\n {\n \"sources\": [\n validate.any(\n {\n \"type\": \"dash\",\n \"file\": validate.url(scheme=\"http\")\n }, {\n \"type\": \"hls\",\n \"file\": validate.url(scheme=\"http\")\n }, {\n \"type\": \"rtmp\",\n \"file\": validate.text,\n \"streamer\": validate.url(scheme=\"rtmp\")\n }\n )\n ]\n }\n ],\n validate.get(0),\n validate.get(\"sources\")\n)\n\n\nclass Rtvs(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n res = self.session.http.get(self.url)\n match = _playlist_url_re.search(res.text)\n if match is None:\n return\n\n res = self.session.http.get(match.group(1) + RUURL)\n sources = self.session.http.json(res, schema=_playlist_schema)\n\n streams = {}\n\n for source in sources:\n if source[\"type\"] == \"rtmp\":\n streams[\"rtmp_live\"] = RTMPStream(self.session, {\n \"rtmp\": source[\"streamer\"],\n \"pageUrl\": self.url,\n \"live\": True\n })\n elif source[\"type\"] == \"hls\":\n streams.update(HLSStream.parse_variant_playlist(self.session, source[\"file\"]))\n\n return streams\n\n\n__plugin__ = Rtvs\n","repo_name":"Tup0lev/BiliBili_Global_Streaming_Projet_Katyusha","sub_path":"packages/streamlink/plugins/rtvs.py","file_name":"rtvs.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"31"} +{"seq_id":"39078448346","text":"import json\nimport operator\nimport os\n\nimport colors\n\nUNKNOWN = 0\nRUN = 1\nFAIL = 2\nERROR = 3\nTIMEOUT = 4\n\n_color_mapping = {\n RUN : colors.green,\n FAIL: colors.orange,\n ERROR : colors.red,\n TIMEOUT: colors.magneta,\n UNKNOWN : colors.id,\n }\n\nclass TCData:\n def __init__(self, basename = None, bin_p = None, recompiled_p = None):\n self.binary = bin_p\n self.recompiled = recompiled_p\n self.basename = basename\n self.total = 0\n self.success = 0\n self.cases = {}\n self.ces = {}\n\n def is_recompiled(self):\n return self.recompiled is not None\n\n def get_result_color(self):\n if self.total == 0:\n return colors.magneta\n\n if self.total == self.success:\n return colors.green\n\n if self.success == 0:\n return colors.red\n\n return colors.orange\n\n def print(self, verbosity):\n end = \"\\n\"\n if verbosity == 0:\n end = \" \"\n\n print(\"{:<30s}\".format(self.get_result_color()(self.basename)), end=end)\n if not 
self.is_recompiled():\n print(\"\\tRecompilation failed: ERROR\")\n return\n\n if self.total == 0:\n print(colors.magneta(\"\\tNo tests were executed\"))\n return\n\n if verbosity == 0:\n print(colors.get_result_color(self.total, self.success) +\n \"{:>5s}\".format(str(self.success) + \"/\" + str(self.total)) +\n colors.clean())\n elif verbosity == 1:\n for case, val in sorted(self.cases.items(), key = operator.itemgetter(0)):\n print(\" \" * 2, _color_mapping[val](case))\n\n def print_ces(self):\n for case, ce in self.ces.items():\n print(colors.red(self.basename) + ': '+ ('without_args' if not case else case))\n print(ce)\n\n def get(self, test_case):\n return self.cases.get(test_case, UNKNOWN)\n\n def outer_get(self, name, test_case):\n if self and name == self.basename:\n return self.get(test_case)\n return UNKNOWN\n\nclass _MyEncoder(json.JSONEncoder):\n def default(self, o):\n return o.__dict__\n\ndef _object_hook(d):\n if \"binary\" in d:\n obj = TCData()\n obj.__dict__.update(d)\n return obj\n return d\n\n# SQL serialization may be useful as well\ndef store_json(root, filename):\n if os.path.isfile(filename):\n print(\"Log file already exists\")\n return\n with open(filename, 'w') as f:\n json.dump(root, f, cls=_MyEncoder, indent=4)\n\ndef load_json(filename):\n with open(filename, 'r') as f:\n return json.load(f, object_hook = _object_hook)\n\n\nclass _Format:\n l_header = 25\n\n def header(self, message):\n self._header = message\n self.h_queued = True\n\n def case(self, message):\n self._case = message\n\n def _header_dump(self):\n if not self.h_queued:\n return\n self.h_queued = False\n\n printed = self._header.ljust(_Format.l_header)\n self.h_fill = len(printed)\n\n print(printed, end=\"\")\n self.present_header = True\n\n def _case_dump(self):\n fill = 0 if self.present_header else self.h_fill\n printed = \" \" * fill + \"|\" + self._case.ljust(_Format.l_header) + \"|\"\n\n print(printed, end=\"\")\n self.present_header = False\n\n def _res_dump(self, result):\n print(\" \" + str(result) + \" |\", end=\"\")\n\n # TODO: Print something else than numbers with verbosity = 2\n def dump(self, results, verbosity, original):\n\n # Print everything and really verbose\n if verbosity == 2:\n self._header_dump()\n self._case_dump()\n self._res_dump(original)\n for r in results:\n print(\" \", _color_mapping[r](str(r)), \" |\", end=\"\")\n print()\n\n if verbosity == 0:\n if all(r == original for r in results):\n return\n\n self._header_dump()\n self._case_dump()\n self._res_dump(original)\n for r in results:\n if r == original:\n print(\" \" * 3 + \"|\", end=\"\")\n else:\n print(\" \", _color_mapping[r](str(r)), \" |\", end=\"\")\n print()\n\n# Compare test results to some base and print them in reasonable way\n# f is formatter -> first suite name and then case are preset and when dump() is called\n# actual results are printed based on verbosity level\ndef compare(base, results, formatter, full):\n # first we need to sort the items\n s_base = sorted(base.items(), key=operator.itemgetter(0))\n\n for entry in s_base:\n\n suite_name, tcdata = entry\n formatter.header(suite_name)\n\n for case_name, case_result in tcdata.cases.items():\n\n formatter.case(case_name)\n r_case_results = []\n\n for r in results:\n r_tcdata = r.get(suite_name, None)\n r_case_results.append(r_tcdata.outer_get(tcdata.basename, case_name))\n formatter.dump(r_case_results, full, 
case_result)\n","repo_name":"lifting-bits/mcsema","sub_path":"tests/integration_tests/result_data.py","file_name":"result_data.py","file_ext":"py","file_size_in_byte":5101,"program_lang":"python","lang":"en","doc_type":"code","stars":2539,"dataset":"github-code","pt":"31"} +{"seq_id":"22969442530","text":"#Contains miscellaneous functions relating to minister functionality\n\ndef check_corruption(minister_type, global_manager):\n    '''\n    Description:\n        Returns whether the minister in the inputted office would lie about the result of a given roll\n    Input:\n        string minister_type: Minister office to check the corruption of, like Minister of Trade\n    Output:\n        boolean: Returns whether the minister in the inputted office would lie about the result of a given roll\n    '''\n    return(global_manager.get('current_ministers')[minister_type].check_corruption)\n\ndef get_skill_modifier(minister_type, global_manager):\n    '''\n    Description:\n        Returns the skill-based dice roll modifier of the minister in the inputted office\n    Input:\n        string minister_type: Minister office to check the skill of, like Minister of Trade\n    Output:\n        int: Returns the skill-based dice roll modifier of the minister in the inputted office, between -1 and 1\n    '''\n    return(global_manager.get('current_ministers')[minister_type].get_skill_modifier)\n\ndef calibrate_minister_info_display(global_manager, new_minister):\n    '''\n    Description:\n        Updates all relevant objects to display the inputted minister\n    Input:\n        global_manager_template global_manager: Object that accesses shared variables\n        string new_minister: The new minister that is displayed\n    Output:\n        None\n    '''\n    global_manager.set('displayed_minister', new_minister)\n    global_manager.get('minister_info_display').calibrate(new_minister)\n\ndef calibrate_trial_info_display(global_manager, info_display, new_minister):\n    '''\n    Description:\n        Updates all relevant objects to display the inputted minister for a certain side of a trial\n    Input:\n        global_manager_template global_manager: Object that accesses shared variables\n        button/actor list info_display: Interface collection that is calibrated to the inputted minister for that side of the trial\n        minister/string new_minister: The new minister that is displayed, or 'none'\n    Output:\n        None\n    '''\n    if type(info_display) == list:\n        return\n    info_display.calibrate(new_minister)\n    if info_display == global_manager.get('defense_info_display'):\n        global_manager.set('displayed_defense', new_minister)\n    elif info_display == global_manager.get('prosecution_info_display'):\n        global_manager.set('displayed_prosecution', new_minister)\n\ndef trial_setup(defense, prosecution, global_manager):\n    '''\n    Description:\n        Sets the trial info displays to the defense and prosecution ministers at the start of a trial\n    Input:\n        minister defense: Minister to calibrate defense info display to\n        minister prosecution: Minister to calibrate prosecution info display to\n    Output:\n        None\n    '''\n    calibrate_trial_info_display(global_manager, global_manager.get('defense_info_display'), defense)\n    calibrate_trial_info_display(global_manager, global_manager.get('prosecution_info_display'), prosecution)\n    \ndef update_available_minister_display(global_manager):\n    '''\n    Description:\n        Updates the display of available ministers to be hired, displaying 3 of them in order based on the current display index\n    Input:\n        global_manager_template global_manager: Object that accesses shared variables\n    Output:\n        None\n    '''\n    available_minister_portrait_list = 
global_manager.get('available_minister_portrait_list')\n available_minister_left_index = global_manager.get('available_minister_left_index')\n available_minister_list = global_manager.get('available_minister_list')\n for current_index in range(len(available_minister_portrait_list)):\n minister_index = available_minister_left_index + current_index\n if minister_index < len(available_minister_list) and minister_index >= 0:\n available_minister_portrait_list[current_index].calibrate(available_minister_list[minister_index])\n else:\n available_minister_portrait_list[current_index].calibrate('none')\n if len(available_minister_list) > 0 and not available_minister_left_index + 2 >= len(available_minister_list):\n calibrate_minister_info_display(global_manager, available_minister_list[available_minister_left_index + 2])\n\ndef positions_filled(global_manager):\n '''\n Description:\n Returns whether all minister positions are currently filled. Any action in the game that could require minister rolls should only be allowed when all minister positions are filled\n Input:\n global_manager_template global_manager: Object that accesses shared variables\n Output:\n boolean: Returns whether all minister positions are currently filled\n '''\n completed = True\n for current_position in global_manager.get('minister_types'):\n if global_manager.get('current_ministers')[current_position] == 'none':\n completed = False\n return(completed)\n","repo_name":"Vrotki/Scramble-for-Africa-game","sub_path":"modules/minister_utility.py","file_name":"minister_utility.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"36684977981","text":"# Replace All 0’s With 1 In A Given Integer.\r\n\r\nnum = int(input())\r\nN = num\r\n\r\n# using inbuilt method\r\nnum = str(num)\r\nprint(num.replace('0', '1'))\r\n\r\n# using loop\r\nn = \"\"\r\nwhile(N != 0):\r\n r = N % 10\r\n if(r == 0):\r\n r = 1\r\n n += str(r)\r\n N //= 10\r\nprint(n[::-1])","repo_name":"Gopal7476/Python-Programming","sub_path":"Working WIth Numbers/15.ReplaceAll0'sWith1'sInGivenInteger.py","file_name":"15.ReplaceAll0'sWith1'sInGivenInteger.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23672891061","text":"\nimport numpy as np\n\ntime_spent = np.genfromtxt('file.csv', delimiter=',')\n\n# print(time_spent)\nminutes_mean = np.mean(time_spent)\n\nminutes_median = np.median(time_spent)\n\nprint(minutes_mean)\n\nprint(minutes_median)\n\nbest_measure = minutes_median","repo_name":"mikeyhodl/numpy-codecademy","sub_path":"Mean vs. Median.py","file_name":"Mean vs. Median.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"39406558409","text":"## This is free and unencumbered software released into the public domain. \n## see LICENSE file or https://unlicense.org/ for full text of license.\n\n# command character (useful when there's multiple bots, default '!')\n# for now, must be ONE character long ONLY otherwise everything breaks!\ncommand_char = '!'\n# enable shock mode? if 'True', enabled, if 'False', Disabled. 
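default is 'True'.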
\nShockEnabled = True\n## IF YOU EDIT THIS MANUALLY PLEASE ENSURE YOUR *Level VALUES ARE BETWEEN 3 and 100 and your *Time values ARE BETWEEN 0.25 AND 9.50!!\n## IF THEY AREN'T THIS CAN CAUSE ISSUES\n## shock defaults \nShockDefaultLevel = 3\nShockDefaultTime = 0.50\n## set MAXIMUM shock levels\nShockMaxLevel = 10\nShockMaxTime = 2.00\n## vibration defaults\nVibrateDefaultLevel = 3\nVibrateDefaultTime = 0.50\n## set MAXIMUM vibration levels\nVibrateMaxLevel = 100\nVibrateMaxTime = 9.50\n## ROULETTE FUNCTION CONFIG\nroulette_chance = 6 ## expressed as 1/x, i.e. for a 1 in 6 chance use 6. default is '6'\nroulette_level_fixed = True # 'False' if you want to use any value up to max, 'True' if you want to just use default values. default 'True'\nroulette_delay = 2 ## seconds to wait before revealing outcome. default '2'\n## remote key - if you don't know what this is, leave it as the default!\nkey_ = '00101100101001010' # Default \" '00101100101001010' \" (include the ', not the spaces around it)\n## it is VERY VERY Important to leave a new line AFTER THIS LINE\n## if this is not done it can break the program. this is not ideal and will be fixed in due course\n## but in the interim leave a new line please \n\n","repo_name":"smouldery/shock-collar-control","sub_path":"collarbot_config.py","file_name":"collarbot_config.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"31"} +{"seq_id":"31931747981","text":"from SloppyCell.ReactionNetworks import *\n\nnet1 = IO.from_SBML_file('BIOMD0000000022.xml')\nnet1.set_var_constant('EmptySet', True)\ntraj = Dynamics.integrate(net1, [0, 20*24])\nvals = traj.get_var_vals(20*24)\nnet1.set_var_ics(vals)\n\nper01 = net1.copy('per01')\nper01.set_var_ic('s2', 0)\ntraj = Dynamics.integrate(per01, [0, 200*24])\nvals = traj.get_var_vals(200*24)\nper01.set_var_ics(vals)\n\ndClkJrk = net1.copy('dClkJrk')\ndClkJrk.set_var_ic('s6', 0)\ntraj = Dynamics.integrate(dClkJrk, [0, 20*24])\nvals = traj.get_var_vals(20*24)\ndClkJrk.set_var_ics(vals)\n\ndouble = net1.copy('double')\ndouble.set_var_ic('s6', 0)\ntraj = Dynamics.integrate(double, [0, 20*24])\nvals = traj.get_var_vals(20*24)\ndouble.set_var_ics(vals)\n\ncper = net1.copy('cper')\ncper.set_var_ic('s1', 0)\ncper.set_var_ic('c1', 0.846)\ntraj = Dynamics.integrate(cper, [0, 20*24+15])\nvals = traj.get_var_vals(20*24+15)\ncper.set_var_ics(vals)\n\n# per01, dClkJrk, and double are all in fixed-points. 
Thus we neglect them,\n# since their dynamics are very uninteresting.\nnetworks = [cper, net1]\nint_times = [(0, 72)] * len(networks)\n","repo_name":"GutenkunstLab/SloppyCell","sub_path":"Example/Gutenkunst2007/Ueda_2001/Nets.py","file_name":"Nets.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"43157360075","text":"from abc import ABCMeta\nfrom aima.core.util.datastructure import FIFOQueue\nfrom aima.core.util.functions import select_randomly_from_list\nfrom aima.core.util.other import PlusInfinity\n\n__author__ = 'Ivan Mushketik'\n__docformat__ = 'restructuredtext en'\n\nclass Variable:\n \"\"\"\n Variable is an object with unique name.\n \"\"\"\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return \"CSPVariable('\" + str(self.name) + \"')\"\n\n def __eq__(self, other):\n if not isinstance(other, Variable):\n return False\n\n return self.name == other.name\n\n def __hash__(self):\n return hash(self.name)\n\n\nclass Domain:\n \"\"\"\n Defines set of values that can be assigned to a variable\n \"\"\"\n def __init__(self, values):\n \"\"\"\n\n :param values (list): values that can be assigned\n :return:\n \"\"\"\n self.values = list(values)\n\n def size(self):\n \"\"\"\n Get number of values in domain\n\n :return (int): number of values\n \"\"\"\n return len(self.values)\n\n def get(self, index):\n \"\"\"\n Get value with specified number\n\n :param index: number of value in domain\n :return: value from the domain\n \"\"\"\n\n return self.values[index]\n\n def remove(self, value):\n \"\"\"\n Remove value from domain\n\n :param value: value to remove\n :return: None\n \"\"\"\n self.values.remove(value)\n\n def is_empty(self):\n \"\"\"\n Check if domain is empty\n\n :return (bool): True if domain is empty, False otherwise\n \"\"\"\n return len(self.values) == 0\n\n def contains(self, value):\n \"\"\"\n Check if specified value belongs to a domain\n\n :param value: value to check\n :return (bool): True if value belongs to a domain, False otherwise\n \"\"\"\n return value in self.values\n\n def __iter__(self):\n return iter(self.values)\n\n def __eq__(self, other):\n if not isinstance(other, Domain):\n return False\n\n if self.size() != other.size():\n return False\n\n for i in range(self.size()):\n if self.get(i) != other.get(i):\n return False\n\n return True\n\n def __str__(self):\n return str(self.values)\n\n\nclass Assignment:\n \"\"\"\n This class holds what values was assigned to what variables\n \"\"\"\n def __init__(self):\n self.variable_to_value = {}\n\n def get_variables(self):\n \"\"\"\n Get assigned variables\n\n :return list(Variable): list of assigned variables\n \"\"\"\n return self.variable_to_value.keys()\n\n def get_assignment(self, var):\n \"\"\"\n Get value that was assigned to the specified variable\n\n :param var (Variable):\n :return: assigned value if any was assigned to a specified variable, None otherwise\n \"\"\"\n return self.variable_to_value.get(var)\n\n def set_assignment(self, var, value):\n \"\"\"\n Assign valut to a variable\n\n :param var: variable to assign\n :param value: value to assign\n :return: None\n \"\"\"\n self.variable_to_value[var] = value\n\n def remove_assignment(self, var):\n \"\"\"\n Remove assignment for a specified variable\n\n :param var (Variable): variable to remove assignment for\n :return: None\n \"\"\"\n\n del self.variable_to_value[var]\n\n def has_assignment_for(self, var):\n \"\"\"\n Check if any value 
was assigned to a specified variable\n\n        :param var (Variable): variable to check\n        :return: True if some value was assigned, False otherwise\n        \"\"\"\n        return self.variable_to_value.get(var) is not None\n\n    def is_consistent(self, constraints):\n        \"\"\"\n        Check if current assignment doesn't violate any constraint\n\n        :param constraints (Constraint): constraints to check\n        :return (bool): True if all constraints are satisfied, False otherwise \n        \"\"\"\n        for constraint in constraints:\n            if not constraint.is_satisfied_with(self):\n                return False\n        return True\n\n    def is_complete(self, variables):\n        \"\"\"\n        Check if all values were assigned to all variables.\n\n        :param variables iterable(Variables): variables to check\n        :return (bool): True if values were assigned to all variables, False otherwise\n        \"\"\"\n        for var in variables:\n            if not self.has_assignment_for(var):\n                return False\n\n        return True\n\n    def is_solution(self, csp):\n        \"\"\"\n        Check if current assignment is a solution to the specified CSP problem\n\n        :param csp (CSP):\n        :return (bool): True if current assignment is a solution, False otherwise.\n        \"\"\"\n        return self.is_consistent(csp.get_constraints()) and self.is_complete(csp.get_variables())\n\n    def copy(self):\n        copy = Assignment()\n        copy.variable_to_value = self.variable_to_value.copy()\n\n        return copy\n\n    def __str__(self):\n        first = True\n        result = \"{\"\n        for var in self.variable_to_value.keys():\n            if not first:\n                result += \", \"\n            result += str(var) + \" = \" + str(self.variable_to_value[var])\n            first = False\n\n        result += \"}\"\n        return result\n\n\nclass Constraint(metaclass=ABCMeta):\n    \"\"\"\n    Abstract class for specifying a constraint in CSP problem\n    \"\"\"\n    def get_scope(self):\n        \"\"\"\n        Get variables that are constrained by an instance of this class\n\n        :return iterable(Variable):\n        \"\"\"\n        raise NotImplementedError()\n\n    def is_satisfied_with(self, assignment):\n        raise NotImplementedError()\n\nclass NotEqualConstraint(Constraint):\n    def __init__(self, var1, var2):\n        self.var1 = var1\n        self.var2 = var2\n        self.scope = (var1, var2)\n\n    def get_scope(self):\n        return self.scope\n\n    def is_satisfied_with(self, assignment):\n        value1 = assignment.get_assignment(self.var1)\n\n        return value1 is None or value1 != assignment.get_assignment(self.var2)\n\n\nclass CSP:\n\n    def __init__(self, variables):\n        self.variables = list(variables)\n        self.domains = {}\n        self.constraints = []\n        self.var_constraints = {}\n\n        for variable in variables:\n            self.domains[variable] = []\n            self.var_constraints[variable] = []\n\n    def get_variables(self):\n        return self.variables\n\n    def get_domain(self, var):\n        return self.domains[var]\n\n    def set_domain(self, var, domain):\n        self.domains[var] = Domain(domain.values)\n\n    def remove_value_from_domain(self, var, value):\n        self.domains[var].remove(value)\n\n\n    def get_constraints(self, var=None):\n        if var is not None:\n            return self.var_constraints[var]\n        else:\n            return self.constraints\n\n    def add_constraint(self, constraint):\n        self.constraints.append(constraint)\n        for var in constraint.get_scope():\n            self.var_constraints[var].append(constraint)\n\n    def get_neighbor(self, var, constraint):\n        pass\n\n    def copy_domains(self):\n        # construct the copy with the same variables, share constraints, and return it\n        result = CSP(self.variables)\n        result.constraints = list(self.constraints)\n        result.var_constraints = self.var_constraints.copy()\n        result.domains = self.domains.copy()\n        return result\n\n\nclass CSPStateListener(metaclass=ABCMeta):\n    def state_changed(self, csp, assignment):\n        raise NotImplementedError()\n\n\nclass SolutionStrategy:\n    def __init__(self):\n        
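# observers that are notified through _notify_state_changed whenever the CSP changes
        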
self.listeners = []\n\n    def add_csp_state_listener(self, listener):\n        self.listeners.append(listener)\n\n    def remove_csp_state_listener(self, listener):\n        self.listeners.remove(listener)\n\n    def _notify_state_changed(self, csp, assignment=None):\n        for listener in self.listeners:\n            listener.state_changed(csp, assignment)\n\n    def solve(self, csp):\n        raise NotImplementedError()\n\n\nclass DomainRestoreInfo:\n    def __init__(self):\n        self.saved_domains = {}\n        self.empty_domain_found = False\n\n    def clear(self):\n        self.saved_domains = {}\n\n    def is_empty(self):\n        return len(self.saved_domains.keys()) == 0\n\n    def store_domain_for(self, var, domain):\n        if self.saved_domains.get(var) is None:\n            self.saved_domains[var] = list(domain)\n\n    def restore_domains(self, csp):\n        for var in self.saved_domains.keys():\n            # saved_domains stores plain lists; wrap them back into a Domain,\n            # since set_domain expects an object with a .values attribute\n            csp.set_domain(var, Domain(self.saved_domains[var]))\n\n\n# Artificial Intelligence A Modern Approach (3rd Ed.): Figure 6.5, Page 215.\n#\n# function BACKTRACKING-SEARCH(csp) returns a solution, or failure\n#    return BACKTRACK({ }, csp)\n#\n# function BACKTRACK(assignment, csp) returns a solution, or failure\n#    if assignment is complete then return assignment\n#    var = SELECT-UNASSIGNED-VARIABLE(csp)\n#    for each value in ORDER-DOMAIN-VALUES(var, assignment, csp) do\n#       if value is consistent with assignment then\n#          add {var = value} to assignment\n#          inferences = INFERENCE(csp, var, value)\n#          if inferences != failure then\n#             add inferences to assignment\n#             result = BACKTRACK(assignment, csp)\n#             if result != failure then\n#                return result\n#       remove {var = value} and inferences from assignment\n#    return failure\n#\n# Figure 6.5 A simple backtracking algorithm for constraint satisfaction\n# problems. The algorithm is modeled on the recursive depth-first search of\n# Chapter 3. By varying the functions SELECT-UNASSIGNED-VARIABLE and\n# ORDER-DOMAIN-VALUES, we can implement the general-purpose heuristic discussed\n# in the text. The function INFERENCE can optionally be used to impose arc-,\n# path-, or k-consistency, as desired. 
If a value choice leads to failure\n# (noticed either by INFERENCE or by BACKTRACK), then value assignments\n# (including those made by INFERENCE) are removed from the current assignment\n# and a new value is tried.\nclass BacktrackingStrategy(SolutionStrategy):\n    # function BACKTRACKING-SEARCH(csp) returns a solution, or failure\n    # return BACKTRACK({ }, csp)\n    def solve(self, csp):\n        return self._recursive_backtrack_search(csp, Assignment())\n\n    # function BACKTRACK(assignment, csp) returns a solution, or failure\n    def _recursive_backtrack_search(self, csp, assignment):\n        result = None\n        # if assignment is complete then return assignment\n        if assignment.is_complete(csp.get_variables()):\n            result = assignment\n        else:\n            # var = SELECT-UNASSIGNED-VARIABLE(csp)\n            var = self._select_unassigned_variable(assignment, csp)\n            # for each value in ORDER-DOMAIN-VALUES(var, assignment, csp) do\n            for value in self._order_domain_values(var, csp, assignment):\n                # add {var = value} to assignment\n                assignment.set_assignment(var, value)\n                # inferences = INFERENCE(csp, var, value)\n                # if inferences != failure then\n                if assignment.is_consistent(csp.get_constraints(var)):\n                    info = self._inference(var, assignment, csp)\n\n                    if not info.is_empty():\n                        self._notify_state_changed(csp)\n                    if not info.empty_domain_found:\n                        # result = BACKTRACK(assignment, csp)\n                        result = self._recursive_backtrack_search(csp, assignment)\n                        # if result != failure then\n                        if result is not None:\n                            # return result\n                            break\n\n                    info.restore_domains(csp)\n                # remove {var = value} and inferences from assignment\n                assignment.remove_assignment(var)\n\n        return result\n\n    def _select_unassigned_variable(self, assignment, csp):\n        \"\"\"\n        Select one of unassigned variables. This method is overridden in ImprovedBacktrackingStrategy\n        to implement different variable selection heuristics\n\n        :param assignment (Assignment): current assignment\n        :param csp (CSP): CSP to solve\n        :return (Variable): variable to assign value for\n        \"\"\"\n        for var in csp.get_variables():\n            if not assignment.has_assignment_for(var):\n                return var\n\n        return None\n\n    def _order_domain_values(self, var, csp, assignment):\n        return csp.get_domain(var)\n\n    def _inference(self, var, assignment, csp):\n        return DomainRestoreInfo()\n\n\n# Artificial Intelligence A Modern Approach (3rd Ed.): Figure 6.3, Page 209.\n#\n# function AC-3(csp) returns false if an inconsistency is found and true otherwise\n#    inputs: csp, a binary CSP with components (X, D, C)\n#    local variables: queue, a queue of arcs, initially all the arcs in csp\n#    while queue is not empty do\n#       (Xi, Xj) = REMOVE-FIRST(queue)\n#       if REVISE(csp, Xi, Xj) then\n#          if size of Di = 0 then return false\n#          for each Xk in Xi.NEIGHBORS - {Xj} do\n#             add (Xk, Xi) to queue\n#    return true\n#\n# function REVISE(csp, Xi, Xj) returns true iff we revise the domain of Xi\n#    revised = false\n#    for each x in Di do\n#       if no value y in Dj allows (x ,y) to satisfy the constraint between Xi and Xj then\n#          delete x from Di\n#          revised = true\n#    return revised\n#\n# Figure 6.3 The arc-consistency algorithm AC-3. After applying AC-3, either\n# every arc is arc-consistent, or some variable has an empty domain, indicating\n# that the CSP cannot be solved. 
The name \"AC-3\" was used by the algorithm's\n# inventor (Mackworth, 1977) because it's the third version developed in the\n# paper.\nclass AC3Strategy:\n def reduce_domains(self, csp, var=None, value=None, assignment=None):\n if var == None:\n result = DomainRestoreInfo()\n queue = FIFOQueue()\n for var in csp.get_variables():\n queue.add(var)\n\n self._reduce_domains(queue, csp, result, Assignment())\n return result\n\n else:\n result = DomainRestoreInfo()\n domain = csp.get_domain(var)\n if domain.contains(value):\n if domain.size() > 1:\n queue = FIFOQueue()\n queue.add(var)\n result.store_domain_for(var, domain)\n csp.set_domain(var, Domain([value]))\n self._reduce_domains(queue,csp, result, assignment)\n else:\n result.empty_domain_found = True\n\n return result\n\n def _reduce_domains(self, queue, csp, info, assignment):\n while not queue.is_empty():\n var = queue.pop()\n\n for constraint in csp.get_constraints(var):\n neighbors = set([n for n in constraint.get_scope() if n != var ])\n for neighbor in neighbors:\n if self._revise(neighbor, var, constraint, csp, info, assignment):\n if csp.get_domain(neighbor).is_empty():\n info.empty_domain_found = True\n return\n queue.add(neighbor)\n\n def _revise(self, xi, xj, constraint, csp, info, assignment):\n revised = False\n copy_assignment = assignment.copy()\n\n for i_value in csp.get_domain(xi):\n copy_assignment.set_assignment(xi, i_value)\n consistent_extension_found = False\n\n for j_value in csp.get_domain(xj):\n copy_assignment.set_assignment(xj, j_value)\n if constraint.is_satisfied_with(copy_assignment):\n consistent_extension_found = True\n break\n\n copy_assignment.remove_assignment(xj)\n\n if not consistent_extension_found:\n info.store_domain_for(xi, csp.get_domain(xi))\n csp.remove_value_from_domain(xi, i_value)\n revised = True\n\n return revised\n\n# Possible heuristics of variables selection in ImprovedBacktrackingStrategy\nclass Selection:\n DEFAULT_ORDER = 0\n MRV = 1\n MRV_DEG = 2\n\n# Inference strategies in ImprovedBacktrackingStrategy\nclass Inference:\n NONE = 0\n FORWARD_CHECKING = 1\n AC3 = 2\n\nclass ImprovedBacktrackingStrategy(BacktrackingStrategy):\n def __init__(self, selection, inference=Inference.NONE, enable_lcv=False):\n super().__init__()\n self.selection = selection\n self.inference = inference\n self.enable_lcv = enable_lcv\n\n def solve(self, csp):\n if self.inference == Inference.AC3:\n info = AC3Strategy().reduce_domains(csp)\n\n if not info.is_empty():\n self._notify_state_changed(csp)\n if info.empty_domain_found:\n return None\n\n return super().solve(csp)\n\n def _select_unassigned_variable(self, assignment, csp):\n if self.selection == Selection.MRV:\n return self._apply_mrv_heuristic(csp, assignment)[0]\n elif self.selection == Selection.MRV_DEG:\n vars = self._apply_mrv_heuristic(csp, assignment)\n return self._apply_degree_heuristic(vars, assignment, csp)[0]\n else:\n return super()._select_unassigned_variable(assignment, csp)\n\n def _apply_mrv_heuristic(self, csp, assignment):\n \"\"\"\n Return list of variables with the least number of assignable variables\n\n :param csp (CSP): CSP to solve\n :param assignment (Assignment): current assignment\n :return list(Variable): list of variales with the least number of assignable variables\n \"\"\"\n result = []\n mrv = PlusInfinity()\n copy_assignment = assignment.copy()\n\n for var in csp.get_variables():\n if not copy_assignment.has_assignment_for(var):\n # Get number of left values for this variable\n num = self._calculate_left_values(var, 
csp, copy_assignment)\n                if num <= mrv:\n                    if num < mrv:\n                        result = []\n                        mrv = num\n                    result.append(var)\n\n        return result\n\n    def _calculate_left_values(self, var, csp, assignment):\n        num = 0\n\n        for val in csp.get_domain(var):\n            # Set value\n            assignment.set_assignment(var, val)\n            violated = False\n            # Check if any constraint is violated\n            for constraint in csp.get_constraints(var):\n                if not constraint.is_satisfied_with(assignment):\n                    violated = True\n            # If not violated this value can be set\n            if not violated:\n                num += 1\n\n            assignment.remove_assignment(var)\n\n        return num\n\n\n    def _apply_degree_heuristic(self, vars, assignment, csp):\n        \"\"\"\n        Find variable with the biggest degree\n\n        :param vars list(Variable): list of variables with the least number of assignable values\n        :param assignment (Assignment): current assignment\n        :param csp (CSP): CSP to solve\n        :return: result list of variables with the highest degree. Result variables is subset of vars.\n        \"\"\"\n\n        result = []\n        max_degree = -1\n\n        for var in vars:\n            neighbors = set()\n            for constraint in csp.get_constraints(var):\n                for neighbor in constraint.get_scope():\n                    # Collect all not assigned variables with common constraints\n                    if not assignment.has_assignment_for(neighbor):\n                        neighbors.add(neighbor)\n\n            # Number of collected variables is a degree of the current variable\n            degree = len(neighbors)\n            if degree >= max_degree:\n                if degree > max_degree:\n                    result = []\n                    max_degree = degree\n                result.append(var)\n\n        return result\n\n    def _order_domain_values(self, var, csp, assignment):\n        if self.enable_lcv:\n            return self._apply_least_constraining_value_heuristic(var, csp, assignment)\n        else:\n            return super()._order_domain_values(var, csp, assignment)\n\n    def _apply_least_constraining_value_heuristic(self, var, csp, assignment):\n        pairs = []\n        for value in csp.get_domain(var):\n            num = self._count_lost_values(var, value, csp, assignment)\n            pairs.append((value, num))\n\n        pairs = sorted(pairs, key = lambda pair: pair[1])\n\n        return [value for (value, num) in pairs]\n\n    def _count_lost_values(self, var, value, csp, assignment):\n        copy_assignment = assignment.copy()\n        # set the tentative value on the copy, not on the caller's assignment\n        copy_assignment.set_assignment(var, value)\n\n        result = 0\n        for constraint in csp.get_constraints():\n            for neighbor in constraint.get_scope():\n                if neighbor != var:\n                    for n_value in csp.get_domain(neighbor):\n                        copy_assignment.set_assignment(neighbor, n_value)\n                        if not constraint.is_satisfied_with(copy_assignment):\n                            result += 1\n                        copy_assignment.remove_assignment(neighbor)\n\n        return result\n\n    def _inference(self, var, assignment, csp):\n        if self.inference == Inference.FORWARD_CHECKING:\n            return self._do_forward_checking(var, assignment, csp)\n        elif self.inference == Inference.AC3:\n            return AC3Strategy().reduce_domains(csp, var, assignment.get_assignment(var), assignment)\n        else:\n            return super()._inference(var, assignment, csp)\n\n\n    def _do_forward_checking(self, var, assignment, csp):\n        result = DomainRestoreInfo()\n\n        for constraint in csp.get_constraints(var):\n            for neighbor in constraint.get_scope():\n                if not assignment.has_assignment_for(neighbor):\n                    if self._revise(neighbor, constraint, assignment, csp, result):\n                        if csp.get_domain(neighbor).is_empty():\n                            result.empty_domain_found = True\n                            return result\n\n        return result\n\n    def _revise(self, var, constraint, assignment, csp, info):\n        revised = False\n\n        # iterate over a copy: values may be pruned from the domain as we go\n        for value in list(csp.get_domain(var)):\n            assignment.set_assignment(var, value)\n            if not constraint.is_satisfied_with(assignment):\n                # save the domain before pruning so it can be restored on backtrack,\n                # then actually remove the inconsistent value (mirrors AC3Strategy._revise)\n                info.store_domain_for(var, csp.get_domain(var))\n                csp.remove_value_from_domain(var, value)\n                revised = 
True\n\n        assignment.remove_assignment(var)\n\n        return revised\n\n# Artificial Intelligence A Modern Approach (3rd Ed.): Figure 6.8, Page 221.\n#\n# function MIN-CONFLICTS(csp, max-steps) returns a solution or failure\n#    inputs: csp, a constraint satisfaction problem\n#            max-steps, the number of steps allowed before giving up\n#    current = an initial complete assignment for csp\n#    for i = 1 to max steps do\n#       if current is a solution for csp then return current\n#       var = a randomly chosen conflicted variable from csp.VARIABLES\n#       value = the value v for var that minimizes CONFLICTS(var, v, current, csp)\n#       set var = value in current\n#    return failure\n#\n# Figure 6.8 The MIN-CONFLICTS algorithm for solving CSPs by local search. The\n# initial state may be chosen randomly or by a greedy assignment process that\n# chooses a minimal-conflict value for each variable in turn. The CONFLICTS\n# function counts the number of constraints violated by a particular value,\n# given the rest of the current assignment.\nclass MinConflictsStrategy(SolutionStrategy):\n    def __init__(self, max_step):\n        super().__init__()\n        self.max_step = max_step\n\n    # function MIN-CONFLICTS(csp, max-steps) returns a solution or failure\n    def solve(self, csp):\n        # current = an initial complete assignment for csp\n        assignment = self._generate_random_assignment(csp)\n        self._notify_state_changed(csp)\n        # for i = 1 to max steps do\n        for i in range(self.max_step):\n            # if current is a solution for csp then return current\n            if assignment.is_solution(csp):\n                return assignment\n            else:\n                # var = a randomly chosen conflicted variable from csp.VARIABLES\n                vars = self._get_conflicted_variables(assignment, csp)\n                var = select_randomly_from_list(vars)\n                # value = the value v for var that minimizes CONFLICTS(var, v, current, csp)\n                value = self._get_min_conflict_value_for(var, assignment, csp)\n                # set var = value in current\n                assignment.set_assignment(var, value)\n                self._notify_state_changed(csp, assignment)\n\n        # return failure\n        return None\n\n    def _generate_random_assignment(self, csp):\n        assignment = Assignment()\n        for var in csp.get_variables():\n            value = select_randomly_from_list(list(csp.get_domain(var)))\n            assignment.set_assignment(var, value)\n\n        return assignment\n\n    def _get_conflicted_variables(self, assignment, csp):\n        result = set()\n        for constraint in csp.get_constraints():\n            if not constraint.is_satisfied_with(assignment):\n                for var in constraint.get_scope():\n                    result.add(var)\n\n        return list(result)\n\n    def _get_min_conflict_value_for(self, var, assignment, csp):\n        constraints = csp.get_constraints()\n        duplicate_assignment = assignment.copy()\n        min_conflict = PlusInfinity()\n        result_candidates = []\n\n        for value in csp.get_domain(var):\n            duplicate_assignment.set_assignment(var, value)\n            # count conflicts of the tentative assignment, not the unmodified one\n            curr_conflict = self._count_conflicts(duplicate_assignment, constraints)\n            if curr_conflict <= min_conflict:\n                if curr_conflict < min_conflict:\n                    result_candidates = []\n                    min_conflict = curr_conflict\n                result_candidates.append(value)\n\n        if len(result_candidates) != 0:\n            return select_randomly_from_list(result_candidates)\n        else:\n            return None\n\n    def _count_conflicts(self, assignment, constraints):\n        result = 0\n        for constraint in constraints:\n            if not constraint.is_satisfied_with(assignment):\n                result += 1\n\n        return result\n\n\nclass MapCSP(CSP):\n    NSW = Variable(\"NSW\")\n    NT = Variable(\"NT\")\n    Q = Variable(\"Q\")\n    SA = Variable(\"SA\")\n    T = Variable(\"T\")\n    V = Variable(\"V\")\n    WA = Variable(\"WA\")\n    RED = \"RED\"\n    GREEN = \"GREEN\"\n    BLUE 
= \"BLUE\"\n\n def __init__(self):\n super().__init__(self._collect_variables())\n colors = Domain((self.RED, self.GREEN, self.BLUE))\n\n for var in self.get_variables():\n self.set_domain(var, colors)\n\n self.add_constraint(NotEqualConstraint(self.WA, self.NT))\n self.add_constraint(NotEqualConstraint(self.WA, self.SA))\n self.add_constraint(NotEqualConstraint(self.NT, self.SA))\n self.add_constraint(NotEqualConstraint(self.NT, self.Q))\n self.add_constraint(NotEqualConstraint(self.SA, self.Q))\n self.add_constraint(NotEqualConstraint(self.SA, self.NSW))\n self.add_constraint(NotEqualConstraint(self.SA, self.V))\n self.add_constraint(NotEqualConstraint(self.Q, self.NSW))\n self.add_constraint(NotEqualConstraint(self.NSW, self.V))\n\n def _collect_variables(self):\n variables = []\n variables.append(self.NSW)\n variables.append(self.NT)\n variables.append(self.Q)\n variables.append(self.SA)\n variables.append(self.T)\n variables.append(self.V)\n variables.append(self.WA)\n\n return variables\n","repo_name":"mushketyk/aima-python","sub_path":"aima/core/search/csp.py","file_name":"csp.py","file_ext":"py","file_size_in_byte":27816,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"31"} +{"seq_id":"19999139320","text":"\"\"\"\nSVM CLASSIFER FULL\n\"\"\"\n\nimport sys\nimport os\nimport cv2\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\n\nimport time\nfrom sklearn import svm as svc\n\n#import cPickle as pickle\nimport pickle\n\nSHAPE = (8,24)\n\ndef read_files(directory):\n print (\"Reading files...\")\n s = 1\n feature_list = list()\n label_list = list()\n num_classes = 0\n for root, dirs, files in os.walk(directory):\n for d in dirs:\n num_classes += 1\n images = os.listdir(root+d)\n for image in images:\n s += 1\n label_list.append(d)\n feature_list.append(extract_feature(root+d+\"/\"+image))\n\n print (str(num_classes) + \" classes\")\n return np.asarray(feature_list), np.asarray(label_list)\n\n#counter = 0\ndef extract_feature(image_file):\n\n #global counter\n #print(image_file)\n img = cv2.imread(image_file, 0)\n\n #filter_g = cv2.bilateralFilter(img,35,75,75)\n filter_g = cv2.GaussianBlur(img,(15,15),0)\n full_mask = filter_g\n \n img = cv2.resize(full_mask, SHAPE, interpolation = cv2.INTER_CUBIC)\n #cv2.imwrite('./tests/{}.jpg'.format(counter), img)\n img = img.flatten()\n #counter +=1\n #print('SHape is', img.shape)\n\n #img = img/(np.mean(img)+0.0001)\n img = img/255\n return img\n \n\n\ndef buildSVM(features, target):\n print (\"Fitting\")\n C = 0.1 # SVM regularization parameter\n #C = 10\n #svm = SVC()\n #svm = SVC(kernel='linear', C = C)\n #svm = SVC(kernel='rbf', gamma=0.7, C=C) # RBF\n #svm = SVC(kernel='poly', degree=2, C=C)\n svm = svc.LinearSVC(C=C)\n # Fitting model\n svm.fit(features, target)\n\n return svm\n\n\n\ndef main(directory):\n # generating two numpy arrays for features and labels\n feature_array, label_array = read_files(directory)\n # Splitting the data into test and training splits\n print('TOTAL FEATURES shape::', feature_array.shape)\n print('TOTAL LABELS shape::', label_array.shape)\n # Splitting the data into test and training splits\n X_train, X_test, y_train, y_test = train_test_split(feature_array, label_array, test_size=0.2, random_state=42)\n\n # Train and Test dataset size details\n print (\"Train_x Shape :: \", X_train.shape)\n print (\"Train_y Shape :: \", 
y_train.shape)\n    print (\"Test_x Shape :: \", X_test.shape)\n    print (\"Test_y Shape :: \", y_test.shape)\n\n    svm = buildSVM(X_train, y_train)\n\n    \n    print (\"Saving model...\")\n    pickle.dump(svm, open(\"svm_LINEARSVC.pkl\", \"wb\"))\n\n    print (\"Trained model :: \", svm)\n\n    print (\"Testing...\\n\")\n    \n    right = 0\n    total = 0\n\n    for x, y in zip(X_test, y_test):\n        x = x.reshape(1, -1)\n        prediction = svm.predict(x)[0]\n        if y == prediction:\n            right += 1\n        total += 1\n\n    accuracy = float(right)/float(total)*100\n\n    print (\"Accuracy:: \", str(accuracy) + \"% accuracy\")\n\n\n    predictions = svm.predict(X_test)\n    \n    for i in range(0, 5):\n        print (\"Actual outcome :: {} and Predicted outcome :: {}\".format(list(y_test)[i], predictions[i]))\n\n    print (\"Train Accuracy :: \", accuracy_score(y_train, svm.predict(X_train)))\n    print (\"Test Accuracy :: \", accuracy_score(y_test, predictions))\n    print (\" Confusion matrix \", confusion_matrix(y_test, predictions))\n\n    print (\"Saving model...\")\n\nif __name__ == \"__main__\":\n    if len(sys.argv) < 2:\n        print (\"Usage: python svm2.py .//\")\n        exit()\n\n    # Directory containing subfolders with images in them.\n    directory = sys.argv[1]\n\n    main(directory)","repo_name":"stanlee321/semaforo","sub_path":"svm2Binary.py","file_name":"svm2Binary.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43585234170","text":"# -*- coding: utf8 -*-\n\nfrom symbols import *\n\n# board:\nWIDTH = 800\nHEIGHT = 600\n\n# colors:\nGREY = (200, 200, 200)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nDARK_GREEN = (0, 127, 0)\nBLUE = (0, 0, 255)\nBLUE_SKY = (90, 155, 255)\nYELLOW = (255, 255, 0)\nPINK = (255, 0, 255)\nTURQUOISE = (0, 255, 255)\nORANGE = (255, 127, 0)\nVIOLET = (170, 20, 170)\n\nBG = WHITE\nAXIS_COLOR = GREY\n\nPLOTCOLORS = [\n    TURQUOISE,\n    RED,\n    GREEN,\n    BLUE,\n    PINK,\n    YELLOW,\n    ORANGE,\n    DARK_GREEN,\n    VIOLET,\n    BLUE_SKY,\n]\nCOLORS_NBR = len(PLOTCOLORS)\n\nAREA_COLOR = YELLOW\n\n# axes:\nTIC_SIZE = 4\nARROW_HEIGHT = 20\nARROW_WIDTH = 10\n\n# labels:\nOFFSET_LABEL = 5\nOFFSET_LABEL_AXE = 15\n\n# We can assume that all symbols have the same size.\nSYMBOL_HEIGHT = len(symbols[\"0\"])\nSYMBOL_WIDTH = len(symbols[\"0\"][0])\n\n# area calculation\nE = 2\nepsilon = 10\n","repo_name":"simonpicard/graphing-calculator","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10781873150","text":"import pyautogui\nimport time\n\nx = 1119\ny = 307\ny = y + 50 * 3\nx = x + 50 * 2\n# trade\n\ni = 1\nwhile i < 4:\n    currentMouseX, currentMouseY = pyautogui.position()\n    print(currentMouseX, currentMouseY)\n    time.sleep(1)\n    pyautogui.rightClick(x=x, y=y, interval=1)\n    x = x + 50\n    i = i + 1\n\ntime.sleep(1)\npyautogui.moveTo(x=300, y=694)\ntime.sleep(0.5)\npyautogui.click(interval=1)\n# pyautogui.leftClick(x=300, y=694, interval=4)\n# time.sleep(1)\n# pyautogui.leftClick(x=300, y=694, interval=1)\n# trade dialog position: 300 694\n# tidy-backpack button position: 1166 264\n\n# slot 00: 1119 307\n# neighbouring slots are 50 px apart horizontally and vertically\n","repo_name":"entrehuihui/tensorflow-image","sub_path":"get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34680486649","text":"\n\ndef binary_search(arr_sorted, elem_needed, low, high):\n    # an exhausted range means the element is absent\n    if high < low:\n        return -1\n    pivot_index = 
(high + low) // 2\n    if arr_sorted[pivot_index] == elem_needed:\n        return pivot_index + 1  # the task expects 1-based positions\n    elif arr_sorted[pivot_index] > elem_needed:\n        return binary_search(arr_sorted, elem_needed, low, pivot_index - 1)\n    else:\n        return binary_search(arr_sorted, elem_needed, pivot_index + 1, high)\n\n\narr_1_s = list(map(int, input().split()))\narr_2_n = list(map(int, input().split()))\narr_sorted_leng = arr_1_s.pop(0)\narr_needed_leng = arr_2_n.pop(0)\narr_founded_indexes = []\n\nlo = 0\nhi = arr_sorted_leng - 1\n\nfor i in range(arr_needed_leng):\n    element_n = arr_2_n[i]\n    arr_founded_indexes.append(binary_search(arr_1_s, element_n, lo, hi))\n\nprint(*arr_founded_indexes)\n\n","repo_name":"Egor-Sidorov/Algorithmic_tasks","sub_path":"Binary search.py","file_name":"Binary search.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22544019024","text":"\n# standard library\nimport os\nimport subprocess\n\n# pifou library\nimport pifou.lib\nimport pifou.metadata\n\n# local library\nimport dash.model\nimport dash.controller\n\n\n@pifou.lib.log\nclass Dash(object):\n    \"\"\"Dash controller\"\"\"\n\n    def __init__(self):\n        self.model = None\n        self.controller = None\n\n    def set_controller(self, controller):\n        self.controller = controller\n        controller.launch.connect(self.launch_listener)\n\n    def set_model(self, model):\n        self.model = model\n\n        if self.controller:\n            self.controller.set_model(model)\n\n    def launch_listener(self, index):\n        \"\"\"Launch `path`\n\n        `path` points to a live workspace.\n\n        1. Parse workspace\n        2. Find app\n        3. Find args, kwargs and environment\n        4. 
Run app\n\n        \"\"\"\n\n        path = self.model.data(index, 'path')\n        self.launch_path(path)\n\n    def launch_path(self, path):\n        basename = os.path.basename(path)\n        application, _ = os.path.splitext(basename)\n\n        cmd = list()\n\n        exe = pifou.lib.where(application)\n        if not exe:\n            self.log.info(\"Application '{0}' could \"\n                          \"not be found\".format(application))\n            return False\n\n        cmd.append(exe)\n\n        location = pifou.metadata.Location(path)\n\n        # Get arguments\n        args = pifou.metadata.entry(location, \"apps/\"\n                                    + application\n                                    + \"/args\")\n        pifou.metadata.inherit(args)\n\n        for arg in args:\n            pifou.metadata.inherit(arg)\n            cmd.append(arg.path.name)\n\n        # Get keyword arguments\n        args = pifou.metadata.entry(location, \"apps/\"\n                                    + application\n                                    + \"/kwargs\")\n        pifou.metadata.inherit(args)\n\n        for arg in args:\n            pifou.metadata.inherit(arg)\n            cmd.append(arg.path.name)\n            cmd.append(arg.value)\n            self.log.info(\"Inheriting from kwarg: %s\" % arg.path)\n\n        # Resolve keywords\n        keywords = {\n            '$workspace': path,\n        }\n\n        for part in cmd:\n            index = cmd.index(part)\n            part = str(part).lower()\n\n            try:\n                keyword = keywords[part]\n                cmd[index] = keyword\n\n            except KeyError:\n                pass\n\n        self.log.info(\"Running %s\" % cmd)\n        subprocess.Popen(cmd)\n\n    def kwargs_from_workspace(self, root, application):\n        \"\"\"Fetch keyword arguments from `root` for `application`\n\n        Arguments:\n            root (str): Absolute path to workspace\n\n        \"\"\"\n\n        raise NotImplementedError\n\n    def args_from_workspace(self, root, application):\n        \"\"\"Fetch arguments from `root` for `application`\n\n        Arguments:\n            root (str): Absolute path to workspace\n\n        \"\"\"\n\n        raise NotImplementedError\n\n    def environment_from_workspace(self, root, application):\n        \"\"\"Fetch environment settings from `root` for `application`\n\n        Arguments:\n            root (str): Absolute path to workspace\n\n        \"\"\"\n\n        raise NotImplementedError\n\n\nif __name__ == '__main__':\n    import pigui.pyqt5.util\n\n    with pigui.pyqt5.util.application_context():\n        win = dash.controller.Dash()\n\n        model = dash.model.Model()\n\n        app = Dash()\n        app.set_controller(win)\n        app.set_model(model)\n\n        model.setup('c:\\\\studio\\\\content')\n\n        win.resize(*dash.settings.WINDOW_SIZE)\n        win.animated_show()\n","repo_name":"abstractfactory/dash","sub_path":"dash/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11009807608","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import UserError, ValidationError, AccessError\nfrom xlrd import open_workbook\nfrom itertools import groupby\nfrom operator import itemgetter\nimport base64\nimport openpyxl\nfrom odoo.tools.float_utils import float_compare, float_is_zero, float_repr, float_round\nfrom tempfile import TemporaryFile\n\n\nclass BoxPackage(models.Model):\n    _name = 'box.package'\n    _description = 'Box Packaging'\n    _rec_name = 'name'\n    _order = 'id desc'\n\n    @api.model\n    def default_get(self, fields):\n        res = super(BoxPackage, self).default_get(fields)\n        res['filename'] = None\n        return res\n\n\n    @api.model\n    def _get_default_date_planned_start(self):\n        if self.env.context.get('default_date_deadline'):\n            return fields.Datetime.to_datetime(self.env.context.get('default_date_deadline'))\n        return datetime.datetime.now()\n\n    @api.model\n    def _get_default_picking_type(self):\n        company_id = self.env.context.get('default_company_id', self.env.company.id)\n        return 
self.env['stock.picking.type'].search([\n ('code', '=', 'mrp_operation'),\n ('warehouse_id.company_id', '=', company_id),\n ], limit=1).id\n\n @api.model\n def _get_default_location_src_id(self):\n location = False\n company_id = self.env.context.get('default_company_id', self.env.company.id)\n if self.env.context.get('default_picking_type_id'):\n location = self.env['stock.picking.type'].browse(self.env.context['default_picking_type_id']).default_location_src_id\n if not location:\n location = self.env['stock.warehouse'].search([('company_id', '=', company_id)], limit=1).lot_stock_id\n return location and location.id or False\n\n @api.model\n def _get_default_location_dest_id(self):\n location = False\n company_id = self.env.context.get('default_company_id', self.env.company.id)\n if self._context.get('default_picking_type_id'):\n location = self.env['stock.picking.type'].browse(self.env.context['default_picking_type_id']).default_location_dest_id\n if not location:\n location = self.env['stock.warehouse'].search([('company_id', '=', company_id)], limit=1).lot_stock_id\n return location and location.id or False\n \n name = fields.Char('Reference', readonly=True, default=lambda x: _('New'), copy=False)\n product_id = fields.Many2one('product.product', 'Product', domain=\"[('id', 'in', allowed_product_ids)]\")\n allowed_product_ids = fields.Many2many('product.product', compute='_compute_allowed_product_ids')\n product_qty = fields.Float('Quantity To Produce', default=1.0, digits='Product Unit of Measure')\n bom_id = fields.Many2one('mrp.bom', 'Bill of Material', domain=\"\"\"[\n '&',\n '|',\n ('company_id', '=', False),\n ('company_id', '=', company_id),\n '&',\n '|',\n ('product_id','=',product_id),\n '&',\n ('product_tmpl_id.product_variant_ids','=',product_id),\n ('product_id','=',False),\n ('type', '=', 'normal')]\"\"\",\n check_company=True)\n product_uom_id = fields.Many2one('uom.uom', 'Product Unit of Measure', readonly=True, required=True,\n states={'draft': [('readonly', False)]}, domain=\"[('category_id', '=', product_uom_category_id)]\")\n product_uom_category_id = fields.Many2one(related='product_id.uom_id.category_id') \n date_planned_start = fields.Datetime('Scheduled Date',default=_get_default_date_planned_start)\n product_tmpl_id = fields.Many2one('product.template', 'Product Template', related='product_id.product_tmpl_id')\n user_id = fields.Many2one('res.users', 'Responsible', default=lambda self: self.env.user, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},\n domain=lambda self: [('groups_id', 'in', self.env.ref('mrp.group_mrp_user').id)])\n upload_file = fields.Binary('Upload File', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]} ,copy=False)\n filename = fields.Char(copy=False)\n move_raw_ids = fields.One2many('box.package.move', 'raw_material_box_package_id', string='Components', copy=True)\n picking_type_id = fields.Many2one(\n 'stock.picking.type', 'Operation Type',default=_get_default_picking_type, domain=\"[('code', '=', 'mrp_operation'), ('company_id', '=', company_id)]\", required=True)\n origin = fields.Char('Source', copy=False)\n state = fields.Selection([\n ('draft', 'Draft'),\n ('confirm', 'Confirmed'),\n ('done', 'Done'),\n ('cancel', 'Canceled')], string='State',copy=False, default=\"draft\")\n company_id = fields.Many2one(\n 'res.company', 'Company', default=lambda self: self.env.company,\n index=True, required=True)\n location_src_id = fields.Many2one(\n 'stock.location', 'Components Location',\n 
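# defaults to the picking type's source location, falling back to the warehouse stock location
        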
default=_get_default_location_src_id,\n readonly=True, required=True,\n domain=\"[('usage','=','internal'), '|', ('company_id', '=', False), ('company_id', '=', company_id)]\",\n states={'draft': [('readonly', False)]}, check_company=True)\n location_dest_id = fields.Many2one(\n 'stock.location', 'Finished Products Location',\n default=_get_default_location_dest_id,\n readonly=True, required=True,\n domain=\"[('usage','=','internal'), '|', ('company_id', '=', False), ('company_id', '=', company_id)]\",\n states={'draft': [('readonly', False)]}, check_company=True)\n mo_count = fields.Integer(compute='compute_mo_count')\n\n @api.onchange('upload_file', 'filename')\n def _onchange_upload_file(self):\n if not self.state == 'draft': \n mo_objs = self.env['mrp.production'].search([('box_package_id', '=', self.name),('state','not in',('draft','cancel'))]) \n if mo_objs:\n for mo in mo_objs:\n if mo.unreserve_visible == True or mo.state in ('to_close','progress','confirm'):\n ans = mo.button_unreserve()\n for move in mo.move_raw_ids: \n for move_line in move.move_line_ids: \n move_line.write({\n 'qty_done': move_line.product_uom_qty\n })\n if mo.product_id.tracking == 'serial':\n mo.write({'qty_producing': 0 })\n \n def action_view_mo(self):\n mo_obj = self.env['mrp.production'].search([('box_package_id', '=', self.id)])\n mo_ids = []\n view_id = self.env.ref('mrp.mrp_production_form_view').id\n for each in mo_obj:\n mo_ids.append(each.id)\n if len(mo_ids) <= 1:\n return {\n 'name': _('Manufacturing Orders'),\n 'res_model': 'mrp.production',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'view_id': view_id, \n 'type': 'ir.actions.act_window',\n 'domain': [('box_package_id','=',self.id)],\n 'res_id': mo_ids and mo_ids[0]\n }\n else:\n return {\n 'name': _('Manufacturing Orders'),\n 'res_model': 'mrp.production',\n 'type': 'ir.actions.act_window',\n 'view_mode': 'tree,form',\n 'domain': [('box_package_id','=',self.id)],\n }\n\n def compute_mo_count(self):\n for record in self:\n record.mo_count = self.env['mrp.production'].search_count(\n [('box_package_id', '=', self.id)])\n \n @api.onchange('picking_type_id')\n def onchange_picking_type(self):\n location = self.env.ref('stock.stock_location_stock')\n try:\n location.check_access_rule('read')\n except (AttributeError, AccessError):\n location = self.env['stock.warehouse'].search([('company_id', '=', self.env.company.id)], limit=1).lot_stock_id\n self.location_src_id = self.picking_type_id.default_location_src_id.id or location.id\n self.location_dest_id = self.picking_type_id.default_location_dest_id.id or location.id\n\n\n @api.depends('product_id', 'bom_id', 'company_id')\n def _compute_allowed_product_ids(self):\n for record in self:\n product_domain = [\n ('type', 'in', ['product', 'consu']),\n '|',\n ('company_id', '=', False),\n ('company_id', '=', record.company_id.id)\n ]\n if record.bom_id:\n if record.bom_id.product_id:\n product_domain += [('id', '=', record.bom_id.product_id.id)]\n else:\n product_domain += [('id', 'in', record.bom_id.product_tmpl_id.product_variant_ids.ids)]\n record.allowed_product_ids = self.env['product.product'].search(product_domain)\n\n @api.onchange('bom_id')\n def _onchange_bom_id(self):\n if not self.product_id and self.bom_id:\n self.product_id = self.bom_id.product_id or self.bom_id.product_tmpl_id.product_variant_ids[0]\n self.product_qty = self.bom_id.product_qty or 1.0\n self.product_uom_id = self.bom_id and self.bom_id.product_uom_id.id or self.product_id.uom_id.id\n self.move_raw_ids = [(2, move.id) 
for move in self.move_raw_ids.filtered(lambda m: m.bom_line_id)]\n self.picking_type_id = self.bom_id.picking_type_id or self.picking_type_id\n\n @api.onchange('product_id', 'picking_type_id', 'company_id')\n def onchange_product_id(self):\n \"\"\" Finds UoM of changed product. \"\"\"\n if not self.product_id:\n self.bom_id = False\n elif not self.bom_id or self.bom_id.product_tmpl_id != self.product_tmpl_id or (self.bom_id.product_id and self.bom_id.product_id != self.product_id):\n bom = self.env['mrp.bom']._bom_find(product=self.product_id, picking_type=self.picking_type_id, company_id=self.company_id.id, bom_type='normal')\n if bom:\n self.bom_id = bom.id\n self.product_qty = self.bom_id.product_qty\n self.product_uom_id = self.bom_id.product_uom_id.id\n else:\n self.bom_id = False\n self.product_uom_id = self.product_id.uom_id.id\n\n @api.onchange('bom_id', 'product_id', 'product_qty', 'product_uom_id')\n def _onchange_move_raw(self):\n if not self.bom_id and not self._origin.product_id:\n return\n # Clear move raws if we are changing the product. In case of creation (self._origin is empty),\n # we need to avoid keeping incorrect lines, so clearing is necessary too.\n if self.product_id != self._origin.product_id:\n self.move_raw_ids = [(5,)]\n if self.bom_id and self.product_qty > 0:\n # keep manual entries\n list_move_raw = [(4, box_move.id) for box_move in self.move_raw_ids.filtered(lambda m: not m.bom_line_id)] \n moves_raw_values = self._get_moves_raw_values() \n move_raw_dict = {move.bom_line_id.id: move for move in self.move_raw_ids.filtered(lambda m: m.bom_line_id)} \n for move_raw_values in moves_raw_values:\n if move_raw_values['bom_line_id'] in move_raw_dict:\n # update existing entries\n list_move_raw += [(1, move_raw_dict[move_raw_values['bom_line_id']].id, move_raw_values)]\n else:\n # add new entries\n list_move_raw += [(0, 0, move_raw_values)] \n self.move_raw_ids = list_move_raw\n else:\n self.move_raw_ids = [(2, move.id) for move in self.move_raw_ids.filtered(lambda m: m.bom_line_id)]\n\n def _get_moves_raw_values(self):\n moves = []\n for production in self:\n factor = production.product_uom_id._compute_quantity(production.product_qty, production.bom_id.product_uom_id) / production.bom_id.product_qty\n boms, lines = production.bom_id.explode(production.product_id, factor, picking_type=production.bom_id.picking_type_id)\n for bom_line, line_data in lines:\n if bom_line.child_bom_id and bom_line.child_bom_id.type == 'phantom' or\\\n bom_line.product_id.type not in ['product', 'consu']:\n continue\n operation = bom_line.operation_id.id or line_data['parent_line'] and line_data['parent_line'].operation_id.id\n moves.append(production._get_move_raw_values(\n bom_line.product_id,\n line_data['qty'],\n bom_line.product_uom_id,\n operation,\n bom_line\n ))\n return moves\n\n def _get_move_raw_values(self, product_id, product_uom_qty, product_uom, operation_id=False, bom_line=False):\n data = {\n 'bom_line_id': bom_line.id if bom_line else False,\n 'product_id': product_id.id,\n 'product_uom_qty': product_uom_qty,\n 'product_uom': product_uom.id,\n 'raw_material_box_package_id': self.id,\n 'origin': self.name,\n 'company_id': self.company_id.id,\n }\n return data\n \n @api.model\n def create(self,vals):\n if vals.get('name', _('New')) == _('New'):\n vals['name'] = self.env['ir.sequence'].next_by_code('box.package') or _('New')\n result = super(BoxPackage, self).create(vals)\n return result\n\n def button_done(self):\n if not self.state == 'draft':\n 
self.with_context(assign_lot=1).action_assign()\n mo_objs = self.env['mrp.production'].search([('box_package_id', '=', self.name),('state','in',('confirmed','progress','to_close'))]) \n if mo_objs:\n if any(production.reservation_state != 'assigned' for production in mo_objs):\n raise UserError(_('Please first reserve stock for all the manufacturing order using \"Check Availability\" feature.'))\n if all(production.reservation_state == 'assigned' for production in mo_objs):\n for mo in mo_objs:\n wiz_act = mo.button_mark_done()\n\n if all(production.state == 'done' for production in mo_objs):\n if mo_objs:\n for line in self.move_raw_ids:\n line.quantity_done = sum([move.quantity_done for move in mo_objs.mapped('move_raw_ids').filtered(lambda m: m.product_id.id == line.product_id.id)])\n self.write({'state': 'done'})\n \n def action_cancel(self):\n mo_obj = self.env['mrp.production'].search([('box_package_id', '=', self.id)])\n if mo_obj:\n for rec in mo_obj:\n rec.action_cancel()\n return self.write({'state': 'cancel'})\n\n def action_confirm(self):\n list_create_mo = []\n for rec in range(int(self.product_qty)):\n rec=rec+1\n lst = []\n for i in self.move_raw_ids:\n lst.append((0, 0, {\n 'name': self.name,\n 'product_id': i.product_id.id,\n 'product_uom_qty': i.product_uom_qty / self.product_qty,\n 'product_uom': i.product_uom.id,\n 'company_id': i.company_id.id,\n 'location_id': self.location_src_id.id,\n 'location_dest_id': self.product_id.with_company(self.company_id).property_stock_production.id,\n 'bom_line_id': i.bom_line_id.id if i.bom_line_id else False,\n 'warehouse_id': self.location_src_id.get_warehouse().id,\n 'procure_method': 'make_to_stock',\n }))\n mrp_vales = {\n 'product_id': self.product_id.id,\n 'product_uom_id': self.product_id.uom_id.id,\n 'company_id': self.company_id.id,\n 'bom_id': self.bom_id.id,\n 'product_qty': 1,\n 'picking_type_id': self.picking_type_id.id,\n 'user_id': self.user_id.id,\n 'date_planned_start': self.date_planned_start,\n 'origin': self.origin,\n 'box_package_id': self.id,\n 'location_src_id': self.location_src_id.id,\n 'location_dest_id': self.location_dest_id.id,\n 'move_raw_ids': lst\n }\n list_create_mo.append(mrp_vales)\n mp = self.env['mrp.production'].create(list_create_mo) \n for record in mp:\n record._onchange_move_finished()\n ans = record.action_confirm()\n\n return self.write({'state':'confirm'})\n\n def action_assign(self):\n mo_objs = self.env['mrp.production'].search([('box_package_id', '=', self.id)])\n self._check_filename()\n data = self.import_data_form_file()\n if data:\n index = 0 \n list_keys = list(data.keys())\n unique_list = []\n mo_have_lot = mo_objs.mapped('lot_producing_id').mapped('name')\n already_assigned = []\n for key_data in list_keys:\n dup_found = self.env['stock.production.lot'].search([('name','=', key_data)])\n if not dup_found or key_data in mo_have_lot:\n unique_list.append(key_data)\n else:\n already_assigned.append(key_data)\n for production in mo_objs:\n if len(unique_list) > index:\n lot_name = data.get(unique_list[index])\n self._action_assign(production, lot_name)\n index += 1\n else:\n note = ''\n if len(list_keys) != len(mo_objs):\n note = 'Found only ' + str(len(list_keys)) + ' NFC tags, but required ' + str(len(mo_objs)) + ' NFC tag information.\\n'\n if already_assigned:\n note = note + \", \".join(l for l in already_assigned) + ' NFC tags are already assigned to existing box. Please verify uploaded information. 
'\n if note:\n raise UserError(_(note))\n if self._context.get('assign_lot'):\n index = 0\n for production in mo_objs:\n if not production.lot_producing_id:\n if len(unique_list) > index:\n lot = self.env['stock.production.lot'].create({\n 'name': unique_list[index],\n 'product_id': self.product_id.id,\n 'company_id': self.env.company.id,\n })\n index += 1\n production.write({\n 'lot_producing_id': lot.id,\n })\n production._onchange_producing()\n production._onchange_lot_producing()\n for line in production.move_raw_ids.mapped('move_line_ids'):\n if line.product_uom_qty != line.qty_done:\n line.qty_done = line.product_uom_qty\n\n else:\n raise ValidationError(_(\"Please upload 2 Columns NFC tag and Lot information with data.\"))\n return True\n\n def _action_assign(self, mo_obj, lot_name):\n move_ids = mo_obj.move_raw_ids\n assigned_moves = self.env['stock.move']\n partially_available_moves = self.env['stock.move']\n # Read the `reserved_availability` field of the moves out of the loop to prevent unwanted\n # cache invalidation when actually reserving the move.\n reserved_availability = {move: move.reserved_availability for move in move_ids}\n roundings = {move: move.product_id.uom_id.rounding for move in move_ids}\n move_line_vals_list = []\n not_available_list = []\n for move in move_ids.filtered(lambda m: m.state != 'cancel'):\n rounding = roundings[move]\n missing_reserved_uom_quantity = move.product_uom_qty - reserved_availability[move]\n missing_reserved_quantity = move.product_uom._compute_quantity(missing_reserved_uom_quantity,\n move.product_id.uom_id,\n rounding_method='HALF-UP')\n if move._should_bypass_reservation():\n # create the move line(s) but do not impact quants\n if move.product_id.tracking == 'serial' and (\n move.picking_type_id.use_create_lots or move.picking_type_id.use_existing_lots):\n for i in range(0, int(missing_reserved_quantity)):\n move_line_vals_list.append(move._prepare_move_line_vals(quantity=1))\n else:\n to_update = move.move_line_ids.filtered(lambda ml: ml.product_uom_id == move.product_uom and\n ml.location_id == move.location_id and\n ml.location_dest_id == move.location_dest_id and\n ml.picking_id == move.picking_id and\n not ml.lot_id and\n not ml.package_id and\n not ml.owner_id)\n if to_update:\n to_update[0].product_uom_qty += missing_reserved_uom_quantity\n else:\n move_line_vals_list.append(move._prepare_move_line_vals(quantity=missing_reserved_quantity))\n assigned_moves |= move\n else:\n if float_is_zero(move.product_uom_qty, precision_rounding=move.product_uom.rounding):\n assigned_moves |= move\n elif not move.move_orig_ids:\n if move.procure_method == 'make_to_order':\n continue\n # If we don't need any quantity, consider the move assigned.\n need = missing_reserved_quantity\n lot_id = None\n if move.product_id.tracking == 'none':\n if float_is_zero(need, precision_rounding=rounding):\n assigned_moves |= move\n continue\n else:\n move_line = move.move_line_ids\n if move_line and move_line.filtered(lambda m: m.lot_id.name == lot_name or m.lot_name == lot_name):\n if float_is_zero(need, precision_rounding=rounding):\n assigned_moves |= move\n continue\n lot_id = move_line[0].lot_id\n else:\n move_line.unlink()\n need = move.product_uom_qty\n # Reserve new quants and create move lines accordingly.\n if not lot_id and move.product_id.tracking != 'none':\n lot_id = self.env['stock.production.lot'].search([('product_id', '=', move.product_id.id), ('name', '=', lot_name), ('product_qty', '>=', need)])\n if not lot_id:\n 
not_available_list.append(lot_name)\n continue\n forced_package_id = move.package_level_id.package_id or None\n available_quantity = move._get_available_quantity(move.location_id, lot_id=lot_id, package_id=forced_package_id)\n if available_quantity < need:\n not_available_list.append(lot_name)\n continue\n taken_quantity = move._update_reserved_quantity(need, available_quantity, move.location_id, lot_id=lot_id,\n package_id=forced_package_id, strict=False)\n if float_is_zero(taken_quantity, precision_rounding=rounding):\n continue\n if float_compare(need, taken_quantity, precision_rounding=rounding) == 0:\n assigned_moves |= move\n else:\n partially_available_moves |= move\n else:\n # Check what our parents brought and what our siblings took in order to\n # determine what we can distribute.\n # `qty_done` is in `ml.product_uom_id` and, as we will later increase\n # the reserved quantity on the quants, convert it here in\n # `product_id.uom_id` (the UOM of the quants is the UOM of the product).\n move_lines_in = move.move_orig_ids.filtered(lambda m: m.state == 'done').mapped('move_line_ids')\n keys_in_groupby = ['location_dest_id', 'lot_id', 'result_package_id', 'owner_id']\n\n def _keys_in_sorted(ml):\n return (ml.location_dest_id.id, ml.lot_id.id, ml.result_package_id.id, ml.owner_id.id)\n\n grouped_move_lines_in = {}\n for k, g in groupby(sorted(move_lines_in, key=_keys_in_sorted), key=itemgetter(*keys_in_groupby)):\n qty_done = 0\n for ml in g:\n qty_done += ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n grouped_move_lines_in[k] = qty_done\n move_lines_out_done = (move.move_orig_ids.mapped('move_dest_ids') - move) \\\n .filtered(lambda m: m.state in ['done']) \\\n .mapped('move_line_ids')\n # As we defer the write on the stock.move's state at the end of the loop, there\n # could be moves to consider in what our siblings already took.\n moves_out_siblings = move.move_orig_ids.mapped('move_dest_ids') - move\n moves_out_siblings_to_consider = moves_out_siblings & (assigned_moves + partially_available_moves)\n reserved_moves_out_siblings = moves_out_siblings.filtered(\n lambda m: m.state in ['partially_available', 'assigned'])\n move_lines_out_reserved = (reserved_moves_out_siblings | moves_out_siblings_to_consider).mapped(\n 'move_line_ids')\n keys_out_groupby = ['location_id', 'lot_id', 'package_id', 'owner_id']\n\n def _keys_out_sorted(ml):\n return (ml.location_id.id, ml.lot_id.id, ml.package_id.id, ml.owner_id.id)\n\n grouped_move_lines_out = {}\n for k, g in groupby(sorted(move_lines_out_done, key=_keys_out_sorted),\n key=itemgetter(*keys_out_groupby)):\n qty_done = 0\n for ml in g:\n qty_done += ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n grouped_move_lines_out[k] = qty_done\n for k, g in groupby(sorted(move_lines_out_reserved, key=_keys_out_sorted),\n key=itemgetter(*keys_out_groupby)):\n grouped_move_lines_out[k] = sum(\n self.env['stock.move.line'].concat(*list(g)).mapped('product_qty'))\n available_move_lines = {key: grouped_move_lines_in[key] - grouped_move_lines_out.get(key, 0) for key\n in grouped_move_lines_in.keys()}\n # pop key if the quantity available amount to 0\n available_move_lines = dict((k, v) for k, v in available_move_lines.items() if v)\n\n if not available_move_lines:\n continue\n for move_line in move.move_line_ids.filtered(lambda m: m.product_qty):\n if available_move_lines.get((move_line.location_id, move_line.lot_id,\n move_line.result_package_id, move_line.owner_id)):\n 
available_move_lines[(move_line.location_id, move_line.lot_id, move_line.result_package_id,\n move_line.owner_id)] -= move_line.product_qty\n for (location_id, lot_id, package_id, owner_id), quantity in available_move_lines.items():\n need = move.product_qty - sum(move.move_line_ids.mapped('product_qty'))\n # `quantity` is what is brought by chained done move lines. We double check\n # here this quantity is available on the quants themselves. If not, this\n # could be the result of an inventory adjustment that removed totally of\n # partially `quantity`. When this happens, we chose to reserve the maximum\n # still available. This situation could not happen on MTS move, because in\n # this case `quantity` is directly the quantity on the quants themselves.\n available_quantity = move._get_available_quantity(location_id, lot_id=lot_id,\n package_id=package_id, owner_id=owner_id,\n strict=True)\n if float_is_zero(available_quantity, precision_rounding=rounding):\n continue\n taken_quantity = move._update_reserved_quantity(need, min(quantity, available_quantity),\n location_id, lot_id, package_id, owner_id)\n if float_is_zero(taken_quantity, precision_rounding=rounding):\n continue\n if float_is_zero(need - taken_quantity, precision_rounding=rounding):\n assigned_moves |= move\n break\n partially_available_moves |= move\n if move.product_id.tracking == 'serial':\n move.next_serial_count = move.product_uom_qty\n if not_available_list:\n raise UserError(_(' Lot %s added in uploaded file is not available in stock.', (\", \".join(a for a in set(not_available_list)))))\n\n self.env['stock.move.line'].create(move_line_vals_list)\n partially_available_moves.write({'state': 'partially_available'})\n assigned_moves.write({'state': 'assigned'})\n move_ids.mapped('picking_id')._check_entire_pack()\n\n def import_data_form_file(self): \n # Generating of the excel file to be read by openpyxl\n file = base64.decodebytes(self.upload_file)\n # file = self.upload_file.decode('base64')\n excel_fileobj = TemporaryFile('wb+')\n excel_fileobj.write(file)\n excel_fileobj.seek(0)\n # Create workbook\n workbook = openpyxl.load_workbook(excel_fileobj, data_only=True)\n # Get the first sheet of excel file\n sheet = workbook[workbook.get_sheet_names()[0]] \n if sheet.max_column != 2 and sheet.min_column != 2:\n raise ValidationError(_(\"Please upload 2 Columns NFC tag and Lot information.\")) \n data = {}\n for row in sheet.rows:\n # Get value\n data[row[0].value] = row[1].value \n return data\n\n def _check_filename(self):\n if self.upload_file:\n if not self.filename:\n raise ValidationError(_(\"There is no file\"))\n else:\n # Check the file's extension\n tmp = self.filename.split('.')\n ext = tmp[len(tmp)-1]\n if ext != 'xlsx':\n raise ValidationError(_(\"The file must be a xlsx file\"))\n else:\n raise ValidationError(_(\"Please upload NFC tag and Lot information file.\"))\n\n def unlink(self):\n for record in self:\n if record.state in ('confirm', 'cancel','done'):\n raise UserError(_(\"You can delete only draft state record!\"))\n return super(BoxPackage, self).unlink()\n\n\nclass BoxPackageMove(models.Model):\n _name = 'box.package.move'\n\n raw_material_box_package_id = fields.Many2one(\n 'box.package', 'Box Packaging components', check_company=True, index=True)\n bom_line_id = fields.Many2one('mrp.bom.line', 'BoM Line', check_company=True)\n company_id = fields.Many2one(\n 'res.company', 'Company',\n default=lambda self: self.env.company,\n index=True, required=True)\n product_id = fields.Many2one(\n 
'product.product', 'Product',\n check_company=True,\n domain=\"[('type', 'in', ['product', 'consu']), '|', ('company_id', '=', False), ('company_id', '=', company_id)]\", index=True, required=True)\n product_uom_qty = fields.Float(\n 'Demand',\n digits='Product Unit of Measure',\n default=0.0, required=True)\n product_uom = fields.Many2one('uom.uom', 'Unit of Measure', required=True, domain=\"[('category_id', '=', product_uom_category_id)]\")\n product_uom_category_id = fields.Many2one(related='product_id.uom_id.category_id')\n origin = fields.Char(\"Source Document\")\n quantity_done = fields.Float('Quantity Done', digits='Product Unit of Measure')\n forecast_availability = fields.Float('Forecast Availability', compute='_compute_forecast_information',\n digits='Product Unit of Measure')\n\n def _compute_forecast_information(self):\n for record in self:\n mo_obj = self.env['mrp.production'].search([('box_package_id', '=', record.raw_material_box_package_id.id)])\n forecast_availability = 0\n if mo_obj:\n forecast_availability = sum([data.reserved_availability for data in mo_obj.mapped('move_raw_ids').filtered(lambda m: m.product_id.id == record.product_id.id)])\n record.forecast_availability = forecast_availability\n\n","repo_name":"hilinares1/trivicitiv14","sub_path":"trivicity_erp/models/box_package.py","file_name":"box_package.py","file_ext":"py","file_size_in_byte":35076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4839270236","text":"# ran pip instgall translate in shell\n\nfrom translate import Translator\nEng_trans = Translator(from_lang = 'fr',to_lang='en')\n\ntranslated_dict = {}\nfrench_words= [\"Bonjour\", \"Au revoir\", \"Bienvenue\", \"A bientôt\"] \n\nfor word in french_words:\n translated_dict[word] = Eng_trans.translate(word)\n\nprint(translated_dict)","repo_name":"MiniManch/DI_Bootcamp","sub_path":"Week-10/Day-2/Daily-Challenge/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3426560421","text":"#import nester\n\n#print_lol can be added to a module called \"nester\"\nimport sys\n\ndef print_lol(coll,indent=True,indent_amt=0,dest=sys.stdout):\n for el in coll:\n if isinstance(el,list):\n #indent += 1\n print_lol(el, indent, indent_amt+1, dest)\n else:\n if indent:\n for i in range(indent_amt):\n print(\"\")\n print(\"\\t\", end='', file=dest)\n print(el, file=dest)\n \n'''========BEGIN MAIN========'''\n\nman1 = []\nman2 = []\n\ntry:\n with open('sketch.txt') as data:\n for l in data:\n try:\n (role, line) = l.split(':',1)\n line = line.strip()\n if role == 'Man':\n man1.append(line)\n elif role == 'Other Man':\n man2.append(line)\n\n print(role, end='')\n print(' said: ' , end='')\n print(line, end='')\n except ValueError:\n pass\nexcept IOError as err:\n print('File is missing...quitting :(')\n\ntry:\n with open(\"man1.txt\", \"w\") as man1_file:\n print_lol(man1, dest=man1_file)\n with open(\"man2.txt\", \"w\") as man2_file: \n print_lol(man2, dest=man2_file)\n\nexcept IOError as err:\n print(\"error printing to file...quitting :(\")\n\n\n#print(man1)\n#print(man2)\n","repo_name":"albyh/python_bin","sub_path":"sketch_v1.py","file_name":"sketch_v1.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11677705018","text":"\nfrom time import gmtime, strftime\nfrom django.db 
import models\nfrom django.db.models.fields import CharField\nfrom django.contrib import admin\nfrom ckeditor.fields import RichTextField\nfrom lang import SUPPORTED_LANG, is_traditional\nfrom opencc import convert\n\n\nclass Model(models.Model):\n \"\"\"\n T2S Extension Class\n Usage:\n class YourModel(t2s.Model):\n ...\n \"\"\"\n lang = models.IntegerField(default=1, choices=SUPPORTED_LANG)\n update_keys = None\n\n def getLanguageDisplay(self):\n return SUPPORTED_LANG[self.lang][1]\n\n def getSimplifiedObject(self):\n \"\"\"\n if you don't have `title` field or don't want to let this field\n `title` be the key of language exchange, you should overwrite this\n method\n \"\"\"\n simplifiedTitle = convert(self.title, config='t2s')\n return self.__class__.objects.filter(lang=2, title=simplifiedTitle)[0]\n\n @classmethod\n def getFields(cls):\n return cls._meta.fields\n\n @classmethod\n def cloneWithSimplified(cls, **kwargs):\n \"\"\"\n XXX(Yorkie): currently cannot filter the invalid field, will\n support later.\n\n Description:\n XX.cloneWithSimplified(title=1, name=2)\n if XX doesn't define title filed, now it(program) will breaks,\n if we support the filter, this error will be ignored.\n \"\"\"\n fields = { 'defaults':{} }\n for name, value in kwargs.items():\n val = value\n if type(value) == str or type(value) == unicode:\n val = convert(value, config='t2s')\n try:\n cls.update_keys.index(name)\n fields[name] = val\n except ValueError:\n fields['defaults'][name] = val\n except AttributeError:\n fields[name] = val\n fields['lang'] = 2\n del fields['defaults']['id']\n obj, isNew = cls.objects.get_or_create(**fields)\n \n for name, value in kwargs.items():\n item = getattr(obj, name)\n if isinstance(item, Model):\n setattr(obj, name, item.getSimplifiedObject())\n return (obj, isNew)\n\n class Meta:\n abstract = True\n\n\nclass ModelAdmin(admin.ModelAdmin):\n \"\"\"\n T2S ModelAdmin Class\n Just set `modifyDate` and `createDate` related stuffs\n Usage:\n class YourModelAdmin(t2s.ModelAdmin):\n ...\n \"\"\"\n def __init__(self, model, admin_site):\n self.coverCondition = False\n self._modifyDate = None\n self._createDate = None\n self._hasModifyDate = False\n self._hasCreateDate = False\n self._baseFields = []\n self._richTextFields = []\n\n for field in model.getFields():\n if not self._hasModifyDate:\n self._hasModifyDate = field.name == 'modify_date'\n if not self._hasCreateDate:\n self._hasCreateDate = field.name == 'create_date'\n if type(field) is RichTextField:\n self._richTextFields.append(field)\n else:\n self._baseFields.append(field)\n self.coverCondition = self._hasModifyDate and self._hasCreateDate;\n super(ModelAdmin, self).__init__(model, admin_site)\n\n \"\"\"\n Override `save_model`\n \"\"\"\n def save_model(self, request, obj, form, change):\n cur_time = gmtime()\n model = self.model\n lang = form.cleaned_data.get('lang')\n fields = {}\n\n for field in self._baseFields:\n val = form.cleaned_data.get(field.name)\n fields[field.name] = val\n\n if not lang:\n lang = self.__lang__(form)\n obj.lang = fields['lang'] = lang\n\n if self._hasModifyDate:\n fields['modify_date'] = strftime(\"%Y-%m-%d %H:%M:%S\", cur_time)\n if self._hasCreateDate:\n fields['create_date'] = strftime(\"%Y-%m-%d %H:%M:%S\", cur_time)\n\n if is_traditional(lang):\n \"\"\"\n convert value to simplifiedObj first\n \"\"\"\n simplifiedObj, isNew = model.cloneWithSimplified(**fields)\n if (not (self._hasModifyDate and\n not isNew and simplifiedObj.modify_date > obj.modify_date)):\n for field in 
self._richTextFields:\n text = form.cleaned_data.get(field.name)\n setattr(simplifiedObj, field.name, convert(text))\n fields[field.name] = text\n simplifiedObj.save()\n\n \"\"\"\n convert value to traditional\n \"\"\"\n for name, value in fields.items():\n if type(value) == str or type(value) == unicode:\n setattr(obj, name, convert(value, config='s2t'))\n\n if self._hasModifyDate:\n obj.modify_date = strftime(\"%Y-%m-%d %H:%M:%S\", cur_time)\n if self._hasCreateDate and not obj.create_date:\n obj.create_date = strftime(\"%Y-%m-%d %H:%M:%S\", cur_time)\n\n return super(ModelAdmin, self).save_model(request, obj, form, change)\n\n class Meta:\n abstract = True\n","repo_name":"yorkie/django-t2s","sub_path":"t2s/t2s.py","file_name":"t2s.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26795074619","text":"from tool.runners.python import SubmissionPy\n\n\nclass RemicalixteSubmission(SubmissionPy):\n def run(self, s):\n grid = [[int(c) for c in line] for line in s.split('\\n')]\n view_top_grid = [[0 for _ in range(len(grid[0]))] for _ in range(len(grid))]\n view_bottom_grid = [[0 for _ in range(len(grid[0]))] for _ in range(len(grid))]\n view_left_grid = [[0 for _ in range(len(grid[0]))] for _ in range(len(grid))]\n view_right_grid = [[0 for _ in range(len(grid[0]))] for _ in range(len(grid))]\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n i1, j1 = i, j\n i2, j2 = len(grid) - i - 1, len(grid[0]) - j - 1\n top_left = grid[i1][j1]\n bottom_right = grid[i2][j2]\n\n if j1 > 0:\n view_left_grid[i1][j1] = 1\n j_left = j1 - 1\n while j_left > 0:\n left = grid[i1][j_left]\n if left >= top_left:\n break\n else:\n view_left = view_left_grid[i1][j_left]\n view_left_grid[i1][j1] += view_left\n j_left -= view_left\n\n if i1 > 0:\n view_top_grid[i1][j1] = 1\n i_top = i1 - 1\n while i_top > 0:\n top = grid[i_top][j1]\n if top >= top_left:\n break\n else:\n view_top = view_top_grid[i_top][j1]\n view_top_grid[i1][j1] += view_top\n i_top -= view_top\n\n if j2 < len(grid[0]) - 1:\n view_right_grid[i2][j2] = 1\n j_right = j2 + 1\n while j_right < len(grid[0]) - 1:\n right = grid[i2][j_right]\n if right >= bottom_right:\n break\n else:\n view_right = view_right_grid[i2][j_right]\n view_right_grid[i2][j2] += view_right\n j_right += view_right\n\n if i2 < len(grid) - 1:\n view_bottom_grid[i2][j2] = 1\n i_bottom = i2 + 1\n while i_bottom < len(grid) - 1:\n bottom = grid[i_bottom][j2]\n if bottom >= bottom_right:\n break\n else:\n view_bottom = view_bottom_grid[i_bottom][j2]\n view_bottom_grid[i2][j2] += view_bottom\n i_bottom += view_bottom\n\n return max(view_left_grid[i][j] * view_top_grid[i][j] * view_right_grid[i][j] * view_bottom_grid[i][j] for j in range(len(grid[0])) for i in range(len(grid)))\n\n\ndef test_remicalixte():\n \"\"\"\n Run `python -m pytest ./day-08/part-1/remicalixte.py` to test the submission.\n \"\"\"\n assert (\n RemicalixteSubmission().run(\n \"\"\"\n\"\"\".strip()\n )\n == None\n )\n","repo_name":"badouralix/adventofcode-2022","sub_path":"day-08/part-2/remicalixte.py","file_name":"remicalixte.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"25518284780","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 03 08:08:48 2018\n\n@author: Dario\n\"\"\"\n\nimport numpy as np\nfrom WordFrequency import WordFrequencyDist_D\n\n\ndef preprocessing(pathToFile, mode, 
words_to_idx, word_dict):\n\n def build_vocabulary(raw_scentences):\n words = []\n for scent in raw_scentences:\n word_scent = scent.split()\n words.extend(word_scent)\n word_dist = WordFrequencyDist_D(words)\n # , , , are not words, therfore only 19996\n word_dict = {word_dist[i][0]: word_dist[i][1] for i in range(0, 19996)}\n return word_dict\n \n def Tokenize(raw_scentences, word_dict, makedummies):\n \"\"\"\n Takes raw scentences and the word dictinary as input and returns the \n scentences with new formating:\n - max 30 words per scentence\n - and added at beginning and end of each scentence\n - word replaced with if not in dictionary\n - scentences paded with if shorter than 30 words\n \"\"\"\n words_final = []\n tokenized = []\n batchsize = 64\n for scent in raw_scentences:\n word_scent = scent.split()\n for i in range(0,len(word_scent)):\n if not(word_scent[i] in word_dict):\n word_scent[i] = ''\n \n sentence = ['']\n if len(word_scent) <= 28:\n sentence.extend(word_scent)\n sentence.extend([''])\n sentence.extend(['']*(28-len(word_scent)))\n \n tokenized.append(sentence)\n words_final.extend(sentence)\n if makedummies:\n nr_of_dummy_scentences = batchsize - len(tokenized)%batchsize\n print('len tok ', len(tokenized))\n print('nr dummy scent ', nr_of_dummy_scentences)\n for _ in range(0,nr_of_dummy_scentences):\n tokenized.append(['']*30)\n \n return tokenized, words_final\n\n # load data\n text_file_object = open(pathToFile,'r')\n raw_scentences = text_file_object.readlines()\n text_file_object.close()\n \n\n if mode=='training':\n print('preprocessing for training data')\n # build up the dictionary\n word_dict = build_vocabulary(raw_scentences) \n \n new_scentences, words_final = Tokenize(raw_scentences, word_dict, makedummies=True)\n \n word_dist_final = WordFrequencyDist_D(words_final)\n words_to_idx = {word_dist_final[indx][0]: indx for indx in range(0, 20000)}\n ## Create the training data\n X = [[words_to_idx[w] for w in sent[:-1]] for sent in new_scentences]\n y = [[words_to_idx[w] for w in sent[1:]] for sent in new_scentences]\n \n \n elif mode=='test':\n print('preprocessing for test data')\n # process the test data\n print(len(raw_scentences))\n new_scentences, words_final = Tokenize(raw_scentences, word_dict, makedummies=True)\n \n X = [[words_to_idx[w] for w in sent[:-1]] for sent in new_scentences]\n y = [[words_to_idx[w] for w in sent[1:]] for sent in new_scentences]\n \n elif mode=='1.2':\n print('preprocessing for task 1.2')\n # process the test data\n new_scentences, words_final = Tokenize(raw_scentences, word_dict, makedummies=False)\n \n X = [[words_to_idx[w] for w in sent[:-1]] for sent in new_scentences]\n y = [[words_to_idx[w] for w in sent[1:]] for sent in new_scentences]\n \n return X,y, words_to_idx, word_dict\n\n\n ","repo_name":"MauroLuzzatto/Natural-Language-Understanding","sub_path":"Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24218127079","text":"from titan.api_pkg.apiregistry.get_public_type_specs import get_public_type_specs\n\n\ndef get_helpers(_):\n class Helpers:\n type_spec = _.type_spec\n django_module = type_spec.django_module\n model_field_specs = [x for x in type_spec.get_field_specs() if x.has_model]\n field_specs = sorted(\n [x for x in type_spec.get_field_specs() if is_api_field(x)],\n key=lambda x: x.name,\n )\n type_specs = sorted(\n [\n x.target_type_spec\n for x in 
type_spec.get_field_specs([\"fk\", \"relatedSet\"])\n if x.target_type_spec\n and x.target != _.type_spec.type_name\n and x.has_api\n ],\n key=lambda x: x.type_name,\n )\n\n @property\n def excluded_field_specs(self):\n derived_field_names = [x.name for x in self.field_specs]\n result = [\n field_spec\n for field_spec in self.model_field_specs\n if not field_spec.has_api and field_spec.name not in derived_field_names\n ]\n return sorted(result, key=lambda x: x.name)\n\n return Helpers()\n\n\ndef is_api_field(field_spec):\n return field_spec.field_type == \"tags\" or (\n field_spec.has_api and not field_spec.has_model\n )\n\n\ndef get_meta_data_by_fn(_, __):\n return {\n \".\": {\"name\": \"..\"},\n \"graphql_type.py.j2\": {\n \"name\": f\"{_.type_spec.type_name.lower()}_t.py\",\n },\n }\n\n\ndef get_contexts(_):\n return [\n dict(type_spec=type_spec)\n for type_spec in get_public_type_specs(\n _.api_reg,\n include_stubs=False,\n predicate=lambda type_spec: not type_spec.no_api,\n )\n ]\n","repo_name":"mnieber/moonleap","sub_path":"titan/django_pkg/templates/src/[service]/api/types/[type]/__moonleap__.py","file_name":"__moonleap__.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"528104162","text":"import pandas as pd\nimport numpy as np\nimport dbConnector\nimport logisticRegression\nfrom sklearn.linear_model import LogisticRegression\nimport svmSmo\n\ndef getTargetVariables(oldY):\n 'Convert Y from 0,1,-1 to -1,1'\n Y = []\n for i in range(0, oldY.__len__()):\n if (oldY[i] == 0) or (oldY[i] == -1):\n Y.append(0)\n else:\n Y.append(1)\n return Y\n\ncon = dbConnector.getConnection()\n\nmatch_features = '''SELECT tr.*, tp.goal_diff_home,tp.home_win_perc,tp.home_lose_perc,tp.home_draw_perc,\ntpa.goal_diff_away,tpa.away_win_perc,tpa.away_lose_perc,tpa.away_draw_perc,\ns.full_time_score,\n(if( SUBSTRING_INDEX(s.full_time_score, ' : ', 1) > SUBSTRING_INDEX(s.full_time_score, ' : ', -1), 1,\n (if( SUBSTRING_INDEX(s.full_time_score, ' : ', 1) = SUBSTRING_INDEX(s.full_time_score, ' : ', -1), 0, -1)))) \n as 'match_outcome'\nFROM teamratings tr\ninner join\nteamperformance tp on tr.home_team_id = tp.team_id\ninner join\nteamperformance tpa on tr.away_team_id = tpa.team_id\ninner join\nseason_match_stats s on tr.match_id = s.match_id and tr.home_team_id = s.home_team_id\n;'''\n\nmatch_data = pd.read_sql(match_features,con)\n\n# print('match_data.columns.values: ', match_data.columns.values)\nprint('np.shape(match_data): ', np.shape(match_data))\n\nrowsToIgnore = ['match_id','home_team_id','away_team_id','full_time_score']\nreducedMatchData = match_data.drop(rowsToIgnore, axis=1)\n\n# print('reducedMatchData.columns.values: ', reducedMatchData.columns.values)\nprint('np.shape(reducedMatchData): ', np.shape(reducedMatchData))\n\n# get training and testing matrix\nrows, columns = np.shape(reducedMatchData)\ntrainingLimit = rows - int(rows * 0.01) # 10 percent data for testing\n\n# last column - match_outcome is response (1 - win, 0 - draw, -1 - loss)\nreducedMatchData = np.mat(reducedMatchData)\nxTraining = reducedMatchData[:trainingLimit, 0:columns-1]\nyTraining = reducedMatchData[:trainingLimit, columns-1]\nxTest = reducedMatchData[trainingLimit:, 0:columns-1]\nyTest = reducedMatchData[trainingLimit:, columns-1]\n\nprint('trainingLimit: ', trainingLimit)\nprint('np.shape(xTraining): ', np.shape(xTraining))\nprint('np.shape(yTraining): ', np.shape(yTraining))\nprint('np.shape(xTest): ', 
np.shape(xTest))\nprint('np.shape(yTest): ', np.shape(yTest))\n\n# convert test from win vs loss (draw is loss)\nyTraining = np.mat(getTargetVariables(yTraining))\nyTest = np.mat(getTargetVariables(yTest)) # y in 0 and 1s for logistic regression\n\n# test\nxTraining = np.squeeze(np.asarray(xTraining))\nyTraining = np.squeeze(np.asarray(yTraining))\nxTest = np.squeeze(np.asarray(xTest))\nyTest = np.squeeze(np.asarray(yTest))\n\n'''\nprint('np.shape(xTraining): ',np.shape(xTraining))\nprint('np.shape(yTraining): ', np.shape(yTraining.transpose()))\nprint(type(xTraining))\nprint(type(yTraining))\n'''\n\nlogisticRegression.logisticReg(xTraining,yTraining.transpose(),xTest, yTest.transpose())\n\nclf = LogisticRegression(random_state=0, solver='lbfgs', multi_class = 'multinomial').fit(xTraining, yTraining.transpose())\nprint('clf.predict(xTest): \\n', clf.predict(xTest))\nprint(yTest)\nprint('clf.predict_proba(X):\\n', clf.predict_proba(xTest))\nprint(clf.score(xTest, yTest.transpose()))\n\n\nsvmSmo.customSvm(xTraining, yTraining, xTest, yTest)","repo_name":"ssnavalakha/IntelligentFootballPredictions","sub_path":"classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29695456826","text":"\"\"\"\nUm hacker teve acesso a um log de transações com cartões de\ncrédito. O log é uma lista de tuplos, cada um com os dados de uma transação,\nnomedamente o cartão que foi usado, podendo alguns dos números estar\nocultados com um *, e o email do dono do cartão.\n\nPretende-se que implemente uma função que ajude o hacker a \nreconstruir os cartões de crédito, combinando os números que estão\nvisíveis em diferentes transações. 
Caso haja uma contradição nos números \nvisíveis deve ser dada prioridade à transção mais recente, i.é, a que\naparece mais tarde no log.\n\nA função deve devolver uma lista de tuplos, cada um com um cartão e um email,\ndando prioridade aos cartões com mais digitos descobertos e, em caso de igualdade\nneste critério, aos emails menores (em ordem lexicográfica).\n\"\"\"\n\ndef hacker(log):\n dic = {}\n \n for cc, email in log:\n if email in dic:\n dic[email] = ccMatcher(cc, dic[email])\n else:\n dic[email] = cc\n \n return sorted([(v, k) for k, v in dic.items()], key = lambda x: (x[0].count('*'), x[1]))\n \ndef ccMatcher(cc1, cc2):\n res = \"\"\n \n for i in range(0, 16):\n if cc2[i] != '*' and cc1[i] == '*':\n res += cc2[i]\n elif cc2[i] == '*' and cc1[i] != '*':\n res += cc1[i]\n elif cc2[i] != '*' and cc1[i] != '*':\n res += cc1[i]\n else:\n res += '*'\n \n return res\n","repo_name":"quantik-git/Laboratorios-Algoritmia-II","sub_path":"treino01/hacker.py","file_name":"hacker.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37275349506","text":"import pyscf\nimport pyscf.gto\nimport pyscf.scf\nimport pyscf.mcscf\n\nimport vayesta\nimport vayesta.ewf\n\nmol = pyscf.gto.Mole()\nmol.atom = [\"N 0 0 0\", \"N 0 0 2\"]\nmol.basis = \"aug-cc-pvdz\"\nmol.output = \"pyscf.out\"\nmol.build()\n\n# Hartree-Fock\nmf = pyscf.scf.RHF(mol)\nmf.kernel()\n\n# Reference CASCI\ncasci = pyscf.mcscf.CASCI(mf, 8, 10)\ncasci.kernel()\n\n# Reference CASSCF\ncasscf = pyscf.mcscf.CASSCF(mf, 8, 10)\ncasscf.kernel()\n\n\ndef get_emb_result(ansatz, bathtype=\"full\"):\n # Uses fastest available solver for given ansatz; PySCF if available, otherwise ebcc.\n emb = vayesta.ewf.EWF(\n mf, solver=ansatz, bath_options=dict(bathtype=bathtype), solver_options=dict(solve_lambda=False)\n )\n # Both these alternative specifications will always use an ebcc solver.\n # Note that the capitalization of the solver name other than the ansatz is arbitrary.\n # emb = vayesta.ewf.EWF(mf, solver=f'EB{ansatz}', bath_options=dict(bathtype=bathtype),\n # solver_options=dict(solve_lambda=False))\n # emb = vayesta.ewf.EWF(mf, solver='ebcc', bath_options=dict(bathtype=bathtype),\n # solver_options=dict(solve_lambda=False, ansatz=ansatz))\n\n with emb.iao_fragmentation() as f:\n with f.rotational_symmetry(2, \"y\", center=(0, 0, 1)):\n f.add_atomic_fragment(0)\n emb.kernel()\n return emb.e_tot\n\n\ne_ccsd = get_emb_result(\"CCSD\", \"full\")\ne_ccsdt = get_emb_result(\"CCSDT\", \"dmet\")\ne_ccsdtprime = get_emb_result(\"CCSDt'\", \"full\")\n\nprint(\"E(HF)= %+16.8f Ha\" % mf.e_tot)\nprint(\"E(CASCI)= %+16.8f Ha\" % casci.e_tot)\nprint(\"E(CASSCF)= %+16.8f Ha\" % casscf.e_tot)\nprint(\"E(CCSD, complete)= %+16.8f Ha\" % e_ccsd)\nprint(\"E(emb. CCSDT, DMET CAS)= %+16.8f Ha\" % e_ccsdt)\nprint(\"E(emb. 
CCSDt', complete+DMET active space)= %+16.8f Ha\" % e_ccsdtprime)\n","repo_name":"BoothGroup/Vayesta","sub_path":"examples/ewf/molecules/26-ebcc-solvers.py","file_name":"26-ebcc-solvers.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"31"} +{"seq_id":"28890022325","text":"# -*- coding: utf-8 -*-\n\"\"\"Integration test for function `import_csv`.\"\"\"\nfrom io import StringIO\nfrom contextlib import closing\n\nfrom minerva_csvimporter import import_csv\nfrom minerva_csvimporter.profile import Profile\n\nfrom minerva_db import connect\n\n\ndef test_timestamp_source_data():\n with closing(connect()) as conn:\n readfile = StringIO(\n u'CIC;CCR;Drops;t_source\\n'\n '10023;0.9919;17;20111111_0000\\n'\n '10047;0.9963;18;20101010_0000\\n')\n\n profile = Profile({\n \"storage\": {\n \"type\": \"trend\",\n \"config\": {\n \"granularity\": 86400,\n \"datasource\": \"integration_test\",\n \"timestamp_is_start\": False\n }\n },\n \"identifier\": {\n \"template\": \"Cell={CIC}\",\n \"regex\": \"(.*)\"\n },\n \"timestamp\": {\n \"type\": \"from_column\",\n \"config\": {\n \"format\": \"%Y%m%d_%H%M\",\n \"name\": \"t_source\",\n \"timezone\": \"UTC\"\n }\n },\n \"identifier_is_alias\": False,\n \"field_selector\": {\n \"type\": \"all\"\n },\n \"fields\": {},\n \"timestamp_is_start\": True,\n \"character_encoding\": \"utf-8\",\n \"dialect\": {\n \"type\": \"auto\"\n },\n \"value_mapping\": {}\n })\n\n import_csv(conn, profile, readfile)\n\n\ndef test_timestamp_as_data():\n with closing(connect()) as conn:\n readfile = StringIO(\n u'ts;CIC;CCR;Drops;created\\n'\n '20140511_1300;10023;0.9919;17;20111111_0000\\n'\n '20140511_1300;10047;0.9963;18;20101010_0000\\n')\n\n profile = Profile({\n \"storage\": {\n \"type\": \"trend\",\n \"config\": {\n \"granularity\": 86400,\n \"datasource\": \"integration_test\",\n \"timestamp_is_start\": False\n }\n },\n \"identifier\": {\n \"template\": \"Cell={CIC}\",\n \"regex\": \"(.*)\"\n },\n \"timestamp\": {\n \"type\": \"from_column\",\n \"config\": {\n \"format\": \"%Y%m%d_%H%M\",\n \"name\": \"ts\",\n \"timezone\": \"UTC\"\n }\n },\n \"identifier_is_alias\": False,\n \"field_selector\": {\n \"type\": \"all\"\n },\n \"timestamp_is_start\": True,\n \"character_encoding\": \"utf-8\",\n \"dialect\": {\n \"type\": \"auto\"\n },\n \"fields\": {\n \"created\": {\n \"datatype\": \"timestamp\",\n \"string_format\": {\n \"format\": \"%Y%m%d_%H%M\"\n }\n }\n }\n })\n\n import_csv(conn, profile, readfile)","repo_name":"hendrikx-itc/minerva-etl-46","sub_path":"csv-importer/integration_tests/test_csvimporter.py","file_name":"test_csvimporter.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"42958256864","text":"\"\"\"\r\nReplace None - Write a function that takes a list of input values where it replaces the None values with previous number value.\r\nTime complexity: O(n) \r\nSpace complexity: O(1)\r\n\"\"\"\r\ndef replaceNone(ls):\r\n if (len(ls) == 0):\r\n return []\r\n prev = None\r\n for i in range(len(ls)):\r\n if ls[i]:\r\n prev = ls[i]\r\n else:\r\n ls[i] = prev\r\n if not prev:\r\n return []\r\n return ls\r\n\r\nassert replaceNone([1, None, 2, None, 3, None, None, 5, None]) == [1, 1, 2, 2, 3, 3, 3, 5, 5]\r\nassert replaceNone([None, None, 8, None]) == [None, None, 8, 8]\r\nassert replaceNone([None]) == 
[]","repo_name":"myli5/Python-examples","sub_path":"ReplaceNone.py","file_name":"ReplaceNone.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72186907288","text":"import time\nfrom tqdm import tqdm\ndef optimize(config_lists,trial,nb_trials=10,nb_iteration=3):\n #Compute default value for every parameter\n default_config = {} #best config found yet (default at first)\n doc = {}\n for param_name in config_lists:\n default_config[param_name] = config_lists[param_name][0] #the default config is the first value of each parameter in their respective lists of values\n #Optimize each parameter one by one\n for it in range(nb_iteration): #Run nb_iteration steps of optimization\n doc = {}\n for param_name in config_lists: #Select a parameter\n print(param_name)\n doc[param_name] = {}\n #Compute score for each value of parameter\n param_vals = config_lists[param_name]\n scores = [0]*len(param_vals)\n for idx_val in range(len(param_vals)): #Select a value of the parameter\n doc[param_name][param_vals[idx_val]] = []\n print(' ',param_vals[idx_val])\n for t in tqdm(range(nb_trials)):\n config_param_val = default_config.copy() #Get the best config found yet \n config_param_val[param_name] = param_vals[idx_val] #Change only one parameter\n score_param,doc_param = trial(**config_param_val) #Run the test and get results\n scores[idx_val] += score_param/nb_trials #Add the score to the average\n doc[param_name][param_vals[idx_val]].append((config_param_val,doc_param)) #Add data to the doc\n #Find the best value for the parameter\n for i,s in enumerate(scores):\n if s == max(scores): \n default_config[param_name] = param_vals[i] #Put it in the best config found yet\n return default_config,doc\n","repo_name":"Daetheys/ParameterOptimizer","sub_path":"optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15608948855","text":"def solution(money):\n #dp1, dp2의 각 인덱스 자리의 값은 각 집의 인덱스를 지나갈때 그 위치에서 낼수 있는 최대의 값을 저장\n dp1 = [0] * len(money)\n dp1[0] = money[0]\n dp1[1] = max(money[0], money[1])\n\n for i in range(2, len(money)-1): # 첫 집을 무조건 터는 경우\n dp1[i] = max(dp1[i-1], money[i]+dp1[i-2])\n\n dp2 = [0] * len(money)\n dp2[0] = 0\n dp2[1] = money[1]\n\n for i in range(2, len(money)): # 마지막 집을 무조건 터는 경우\n dp2[i] = max(dp2[i-1], money[i]+dp2[i-2])\n\n return max(max(dp1), max(dp2)) # 두 경우 중 최대","repo_name":"JooaeSon/Daily_CodingTest","sub_path":"Programmers/Lv.4/도둑질.py","file_name":"도둑질.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"42417203721","text":"\nfrom leapp.libraries.actor import setuptargetrepos_repomap\nfrom leapp.libraries.common.config.version import get_source_major_version\nfrom leapp.libraries.stdlib import api\nfrom leapp.models import (\n CustomTargetRepository,\n RepositoriesBlacklisted,\n RepositoriesFacts,\n RepositoriesMapping,\n RepositoriesSetupTasks,\n RHELTargetRepository,\n RHUIInfo,\n SkippedRepositories,\n TargetRepositories,\n UsedRepositories\n)\n\n\ndef _get_enabled_repoids():\n \"\"\"\n Collects repoids of all enabled repositories on the source system.\n\n :param repositories_facts: Iterable of RepositoriesFacts containing info about repositories on the source system.\n :returns: Set of all enabled repository IDs present on the source 
system.\n :rtype: Set[str]\n \"\"\"\n enabled_repoids = set()\n for repos in api.consume(RepositoriesFacts):\n for repo_file in repos.repositories:\n for repo in repo_file.data:\n if repo.enabled:\n enabled_repoids.add(repo.repoid)\n return enabled_repoids\n\n\ndef _get_blacklisted_repoids():\n repos_blacklisted = set()\n for blacklist in api.consume(RepositoriesBlacklisted):\n repos_blacklisted.update(blacklist.repoids)\n return repos_blacklisted\n\n\ndef _get_custom_target_repos():\n custom_repos = []\n for repo in api.consume(CustomTargetRepository):\n custom_repos.append(repo)\n return custom_repos\n\n\ndef _get_used_repo_dict():\n \"\"\"\n Return dict: {used_repoid: [installed_packages]}\n \"\"\"\n used = {}\n for used_repos in api.consume(UsedRepositories):\n for used_repo in used_repos.repositories:\n used[used_repo.repository] = used_repo.packages\n return used\n\n\ndef _setup_repomap_handler(src_repoids):\n repo_mappig_msg = next(api.consume(RepositoriesMapping), RepositoriesMapping())\n rhui_info = next(api.consume(RHUIInfo), RHUIInfo(provider=''))\n repomap = setuptargetrepos_repomap.RepoMapDataHandler(repo_mappig_msg, cloud_provider=rhui_info.provider)\n # TODO(pstodulk): what about skip this completely and keep the default 'ga'..?\n default_channels = setuptargetrepos_repomap.get_default_repository_channels(repomap, src_repoids)\n repomap.set_default_channels(default_channels)\n return repomap\n\n\ndef _get_mapped_repoids(repomap, src_repoids):\n mapped_repoids = set()\n src_maj_ver = get_source_major_version()\n for repoid in src_repoids:\n if repomap.get_pesid_repo_entry(repoid, src_maj_ver):\n mapped_repoids.add(repoid)\n return mapped_repoids\n\n\ndef process():\n # load all data / messages\n used_repoids_dict = _get_used_repo_dict()\n enabled_repoids = _get_enabled_repoids()\n excluded_repoids = _get_blacklisted_repoids()\n custom_repos = _get_custom_target_repos()\n\n # TODO(pstodulk): isn't that a potential issue that we map just enabled repos\n # instead of enabled + used repos??\n # initialise basic data\n repomap = _setup_repomap_handler(enabled_repoids)\n mapped_repoids = _get_mapped_repoids(repomap, enabled_repoids)\n skipped_repoids = enabled_repoids & set(used_repoids_dict.keys()) - mapped_repoids\n\n # Now get the info what should be the target RHEL repositories\n expected_repos = repomap.get_expected_target_pesid_repos(enabled_repoids)\n target_rhel_repoids = set()\n for target_pesid, target_pesidrepo in expected_repos.items():\n if not target_pesidrepo:\n # With the original repomap data, this should not happen (this should\n # currently point to a problem in our data\n # TODO(pstodulk): add report? inhibitor? what should be in the report?\n api.current_logger().error(\n 'Missing target repository from the {} family (PES ID).'\n .format(target_pesid)\n )\n continue\n if target_pesidrepo.repoid in excluded_repoids:\n api.current_logger().debug('Skipping the {} repo (excluded).'.format(target_pesidrepo.repoid))\n continue\n target_rhel_repoids.add(target_pesidrepo.repoid)\n\n # FIXME: this could possibly result into a try to enable multiple repositories\n # from the same family (pesid). 
But unless we have a bug in previous actors,\n # it should not happen :) it's not blocker error anyway, so survive it.\n # - We expect to deliver the fix as part of the refactoring when we merge\n # setuptargetrepos & peseventsscanner actors together (+ blacklistrepos?)\n for task in api.consume(RepositoriesSetupTasks):\n for repo in task.to_enable:\n if repo in excluded_repoids:\n api.current_logger().debug('Skipping the {} repo from setup task (excluded).'.format(repo))\n continue\n target_rhel_repoids.add(repo)\n\n # create the final lists and sort them (for easier testing)\n rhel_repos = [RHELTargetRepository(repoid=repoid) for repoid in sorted(target_rhel_repoids)]\n custom_repos = [repo for repo in custom_repos if repo.repoid not in excluded_repoids]\n custom_repos = sorted(custom_repos, key=lambda x: x.repoid)\n\n if skipped_repoids:\n pkgs = set()\n for repo in skipped_repoids:\n pkgs.update(used_repoids_dict[repo])\n api.produce(SkippedRepositories(repos=sorted(skipped_repoids), packages=sorted(pkgs)))\n\n api.produce(TargetRepositories(\n rhel_repos=rhel_repos,\n custom_repos=custom_repos,\n ))\n","repo_name":"holser/leapp-repository","sub_path":"repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py","file_name":"setuptargetrepos.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"69887293850","text":"import pygame\nimport math\nfrom pygame.sprite import Sprite\n\nclass Background(Sprite):\n\n def __init__(self, game):\n Sprite.__init__(self)\n \n self.image = pygame.image.load(game.assets[\"BG_FULL\"]).convert()\n \n _logo = pygame.image.load(game.assets[\"LOGO\"]).convert_alpha()\n _logo_rect = _logo.get_rect()\n\n self.rect = pygame.rect.Rect(0,0, game.size[0], game.size[1])\n \n #если запихают другого размера\n self.image = pygame.transform.smoothscale(self.image, game.size)\n\n #logo, так как статичный рендер\n _logo_rect = _logo_rect.move(50,36)\n self.image.blit(_logo, _logo_rect)\n\n _line = pygame.Rect(45, 80, 645, 3)\n pygame.draw.rect(self.image,(255,255,255), _line)\n\n _bg = pygame.surface.Surface(\n [450, 420], pygame.SRCALPHA, 32).convert_alpha()\n _bg.fill([255, 255, 255, 20])\n \n self.image.blit(_bg, (240, 90))\n\n #end of init\n\n \n def draw(self, renderer):\n renderer.blit(self.image, self.rect)\n \n #end of draw\n\n#end of Background","repo_name":"eXponenta/pyDash","sub_path":"background.py","file_name":"background.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28282076946","text":"import torch.nn as nn\nfrom torch.optim import Adam\nfrom src.utils import set_device\nfrom src.models import BaseModel, Unsqueeze\nfrom torch.distributions import Categorical\n\n\nclass MNISTConvNetV2(BaseModel):\n \"\"\"\n Convolutional neural network for MNIST classification with dropout layers \n and ReLU activations. 
Architecture from BatchBALD paper.\n \"\"\"\n def __init__(self, n_train, device=None):\n super().__init__(n_train=n_train, device=device)\n self.device = set_device(device)\n\n self.likelihood = 'classification'\n self.noise_scale = 1 # Not used for classification\n\n # First conv block (conv, dropout, max-pool)\n self.conv_1 = nn.Conv2d(1, 32, kernel_size=5)\n self.conv_1_drop = nn.Dropout2d(p=0.5)\n self.conv_1_mp = nn.MaxPool2d(kernel_size=2)\n\n # Second conv block (conv, dropout, max-pool)\n self.conv_2 = nn.Conv2d(32, 64, kernel_size=5)\n self.conv_2_drop = nn.Dropout2d(p=0.5)\n self.conv_2_mp = nn.MaxPool2d(kernel_size=2)\n\n # Fully connected block\n self.fc_1 = nn.Linear(1024, 128)\n self.fc_1_drop = nn.Dropout(p=0.5)\n self.fc_2 = nn.Linear(128, 10)\n\n # All modules in order\n self.ordered_modules = nn.ModuleList([\n Unsqueeze(dim=1), # Add empty dimension as input channel\n self.conv_1,\n self.conv_1_drop,\n self.conv_1_mp,\n nn.ReLU(),\n self.conv_2,\n self.conv_2_drop,\n self.conv_1_mp,\n nn.ReLU(),\n nn.Flatten(start_dim=1),\n self.fc_1,\n self.fc_1_drop,\n nn.ReLU(),\n self.fc_2\n ])\n\n # Move to model to device\n self.to(device=self.device)\n\n\n def log_prior(self):\n return 0\n\n\n def log_density(self, model_output, target):\n return Categorical(logits=model_output).log_prob(target)\n\n\n def optimizer(self, weight_decay=0, lr=1e-3): \n optimizer = Adam(self.parameters(), weight_decay=weight_decay, lr=lr)\n \n return optimizer\n\n\nif __name__ == '__main__':\n model = MNISTConvNetV2(n_train=100, device='cpu')\n total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(total_params) # 184,586","repo_name":"jonasvj/active-learning","sub_path":"src/models/mnist_conv_net_v2.py","file_name":"mnist_conv_net_v2.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24081437422","text":"# Here is the link to this Python coding challenge\n# https://www.codewars.com/kata/5ba58763924c945f950000a5/train/python\ndef RadioDataSystem():\n str_input = True\n arr = ['']\n while str_input:\n input_string = input('Type any string or \"endstr\" to end\\n')\n if input_string != 'endstr':\n counter = 0\n for n in range(len(arr)):\n # print(arr[n], input_string)\n # print(arr[n] == input_string)\n if arr[n] != input_string:\n counter += 1\n if counter == len(arr):\n arr.append(input_string)\n # print(f'counter = {counter}')\n # del arr[0]\n print(arr)\n print(' '.join(arr))\n else:\n str_input = False\n del arr[0]\n # arr.pop(0)\n print(arr)\n print(' '.join(arr))\nRadioDataSystem()\n\n# Here set an array or list and has a single space as the initial value. The purpose of this is to have a basis\n# when we are trying to make a comparison in the if statement \"if arr[n] != input_string:\". We increment the counter\n# by one on every iteration if it detects a False, then we compare the counter to the length of the list \"arr\",\n# if it equals the length of arr, then that is the time we append the 'input_string' meaning in the given arr/list,\n# it currently has no same value of string as the 'input_string. 
All of these are inside a while statement,\n# setting first the value of str_input to True, and making it to False if the user inputs endstr to stop and output\n# the final string\n","repo_name":"carleyeshield21/PythonCodingChallenges","sub_path":"RadioDataSystem.py","file_name":"RadioDataSystem.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38556276480","text":"from itertools import combinations\nfrom collections import Counter\n\ndef solution(orders, course):\n answer = []\n\n for c in course:\n tmp = []\n\n for order in orders:\n comb = combinations(sorted(order), c)\n tmp += comb\n cnt = Counter(tmp)\n\n if cnt:\n max_cnt = max(cnt.values())\n if max_cnt < 2:\n continue\n \n for key, value in cnt.items():\n if cnt[key] == max_cnt:\n answer.append(''.join(key))\n answer.sort()\n return answer\n\n\nprint(solution([\"ABCFG\", \"AC\", \"CDE\", \"ACDE\", \"BCFG\", \"ACDEH\"], [2, 3, 4]))\n\n","repo_name":"HYEEWON/solve-algorithm-problems","sub_path":"programmers/2021_kakao/210822_메뉴리뉴얼_L2.py","file_name":"210822_메뉴리뉴얼_L2.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12088454403","text":"import os\nimport numpy as np\nimport argparse\nimport cv2\nfrom tqdm import tqdm\n\nfrom segmentation.unet.dataset.dataset_generator import SteelDatasetGenerator\n\nboxes_colors = [\n (255, 0, 0),\n (0, 255, 0),\n (0, 0, 255),\n (255, 255, 0)\n]\n\n\ndef boxes_by_mask(mask):\n contours, _ = cv2.findContours(\n mask.copy(),\n cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE\n )\n\n return [cv2.boundingRect(contour) for contour in contours]\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Generate YOLO dataset')\n\n parser.add_argument('--images-folder', required=True, type=str,\n help='Path to folder with images.')\n parser.add_argument('--table-csv', required=True, type=str,\n help='Path to csv table dataset file.')\n parser.add_argument('--result-folder', required=True, type=str,\n help='Path to result folder.')\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n dataset = SteelDatasetGenerator(\n args.images_folder,\n args.table_csv,\n False,\n 0.0\n )\n\n if not os.path.isdir(args.result_folder):\n os.makedirs(args.result_folder)\n\n for i in tqdm(range(len(dataset))):\n image_tensor, masks_tensor = dataset[i]\n\n save_image_path = os.path.join(\n args.result_folder,\n '{}_image.jpg'.format(i + 1)\n )\n\n image = (image_tensor.squeeze(0).numpy() * 255.0).astype(np.uint8)\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)\n\n for cls, ch in enumerate(masks_tensor):\n channel_mask = (ch.numpy() * 255.0).astype(np.uint8)\n\n for rect in boxes_by_mask(channel_mask):\n x, y, w, h = rect\n image = cv2.rectangle(\n image,\n (x, y),\n (x + w, y + h),\n boxes_colors[cls],\n 2\n )\n\n cv2.imwrite(save_image_path, image)\n","repo_name":"AlexeySrus/KaggleSteelDefect","sub_path":"dataset_preparation/drawn_boxes_on_steel_dataset.py","file_name":"drawn_boxes_on_steel_dataset.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1992311515","text":"import turtle\nimport random\nimport time\n\n# Set up the screen\nwn = turtle.Screen()\nwn.title(\"Flappy Bird\")\nwn.bgcolor(\"blue\")\nwn.setup(width=500, height=500)\n\ndefault_sizer = 7\n\n# Draw the 
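The counter-based duplicate check explained above can be expressed more directly with a membership test; a hedged, order-preserving equivalent of the same append-if-new logic:

def dedup_append(arr, item):
    # Same effect as counting mismatches against every element:
    # append only when the item is not already present.
    if item not in arr:
        arr.append(item)
    return arr

print(dedup_append(["a", "b"], "a"))  # ['a', 'b']
print(dedup_append(["a", "b"], "c"))  # ['a', 'b', 'c']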
bird\nbird = turtle.Turtle()\nbird.color(\"yellow\")\nbird.shape(\"circle\")\nbird.penup()\nbird.speed(0)\nbird.goto(0, 0)\n\n# Draw the pipes\npipes = []\nfor i in range(2):\n pipe = turtle.Turtle()\n pipe.color(\"green\")\n pipe.shape(\"square\")\n pipe.shapesize(stretch_wid=20, stretch_len=3)\n pipe.penup()\n pipe.speed(0)\n pipe.goto(200 + i * 200, random.randint(-200, 200))\n pipes.append(pipe)\n\n# Define the bird's movement\ndef bird_jump():\n bird.sety(bird.ycor() + 40)\n\n# Add event listener for bird jumping\nwn.listen()\nwn.onkeypress(bird_jump, \"space\")\n\nwn.update()\n\n# Move the pipes\nfor pipe in pipes:\n pipe.setx(pipe.xcor() - 5)\n if pipe.xcor() < -200:\n pipe.goto(400, random.choice([-200, 200]))\n\ntime.sleep(0.2)\n\n# Game loop\nwhile True:\n wn.update()\n\n # Move the pipes\n for pipe in pipes:\n pipe.setx(pipe.xcor() - 5)\n if pipe.xcor() < -200:\n pipe.goto(400, random.choice([-200, 200]))\n\n bird.sety(bird.ycor() - 5)\n\n # # Check for collision with pipes\n for pipe in pipes:\n # Get bounding box of bird\n bird_x1, bird_y1, bird_x2, bird_y2 = (\n bird.xcor()-default_sizer, \n bird.ycor()-default_sizer,\n bird.xcor()+default_sizer,\n bird.ycor()+default_sizer\n )\n # Get bounding box of pipe\n pipe_x1, pipe_y1, pipe_x2, pipe_y2 = (\n pipe.xcor()-(pipe.shapesize()[0]+default_sizer), \n pipe.ycor()-(pipe.shapesize()[1]*22*3), \n pipe.xcor()+(pipe.shapesize()[0]+default_sizer), \n pipe.ycor()+(pipe.shapesize()[1]*22*3)\n )\n if (bird_x1 < pipe_x2 and bird_x2 > pipe_x1) and ((bird_y1 < pipe_y2 and bird_y2 > pipe_y1) or (bird_y1 > pipe_y2 and bird_y2 < pipe_y1)):\n # DEBUG: Draw bounding boxes\n turtle.penup()\n turtle.goto(bird_x1, bird_y1)\n turtle.pendown()\n turtle.goto(bird_x1, bird_y2)\n turtle.goto(bird_x2, bird_y2)\n turtle.goto(bird_x2, bird_y1)\n turtle.goto(bird_x1, bird_y1)\n turtle.penup()\n turtle.penup()\n turtle.goto(pipe_x1, pipe_y1)\n turtle.pendown()\n turtle.goto(pipe_x1, pipe_y2)\n turtle.goto(pipe_x2, pipe_y2)\n turtle.goto(pipe_x2, pipe_y1)\n turtle.goto(pipe_x1, pipe_y1)\n turtle.penup()\n\n print(\"Game Over\")\n time.sleep(2)\n exit()\n\n","repo_name":"Phlair/games-by-chatgpt","sub_path":"flappy-bird/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30644596431","text":"import sys, os, glob\nimport random\nimport numpy as np\nfrom common.data_io import read_binvox\n\nDEVELOPMENT = os.getenv(\"ENV\") == \"DEVELOPMENT\"\nif not DEVELOPMENT:\n import matplotlib\n matplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nplt.style.use(\"ggplot\")\n\ndef plot_voxels(in_path, title, out_path):\n files = glob.glob(in_path + '*.binvox')\n \n # plot\n for i, path in enumerate(files):\n print(path)\n voxel = read_binvox(path)\n\n plot_voxel(voxel.astype(np.bool),\n '{}{}.png'.format(out_path, i))\n\ndef plot_voxel(voxel, title=None, save_file = None):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.voxels(voxel, edgecolor='k')\n # plt.title(title)\n if save_file is None:\n plt.show()\n else:\n plt.savefig(save_file)\n\ndef main():\n # check arguments\n if len(sys.argv) == 1:\n print(\"usage: python visualize.py ()\")\n sys.exit()\n \n # .binvox paths\n path = sys.argv[1]\n if path.split('.')[-1] == 'binvox':\n files = [path]\n else:\n if path[-1] != '/':\n path += '/'\n print(path)\n files = glob.glob(path + '*.binvox')\n\n if len(files) == 0:\n print(\"invalid path 
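The collision condition above compares bounding-box corners inline; a small stand-alone sketch of the standard axis-aligned bounding-box (AABB) overlap test it approximates (the coordinates are hypothetical):

def boxes_overlap(a, b):
    # Each box is (x1, y1, x2, y2) with x1 < x2 and y1 < y2.
    ax1, ay1, ax2, ay2 = a
    bx1, by1, bx2, by2 = b
    # Boxes overlap iff their projections overlap on both axes.
    return ax1 < bx2 and ax2 > bx1 and ay1 < by2 and ay2 > by1

print(boxes_overlap((0, 0, 14, 14), (10, 10, 20, 20)))  # True
print(boxes_overlap((0, 0, 14, 14), (15, 0, 20, 5)))    # False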
(.binvox file not found)\")\n sys.exit()\n else:\n print(\"{} files found\".format(len(files)))\n \n # save directory\n save_dir = None\n if len(sys.argv) > 2:\n save_dir = sys.argv[2]\n if save_dir[-1] != '/':\n save_dir += '/'\n os.makedirs(save_dir, exist_ok=True)\n \n # plot\n for i, path in enumerate(files):\n # print('plotting {}/{}'.format(i+1, len(files)))\n voxel = read_binvox(path, fix_coords=False)\n filename = path.split('/')[-1]\n\n if save_dir is None:\n plot_voxel(voxel.astype(np.bool), filename)\n else:\n plot_voxel(voxel.astype(np.bool), filename,\n '{}{}.png'.format(save_dir, i))\n\nif __name__==\"__main__\":\n main()\n","repo_name":"raahii/3dgan-chainer","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"31"} +{"seq_id":"11421501175","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n/***************************************************************************\n Chole\n A QGIS plugin\n description\n -------------------\n begin : 2017-10-17\n author : Jean-Charles Naud, Olivier Bedel, Hugues Boussard\n\n email : hugues.boussard at inra.fr\n ***************************************************************************/\n\n\"\"\"\n\nfrom builtins import str\n__author__ = 'Jean-Charles Naud/Alkante'\n__date__ = '2017-10-17'\n\n\n# This will get replaced with a git SHA1 when you do a git archive\n\n__revision__ = '$Format:%H$'\n\n\nimport os\nimport io\nimport subprocess\nimport time\nfrom qgis.PyQt.QtCore import QSettings\nfrom qgis.core import QgsVectorFileWriter\n\nfrom qgis.core import (\n QgsProcessingAlgorithm,\n QgsProcessingParameterVectorLayer,\n QgsProcessingParameterRasterLayer,\n QgsProcessingParameterMultipleLayers,\n QgsProcessingParameterField,\n QgsProcessingParameterNumber,\n QgsProcessingParameterBoolean,\n QgsProcessingParameterEnum,\n QgsProcessingParameterString,\n QgsProcessingParameterFeatureSource,\n QgsProcessingParameterFile,\n QgsProcessingOutputVectorLayer,\n QgsProcessingOutputRasterLayer,\n QgsProcessingParameterFileDestination,\n QgsProcessingParameterRasterDestination,\n QgsProcessingOutputFolder,\n QgsProcessingFeedback\n)\n\nfrom processing.tools import dataobjects, vector\n\nfrom processing.core.ProcessingConfig import ProcessingConfig\n\nfrom processing.tools.system import getTempFilename, isWindows, isMac\n\nfrom osgeo import osr\nfrom time import gmtime, strftime\n\nfrom ast import literal_eval\n\n\nfrom qgis.PyQt.QtGui import QIcon\nfrom ..ChloeUtils import ChloeUtils\nimport tempfile\n\n\n# Mother class\nfrom ..chloe_algorithm import ChloeAlgorithm\nfrom ..chloe_algorithm_dialog import ChloeASCParameterFileDestination\n\nclass FilterAlgorithm(ChloeAlgorithm):\n \"\"\"\n Algorithm filtering ascii grid\n \"\"\"\n \n def __init__(self):\n super().__init__()\n\n def initAlgorithm(self, config=None):\n # === INPUT PARAMETERS ===\n inputAscParam = QgsProcessingParameterRasterLayer(\n name=self.INPUT_ASC,\n description=self.tr('Input layer asc'))\n inputAscParam.setMetadata({\n 'widget_wrapper': {\n 'class': 'Chloe.chloe_algorithm_dialog.ChloeAscRasterWidgetWrapper'\n }\n })\n self.addParameter(inputAscParam)\n \n ascFilterParam = QgsProcessingParameterRasterLayer(\n name=self.ASCII_FILTER,\n description=self.tr('Ascii Grid Filter'))\n ascFilterParam.setMetadata({\n 'widget_wrapper': {\n 'class': 'Chloe.chloe_algorithm_dialog.ChloeAscRasterWidgetWrapper'\n }\n })\n self.addParameter(ascFilterParam)\n \n fieldsParam = 
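A self-contained sketch of the headless-plotting pattern used above (selecting the Agg backend before importing pyplot so figures can be saved without a display); the tiny voxel grid and output filename are made up:

import matplotlib
matplotlib.use("Agg")  # must run before pyplot is imported
import matplotlib.pyplot as plt
import numpy as np

voxel = np.zeros((4, 4, 4), dtype=bool)
voxel[1:3, 1:3, 1:3] = True  # a 2x2x2 cube

fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.voxels(voxel, edgecolor="k")
fig.savefig("voxel.png")  # written to disk, no window opened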
QgsProcessingParameterString(\n name= self.FILTER_VALUES,\n description=self.tr('Filter value(s)'),\n defaultValue='')\n fieldsParam.setMetadata({\n 'widget_wrapper': {\n 'class': 'Chloe.chloe_algorithm_dialog.ChloeValuesWidgetWrapper'\n }\n })\n self.addParameter(fieldsParam)\n \n # === OUTPUT PARAMETERS ===\n \n \n fieldsParam = ChloeASCParameterFileDestination(\n name=self.OUTPUT_ASC,\n description=self.tr('Output Raster ascii'))\n\n self.addParameter(fieldsParam, createOutput=True)\n\n self.addParameter(QgsProcessingParameterFileDestination(\n name=self.SAVE_PROPERTIES,\n description=self.tr('Properties file'),\n fileFilter='Properties (*.properties)'))\n\n def name(self):\n return 'filter'\n\n def displayName(self):\n return self.tr('filter')\n\n def group(self):\n return self.tr('util')\n\n def groupId(self):\n return 'util'\n\n def commandName(self):\n return 'filter'\n\n def PreRun(self, parameters, context, feedback, executing=True):\n \"\"\"Here is where the processing itself takes place.\"\"\"\n print('processAlgorithm')\n # === INPUT\n self.input_asc = self.parameterRasterAsFilePath(\n parameters, self.INPUT_ASC, context)\n self.ascii_filter = self.parameterRasterAsFilePath(\n parameters, self.ASCII_FILTER, context)\n self.filter_values = self.parameterAsString(\n parameters, self.FILTER_VALUES, context)\n\n # === OUTPUT\n self.output_asc = self.parameterAsString(\n parameters, self.OUTPUT_ASC, context)\n \n self.setOutputValue(self.OUTPUT_ASC, self.output_asc)\n\n # Constrution des chemins de sortie des fichiers\n base_in = os.path.basename(self.input_asc)\n name_in = os.path.splitext(base_in)[0]\n #ext_in = os.path.splitext(base_in)[1]\n\n dir_out = os.path.dirname(self.output_asc)\n base_out = os.path.basename(self.output_asc)\n name_out = os.path.splitext(base_out)[0]\n #ext_out = os.path.splitext(base_out)[1]\n #feedback.pushInfo('self.f_path')\n\n # === SAVE_PROPERTIES\n f_save_properties = self.parameterAsString(\n parameters, self.SAVE_PROPERTIES, context)\n\n if f_save_properties:\n self.f_path = f_save_properties\n else:\n if not self.f_path:\n self.f_path = getTempFilename(ext=\"properties\")\n\n # === Properties file\n self.createPropertiesTempFile()\n # Create Properties file (temp or chosed)\n\n # === CORE\n #commands = self.getConsoleCommandsJava(f_save_properties)\n\n #commands = self.getConsoleCommands(parameters, context, feedback, executing=True)\n #print('------- before')\n #ChloeUtils.runChole(commands, feedback)\n #print('------- after')\n # === Projection file\n f_prj = dir_out+os.sep+name_out+\".prj\"\n self.createProjectionFile(f_prj)\n\n def createPropertiesTempFile(self):\n \"\"\"Create Properties File.\"\"\"\n s_time = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n with open(self.f_path, \"w+\") as fd:\n fd.write(\"#\"+s_time+\"\\n\")\n fd.write('treatment=filter'+\"\\n\")\n fd.write( ChloeUtils.formatString('input_ascii='+self.input_asc +\"\\n\",isWindows())) \n fd.write( ChloeUtils.formatString('ascii_filter='+self.ascii_filter +\"\\n\",isWindows())) \n fd.write( ChloeUtils.formatString('output_asc=' +self.output_asc+\"\\n\",isWindows())) \n fd.write(\"filter_values={\" + self.filter_values +\"}\\n\")\n fd.write(\"visualize_ascii=false\\n\")\n","repo_name":"hboussard/chloe_qgis","sub_path":"algorithms/filter_algorithm.py","file_name":"filter_algorithm.py","file_ext":"py","file_size_in_byte":6702,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"16406611868","text":"import csv\n\n\nclass 
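The createPropertiesTempFile() method above writes a Java-style .properties file by hand; a generic sketch of the same pattern (the keys and values are illustrative):

from time import gmtime, strftime

def write_properties(path, pairs):
    # Timestamp comment header followed by one key=value per line.
    with open(path, "w") as fd:
        fd.write("#" + strftime("%Y-%m-%d %H:%M:%S", gmtime()) + "\n")
        for key, value in pairs.items():
            fd.write("{}={}\n".format(key, value))

write_properties("run.properties",
                 {"treatment": "filter", "visualize_ascii": "false"})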
StrategyImporter(object):\n\t\"\"\"\n\t\"\"\"\n\thard_strategy = {}\n\tsoft_strategy = {}\n\tpair_strategy = {}\n\tdealer_strategy = {}\n\n\tdef __init__(self, player_file):\n\t\tself.player_file = player_file\n\n\tdef import_player_strategy(self):\n\t\thard = 21\n\t\tsoft = 21\n\t\tpair = 20\n\n\t\twith open(self.player_file, 'rU') as player_csv:\n\t\t\treader = csv.DictReader(player_csv, delimiter = ';')\n\t\t\tfor row in reader:\n\t\t\t\tif hard >= 5:\n\t\t\t\t\tself.hard_strategy[hard] = row\n\t\t\t\t\thard -= 1 \n\t\t\t\telif soft >= 12:\n\t\t\t\t\tself.soft_strategy[soft] = row\n\t\t\t\t\tsoft -= 1\n\t\t\t\telif pair >= 4:\n\t\t\t\t\tself.pair_strategy[pair] = row\n\t\t\t\t\tpair -= 2\n\n\t\treturn self.hard_strategy, self.soft_strategy, self.pair_strategy","repo_name":"seblau/BlackJack-Simulator","sub_path":"importer/StrategyImporter.py","file_name":"StrategyImporter.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"31"} +{"seq_id":"5541104691","text":"import logging\nfrom typing import Dict, Optional, Union\n\nfrom manager.apis.base import BaseResource, Router\nfrom manager.schemas import model\nfrom manager.src.controllers.image import ImageOCRController\n\nlogger = logging.getLogger(__name__)\n\n\n@Router.prefix(\"image\", summary=\"图像识别\")\nclass ImageOcrResource(BaseResource):\n @Router.post(\"/ocr\", summary=\"图片链接识别\")\n def image_url_search(\n self, url: str = model.Source(embed=True, description=\"图片地址\", data_from=\"JSON\")\n ):\n c = ImageOCRController()\n return c.image_ocr(url)\n\n @Router.post(\"/ocr/file\", summary=\"图片文件识别\")\n def image_file_search(\n self,\n file: model.File = model.Source(\n embed=True, description=\"图片文件\", data_from=\"FILE\"\n ),\n ):\n c = ImageOCRController()\n return c.image_file_ocr(file)\n","repo_name":"yuyuyuhaoshi/OcrManager","sub_path":"manager/apis/v1/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24300519176","text":"#!/usr/bin/env python3\nimport os\nimport subprocess\n\nwith open('payload/sbx/sbx', 'rb') as f:\n stage2 = f.read()\n\nwith open('payload/loader/loader.bin', 'rb') as f:\n stage1 = f.read()\n\nwith open('stage0.bin', 'rb') as f:\n stage0 = f.read()\n\ndef js_repr(_b):\n return ', '.join(map(hex, map(int, _b)))\n\noutput = '''\nconst stage0 = [\n %s\n];\nconst stage1 = [\n %s\n];\nconst stage2 = [\n %s\n];\n\nstage1Arr = new Uint8Array(stage1);\nstage2Arr = new Uint8Array(stage2);\n''' % (\n js_repr(stage0),\n js_repr(stage1),\n js_repr(stage2),\n)\noutput = output[1:]\n\nwith open('payload.js', 'w') as f:\n f.write(output)\n","repo_name":"sslab-gatech/pwn2own2020","sub_path":"make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":391,"dataset":"github-code","pt":"31"} +{"seq_id":"13942375502","text":"\"\"\"\n@author: chkarada\n\"\"\"\n\n# Note that this file picks the clean speech files randomly, so it does not guarantee that all\n# source files will be used\n\n\nimport os\nimport glob\nimport argparse\nimport ast\nimport configparser as CP\nfrom itertools import repeat\nimport multiprocessing\nfrom multiprocessing import Pool\nimport random\nfrom random import shuffle\nimport librosa\nimport numpy as np\nfrom audiolib_multimic import is_clipped, audioread, audiowrite, pyroom_mixer, activitydetector\nimport utils\nimport 
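StrategyImporter above iterates csv.DictReader rows with a ';' delimiter; a minimal stand-alone demonstration of that API (the header and rows are invented):

import csv
import io

sample = "hard;soft;pair\nH;S;P\nS;D;SP\n"
for row in csv.DictReader(io.StringIO(sample), delimiter=";"):
    print(row)  # each row is a dict keyed by the header fields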
json\n\nPROCESSES = multiprocessing.cpu_count()\nMAXTRIES = 50\nMAXFILELEN = 100\n\nnp.random.seed(2)\nrandom.seed(3)\n\nclean_counter = None\nnoise_counter = None\ninterf_counter = None\n\nusers = []\n\ndef init(args1, args2, args3):\n ''' store the counter for later use '''\n global clean_counter, noise_counter, interf_counter\n clean_counter = args1\n noise_counter = args2\n interf_counter = args3\n\n\ndef build_audio(audio_type, params, filenum, audio_samples_length=-1, soi_user=\"\"):\n '''Construct an audio signal from source files'''\n\n fs_output = params['fs']\n silence_length = params['silence_length']\n if audio_samples_length == -1:\n audio_samples_length = int(params['audio_length']*params['fs'])\n\n output_audio = np.zeros(0)\n remaining_length = audio_samples_length\n files_used = []\n clipped_files = []\n\n global clean_counter, noise_counter, interf_counter, users\n if audio_type == \"clean\":\n this_user = random.choice(users)\n while len(params['cleanfilenames_byuser'][this_user]) < 2: #we need more than 1 recording of the same user for embedding\n this_user = random.choice(users)\n source_files = params['cleanfilenames_byuser'][this_user] #make sure that these are from the same user\n idx_counter = clean_counter\n elif audio_type == \"interf\":\n this_user = random.choice(users)\n while this_user == soi_user: #we need it to be another user than the SOI, to avoid confusion\n this_user = random.choice(users)\n source_files = params['cleanfilenames_byuser'][this_user] #make sure that these are from the same user\n idx_counter = interf_counter\n elif audio_type == \"noise\":\n source_files = params['noisefilenames']\n idx_counter = noise_counter\n else:\n assert True, \"invalid audio type\"\n\n # initialize silence\n silence = np.zeros(int(fs_output*silence_length))\n\n # iterate through multiple clips until we have a long enough signal\n tries_left = MAXTRIES\n while remaining_length > 0 and tries_left > 0:\n\n # read next audio file and resample if necessary\n with idx_counter.get_lock():\n idx_counter.value += 1\n idx = idx_counter.value % np.size(source_files)\n\n input_audio, fs_input = audioread(source_files[idx])\n if fs_input != fs_output:\n input_audio = librosa.resample(input_audio, orig_sr=fs_input, target_sr=fs_output)\n\n # if current file is longer than remaining desired length, and this is\n # noise generation or this is training set, subsample it randomly\n if len(input_audio) > remaining_length and (not (audio_type == \"clean\" or audio_type == \"interf\") or not params['is_test_set']):\n idx_seg = np.random.randint(0, len(input_audio)-remaining_length)\n input_audio = input_audio[idx_seg:idx_seg+remaining_length]\n\n # check for clipping, and if found move onto next file\n if is_clipped(input_audio):\n clipped_files.append(source_files[idx])\n tries_left -= 1\n continue\n\n # concatenate current input audio to output audio stream\n files_used.append(source_files[idx])\n output_audio = np.append(output_audio, input_audio)\n remaining_length -= len(input_audio)\n\n # add some silence if we have not reached desired audio length\n if remaining_length > 0:\n silence_len = min(remaining_length, len(silence))\n output_audio = np.append(output_audio, silence[:silence_len])\n remaining_length -= silence_len\n\n if tries_left == 0:\n print(\"Audio generation failed for filenum \" + str(filenum))\n return [], [], clipped_files\n \n if audio_type == \"clean\" or audio_type == \"interf\":\n return output_audio, files_used, clipped_files, this_user\n else:\n return 
output_audio, files_used, clipped_files, \"\"\n\n\ndef gen_audio(audio_type, params, filenum, audio_samples_length=-1, soi_user=\"\"):\n '''Calls build_audio() to get an audio signal, and verify that it meets the\n activity threshold'''\n\n clipped_files = []\n low_activity_files = []\n if audio_samples_length == -1:\n audio_samples_length = int(params['audio_length']*params['fs'])\n \n if audio_type == \"clean\":\n activity_threshold = params['clean_activity_threshold']\n elif audio_type == \"interf\":\n activity_threshold = params['interf_activity_threshold']\n elif audio_type == \"noise\":\n activity_threshold = params['noise_activity_threshold']\n else:\n assert True, \"invalid audio type\"\n\n while True:\n if audio_type == \"interf\":\n audio, source_files, new_clipped_files, user = \\\n build_audio(audio_type, params, filenum, audio_samples_length, soi_user=soi_user)\n else:\n audio, source_files, new_clipped_files, user = \\\n build_audio(audio_type, params, filenum, audio_samples_length)\n\n clipped_files += new_clipped_files\n if len(audio) < audio_samples_length:\n continue\n\n if activity_threshold == 0.0:\n break\n\n percactive = activitydetector(audio=audio)\n if percactive > activity_threshold:\n break\n else:\n low_activity_files += source_files\n\n return audio, source_files, clipped_files, low_activity_files, user\n\n\ndef main_gen(params, filenum):\n '''Calls gen_audio() to generate the audio signals, verifies that they meet\n the requirements, and writes the files to storage'''\n\n print(\"Generating file #\" + str(filenum))\n\n clean_clipped_files = []\n clean_low_activity_files = []\n noise_clipped_files = []\n noise_low_activity_files = []\n\n user = \"\"\n while True:\n # generate clean speech\n clean, clean_source_files, clean_cf, clean_laf, user = \\\n gen_audio(\"clean\", params, filenum)\n # generate noise\n noise, noise_source_files, noise_cf, noise_laf, _ = \\\n gen_audio(\"noise\", params, filenum, len(clean))\n \n interf_num = np.random.randint(params['interf_lower'], params['interf_upper'])\n audio_samples_length = int(params['audio_length']*params['fs'])\n interf_source_files = []\n \n clean_clipped_files += clean_cf\n clean_low_activity_files += clean_laf\n noise_clipped_files += noise_cf\n noise_low_activity_files += noise_laf\n\n # mix clean speech and noise\n # if specified, use specified SNR value\n if not params['randomize_snr']:\n snr = params['snr']\n # use a randomly sampled SNR value between the specified bounds\n else:\n snr = np.random.randint(params['snr_lower'], params['snr_upper'])\n \n \n sir = 100\n interfs = []\n interf_ids = []\n \n # generate interferences\n for interf_i in range(interf_num):\n interf, this_interf_source_files, interf_cf, interf_laf, interf_user = \\\n gen_audio(\"interf\", params, filenum, len(clean), soi_user=user)\n while interf_user in interf_ids:\n interf, this_interf_source_files, interf_cf, interf_laf, interf_user = \\\n gen_audio(\"interf\", params, filenum, len(clean), soi_user=user)\n interf_ids.append(interf_user)\n interfs.append(interf)\n interf_source_files += this_interf_source_files\n \n # mix clean speech and interferences\n # if specified, use specified SIR value\n if not params['randomize_sir']:\n sir = params['sir']\n # use a randomly sampled SIR value between the specified bounds\n elif interf_num > 0:\n sir = np.random.randint(params['sir_lower'], params['sir_upper'])\n \n if not params['randomize_rt60']:\n rt60 = params['rt60']\n else:\n rt60 = 
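build_audio() above grows a signal clip-by-clip and tops it up with silence; a simplified sketch of that pad-to-length idea (the sample rate and values are placeholders):

import numpy as np

def pad_with_silence(audio, target_len, fs, silence_s=0.2):
    # Append short silences until the target sample count is reached,
    # then trim any overshoot.
    silence = np.zeros(int(fs * silence_s))
    out = np.asarray(audio, dtype=float)
    while len(out) < target_len:
        out = np.append(out, silence)
    return out[:target_len]

print(len(pad_with_silence([0.1] * 100, 480, fs=16000)))  # 480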
(params['rt60_upper']-params['rt60_lower'])*np.random.random_sample() + params['rt60_lower']\n \n clean_snr, beam_int, beam_soi, target_level, mic = pyroom_mixer(params=params, \n clean=clean,\n noise=noise,\n interfs=interfs,\n snr=snr,\n sir=sir,\n rt60=rt60)\n \n # unexpected clipping\n if is_clipped(clean_snr) or is_clipped(beam_int) or is_clipped(beam_soi):\n continue\n else:\n break\n\n # write resultant audio streams to files\n hyphen = '-'\n clean_source_filenamesonly = [i[:-4].split(os.path.sep)[-1] for i in clean_source_files]\n clean_files_joined = hyphen.join(clean_source_filenamesonly)[:MAXFILELEN]\n noise_source_filenamesonly = [i[:-4].split(os.path.sep)[-1] for i in noise_source_files]\n noise_files_joined = hyphen.join(noise_source_filenamesonly)[:MAXFILELEN]\n\n clean_source_files_not_used = [p for p in params['cleanfilenames_byuser'][user] if p not in clean_source_files]\n embed_source_file = random.choice(clean_source_files_not_used)\n\n #noisyfilename = clean_files_joined + '_' + noise_files_joined + '_snr' + \\\n # str(snr)+ '_sir' + str(sir) + '_fileid_' + str(filenum) + '.wav'\n noisyfilename = 'snr' + str(snr) + '_rt' + str(f'{rt60:.2f}') + '_ints' + str(interf_num) + '_sir' + str(sir) + '_' + clean_files_joined + '_' + noise_files_joined + '_fileid_' + str(filenum) + '.wav'\n cleanfilename = 'clean_fileid_'+str(filenum)+'.wav'\n interffilename = 'interf_fileid_'+str(filenum)+'.wav'\n micfilename = 'mic_fileid_'+str(filenum)+'.wav'\n\n soipath = os.path.join(params['noisyspeech_dir'], noisyfilename)\n cleanpath = os.path.join(params['clean_proc_dir'], cleanfilename)\n interfpath = os.path.join(params['noise_proc_dir'], interffilename)\n micpath = os.path.join(params['mic_proc_dir'], micfilename)\n\n audio_signals = [beam_soi, clean_snr, beam_int, mic]\n file_paths = [soipath, cleanpath, interfpath, micpath]\n \n for i in range(len(audio_signals)):\n try:\n audiowrite(file_paths[i], audio_signals[i], params['fs'])\n except Exception as e:\n print(str(e))\n pass\n\n #creating info text file with paths of clean file, beamform interference output, and interference files\n infofilename = noisyfilename.replace(\".wav\",\".txt\")\n infopath = os.path.join(params['noisyspeech_dir'], infofilename)\n info_file = open(infopath,\"w\")\n info_file.write(os.path.join(os.getcwd(),cleanpath)+\"\\n\")\n info_file.write(os.path.join(os.getcwd(),embed_source_file)+\"\\n\")\n info_file.write(os.path.join(os.getcwd(),interfpath)+\"\\n\")\n info_file.write(os.path.join(os.getcwd(),micpath)+\"\\n\")\n for interf_file in interf_source_files:\n info_file.write(os.path.join(os.getcwd(),interf_file)+\"\\n\")\n info_file.close()\n\n return clean_source_files, clean_clipped_files, clean_low_activity_files, \\\n noise_source_files, noise_clipped_files, noise_low_activity_files\n\n\ndef extract_list(input_list, index):\n output_list = [i[index] for i in input_list]\n flat_output_list = [item for sublist in output_list for item in sublist]\n flat_output_list = sorted(set(flat_output_list))\n return flat_output_list\n\n\ndef main_body():\n '''Main body of this file'''\n\n parser = argparse.ArgumentParser()\n\n # Configurations: read noisyspeech_synthesizer.cfg and gather inputs\n parser.add_argument('--cfg', default='noisyspeech_synthesizer_multimic.cfg',\n help='Read noisyspeech_synthesizer.cfg for all the details')\n parser.add_argument('--cfg_str', type=str, default='noisy_speech')\n parser.add_argument('--set_to_create', type=str, default='None')\n args = parser.parse_args()\n\n params = 
dict()\n params['args'] = args\n cfgpath = os.path.join(os.path.dirname(__file__), args.cfg)\n assert os.path.exists(cfgpath), f'No configuration file as [{cfgpath}]'\n\n cfg = CP.ConfigParser()\n cfg._interpolation = CP.ExtendedInterpolation()\n cfg.read(cfgpath)\n params['cfg'] = cfg._sections[args.cfg_str]\n cfg = params['cfg']\n\n clean_dir = os.path.join(os.path.dirname(__file__), 'CleanSpeech')\n if cfg['speech_dir'] != 'None':\n clean_dir = cfg['speech_dir']\n if not os.path.exists(clean_dir):\n assert False, ('Clean speech data is required, '+clean_dir+' does not exists')\n\n noise_dir = os.path.join(os.path.dirname(__file__), 'Noise')\n if cfg['noise_dir'] != 'None':\n noise_dir = cfg['noise_dir']\n if not os.path.exists(noise_dir):\n assert False, ('Noise data is required')\n\n params['fs'] = int(cfg['sampling_rate'])\n params['audioformat'] = cfg['audioformat']\n params['audio_length'] = float(cfg['audio_length'])\n params['silence_length'] = float(cfg['silence_length'])\n params['total_hours'] = float(cfg['total_hours'])\n \n if cfg['fileindex_start'] != 'None' and cfg['fileindex_start'] != 'None':\n params['fileindex_start'] = int(cfg['fileindex_start'])\n params['fileindex_end'] = int(cfg['fileindex_end']) \n params['num_files'] = int(params['fileindex_end'])-int(params['fileindex_start'])\n else:\n params['num_files'] = int((params['total_hours']*60*60)/params['audio_length'])\n\n params['is_test_set'] = utils.str2bool(cfg['is_test_set'])\n params['clean_activity_threshold'] = float(cfg['clean_activity_threshold'])\n params['noise_activity_threshold'] = float(cfg['noise_activity_threshold'])\n params['interf_activity_threshold'] = float(cfg['interf_activity_threshold'])\n params['snr_lower'] = int(cfg['snr_lower'])\n params['snr_upper'] = int(cfg['snr_upper'])\n params['randomize_snr'] = utils.str2bool(cfg['randomize_snr'])\n params['interf_lower'] = int(cfg['interf_lower'])\n params['interf_upper'] = int(cfg['interf_upper'])\n params['sir_lower'] = int(cfg['sir_lower'])\n params['sir_upper'] = int(cfg['sir_upper'])\n params['randomize_sir'] = utils.str2bool(cfg['randomize_sir'])\n params['room_dim_x'] = float(cfg['room_dim_x'])\n params['room_dim_y'] = float(cfg['room_dim_y'])\n params['rt60_lower'] = float(cfg['rt60_lower'])\n params['rt60_upper'] = float(cfg['rt60_upper'])\n params['randomize_rt60'] = utils.str2bool(cfg['randomize_rt60'])\n params['micdist_lower'] = float(cfg['micdist_lower'])\n params['micdist_upper'] = float(cfg['micdist_upper'])\n params['phase_threshold'] = float(cfg['phase_threshold'])\n params['target_level_lower'] = int(cfg['target_level_lower'])\n params['target_level_upper'] = int(cfg['target_level_upper'])\n \n if 'snr' in cfg.keys():\n params['snr'] = int(cfg['snr'])\n else:\n params['snr'] = int((params['snr_lower'] + params['snr_upper'])/2)\n\n if 'speech_csv' in cfg.keys() and cfg['speech_csv'] != 'None':\n cleanfilenames = pd.read_csv(cfg['speech_csv'])\n cleanfilenames = cleanfilenames['filename']\n else:\n cleanfilenames = glob.glob(os.path.join(clean_dir, params['audioformat']))\n params['cleanfilenames'] = cleanfilenames\n shuffle(params['cleanfilenames'])\n \n if(os.path.exists(\"cleanfilenames_byuser.json\")):\n print(\"Loading data from cleanfilenames_byuser.json\")\n json_file = open(\"cleanfilenames_byuser.json\",\"r\")\n params['cleanfilenames_byuser_full'] = json.load(json_file)\n json_file.close()\n \n users_full = list(params['cleanfilenames_byuser_full'].keys())\n else:\n print(\"Building cleanfilenames_byuser data\")\n 
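The cleanfilenames_byuser.json handling above is a load-or-build cache; the same pattern as a small reusable sketch (the builder here is a dummy):

import json
import os

def load_or_build(cache_path, build_fn):
    # Reuse the cached JSON when present, otherwise build and persist it.
    if os.path.exists(cache_path):
        with open(cache_path) as f:
            return json.load(f)
    data = build_fn()
    with open(cache_path, "w") as f:
        json.dump(data, f, indent=4)
    return data

by_user = load_or_build("cleanfilenames_byuser.json",
                        lambda: {"reader_001": ["a.wav", "b.wav"]})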
users_full = [os.path.basename(path).split(\".wav\")[0].split(\"_\")[-2] for path in params['cleanfilenames']]\n params['cleanfilenames_byuser_full'] = {}\n user_i = 0\n user_num = len(users_full)\n for i in users_full:\n this_user_files = [s for s in params['cleanfilenames'] if \"reader_\"+i in s]\n params['cleanfilenames_byuser_full'][i] = this_user_files\n print(str(user_i)+\"/\"+str(user_num)+\" : \"+format((1 + user_i) / user_num, \" 3.1%\"), end='\\r')\n user_i += 1\n #if user_i >= 10:\n # break\n print(\"\")\n \n print(\"Storing data in cleanfilenames_byuser.json\")\n json_file = open(\"cleanfilenames_byuser.json\",\"w\")\n json.dump(params['cleanfilenames_byuser_full'], json_file, indent=4)\n json_file.close()\n\n global users\n \n if args.set_to_create == 'None':\n params['set_to_create'] = cfg['set_to_create']\n else:\n params['set_to_create'] = args.set_to_create\n \n if params['set_to_create'] == 'train':\n print(\"Creating training set...\")\n params['train_perc'] = float(cfg['train_perc'])\n \n new_users_len = int(len(users_full)*params['train_perc'])\n users = users_full[:new_users_len]\n \n params['cleanfilenames_byuser'] = {k: params['cleanfilenames_byuser_full'][k] for k in users} \n \n params['num_files'] = int(params['num_files']*params['train_perc'])\n \n params['noisyspeech_dir'] = utils.get_dir(cfg, 'noisy_destination', 'noisy')\n params['clean_proc_dir'] = utils.get_dir(cfg, 'clean_destination', 'clean')\n params['noise_proc_dir'] = utils.get_dir(cfg, 'noise_destination', 'noise')\n params['mic_proc_dir'] = utils.get_dir(cfg, 'mic_destination', 'mic')\n \n elif params['set_to_create'] == 'validtest':\n print(\"Creating validtest set...\")\n params['train_perc'] = float(cfg['train_perc'])\n \n new_users_len = int(len(users_full)*(1-params['train_perc']))\n users = users_full[-new_users_len:]\n \n params['cleanfilenames_byuser'] = {k: params['cleanfilenames_byuser_full'][k] for k in users} \n \n params['num_files'] = int(params['num_files']*(1-params['train_perc']))\n \n params['noisyspeech_dir'] = utils.get_dir(cfg, 'validtest_noisy_destination', 'noisy')\n params['clean_proc_dir'] = utils.get_dir(cfg, 'validtest_clean_destination', 'clean')\n params['noise_proc_dir'] = utils.get_dir(cfg, 'validtest_noise_destination', 'noise')\n params['mic_proc_dir'] = utils.get_dir(cfg, 'validtest_mic_destination', 'mic')\n \n else:\n print(\"Creating full set...\")\n params['cleanfilenames_byuser'] = params['cleanfilenames_byuser_full']\n users = users_full\n \n params['noisyspeech_dir'] = utils.get_dir(cfg, 'noisy_destination', 'noisy')\n params['clean_proc_dir'] = utils.get_dir(cfg, 'clean_destination', 'clean')\n params['noise_proc_dir'] = utils.get_dir(cfg, 'noise_destination', 'noise')\n params['mic_proc_dir'] = utils.get_dir(cfg, 'mic_destination', 'mic')\n\n params['noisefilenames'] = glob.glob(os.path.join(noise_dir, params['audioformat']))\n shuffle(params['noisefilenames'])\n\n # Invoke multiple processes and fan out calls to main_gen() to these processes\n global clean_counter, noise_counter, interf_counter\n clean_counter = multiprocessing.Value('i', 0)\n noise_counter = multiprocessing.Value('i', 0)\n interf_counter = multiprocessing.Value('i', 0)\n \n print('Number of files to be synthesized:', params['num_files'])\n multi_pool = multiprocessing.Pool(processes=PROCESSES, initializer = init, initargs = (clean_counter, noise_counter, interf_counter, ))\n fileindices = range(params['num_files'])\n output_lists = multi_pool.starmap(main_gen, zip(repeat(params), 
fileindices))\n\n flat_output_lists = []\n num_lists = 6\n for i in range(num_lists):\n flat_output_lists.append(extract_list(output_lists, i))\n\n # Create log directory if needed, and write log files of clipped and low activity files\n log_dir = utils.get_dir(cfg, 'log_dir', 'Logs')\n\n utils.write_log_file(log_dir, 'source_files.csv', flat_output_lists[0] + flat_output_lists[3])\n utils.write_log_file(log_dir, 'clipped_files.csv', flat_output_lists[1] + flat_output_lists[4])\n utils.write_log_file(log_dir, 'low_activity_files.csv', flat_output_lists[2] + flat_output_lists[5])\n \n # Compute and print stats about percentange of clipped and low activity files\n total_clean = len(flat_output_lists[0]) + len(flat_output_lists[1]) + len(flat_output_lists[2])\n total_noise = len(flat_output_lists[3]) + len(flat_output_lists[4]) + len(flat_output_lists[5])\n pct_clean_clipped = round(len(flat_output_lists[1])/total_clean*100, 1)\n pct_noise_clipped = round(len(flat_output_lists[4])/total_noise*100, 1)\n pct_clean_low_activity = round(len(flat_output_lists[2])/total_clean*100, 1)\n pct_noise_low_activity = round(len(flat_output_lists[5])/total_noise*100, 1)\n \n print(\"Of the \" + str(total_clean) + \" clean speech files analyzed, \" + str(pct_clean_clipped) + \\\n \"% had clipping, and \" + str(pct_clean_low_activity) + \"% had low activity \" + \\\n \"(below \" + str(params['clean_activity_threshold']*100) + \"% active percentage)\")\n print(\"Of the \" + str(total_noise) + \" noise files analyzed, \" + str(pct_noise_clipped) + \\\n \"% had clipping, and \" + str(pct_noise_low_activity) + \"% had low activity \" + \\\n \"(below \" + str(params['noise_activity_threshold']*100) + \"% active percentage)\")\n\n\nif __name__ == '__main__':\n main_body()\n","repo_name":"balkce/demucstargetsel","sub_path":"datasetcreation/noisyspeech_synthesizer_multiprocessing_multimic.py","file_name":"noisyspeech_synthesizer_multiprocessing_multimic.py","file_ext":"py","file_size_in_byte":22076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"974170643","text":"import math\n#3\n\na = int(input(\"A\"))\nb = int(input(\"B\"))\nc = int(input(\"C\"))\n\nP=a+b+c\nl=P/2\nq=l*(l-a)*(l-b)*(l-c)\nS=math.sqrt(q)\nprint(P)\nprint(S)\n\n#4\n\nx=int(input(\"X\"))\ny=int(input(\"Y\"))\nc=int(input(\"C\"))\n\nprint(f'A[{x},{y},{c}]')\n\n\n#5\n\n\na=int(input('a'))\nb=int(input('b'))\nc=a+b\nv=math.log(c,89)\nprint(v)\n\n\n#6\n\n\na=int(input('a'))\nb=int(input('b'))\nc=int(input('c'))\n\nif (a+b==c) or (a+c==b) or (b+c==a):\n print('yes')\nprint('no')\n\n#7\n\na=int(input('a'))\nb=int(input('b'))\nc=int(input('c'))\nd=int(input('d'))\n\na1=a%2\nb1=b%2\nc1=c%2\nd1=d%2\n\nma='not found'\nif a1==0:\n ma=a\nif b1==0 and b>ma:\n ma=b\nif c1==0 and c>ma:\n ma=c\nif d1==0 and d>ma:\n ma=d\nprint(ma)\n\n#8\n\nk=int(input('k'))\na=10\nif k%2==0:\n k1=k/2\n while k1>1:\n a+=1\n k1+=-1\n print(a%10)\nif k%2==1:\n k1=(k+1)/2\n while k1>1:\n a+=1\n k1+=-1\n print(a//10)\n\n \n#9\n\na=3\nb=5\nv=0\n\nwhile a<112:\n c=a/b\n v+=math.cos(c)\n a+=2\n b+=2\nprint(v)\n\n#10\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n#10\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef world_covid19_stats(url) -> dict:\n soup = BeautifulSoup(requests.get(url).text, \"html.parser\")\n keys = soup.findAll(\"h1\")\n values = soup.findAll(\"div\", {\"class\": \"maincounter-number\"})\n keys += soup.findAll(\"span\", {\"class\": \"panel-title\"})\n values += soup.findAll(\"div\", 
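A runnable miniature of the Pool.starmap(..., zip(repeat(params), fileindices)) fan-out used above, with a toy worker standing in for main_gen:

from itertools import repeat
from multiprocessing import Pool

def work(params, filenum):
    return params["scale"] * filenum

if __name__ == "__main__":
    params = {"scale": 10}
    with Pool(processes=2) as pool:
        # starmap unpacks each (params, filenum) tuple into work()'s arguments.
        results = pool.starmap(work, zip(repeat(params), range(5)))
    print(results)  # [0, 10, 20, 30, 40]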
{\"class\": \"number-table-main\"})\n return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}\n\n\nif __name__ == \"__main__\":\n print(\"\\033[1m\" + \"COVID-19 Status of Georgia\" + \"\\033[0m\\n\")\n str1 = \"https://www.worldometers.info/coronavirus/country/georgia/\"\n for key, value in world_covid19_stats(str1).items():\n print(f\"{key}\\n{value}\\n\")\n print(\"\\033[1m\" + \"COVID-19 Status of Australia\" + \"\\033[0m\\n\")\n str2 = \"https://www.worldometers.info/coronavirus/country/australia/\"\n for key, value in world_covid19_stats(str2).items():\n print(f\"{key}\\n{value}\\n\")\n\n \n#11\n\n\nn=int(input('длина списка'))\na=[]\nfor i in range(n):\n c=int(input())\n a.append(c)\nb=len(a)\na1=set(a)\nl=len(a1)\nif b==l:\n print('без повторений')\nelse:print('есть повторения')\n\n\n#12.1\nx=int(input('x'))\ny=int(input('y'))\na=x\nx=y\ny=a\nprint(f'x={x}, y={y}')\n#12.2\nx=int(input('x'))\ny=int(input('y'))\nx=x+y\ny=x-y\nx=x-y\nprint(f'x={x}, y={y}')\n#12.3\nx=int(input('x'))\ny=int(input('y'))\nx, y =y, x\nprint(f'x={x}, y={y}')\n","repo_name":"yaserg555/Lab_k13-2","sub_path":"Vasiclhenko/sam_Vasilchenko.py","file_name":"sam_Vasilchenko.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"74465744727","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = \"esemi\"\n\nfrom scipy.spatial.distance import pdist\nfrom scipy.cluster import hierarchy\nimport matplotlib.pyplot as plt\n\nfrom shared import get_data\n\n\ndef hierarchy_draw(Z, labels, level):\n \"\"\"Рисуем дендрограмму и сохраняем её\"\"\"\n\n plt.figure()\n hierarchy.dendrogram(Z, labels=labels, color_threshold=level, leaf_font_size=5, count_sort=True)\n plt.show()\n\nif __name__ == '__main__':\n names, data = get_data()\n\n dist = pdist(data, 'euclidean')\n plt.hist(dist, 500, color='green', alpha=0.5)\n Z = hierarchy.linkage(dist, method='average')\n\n hierarchy_draw(Z, names, .25)","repo_name":"esemi/blogpost_clustering","sub_path":"src/hierarhy.py","file_name":"hierarhy.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"5223614830","text":"from models.image import ImageViewModel\n\nclass Image:\n def __init__(self, client):\n self.client = client\n\n ## listing all images details\n def all(self):\n image_list = []\n images = self.client.images.list()\n for image in images:\n obj = ImageViewModel(image)\n image_list.append(obj.__dict__)\n \n return image_list\n","repo_name":"nerudadhich/dockermgtapi","sub_path":"resources/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3123058392","text":"'''Use dynamic programming to create a dp matrix.\nCheck if the value is 1 in the matrix firstly. 
\nIf i = 0, same row last dp value +1.\nIf j = 0, same col last dp value +1.\nOtherwise pick the min value among three values from left, up and left conner.'''\nclass Solution(object):\n def maximalSquare(self, matrix):\n \"\"\"\n :type matrix: List[List[str]]\n :rtype: int\n \"\"\"\n if not matrix:\n return 0\n m = len(matrix)\n n = len(matrix[0])\n dp = [[0 for _ in range(n)] for _ in range(m) ]\n res = 0\n for i in range(m):\n for j in range(n):\n dp[i][j] = int(matrix[i][j])\n if dp[i][j] == 0:\n continue\n if i == 0 or j == 0:\n pass\n elif i == 0:\n dp[i][j] = dp[i][j-1] + 1\n elif j == 0:\n dp[i][j] = dp[i-1][j] + 1\n else:\n dp[i][j] = min(dp[i][j-1],dp[i-1][j],dp[i-1][j-1]) + 1\n res = max(res,dp[i][j] * dp[i][j])\n return res\n","repo_name":"tr1503/LeetCode","sub_path":"Dynamic Programming/maximalSquare.py","file_name":"maximalSquare.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34084560275","text":"# coding: utf-8\r\n\r\n\"\"\"Parsers for completion and hightlighting\r\n\"\"\"\r\nimport sys\r\nfrom pyparsing import (\r\n alphanums, Word, Literal, Optional, oneOf, OneOrMore, Group,\r\n delimitedList, ParseException\r\n)\r\n\r\n\r\nclass Parser(object):\r\n \"\"\"\r\n \"\"\"\r\n\r\n def parse(self, input_str):\r\n return self.expr.parseString(input_str)\r\n\r\n\r\nclass EsParser(Parser):\r\n \"\"\"\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"Constructor.\r\n \"\"\"\r\n char_index = alphanums + '+-*'\r\n index = Word(char_index)\r\n indices = delimitedList(index, delim=',')\r\n self.es_indices = indices('indices')\r\n\r\n\r\nclass EsCatCommand(Parser):\r\n \"\"\"\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"Constructor.\r\n \"\"\"\r\n es_sub_commands = [\r\n 'aliases',\r\n 'allocation',\r\n {'name': 'count', 'query': ['index', 'type']},\r\n {'name': 'fielddata', 'params': [{'name': 'fields', 'values': 'ES_FIELDS'}]}\r\n ]\r\n\r\n\r\n slash = Literal('/').suppress()\r\n cmd = Word('_cat')\r\n\r\n sub_cmds = [\r\n Literal('aliases'),\r\n Literal('count'),\r\n Literal('fielddata')\r\n ]\r\n\r\n self.expr = slash + cmd('cmd') + slash + ()\r\n\r\n\r\ndef main():\r\n \"\"\"Main entry point.\r\n \"\"\"\r\n from pprint import pprint as pp\r\n # p = EsCatCommand()\r\n # r = p.parse('/_cat/aliases')\r\n\r\n string = 'index1, index2'\r\n if len(sys.argv) > 1:\r\n string = sys.argv[1]\r\n\r\n es_parser = EsParser()\r\n\r\n pp(es_parser.es_indices.parseString(string))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"wonzbak/estoolkit","sub_path":"estoolkit/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"16248910621","text":"word = input()\n\ndef solve(word):\n ERROR = \"Just a legend\"\n freq = set(word)\n if len(freq) == 1:\n return ERROR\n \n start = word.find(word[-1])\n if start == len(word) -1:\n return ERROR\n \n search = word[:start + 1]\n if search in word[start + 1: - start]:\n return search\n return ERROR\n\n\nprint(solve(word))\n\n\n","repo_name":"AmanuelD02/Competitive-Programming","sub_path":"contest/may/D_Password.py","file_name":"D_Password.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70701211288","text":"import cv2 as cv\n\nTHRESHOLD = 7500 # Movement detection threshold\n\n\nclass Video:\n def 
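A worked call for the DP above (assuming the Solution class is in scope); the classic test grid's largest all-ones square is 2x2, so the returned area is 4:

matrix = [
    ["1", "0", "1", "0", "0"],
    ["1", "0", "1", "1", "1"],
    ["1", "1", "1", "1", "1"],
    ["1", "0", "0", "1", "0"],
]
print(Solution().maximalSquare(matrix))  # 4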
__init__(self, file):\n self.file = file\n self.contains_motion = None\n\n def check_for_motion(self):\n self.contains_motion = False\n cap = cv.VideoCapture(self.file)\n\n ret, frame1 = cap.read()\n ret, frame2 = cap.read()\n \n while cap.isOpened() and frame1 is not None:\n difference = cv.absdiff(frame1, frame2)\n gray_difference = cv.cvtColor(difference, cv.COLOR_BGR2GRAY)\n blur = cv.GaussianBlur(gray_difference, (5, 5), 0)\n __, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)\n dilated = cv.dilate(thresh, None, iterations=3)\n contours, __ = cv.findContours(dilated, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n\n for contour in contours:\n if cv.contourArea(contour) >= THRESHOLD:\n self.contains_motion = True\n return\n\n frame1 = frame2\n ret, frame1 = cap.read()\n\n return\n\n","repo_name":"cal-overflow/serverless-security-system-process-s3-videos-lambda-fn","sub_path":"src/Video.py","file_name":"Video.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27402831698","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/4/12 下午7:44\n# @Author : Sulong\n# @File : object_key.py\n\n# 面向对象编程\n# 继承,多态 多重继承,枚举类\n# @Software: PyCharm\n# 1.类与实例\n\nclass Student(object):\n\tdef __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age\n\n\tdef course(self):\n\t\tpass\n\n\tdef introduce(self):\n\t\tprint(\"我叫%s,今年%s岁\" % (self.name, self.age))\n\n\nstudent_1 = Student(\"Tom\", 16)\n# student_1.name='Tom'\nprint(student_1.name, student_1.age)\nstudent_1.introduce()\nprint(\"**************************************************\")\n\n\n# 2.数据封装\n# 实例本身存在的属性 不在外部的函数去访问 直接在类的内部定义访问数据的函数 --数据封装\ndef introduce_wai(student_1):\n\tprint(\"外部函数调用;introduce_wai\")\n\tprint(\"我叫%s,今年%s岁\" % (student_1.name, student_1.age))\n\n\n# print (\"His name is %s\"%(\"Aviad\"))\nintroduce_wai(student_1)\nprint(\"类的内部调用; introduce\")\nstudent_1.introduce()\nprint(\"**************************************************\")\n\n# 3.访问限制\n# 上述类 Student 中的 name age 可以访问/ 更改 作为一个类的属性 有时 不需要该规则 或 不允许直接访问或改动 可以把属性的名称前加上两个下划线__ 就变成了类的 私有变量 只有内部可以访问 外部不允许访问\nstudent_2 = Student(\"Tony\", 14)\nprint(student_2.name)\nstudent_2.name = 'Tony2'\nprint(student_2.name)\n\n\n# 定义一个 teacher类\nclass Teacher(object):\n\tdef __init__(self, name, age):\n\t\tself.__name = name\n\t\tself.__age = age\n\n\tdef introduce(self):\n\t\tprint(\"师名%s,贵庚%s\" % (self.__name, self.__age))\n\n\t# 当类的属性不允许访问时 则类内部需要给出 get set 方法 原因是 在方法中,可以对参数做检查,避免传入无效的参数\n\tdef get_name(self):\n\t\treturn self.__name\n\n\tdef get_age(self):\n\t\treturn self.__age\n\n\tdef set__name(self, name):\n\t\tself.__name = name\n\n\tdef set__age(self, age):\n\t\tif 0 <= age <= 100:\n\t\t\tself.__age = age\n\t\telse:\n\t\t\traise ValueError('bad age')\n\n\ntecher_1 = Teacher(\"汉文\", 66)\n# 对techer_1 的属性做更改\ntry:\n\t# print(techer_1.age)\n\t# 不允许访问可以更改 更改后可以访问 ? 
还是 换了一个对象\n\t# 更改的是这个实例的 属性 并不是 同一个变量 而是外部代码给对象实例新增了一个age变量\n\ttecher_1.age = 67\n\tprint(techer_1.age)\nexcept:\n\tprint(\"出错!!!\")\nprint(techer_1.get_age())\nprint(\"**************************************************\")\n\n\n# 4.继承与多态\nclass Animal(object):\n\tdef run(self):\n\t\tprint(\"Animal is runing\")\n\n\n# 两个雷继承Animal\nclass Dog(Animal):\n\t# 重写\n\tdef run(self):\n\t\tprint(\"Dog is runing\")\n\n\nclass Cat(Animal):\n\t# 自雷的run()方法 覆盖 了父类的run(),在代码运行的时候,总是会调用子类的run()\n\tdef run(self):\n\t\tprint(\"Cat is runing\")\n\n\ndog = Dog()\ndog.run()\ncat = Cat()\ncat.run()\nanimal = Animal()\n# 判断一个变量是否是某个类型可以用isinstance()判断:\nprint(isinstance(dog, Animal))\nprint(isinstance(dog, Dog))\n# Dog可以看成Animal,但Animal不可以看成Dog\nprint(isinstance(animal, Dog))\n\n\ndef run_twice(animal):\n\tanimal.run()\n\tanimal.run()\n\n\nrun_twice(animal)\nrun_twice(dog)\n\n\n# 定义一个带有run的类 也可以传入run_twice 动态语言的“鸭子类型”,它并不要求严格的继承体系,一个对象只要“看起来像鸭子,走起路来像鸭子”,那它就可以被看做是鸭子\nclass Run(object):\n\tdef run(self):\n\t\tprint(\"only runing\")\n\n\nrun_twice(Run())\n# type 获取对象类型\nprint(type(dog))\nprint(isinstance(dog, Animal))\n# 获取对象的所有方法 和 属性 反会一个List\nprint(dir(dog))\n\nprint(\"**************************************************\")\n\n# 5.__slots__ 限制实例属性\n# python 中是可以给实例绑定属性 和 方法\n\n# 绑定属性\ndog.sex = 1\nprint(dog.sex)\n\n\n# 绑定方法\ndef charge_sex(self, sex):\n\tif sex != 1 and sex != 0:\n\t\treturn ValueError('bad sex')\n\telif sex == 1:\n\t\tself.sex = 0\n\telse:\n\t\tself.sex = 1\n\tprint(self.sex)\n\n\n# 绑定 方法\nfrom types import MethodType\n\ndog.ch_sex = MethodType(charge_sex, dog)\ndog.ch_sex(1)\n\n\n# Python允许在定义class的时候,定义一个特殊的__slots__变量,来限制该class实例能添加的属性\n# 定义的属性仅对当前类实例起作用,对继承的子类是不起作用的:\nclass Student(object):\n\t__slots__ = ('name', 'age') # 用tuple定义允许绑定的属性名称\n\nprint(\"**************************************************\")\n\n\n\n# 6.枚举类\n\nfrom enum import Enum\n\nMonth = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))\nprint(Month.Jan)\n# @unique装饰器 检查保证没有重复值\n#","repo_name":"Sulongsl/python","sub_path":"foundations_for_python/object_key.py","file_name":"object_key.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37421228290","text":"import os\nimport sys\n\nvideos = [\n\t{\n\t\t\"name\": \"speaker_name\",\n\t\t#the paths can be whatever you want, but the containing\n\t\t#folders should already exist\n\t\t\"original\": \"/path/to/original/video.mov\",\n\t\t\"frameSkipped\": \"videos/speaker_name_frameskipped.mov\",\n\t\t\"roid\": \"videos/speaker_name_roi.mov\",\n\t\t\"outputPath\": \"output/speaker_name\",\n\t\t#top left of Region Of Interest\n\t\t\"x1\": 500,\n\t\t\"y1\": 0,\n\t\t#bottom right of Region Of Interest\n\t\t\"x2\": 1920,\n\t\t\"y2\": 1080,\n\t\t\"excludeBefore\": 20 #will not consider anything before 20 secodns as a keyframe\n\t}\n]\n\n\ndef processAll(videos):\n\tfor video in videos:\n\t\tos.system(\"python preprocess.py --command shrink --source {0} --dest {1}\".format(video[\"original\"], video[\"frameSkipped\"]))\n\n\t\tos.system(\"python preprocess.py --command roi --source {0} --dest {1} --rect {2},{3},{4},{5}\"\n\t\t\t.format(video[\"frameSkipped\"], video[\"roid\"], video[\"x1\"], video[\"y1\"], video[\"x2\"], video[\"y2\"]))\n\n\t\tos.system(\"python scene_detection.py -s {0} -d {1} -n {2} -a {3}\"\n\t\t\t.format(video[\"roid\"], video[\"outputPath\"], video[\"name\"], 
video[\"excludeBefore\"]))\n\n\t\tsys.stdout.write('.')\n\t\tsys.stdout.flush()\n\nprocessAll(videos)\n\n#Make sure to set this to the output folder used in the video objects above\n#(the folder) above the one for individual speakers.\noutputPath = \"output\"\nos.system(\"python postprocess.py -s {0} -d {0}\".format(outputPath, outputPath))","repo_name":"tafsiri/filmstrip","sub_path":"filmstrip.py","file_name":"filmstrip.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"31"} +{"seq_id":"14658189966","text":"import random\nimport time\nimport shutil\nimport os\nfrom argparse import ArgumentParser\nfrom utils.tools import get_config, decode_segmap\n\nimport cv2\nfrom seg_trainer import Seg_Trainer\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom utils.logger import get_logger\nfrom utils.dataset import Parse_Dataset\nfrom utils.test_dataset import Test_Dataset\n\nimport numpy as np\nimport scipy.misc as misc\n\nparser = ArgumentParser()\nparser.add_argument('--config', type=str, default='config/seg_config.yaml',\n help=\"training configuration\")\nparser.add_argument('--seed', type=int, help='manual seed')\n\ndef main():\n\n # Config file reading\n args = parser.parse_args()\n config = get_config(args.config)\n\n # ------ CUDA configuration\n cuda = config['cuda']\n device_ids = config['gpu_ids']\n if cuda:\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(i) for i in device_ids)\n device_ids = list(range(len(device_ids)))\n config['gpu_ids'] = device_ids\n cudnn.benchmark = True\n\n # ----- Directory to save checkpoint file\n checkpoint_path = os.path.join(config['checkpoint_path'])\n\n if not os.path.exists(checkpoint_path):\n os.makedirs(checkpoint_path)\n shutil.copy(args.config, os.path.join(checkpoint_path, os.path.basename(args.config)))\n logger = get_logger(checkpoint_path) # get logger and configure it at the first call\n if not os.path.exists(config['output_test_dir']):\n os.makedirs(config['output_test_dir'])\n\n\n logger.info(f\"Arguments: {args}\")\n # Set random seed\n if args.seed is None:\n args.seed = random.randint(1, 10000)\n logger.info(f\"Random seed: {args.seed}\")\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n if cuda:\n torch.cuda.manual_seed_all(args.seed)\n\n\n logger.info(f\"configuration : {config}\")\n\n # first, read images and pick labels with same name\n # we will train all images from HQ dataset\n\n\n # ---------- train and test dataset&loader\n try: # for unexpected error logging\n # Load the dataset\n logger.info(f\"Training on dataset: {config['dataset_name']}\")\n train_dataset = Parse_Dataset(data_path=config['all_data_path'],\n gt_path=config['gt_data_path'],\n with_subfolder=config['data_with_subfolder'],\n image_shape=config['image_shape'],\n random_crop=config['random_crop'], return_name=True)\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=config['batch_size'],\n shuffle=True,\n num_workers=config['num_workers'])\n\n test_dataset = Test_Dataset(data_path=config['test_data_path'],\n with_subfolder=config['data_with_subfolder'],\n image_shape=config['image_shape'],\n random_crop=config['random_crop'], return_name=True)\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=config['batch_size'],\n shuffle=False, num_workers=config['num_workers'])\n\n # [Trainer]\n trainer = Seg_Trainer(config)\n logger.info(f\"\\n{trainer.netParser}\")\n\n\n # CUDA AVAILABLE\n if 
cuda:\n trainer = nn.parallel.DataParallel(trainer, device_ids=device_ids)\n trainer_module = trainer.module\n else:\n trainer_module = trainer\n\n # Get the resume iteration to restart training\n #start_iteration = trainer_module.resume(config['resume']) if config['resume'] else 1\n start_iteration = trainer_module.resume(config['resume'], config['resume_iter']) if config['resume'] else 1\n\n\n # connect loaders to iter()\n iterable_train_loader = iter(train_loader)\n iterable_test_loader = iter(test_loader)\n\n\n # learing rate\n lr = config['lr']\n\n\n print('Training Start.........')\n\n\n\n for iteration in range(start_iteration, config['niter'] + 1):\n\n #=============== TRAIN ===================\n # ------ [ train batch loader ] ---------\n try:\n train_img_names, gt_images, gt_targets, orig_images = iterable_train_loader.next()\n except StopIteration:\n iterable_train_loader = iter(train_loader)\n train_img_names, gt_images, gt_targets, orig_images = iterable_train_loader.next()\n\n # ------ [ train batch ] ---------\n if cuda:\n orig_images = orig_images.cuda()\n gt_images = gt_images.cuda()\n\n\n\n # Forward\n loss, predict = trainer(orig_images, gt_images)\n\n if not loss.dim() == 0:\n loss = torch.mean(loss)\n\n # Backward (update optimizer)\n trainer_module.optimizerSGD.zero_grad()\n loss.backward()\n trainer_module.optimizerSGD.step()\n\n\n # [print loss] (in this, 1 print for 30 iteration)\n if iteration % 50 == 0:\n print(\"Epoch [%d/%d] Loss: %.10f lr:%.6f\" % (iteration ,config['niter'], loss.data, lr))\n\n\n #=============== TEST ===================\n if iteration % config['viz_iter'] == 0:\n try:\n test_img_names, test_orig_images = iterable_test_loader.next()\n except StopIteration:\n iterable_test_loader = iter(test_loader)\n test_img_names, test_orig_images = iterable_test_loader.next()\n\n if cuda:\n test_orig_images = test_orig_images.cuda()\n\n # \n test_predict = trainer.module.netParser(test_orig_images)\n\n for test_idx in range(config['test_batch']):\n pred_out = torch.argmax(test_predict[test_idx], dim=0)\n test_sam = pred_out.cpu().numpy()\n\n if config['save_result_as_colored']:\n decoded = decode_segmap(test_sam)\n misc.imsave(os.path.join(config['output_test_dir'], test_img_names[test_idx].split('.')[0] + '.png'), decoded)\n else:\n cv2.imwrite(os.path.join(config['output_test_dir'], test_img_names[test_idx].split('.')[0] + '.png'), test_sam)\n\n\n\n # \n if iteration % 50000 == 0:\n lr = lr * config['lr_decay']\n for param_group in trainer_module.optimizerSGD.param_groups:\n param_group['lr'] = lr\n\n # save the model\n if iteration % config['snapshot_save_iter'] == 0:\n trainer_module.save_model(checkpoint_path, iteration)\n\n\n except Exception as e: # for unexpected error logging\n logger.error(f\"{e}\")\n raise e\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"easternCar/Face-Parsing-Network","sub_path":"seg_train.py","file_name":"seg_train.py","file_ext":"py","file_size_in_byte":7176,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"31"} +{"seq_id":"41113083650","text":"from rest_framework import serializers\n\nfrom .models import Category, Comment, EncodeProfile, Media, Playlist, Tag\n\n# TODO: put them in a more DRY way\n\n\nclass MediaSerializer(serializers.ModelSerializer):\n # to be used in APIs as show related media\n user = serializers.ReadOnlyField(source=\"user.username\")\n url = serializers.SerializerMethodField()\n api_url = serializers.SerializerMethodField()\n thumbnail_url = 
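seg_train above decays the learning rate by writing into optimizer.param_groups; the same manual schedule as a tiny runnable sketch:

import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)
opt = optim.SGD(model.parameters(), lr=0.1)

def decay_lr(optimizer, factor):
    # Scale every parameter group's learning rate in place.
    for group in optimizer.param_groups:
        group["lr"] *= factor

decay_lr(opt, 0.5)
print(opt.param_groups[0]["lr"])  # 0.05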
serializers.SerializerMethodField()\n author_profile = serializers.SerializerMethodField()\n author_thumbnail = serializers.SerializerMethodField()\n\n def get_url(self, obj):\n return self.context[\"request\"].build_absolute_uri(obj.get_absolute_url())\n\n def get_api_url(self, obj):\n return self.context[\"request\"].build_absolute_uri(obj.get_absolute_url(api=True))\n\n def get_thumbnail_url(self, obj):\n if obj.thumbnail_url:\n return self.context[\"request\"].build_absolute_uri(obj.thumbnail_url)\n else:\n return None\n\n def get_author_profile(self, obj):\n return self.context[\"request\"].build_absolute_uri(obj.author_profile())\n\n def get_author_thumbnail(self, obj):\n return self.context[\"request\"].build_absolute_uri(obj.author_thumbnail())\n\n class Meta:\n model = Media\n read_only_fields = (\n \"friendly_token\",\n \"user\",\n \"add_date\",\n \"media_type\",\n \"state\",\n \"duration\",\n \"encoding_status\",\n \"views\",\n \"likes\",\n \"dislikes\",\n \"reported_times\",\n \"size\",\n \"is_reviewed\",\n \"featured\",\n )\n fields = (\n \"friendly_token\",\n \"url\",\n \"api_url\",\n \"user\",\n \"title\",\n \"description\",\n \"add_date\",\n \"views\",\n \"media_type\",\n \"state\",\n \"duration\",\n \"thumbnail_url\",\n \"is_reviewed\",\n \"preview_url\",\n \"author_name\",\n \"author_profile\",\n \"author_thumbnail\",\n \"encoding_status\",\n \"views\",\n \"likes\",\n \"dislikes\",\n \"reported_times\",\n \"featured\",\n \"user_featured\",\n \"size\",\n )\n\n\nclass SingleMediaSerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(source=\"user.username\")\n url = serializers.SerializerMethodField()\n\n def get_url(self, obj):\n return self.context[\"request\"].build_absolute_uri(obj.get_absolute_url())\n\n class Meta:\n model = Media\n read_only_fields = (\n \"friendly_token\",\n \"user\",\n \"add_date\",\n \"views\",\n \"media_type\",\n \"state\",\n \"duration\",\n \"encoding_status\",\n \"views\",\n \"likes\",\n \"dislikes\",\n \"reported_times\",\n \"size\",\n \"video_height\",\n \"is_reviewed\",\n )\n fields = (\n \"url\",\n \"user\",\n \"title\",\n \"description\",\n \"add_date\",\n \"edit_date\",\n \"media_type\",\n \"state\",\n \"duration\",\n \"thumbnail_url\",\n \"poster_url\",\n \"thumbnail_time\",\n \"url\",\n \"sprites_url\",\n \"preview_url\",\n \"author_name\",\n \"author_profile\",\n \"author_thumbnail\",\n \"encodings_info\",\n \"encoding_status\",\n \"views\",\n \"likes\",\n \"dislikes\",\n \"reported_times\",\n \"user_featured\",\n \"original_media_url\",\n \"size\",\n \"video_height\",\n \"enable_comments\",\n \"categories_info\",\n \"is_reviewed\",\n \"edit_url\",\n \"tags_info\",\n \"hls_info\",\n \"license\",\n \"subtitles_info\",\n \"ratings_info\",\n \"add_subtitle_url\",\n \"allow_download\",\n )\n\n\nclass MediaSearchSerializer(serializers.ModelSerializer):\n url = serializers.SerializerMethodField()\n api_url = serializers.SerializerMethodField()\n\n def get_url(self, obj):\n return self.context[\"request\"].build_absolute_uri(obj.get_absolute_url())\n\n def get_api_url(self, obj):\n return self.context[\"request\"].build_absolute_uri(obj.get_absolute_url(api=True))\n\n class Meta:\n model = Media\n fields = (\n \"title\",\n \"author_name\",\n \"author_profile\",\n \"thumbnail_url\",\n \"add_date\",\n \"views\",\n \"description\",\n \"friendly_token\",\n \"duration\",\n \"url\",\n \"api_url\",\n \"media_type\",\n \"preview_url\",\n \"categories_info\",\n )\n\n\nclass EncodeProfileSerializer(serializers.ModelSerializer):\n 
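# Unlike the serializers above, this one needs no request context or SerializerMethodField;\n    # the EncodeProfile model fields listed in Meta are serialized directly.\n    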
class Meta:\n model = EncodeProfile\n fields = (\"name\", \"extension\", \"resolution\", \"codec\", \"description\")\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(source=\"user.username\")\n\n class Meta:\n model = Category\n fields = (\n \"title\",\n \"description\",\n \"is_global\",\n \"media_count\",\n \"user\",\n \"thumbnail_url\",\n )\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = (\"title\", \"media_count\", \"thumbnail_url\")\n\n\nclass PlaylistSerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(source=\"user.username\")\n\n class Meta:\n model = Playlist\n read_only_fields = (\"add_date\", \"user\")\n fields = (\"add_date\", \"title\", \"description\", \"user\", \"media_count\", \"url\", \"api_url\", \"thumbnail_url\")\n\n\nclass PlaylistDetailSerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(source=\"user.username\")\n\n class Meta:\n model = Playlist\n read_only_fields = (\"add_date\", \"user\")\n fields = (\"title\", \"add_date\", \"user_thumbnail_url\", \"description\", \"user\", \"media_count\", \"url\", \"thumbnail_url\")\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n author_profile = serializers.ReadOnlyField(source=\"user.get_absolute_url\")\n author_name = serializers.ReadOnlyField(source=\"user.name\")\n author_thumbnail_url = serializers.ReadOnlyField(source=\"user.thumbnail_url\")\n\n class Meta:\n model = Comment\n read_only_fields = (\"add_date\", \"uid\")\n fields = (\n \"add_date\",\n \"text\",\n \"parent\",\n \"author_thumbnail_url\",\n \"author_profile\",\n \"author_name\",\n \"media_url\",\n \"uid\",\n )\n","repo_name":"mediacms-io/mediacms","sub_path":"files/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":6850,"program_lang":"python","lang":"en","doc_type":"code","stars":2011,"dataset":"github-code","pt":"31"} +{"seq_id":"31715253998","text":"path = \"C:\\\\Users\\\\Collin\\\\Source\\Advent_Of_Code\\\\Advent-of-Code\\\\Day7\\\\Day7_Input.txt\"\ndirectories = {'/':0}\ncurrentDir = ''\ndirHist = []\ndirectoryList = {}\nwith open(path) as inputFile:\n input = inputFile.read().splitlines()\n for i in input:\n if '$ cd' in i:\n if '$ cd ..' 
in i:\n depth = dirHist.index(currentDir)\n currentDir = dirHist[depth-1]\n dirHist.pop()\n else:\n currentDir = currentDir + i[5:]\n dirHist.append(currentDir)\n\n elif 'dir' in i:\n directoryName = currentDir + (i[4:])\n directories[directoryName] = 0\n elif '$ ls' in i:\n continue\n else:\n for x in dirHist:\n currentSize = directories[x]\n currentSize+=int(i[:i.find(' ')])\n directories[x] = currentSize\ntotal = 70000000-directories['/']\ntotal = 30000000-total\n\nfor z in directories:\n if directories[z] > total:\n directoryList[z]=directories[z]\nx=[]\nfor y in directoryList:\n x.append(directoryList[y])\nx.sort()\nprint(x[0])\n\n\n\n\n \n\n \n\n\n","repo_name":"cmcgregor85/Advent-of-Code","sub_path":"Day7/Day7_Part2.py","file_name":"Day7_Part2.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38115880271","text":"# id 79728615\n\ndef quicksort(arr, left, right):\n if left >= right:\n return\n i, j = left, right\n pivot = arr[(left+right)//2]\n\n while i <= j:\n while arr[i] < pivot:\n i += 1\n while arr[j] > pivot:\n j -= 1\n if i <= j:\n arr[i], arr[j] = arr[j], arr[i]\n i, j = i + 1, j - 1\n quicksort(arr, left, j)\n quicksort(arr, i, right)\n\n\ndef main():\n n = int(input())\n persons = [None] * n\n for i in range(n):\n name, solved, errors = input().split()\n persons[i] = (-int(solved), int(errors), name)\n\n quicksort(persons, 0, len(persons)-1)\n for person in persons:\n print(person[2])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Kaun/algorithms","sub_path":"sorted/effective_quicksort.py","file_name":"effective_quicksort.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1377702918","text":"import sys\nimport copy\nimport torch\nimport torch.nn.parallel\nfrom torch.optim import Adam\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torch.nn.functional as F\n\nsys.path.append('.')\nfrom domain_adaptation.modules.domain_discriminator import DomainDiscriminator\nfrom domain_adaptation.adaptation.dann import DomainAdversarialLoss, ParticlesClassifier\nfrom domain_adaptation.modules.classifier import Classifier as SourceClassifier\nfrom tools.utils import ForeverDataIterator\nfrom tools.lr_scheduler import StepwiseLR\n\n# my utils\nfrom utils.utils import load_pickle, save_pickle\nfrom utils.training_stats import TrainingStats\nfrom utils.validation import validate\nfrom utils.config import Config\nfrom utils.models import Net\nfrom utils.plotting import plot_training_stats\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel_name = 'dann'\n\n\ndef main():\n \"\"\"\n Before starting this script, the source model must be trained and saved in data/trained_models.\n If Config.train_dann is set to true, the script trains dan model and saves it into data/trained_models dir.\n If Config.train_dann is set to false, the script load dan model from data/trained_models and validates it\n on the training dataset.\n\n The training parameters can be set in config.py under the section DANN\n \"\"\"\n print(f\"Training {model_name} classifiers.\")\n precision_recall_scores_list = []\n for particle_code in Config.particles_dict:\n training_stats = TrainingStats()\n particle_name = Config.particles_dict[particle_code]\n datasets_dict = load_pickle(f'{Config.training_data_fp}/datasets_dict_{particle_name}.pkl')\n print(f'Creating 
{model_name} model for {particle_name}')\n n_iters = min(len(datasets_dict['train_source_loader']), len(datasets_dict['train_target_loader']))\n val_loader = datasets_dict['test_target_loader']\n\n # load source model and set as a backbone\n backbone = Net(Config.n_features)\n source_classifier = SourceClassifier(backbone, Config.n_features, classifier_type='source').to(device)\n source_classifier.load_state_dict(\n torch.load(f'{Config.source_model_fp}_{particle_name}.pt',\n map_location=torch.device(device)))\n backbone = source_classifier.backbone\n\n # create initial dann classifier\n classifier = ParticlesClassifier(backbone, Config.n_classes).to(device)\n domain_discriminator = DomainDiscriminator(in_feature=classifier.features_dim, hidden_size=1024).to(device)\n\n # define optimizer and lr scheduler\n optimizer = Adam(classifier.get_parameters() + domain_discriminator.get_parameters(),\n Config.dann_lr, weight_decay=Config.dann_weight_decay)\n lr_scheduler = StepwiseLR(optimizer, init_lr=Config.dann_lr, gamma=0.001, decay_rate=0.75)\n\n # define loss function\n domain_adv = DomainAdversarialLoss(domain_discriminator).to(device)\n\n best_test_loss = 0\n\n # start training\n if Config.train_dann:\n for epoch in range(Config.dann_epochs):\n\n train_source_iter = ForeverDataIterator(datasets_dict['train_source_loader'])\n train_target_iter = ForeverDataIterator(datasets_dict['train_target_loader'])\n test_source_iter = ForeverDataIterator(datasets_dict['test_source_loader'])\n test_target_iter = ForeverDataIterator(datasets_dict['test_target_loader'])\n\n classifier.train()\n domain_adv.train()\n\n loss_item, loss_test_item, classification_loss_item, classification_loss_test_item, trans_loss_item, trans_loss_test_item = 0, 0, 0, 0, 0, 0\n\n for i in range(n_iters):\n lr_scheduler.step()\n\n x_s, labels_s = next(train_source_iter)\n x_t, labels_t = next(train_target_iter)\n\n x_s = x_s.to(device)\n x_t = x_t.to(device)\n labels_s = labels_s.to(device)\n\n x = torch.cat((x_s, x_t), dim=0)\n y, f = classifier(x)\n y_s, y_t = y.chunk(2, dim=0)\n f_s, f_t = f.chunk(2, dim=0)\n\n cls_loss = F.cross_entropy(y_s, labels_s)\n transfer_loss = domain_adv(f_s, f_t)\n loss = cls_loss + transfer_loss * Config.dann_trade_off\n\n # testing iterator\n x_s_test, labels_s_test = next(test_source_iter)\n x_t_test, _ = next(test_target_iter)\n\n x_s_test = x_s_test.to(device)\n x_t_test = x_t_test.to(device)\n labels_s_test = labels_s_test.to(device)\n\n x_test = torch.cat((x_s_test, x_t_test), dim=0)\n y_test, f_test = classifier(x_test)\n y_s_test, y_t_test = y_test.chunk(2, dim=0)\n f_s_test, f_t_test = f_test.chunk(2, dim=0)\n\n cls_loss_test = F.cross_entropy(y_s_test, labels_s_test)\n transfer_loss_test = domain_adv(f_s_test, f_t_test)\n loss_test = cls_loss_test + transfer_loss_test * Config.dann_trade_off\n\n # updating tracked losses\n loss_item += loss.item()\n loss_test_item += loss_test.item()\n classification_loss_item += cls_loss.item()\n classification_loss_test_item += cls_loss_test.item()\n trans_loss_item += transfer_loss.item()\n trans_loss_test_item += transfer_loss_test.item()\n\n # compute gradient and do step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n training_stats.update('epoch', epoch)\n training_stats.update('loss', loss_item / n_iters)\n training_stats.update('loss_test', loss_test_item / n_iters)\n training_stats.update('classification_loss', classification_loss_item / n_iters)\n training_stats.update('classification_loss_test', 
classification_loss_test_item / n_iters)\n training_stats.update('trans_loss', trans_loss_item / n_iters)\n training_stats.update('trans_loss_test', trans_loss_test_item / n_iters)\n training_stats.update('precision_recall_source',\n validate(datasets_dict['test_source_loader'], classifier, loud=False))\n training_stats.update('precision_recall_target',\n validate(datasets_dict['test_target_loader'], classifier, loud=False))\n training_stats.update('precision_recall_source_train',\n validate(datasets_dict['train_source_loader'], classifier, loud=False))\n training_stats.update('precision_recall_target_train',\n validate(datasets_dict['train_target_loader'], classifier, loud=False))\n print(f'particle_type: {particle_name}, epoch: {epoch}/{Config.dann_epochs}')\n\n if loss_test > best_test_loss:\n best_test_loss = loss_test\n best_model = copy.deepcopy(classifier.state_dict())\n torch.save(best_model, f'{Config.dann_model_fp}_{particle_name}.pt')\n\n else:\n classifier.load_state_dict(torch.load(f'{Config.dann_model_fp}_{particle_name}.pt',\n map_location=torch.device(device)))\n best_model = copy.deepcopy(classifier.state_dict())\n\n # evaluate on test set\n classifier.load_state_dict(best_model)\n precision_recall_auc_score = validate(val_loader, classifier, print_classification_report=False, loud=False)\n precision_recall_scores_list.append(precision_recall_auc_score)\n print(f'-------------')\n print(f'precision_recall for {particle_name} classifier: {precision_recall_auc_score}')\n print(f'-------------')\n\n training_stats_df = training_stats.get_training_stats_df()\n save_pickle(training_stats_df,\n f'{Config.source_fp}/pickles/training_stats/training_stats_df_dann_{particle_name}.pkl')\n plot_training_stats(training_stats_df, ['loss', 'loss_test'], model_name=model_name,\n particle_name=particle_name, label='losses')\n plot_training_stats(training_stats_df, ['precision_recall_source', 'precision_recall_target', 'loss_test'],\n model_name=model_name, particle_name=particle_name, label='percisions')\n\n print(\n f'Mean precision_recall for DANN classifier: {sum(precision_recall_scores_list) / len(precision_recall_scores_list)}')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DawidSitnik/Application-of-Domain-Adaptation-Techniques-for-Classifying-Particles-Basing-On-the-Data-From-ALICE","sub_path":"training_models/train_dann_model.py","file_name":"train_dann_model.py","file_ext":"py","file_size_in_byte":8982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40967175526","text":"from asyncio import sleep\n\nfrom pyrogram import Client, enums, filters\nfrom pyrogram.raw import functions\nfrom pyrogram.types import Message\n\nfrom config import CMD_HANDLER as cmd\nfrom ProjectMan.helpers.PyroHelpers import ReplyCheck\n\nfrom .help import add_command_help\n\ncommands = {\n \"ftyping\": enums.ChatAction.TYPING,\n \"fvideo\": enums.ChatAction.RECORD_VIDEO,\n \"faudio\": enums.ChatAction.RECORD_AUDIO,\n \"fround\": enums.ChatAction.RECORD_VIDEO_NOTE,\n \"fphoto\": enums.ChatAction.UPLOAD_PHOTO,\n \"fsticker\": enums.ChatAction.CHOOSE_STICKER,\n \"fdocument\": enums.ChatAction.UPLOAD_DOCUMENT,\n \"flocation\": enums.ChatAction.FIND_LOCATION,\n \"fgame\": enums.ChatAction.PLAYING,\n \"fcontact\": enums.ChatAction.CHOOSE_CONTACT,\n \"fstop\": enums.ChatAction.CANCEL,\n \"fscreen\": \"screenshot\",\n}\n\n\n@Client.on_message(filters.command(list(commands), cmd) & filters.me)\nasync def fakeactions_handler(client: Client, 
message: Message):\n cmd = message.command[0]\n try:\n sec = int(message.command[1])\n if sec > 60:\n sec = 60\n except:\n sec = None\n await message.delete()\n action = commands[cmd]\n try:\n if action != \"screenshot\":\n if sec and action != enums.ChatAction.CANCEL:\n await client.send_chat_action(chat_id=message.chat.id, action=action)\n await sleep(sec)\n else:\n return await client.send_chat_action(\n chat_id=message.chat.id, action=action\n )\n else:\n for _ in range(sec if sec else 1):\n await client.send(\n functions.messages.SendScreenshotNotification(\n peer=await client.resolve_peer(message.chat.id),\n reply_to_msg_id=0,\n random_id=client.rnd_id(),\n )\n )\n await sleep(0.1)\n except Exception as e:\n return await client.send_message(\n message.chat.id,\n f\"**ERROR:** `{e}`\",\n reply_to_message_id=ReplyCheck(message),\n )\n\n\nadd_command_help(\n \"fakeaction\",\n [\n [\"ftyping [detik]\", \"Menampilkan Pengetikan Palsu dalam obrolan.\"],\n [\"fgame [detik]\", \"Menampilkan sedang bermain game Palsu dalam obrolan.\"],\n [\n \"faudio [detik]\",\n \"Menampilkan tindakan merekam suara palsu dalam obrolan.\",\n ],\n [\n \"fvideo [detik]\",\n \"Menampilkan tindakan merekam video palsu dalam obrolan.\",\n ],\n [\n \"fround [detik]\",\n \"Menampilkan tindakan merekam video palsu dalam obrolan.\",\n ],\n [\n \"fphoto [detik]\",\n \"Menampilkan tindakan mengirim foto palsu dalam obrolan.\",\n ],\n [\n \"fsticker [detik]\",\n \"Menampilkan tindakan memilih Sticker palsu dalam obrolan.\",\n ],\n [\n \"fcontact [detik]\",\n \"Menampilkan tindakan Share Contact palsu dalam obrolan.\",\n ],\n [\n \"flocation [detik]\",\n \"Menampilkan tindakan Share Lokasi palsu dalam obrolan.\",\n ],\n [\n \"fdocument [detik]\",\n \"Menampilkan tindakan tengirim Document/File palsu dalam obrolan.\",\n ],\n [\n \"fscreen [jumlah]\",\n \"Menampilkan tindakan screenshot palsu. 
(Gunakan di Obrolan Pribadi)\",\n        ],\n        [\"fstop\", \"Memberhentikan tindakan palsu dalam obrolan.\"],\n    ],\n)\n","repo_name":"mrismanaziz/PyroMan-Userbot","sub_path":"ProjectMan/modules/fakeaction.py","file_name":"fakeaction.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"31"} +{"seq_id":"17111857407","text":"import os\nimport csv\ncsvpath=os.path.join(\"Resources\", \"election_data.csv\") \nwith open(csvpath) as csvfile: #open file\n    csvreader = csv.reader(csvfile, delimiter=',') \n    csvheader=next(csvreader)\n    votes_cast=0\n    most_votes=0\n    candidates_dic={}\n    candidates_list=[]\n    analysis_txt=[]\n    winner=[]\n    for i in csvreader:\n        votes_cast=votes_cast+1\n        if i[2] not in candidates_dic: \n            candidates_dic[i[2]]=1\n            candidates_list.append(i[2])\n        else:\n            candidates_dic[i[2]]+=1\n    \n    \n    analysis_txt.append(\"Election Results\")\n    analysis_txt.append(\"--------------------------\")\n    analysis_txt.append(f\"Total Votes Cast:{votes_cast}\")\n    analysis_txt.append(\"--------------------------\")\n    count=0\n    for i in candidates_list:\n        analysis_txt.append(f\"{candidates_list[count]}: {round(candidates_dic[candidates_list[count]]/votes_cast*100,2)}% ({candidates_dic[candidates_list[count]]})\")\n        if candidates_dic[candidates_list[count]]>most_votes:\n            most_votes=candidates_dic[candidates_list[count]]\n            winner=candidates_list[count]\n        count+=1\n\n    analysis_txt.append(\"--------------------------\")\n    analysis_txt.append(f\"Winner: {winner}\")\n\n\n    # save the output file path\ncsvpath=os.path.join(\"Analysis\", \"Analysis.txt\") \n\n# open the output file and write each line of the analysis to it\nwith open(csvpath, \"w\") as text:\n    \n    for i in analysis_txt:\n        text.write(i)\n        text.write(\"\\n\")\n        print(i)","repo_name":"DarrylB32/Python-Financial_and_Election_Analysis","sub_path":"Election_Analysis/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20756484606","text":"# find sum and average of 10 numbers\r\n\r\na = []\r\nfor i in range(10):\r\n    a.append(int(input(\"Enter a Number : \")))\r\n\r\ntotal = 0\r\nfor n in range(len(a)):\r\n    total = total + a[n]\r\n\r\navg = total / len(a)\r\nprint(\"sum =\", total, \"average =\", avg)\r\n","repo_name":"lokeshraogujja/practice","sub_path":"sum-average.py","file_name":"sum-average.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26051284071","text":"import os\nSECRET_KEY = 'fake-key'\n\n# Use nose to run all tests\n\n# Tell nose to measure coverage on the 'foo' and 'bar' apps\n\n\nINSTALLED_APPS = [\n\n    'mogi',\n    'galaxy',\n    'gfiles',\n\n    'django_tables2',\n    'django_tables2_column_shifter',\n    'django_filters',\n    'bootstrap3',\n    'django_sb_admin',\n\n    'dal',\n    'dal_select2',\n\n\n    'allauth',\n    'allauth.account',\n    'allauth.socialaccount',\n\n    'allauth.socialaccount.providers.google',\n\n    'django.contrib.sites',\n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n]\nDEBUG = True\n\n\nMIDDLEWARE = [\n    'django.middleware.security.SecurityMiddleware',\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    
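# NB: AuthenticationMiddleware (next entry) relies on SessionMiddleware, which is already\n    # listed above; the second SessionMiddleware occurrence below is redundant.\n    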
'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'simple_history.middleware.HistoryRequestMiddleware',\n]\n\nAUTH_USER_MODEL = 'gfiles.User'\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'test-django-galaxy',\n }\n}\n\n\n\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nPROJECT_ROOT = os.path.abspath(\n os.path.join(os.path.dirname(__file__), \"../mogi\"),\n)\n\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')\n\nSTATIC_URL = '/static/'\n\n#ROOT_URLCONF = 'mogi_site.urls'\n\nLOGIN_REDIRECT_URL = 'index'\nLOGIN_URL = '/login/'\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nMEDIA_ROOT = '/media/'\nMEDIA_URL = '/media/'\n\n\nEXTERNAL_DATA_ROOTS = {'TEST': {\n 'path': os.path.join(BASE_DIR, 'mogi', 'tests', 'data'),\n 'user_dirs': True,\n 'help_text': 'test data store',\n 'filepathfield': False\n # if false will use charfield path, if true filepathfield will look\n # recursively in a selected folder but will be to slow for complicated\n # folder structure\n }\n}\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [(os.path.join(BASE_DIR, 'templates')),\n ],\n\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages'\n ],\n },\n },\n]\n\nSITE_ID = 1\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False\nACCOUNT_SESSION_REMEMBER = True\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_UNIQUE_EMAIL = True\n","repo_name":"computational-metabolomics/django-mogi","sub_path":"mogi/test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"70271178649","text":"from asgiref.sync import async_to_sync\n\nfrom application.celery import app\nfrom bot.models import TelegramUser\nfrom bot.telegram.app import get_bot\nfrom events.models import Event\n\n\nclass EventCreateNotifier:\n event: Event\n\n def __init__(self, event: Event):\n self.event = event\n\n @classmethod\n def delay(cls, event):\n cls.run.delay(event.id, event.tg_owner.id)\n if event.tg_author != event.tg_owner:\n cls.run.delay(event.id, event.tg_author.id)\n\n @staticmethod\n @app.task\n def run(event_id, user_id):\n event = Event.objects.get(pk=event_id)\n user = TelegramUser.objects.get(pk=user_id)\n EventCreateNotifier(event).notify(user)\n\n def notify(self, user: TelegramUser):\n bot = get_bot()\n async_to_sync(bot.send_message)(\n user.id,\n text=self.get_notification_text(user),\n parse_mode='MarkdownV2',\n pool_timeout=10\n )\n\n def get_notification_text(self, user: TelegramUser):\n e = self.event\n display_owner = user == e.tg_owner\n owner = f\"[{e.tg_owner.first_name}](tg://user?id={e.tg_owner.id})'s\" if not display_owner else 'your'\n display_author = user != e.tg_author\n author = f\"[{e.tg_author.first_name}](tg://user?id={e.tg_author.id})\" if display_author else None\n start_date = e.start_date.strftime('%d %B, %Y')\n start_time = 
e.start_date.strftime('%H:%M')\n end_time = e.end_date.strftime('%H:%M')\n return (\n f'*New event on {owner} calendar:*' +\n (f'\\n📝 {e.title}' if e.title else '') +\n f'\\n🗓 {start_date}' +\n f'\\n🕓 {start_time} to {end_time}' +\n (f'\\n✍️ {author}' if display_author else '')\n )\n","repo_name":"inspectorr/NextWeekTWA","sub_path":"server/events/notifications.py","file_name":"notifications.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72496004889","text":"import os\nimport cv2\nimport glob\nimport torch\nfrom torchvision import transforms as T\nfrom torch.nn import DataParallel\n\nfrom tqdm import tqdm\nfrom PIL import Image,ImageDraw,ImageFont\nimport numpy as np\nfrom MobileNetV2 import mobilenet_v2\n\n\n\nclass Car_recog(object):\n def __init__(self, model_path=\"/content/checkpoint/mobilenet-v2_20.pth\"):\n self.device = torch.device(\"cuda\")\n self.net = mobilenet_v2().to(self.device)\n self.net = DataParallel(self.net)\n self.weights = model_path\n\n self.net.load_state_dict(torch.load(self.weights))\n\n normalize = T.Normalize(mean = [0.5, 0.5, 0.5],\n std = [0.5, 0.5, 0.5]\n )\n self.transforms = T.Compose([\n T.ToTensor(),\n normalize\n ])\n\n def recog(self, img):\n # img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = Image.fromarray(img)\n img_RGB = img.copy()\n img = img.resize((256, 256))\n img = self.transforms(img)\n img = img.unsqueeze(0)\n\n with torch.no_grad():\n self.net.eval()\n img_input = img.to(self.device)\n outputs_color, outputs_type, outputs_sub_type = self.net(img_input)\n outputs_color = torch.softmax(outputs_color, 1) \n outputs_type = torch.softmax(outputs_type, 1)\n outputs_sub_type = torch.softmax(outputs_sub_type, 1)\n\n label_color = outputs_color.argmax()\n label_type = outputs_type.argmax()\n label_sub_type = outputs_sub_type.argmax()\n return img_RGB, label_color, label_type, label_sub_type\n\n\n\nif __name__ == \"__main__\":\n color_name = ['blue', 'green', 'yellow', 'orange', 'red', 'white', 'black', 'brown', 'grey', 'other']\n car_name = [\n \"AM_General\",\n \"Acura\",\n \"Aston Martin\",\n \"Audi\",\n \"BMW\",\n \"Bentley\",\n \"Bugatti\",\n \"Buick\",\n \"Cadillac\",\n \"Chevrolet\",\n \"Chrysler\",\n \"Daewoo\",\n \"Dodge\",\n \"Eagle_Talon\",\n \"FIAT\",\n \"Ferrari\",\n \"Fisker\",\n \"Ford\",\n \"GMC\",\n \"Geo_Meotro\",\n \"HUMMER\",\n \"Honda\",\n \"Hyundai\",\n \"Infiniti\",\n \"Isuzu\",\n \"Jaguar\",\n \"Jeep\",\n \"Lamborghini\",\n \"Land Rover\",\n \"Lincoln\",\n \"Mini_Cooper\",\n \"Mazda\",\n \"Mclaren\",\n \"Benz\",\n \"Mitsubishi\",\n \"Nissan\",\n \"Plymouth\",\n \"Porsche\",\n \"Rolls-Royce\",\n \"Scion\",\n \"Spyker\",\n \"Suzuki\",\n \"Tesla\",\n \"Toyota\",\n \"Volkswagen\",\n \"Volvo\",\n \"Smart\",\n ]\n type_name = ['SUV', 'Sedan', 'Hatchback', 'Convertible', 'Coupe', 'Wagon', 'Truck', 'Van', 'Minivan']\n\n car_recog = Car_recog()\n img_list = [os.path.join(\"./car_recognition/cars_attributes/test_imgs\", i) for i in os.listdir(\"./car_recognition/cars_attributes/test_imgs\")]\n for img_path in img_list:\n img = cv2.imread(img_path)\n img_RGB, label_color, label_car, label_type = car_recog.recog(img)\n print(img_path)\n result = \"Color:%s, Car:%s, Type:%s\" % (color_name[label_color], car_name[label_car], type_name[label_type])\n print(\"车辆属性识别结果:%s\" % result)\n # 把车属性的识别结果画到图上\n draw = ImageDraw.Draw(img_RGB)\n #font = ImageFont.truetype(\"./simhei.ttf\", 24, encoding=\"utf-8\")\n 
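# load_default() needs no font file but only offers one small fixed size; the\n        # commented-out truetype() call above is the route to larger text when a .ttf\n        # such as simhei.ttf is available.\n        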
font = ImageFont.load_default()\n draw.text((0, 0), result, (255, 0, 0), font=font)\n img_BGR = cv2.cvtColor(np.array(img_RGB), cv2.COLOR_RGB2BGR)\n cv2.imwrite(os.path.join(\"/content/car_recognition/cars_attributes/result_test\", os.path.basename(img_path)), img_BGR)\n\n","repo_name":"M-6J/car_recognition","sub_path":"cars_attributes/test_img.py","file_name":"test_img.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3136325473","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport re\nimport io\n\nimport typing\nimport numpy as np\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nfrom builtins import str\nfrom typing import Any, Dict, List, Optional, Text, Tuple\n\nfrom rasa.nlu.extractors import EntityExtractor\nfrom rasa.nlu.model import Metadata\nfrom rasa.nlu.training_data import Message\n\nfrom rasa_nlu_gao.utils.bilstm_utils import \\\n char_mapping, tag_mapping, prepare_dataset, BatchManager, iob_iobes, \\\n iob2, save_model, create_model, input_from_line\n\nfrom rasa_nlu_gao.models.model import Model\nfrom multiprocessing import cpu_count\nimport jieba\n\nlogger = logging.getLogger(__name__)\n\nif typing.TYPE_CHECKING:\n import numpy as np\n import tensorflow as tf\n import tensorflow.contrib\n\ntry:\n import tensorflow as tf\nexcept ImportError:\n tf = None\n\n\nclass BilstmCRFEntityExtractor(EntityExtractor):\n provides = [\"entities\"]\n\n requires = [\"tokens\"]\n\n defaults = {\n \"lr\": 0.001,\n \"char_dim\": 100,\n \"lstm_dim\": 100,\n \"batches_per_epoch\": 10,\n \"seg_dim\": 20,\n \"num_segs\": 4,\n \"batch_size\": 20,\n \"zeros\": True,\n \"tag_schema\": \"iobes\",\n \"lower\": False,\n \"model_type\": \"idcnn\",\n \"clip\": 5,\n \"optimizer\": \"adam\",\n \"dropout_keep\": 0.5,\n \"steps_check\": 100,\n \"config_proto\": {\n \"device_count\": cpu_count(),\n \"inter_op_parallelism_threads\": 0,\n \"intra_op_parallelism_threads\": 0,\n \"allow_growth\": True\n },\n \"dictionary_path\": None,\n }\n\n def __init__(self,\n component_config=None,\n ent_tagger=None,\n session=None,\n char_to_id=None,\n id_to_tag=None):\n super(BilstmCRFEntityExtractor, self).__init__(component_config)\n\n self.ent_tagger = ent_tagger # 指的是训练好的model\n self.session = session\n self.char_to_id = char_to_id\n self.id_to_tag = id_to_tag\n dictionary_path = self.component_config.get('dictionary_path')\n\n if dictionary_path:\n jieba.load_userdict(dictionary_path)\n\n self.seg = jieba\n\n def train(self, training_data, config, **kwargs):\n filtered_entity_examples = \\\n self.filter_trainable_entities(training_data.training_examples)\n\n train_sentences = self._create_dataset(filtered_entity_examples)\n\n # 检测并维护数据集的tag标记\n self.update_tag_scheme(\n train_sentences, self.component_config[\"tag_schema\"])\n\n _c, char_to_id, id_to_char = char_mapping(\n train_sentences, self.component_config[\"lower\"])\n\n tag_to_id, id_to_tag = tag_mapping(train_sentences)\n self.char_to_id = char_to_id\n self.id_to_tag = id_to_tag\n\n self.component_config[\"num_chars\"] = len(char_to_id)\n self.component_config[\"num_tags\"] = len(tag_to_id)\n\n train_data = prepare_dataset(\n train_sentences, char_to_id,\n tag_to_id, self.seg,\n self.component_config[\"lower\"])\n\n # 获取可供模型训练的单个批次数据\n train_manager = BatchManager(\n train_data, 
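# batched into batch_size chunks, shuffled each epoch by iter_batch() below\n            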
self.component_config[\"batch_size\"])\n\n self._train_model(train_manager)\n\n def _create_dataset(self, examples):\n dataset = []\n for example in examples:\n entity_offsets = self._convert_example(example)\n dataset.append(self._predata(\n example.text, entity_offsets, self.component_config[\"zeros\"]))\n return dataset\n\n @staticmethod\n def _convert_example(example):\n def convert_entity(entity):\n return entity[\"start\"], entity[\"end\"], entity[\"entity\"]\n\n return [convert_entity(ent) for ent in example.get(\"entities\", [])]\n\n @staticmethod\n def _predata(text, entity_offsets, zeros):\n value = 'O'\n bilou = [value for _ in text]\n # zero_digits函数的用途是将所有数字转化为0\n\n def zero_digits(s):\n return re.sub('\\d', '0', s)\n\n text = zero_digits(text.rstrip()) if zeros else text.rstrip()\n\n cooked_data = []\n\n for (start, end, entity) in entity_offsets:\n if start is not None and end is not None:\n bilou[start] = 'B-' + entity\n for i in range(start+1, end):\n bilou[i] = 'I-' + entity\n\n for index, achar in enumerate(text):\n if achar.strip():\n temp = []\n temp.append(achar)\n temp.append(bilou[index])\n\n cooked_data.append(temp)\n else:\n continue\n\n return cooked_data\n\n @staticmethod\n def get_config_proto(component_config):\n # 配置configProto\n config = tf.ConfigProto(\n device_count={\n 'CPU': component_config['config_proto']['device_count']\n },\n inter_op_parallelism_threads=component_config\n ['config_proto']['inter_op_parallelism_threads'],\n intra_op_parallelism_threads=component_config\n ['config_proto']['intra_op_parallelism_threads'],\n gpu_options={\n 'allow_growth':\n component_config['config_proto']['allow_growth']\n }\n )\n return config\n\n def update_tag_scheme(self, sentences, tag_scheme):\n for i, s in enumerate(sentences):\n tags = [w[1] for w in s]\n # Check that tags are given in the IOB format\n if not iob2(tags):\n s_str = '\\n'.join(' '.join(w) for w in s)\n raise Exception('Sentences should be given in IOB format! 
' +\n 'Please check sentence %i:\\n%s' % (i, s_str))\n if tag_scheme == 'iob':\n # If format was IOB1, we convert to IOB2\n for word, new_tag in zip(s, tags):\n word[1] = new_tag\n elif tag_scheme == 'iobes':\n new_tags = iob_iobes(tags)\n for word, new_tag in zip(s, new_tags):\n word[1] = new_tag\n else:\n raise Exception('Unknown tagging scheme!')\n\n def _train_model(self, train_manager):\n # 训练集全量跑一次需要迭代的次数\n steps_per_epoch = train_manager.len_data\n\n tf_config = self.get_config_proto(self.component_config)\n sess = tf.Session(config=tf_config)\n\n self.session = sess\n\n # 此处模型创建为项目最核心代码\n model = create_model(sess, Model, self.component_config, logger)\n self.model = model\n\n logger.warning(\"start training\")\n loss_slot = []\n\n for _ in range(self.component_config[\"batches_per_epoch\"]):\n for batch in train_manager.iter_batch(shuffle=True):\n step, batch_loss_slot = model.run_step(\n sess, True, batch)\n loss_slot.append(batch_loss_slot)\n\n if step % self.component_config[\"steps_check\"] == 0:\n iteration = step // steps_per_epoch + 1\n\n logger.warning(\"iteration:{} step:{}/{}, NER loss:{:>9.6f}\"\n \"\".format(iteration,\n step % steps_per_epoch,\n steps_per_epoch,\n np.mean(loss_slot)))\n loss_slot = []\n\n def process(self, message, **kwargs):\n # type: (Message, **Any) -> None\n extracted = self.add_extractor_name(self.extract_entities(message))\n message.set(\"entities\",\n message.get(\"entities\", []) + extracted,\n add_to_output=True)\n\n def extract_entities(self, message):\n # type: (Message) -> List[Dict[Text, Any]]\n \"\"\"Take a sentence and return entities in json format\"\"\"\n\n if self.ent_tagger is not None:\n result = self.ent_tagger.evaluate_line(\n self.session,\n input_from_line(message.text, self.char_to_id, self.seg),\n self.id_to_tag)\n return result.get(\"entities\", [])\n else:\n return []\n\n @classmethod\n def load(cls,\n meta: Dict[Text, Any],\n model_dir: Text = None,\n model_metadata: Metadata = None,\n cached_component: Optional['BilstmCRFEntityExtractor'] = None,\n **kwargs: Any\n ) -> 'BilstmCRFEntityExtractor':\n\n tf_config = cls.get_config_proto(meta)\n sess = tf.Session(config=tf_config)\n\n model = Model(meta)\n if model_dir and meta.get(\"file\"):\n file_name = meta.get(\"file\")\n checkpoint = os.path.join(model_dir, file_name + '.ckpt')\n model.saver.restore(sess, checkpoint)\n\n with io.open(os.path.join(\n model_dir,\n file_name + \"_char_to_id.pkl\"), 'rb') as f:\n char_to_id = pickle.load(f)\n with io.open(os.path.join(\n model_dir,\n file_name + \"_id_to_tag.pkl\"), 'rb') as f:\n id_to_tag = pickle.load(f)\n\n return BilstmCRFEntityExtractor(\n component_config=meta,\n ent_tagger=model,\n session=sess,\n char_to_id=char_to_id,\n id_to_tag=id_to_tag)\n\n else:\n return BilstmCRFEntityExtractor(meta)\n\n def persist(self,\n file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:\n # type: (Text) -> Optional[Dict[Text, Any]]\n \"\"\"Persist this model into the passed directory.\n Return the metadata necessary to load the model again.\"\"\"\n if self.session is None:\n return {\"classifier_file\": None}\n checkpoint = os.path.join(model_dir, file_name + \".ckpt\")\n\n try:\n os.makedirs(os.path.dirname(checkpoint))\n except OSError as e:\n # be happy if someone already created the path\n import errno\n if e.errno != errno.EEXIST:\n raise\n\n save_model(self.session, self.model, checkpoint, logger)\n\n with io.open(os.path.join(\n model_dir,\n file_name + \"_char_to_id.pkl\"), 'wb') as f:\n pickle.dump(self.char_to_id, 
f)\n with io.open(os.path.join(\n model_dir,\n file_name + \"_id_to_tag.pkl\"), 'wb') as f:\n pickle.dump(self.id_to_tag, f)\n\n return {\"file\": file_name}\n","repo_name":"GaoQ1/rasa_nlu_gq","sub_path":"rasa_nlu_gao/extractors/bilstm_crf_entity_extractor.py","file_name":"bilstm_crf_entity_extractor.py","file_ext":"py","file_size_in_byte":10849,"program_lang":"python","lang":"en","doc_type":"code","stars":300,"dataset":"github-code","pt":"31"} +{"seq_id":"31042006184","text":"# coding:utf-8\u000B\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.common.exceptions import *\r\nfrom selenium.webdriver.support.select import Select\r\n\r\nclass Base():\r\n def __init__(self, driver):\r\n self.driver = driver\r\n self.timeout = 30\r\n self.poll = 0.5\r\n\r\n\r\n def findElement(self, loctor):\r\n # '''\r\n # :param self:\r\n # :param loctor:\r\n # :return:\r\n # args:\r\n # loctor 传元祖,如(\"id\",\"xx\")\r\n #查找元素\r\n # '''\r\n element = WebDriverWait(self.driver, self.timeout, self.poll).until(lambda x:\r\n x.find_element(*loctor))\r\n # print(\"正在定位元素:方法是-->%s, value值是-->%s\"%(loctor[0] ,loctor[1]))\r\n return element\r\n\r\n def click(self, loctor):\r\n ele = self.findElement(loctor)\r\n ele.click()\r\n\r\n def sendKeys(self, loctor, text, is_clear_first=False):\r\n '''\r\n is_clear_first默认为False,不清空输入框\u000B\r\n '''\r\n ele = self.findElement(loctor)\r\n if is_clear_first:\r\n ele.clear() # is_clear_first 为True的时候执行\r\n ele.send_keys(text)\r\n print(\"正在定位元素:方法是-->%s, value值是-->%s, 传的值是-->%s\"% (loctor[0],loctor[1],text))\r\n\r\n\r\n def isSelected(self,locator):\r\n '''\r\n 判断元素是否被选中,返回bool值\r\n '''\r\n ele = self.findElement(locator)\r\n r = ele.is_selected()\r\n return r\r\n\r\n def isElementExist(self,locator):\r\n '''单个元素判断是否存在'''\r\n try:\r\n ele = self.findElement(locator)\r\n return True\r\n except:\r\n return False\r\n\r\n def isElementExits(self,locator):\r\n '''多个元素判定存在'''\r\n eles = self.findElement(locator)\r\n n =len(eles)\r\n if n == 0:\r\n return False\r\n elif n == 1 :\r\n return True\r\n else:\r\n print(\"定位到多个元素%s\"%n)\r\n return True\r\n\r\n\r\n\r\n","repo_name":"Tank1991/DaTong","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9086086387","text":"largura = int(input(\"Digite a largura do retângulo: \"))\naltura = int(input(\"Digite a altura do retângulo: \"))\n\ni = 0\nwhile i < altura:\n j = 0\n while j < largura:\n if i == 0 or i == altura-1:\n print(\"#\", end=\"\")\n else:\n if j == 0 or j == largura-1:\n print(\"#\", end=\"\")\n if j!= 0 and j != largura-1:\n print(end=\" \")\n j += 1\n i += 1\n print()\n","repo_name":"rogersineb/python_developer","sub_path":"Ciencia_da_Computacao_USP/submissao_7_semana/retangulo_sem_preenchimento.py","file_name":"retangulo_sem_preenchimento.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71415300248","text":"from random import randint\r\nprint(\"Molimo Vas unesite sedam brojeva od 1 do 39, jedan po jedan.\")\r\ndef f1():\r\n global LotoBrojevi\r\n Loto1= randint(1,39)\r\n LotoBrojevi = [Loto1]\r\n Loto2= randint(1,39)\r\n if Loto2 in LotoBrojevi:\r\n while Loto2 in LotoBrojevi:\r\n Loto2 = randint(1,39)\r\n LotoBrojevi.append(Loto2)\r\n Loto3= randint(1,39)\r\n if Loto3 in 
LotoBrojevi:\r\n while Loto3 in LotoBrojevi:\r\n Loto3 = randint(1,39)\r\n LotoBrojevi.append(Loto3)\r\n Loto4= randint(1,39)\r\n if Loto4 in LotoBrojevi:\r\n while Loto4 in LotoBrojevi:\r\n Loto4 = randint(1,39)\r\n LotoBrojevi.append(Loto4)\r\n Loto5= randint(1,39)\r\n if Loto5 in LotoBrojevi:\r\n while Loto5 in LotoBrojevi:\r\n Loto5 = randint(1,39)\r\n LotoBrojevi.append(Loto5)\r\n Loto6= randint(1,39)\r\n if Loto6 in LotoBrojevi:\r\n while Loto6 in LotoBrojevi:\r\n Loto6 = randint(1,39)\r\n LotoBrojevi.append(Loto6)\r\n Loto7= randint(1,39)\r\n if Loto7 in LotoBrojevi:\r\n while Loto7 in LotoBrojevi:\r\n Loto7 = randint(1,39)\r\n LotoBrojevi.append(Loto7)\r\nMojiBrojevi=[]\r\ny = 7\r\nwhile y != 0:\r\n MojiBrojevi.append(int(input()))\r\n y -= 1\r\nprint(\"Vasi brojevi su : \",MojiBrojevi)\r\nprint(\"Molimo Vas sacekajte.\")\r\nprint( )\r\ndef f2():\r\n global BrojSedmica\r\n BrojSedmica=0\r\n global BrojSestica\r\n BrojSestica = 0\r\n global BrojPetica\r\n BrojPetica = 0\r\n global BrojCetvorki\r\n BrojCetvorki = 0\r\n global BrojTrojki\r\n BrojTrojki = 0\r\n global BrojDvojki\r\n BrojDvojki = 0\r\n global BrojJedinica\r\n BrojJedinica = 0\r\n x=1000000\r\n while x!=0:\r\n m=0\r\n f1()\r\n if MojiBrojevi[0] in LotoBrojevi:\r\n m+=1\r\n if MojiBrojevi[1] in LotoBrojevi:\r\n m+=1\r\n if MojiBrojevi[2] in LotoBrojevi:\r\n m+=1\r\n if MojiBrojevi[3] in LotoBrojevi:\r\n m+=1\r\n if MojiBrojevi[4] in LotoBrojevi:\r\n m+=1\r\n if MojiBrojevi[5] in LotoBrojevi:\r\n m+=1\r\n if MojiBrojevi[6] in LotoBrojevi:\r\n m+=1\r\n if m==1:\r\n BrojJedinica+=1\r\n if m==2:\r\n BrojDvojki+=1\r\n if m==3:\r\n BrojTrojki+=1\r\n if m==4:\r\n BrojCetvorki+=1\r\n if m==5:\r\n BrojPetica+=1\r\n if m==6:\r\n BrojSestica+=1\r\n if m==7:\r\n print( )\r\n print(\"Cestitamo, osvojili ste sedmicu nakon pokusaja broj \", 1000000 - x)\r\n print( )\r\n BrojSedmica += 1\r\n x-=1\r\nf2()\r\nprint(\"Broj sedmica koje ste osvojili u milion pokusaja je \", BrojSedmica)\r\nprint(\"Broj sestica koje ste osvojili u milion pokusaja je \",BrojSestica)\r\nprint(\"Broj petica koje ste osvojili u milion pokusaja je\",BrojPetica)\r\nprint(\"Broj cetvorki koje ste osvojili u milion pokusaja je\",BrojCetvorki)\r\nprint(\"Broj trojki koje ste osvojili u milion pokusaja je\",BrojTrojki)\r\nprint(\"Broj dvojki koje ste osvojili u milion pokusaja je\",BrojDvojki)\r\nprint(\"Broj jedinica koje ste osvojili u milion pokusaja je\",BrojJedinica)\r\nprint( )\r\nprint(\"Sansa da osvojite sedmicu je \", BrojSedmica/10000, \"%\")\r\nprint(\"Sansa da osvojite sesticu je \", BrojSestica/10000, \"%\")\r\nprint(\"Sansa da osvojite peticu je \", BrojPetica/10000, \"%\")\r\nprint(\"Sansa da osvojite cetvorku je \", BrojCetvorki/10000, \"%\")\r\nprint(\"Sansa da osvojite trojku je \", BrojTrojki/10000, \"%\")\r\nprint(\"Sansa da osvojite dvojku je \", BrojDvojki/10000, \"%\")\r\nprint(\"Sansa da osvojite jedinicu je \", BrojJedinica/10000, \"%\")\r\n\r\n\r\n","repo_name":"IgnjatMilutinovic/Lottery_Simulator","sub_path":"Loto.py","file_name":"Loto.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"bs","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39942172465","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom keras import layers\n\nclass TokenizationBlock(layers.Layer):\n\tdef __init__(self, n_embd):\n\t\tsuper(TokenizationBlock, self).__init__()\n\n\t\tself.n_embd = n_embd\n\t\t\n\t\tself.conv1 = layers.Conv1D(n_embd // 2, kernel_size = 5, padding='same', 
activation='relu',\n\t\t\t\t\t\t\t kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0))\n\t\tself.batch1 = layers.BatchNormalization()\n\t\tself.maxpool1 = layers.MaxPooling1D(pool_size = 5)\n\n\t\tself.conv2 = layers.Conv1D(n_embd, kernel_size = 3, padding='same', activation='relu',\n\t\t\t\t\t\t\t kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0))\n\t\tself.batch2 = layers.BatchNormalization()\n\t\tself.maxpool2 = layers.MaxPooling1D(pool_size = 3)\n\n\t\tself.pos_emdb = layers.Embedding(input_dim=100, output_dim=n_embd, \n\t\t\t\t\t\t\tembeddings_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0))\n\n\t\tself.drop = layers.Dropout(0.5)\n\n\tdef call(self, inputs):\n\t\tx = self.conv1(inputs)\n\t\tx = self.batch1(x)\n\t\tx = self.maxpool1(x)\n\n\t\tx = self.conv2(x)\n\t\tx = self.batch2(x)\n\t\tx = self.maxpool2(x)\n\n\t\tx = x + self.pos_emdb(tf.range(100))\n\n\t\treturn self.drop(x)\n\n\tdef get_config(self):\n\t\tconfig = super().get_config()\n\t\tconfig.update({ \"n_embd\": self.n_embd })\n\t\treturn config\n\n\nclass MultiHeadSelfAttentionBlock(layers.Layer):\n\tdef __init__(self, n_embd, n_heads):\n\t\tsuper(MultiHeadSelfAttentionBlock, self).__init__()\n\t\t\n\t\tself.n_embd = n_embd\n\t\tself.n_heads = n_heads\n\t\tself.head_embd = n_embd // n_heads\n\t\t\n\t\t# Note that trick is used here\n\t\t# In reality keyW/queryW/valueW consist of n_heads heads each with head_embd dimension\n\t\t# however heads are emulated with single dense matrix instead of n_heads dense matrices\n\t\tself.keyW = layers.Dense(n_embd,\n\t\t\t\t\t\t kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.1))\n\t\tself.queryW = layers.Dense(n_embd,\n\t\t\t\t\t\t\t kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.1))\n\t\tself.valueW = layers.Dense(n_embd,\n\t\t\t\t\t\t\t kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.1))\n\t\t\n\t\tself.projW = layers.Dense(n_embd,\n\t\t\t\t\t\t\tkernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0))\n\t\t\n\t\tself.drop1 = layers.Dropout(0.5)\n\t\tself.drop2 = layers.Dropout(0.5)\n\n\tdef reshapeForHeads(self, x, batch_size):\n\t\tx = tf.reshape(x, (batch_size, -1, self.n_heads, self.head_embd)) # (B, T, n_embd) -> (B, T, n_heads, head_embd)\n\t\treturn tf.transpose(x, perm = [0, 2, 1, 3]) # (B, T, n_heads, head_embd) -> (B, n_heads, T, head_embd)\n\n\tdef call(self, x):\n\t\tbatch_size = tf.shape(x)[0] # (B, T, n_embd), where B is batch size, T is number of tokens in sequence\n\t\t\n\t\t# calculate k, q, v\n\t\tk = self.keyW(x) # (B, T, N)\n\t\tq = self.keyW(x) # (B, T, N)\n\t\tv = self.keyW(x) # (B, T, N)\n\t\t\n\t\t# reshape k, q, v so each head will be processed separately (B, n_heads, T, head_embd)\n\t\tk = self.reshapeForHeads(k, batch_size)\n\t\tq = self.reshapeForHeads(q, batch_size)\n\t\tv = self.reshapeForHeads(v, batch_size)\n\t\t\n\t\t# find attention weights - how well key of each token correponds to query of each token\n\t\t# (B, n_heads, T, head_embd) * (B, n_heads, head_embd, T) -> (B, n_heads, T, T)\n\t\tweights = tf.matmul(q, k, transpose_b=True) / tf.math.sqrt(tf.cast(self.head_embd, tf.float32))\n\t\tweights = tf.nn.softmax(weights, axis=-1)\n\t\tweights = self.drop1(weights)\n\t\t\n\t\t# for each token query we now have weights of keys of other tokens\n\t\t# we now multiply these weights with Values of all tokens for each dimension of Values\n\t\t# summary: [for each token query][for each Value 
dimension] find dot product over all corresponding tokens\n\t\t# (B, n_heads, T, T) * (B, n_heads, T, head_embd) -> (B, n_heads, T, head_embd)\n\t\toutput = tf.matmul(weights, v)\n\t\toutput = tf.transpose(output, perm = [0, 2, 1, 3]) # (B, n_heads, T, head_embd) -> (B, T, n_heads, head_embd)\n\t\toutput = tf.reshape(output, (batch_size, -1, self.n_embd)) # (B, T, n_heads, head_embd) -> (B, T, n_embd)\n\t\t\n\t\t# apply final projection to combine information from different heads\n\t\treturn self.drop2(self.projW(output))\n\n\tdef get_config(self):\n\t\tconfig = super().get_config()\n\t\tconfig.update({ \"n_embd\": self.n_embd, \"n_heads\": self.n_heads })\n\t\treturn config\n\n\nclass TransformerEncoderBlock(layers.Layer):\n\tdef __init__(self, n_embd, n_heads, scale):\n\t\tsuper(TransformerEncoderBlock, self).__init__()\n\n\t\tself.n_embd = n_embd\n\t\tself.n_heads = n_heads\n\t\tself.scale = scale\n\t\t\n\t\tself.norm1 = layers.LayerNormalization()\n\t\tself.mhsa = MultiHeadSelfAttentionBlock(n_embd, n_heads)\n\t\t\n\t\tself.norm2 = layers.LayerNormalization()\n\t\tself.ff1 = layers.Dense(n_embd * scale, activation='relu',\n\t\t\t\t\t\t kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0))\n\t\tself.ff2 = layers.Dense(n_embd,\n\t\t\t\t\t\t kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0))\n\t\tself.drop = layers.Dropout(0.5)\n\n\tdef call(self, x):\n\t\tx = x + self.mhsa(self.norm1(x))\n\t\tx = x + self.drop(self.ff2(self.ff1(self.norm2(x))))\n\t\treturn x\n\n\tdef get_config(self):\n\t\tconfig = super().get_config()\n\t\tconfig.update({ \"n_embd\": self.n_embd, \"n_heads\": self.n_heads, \"scale\": self.scale })\n\t\treturn config\n\n\nclass SequencePoolingBlock(layers.Layer):\n\tdef __init__(self):\n\t\tsuper(SequencePoolingBlock, self).__init__()\n\t\t\n\t\tself.norm = layers.LayerNormalization()\n\t\tself.ll = layers.Dense(1, kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0))\n\t\tself.drop = layers.Dropout(0.5)\n\n\tdef call(self, x):\n\t\tinput_shape = tf.shape(x)\n\t\t\n\t\tx = self.norm(x)\n\t\t\n\t\ty = tf.reshape(self.ll(x), (input_shape[0], 1, -1))\n\t\ty = tf.nn.softmax(y)\n\t\t\n\t\toutput = tf.matmul(y, x)\n\t\toutput = tf.squeeze(output, axis=1)\n\t\t\n\t\treturn self.drop(output)\n\n\nclass Conv1DCompactTransformer(layers.Layer):\n\tdef __init__(self, n_blocks, n_embd, n_heads, scale, **kwargs):\n\t\tsuper(Conv1DCompactTransformer, self).__init__(**kwargs)\n\n\t\tself.n_blocks = n_blocks\n\t\tself.n_embd = n_embd\n\t\tself.n_heads = n_heads\n\t\tself.scale = scale\n\t\t\n\t\tself.tokenizer = TokenizationBlock(n_embd)\n\t\tself.blocks = keras.Sequential([TransformerEncoderBlock(n_embd, n_heads, scale) for i in range(n_blocks)])\n\t\tself.seqpool = SequencePoolingBlock()\n\t\t\n\t\tself.head_p1 = layers.Dense(n_embd // 2, activation='relu',\n\t\t\t\t\t\t\t kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0))\n\t\tself.head_p2 = layers.Dense(3, activation = 'softmax',\n\t\t\t\t\t\t\t kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0))\n\n\tdef call(self, x):\n\t\tx = self.tokenizer(x)\n\t\tx = self.blocks(x)\n\t\tx = self.seqpool(x)\n\t\tx = self.head_p2(self.head_p1(x))\n\t\t\n\t\treturn x\n\n\tdef get_config(self):\n\t\tconfig = super().get_config()\n\t\tconfig.update({ \"n_blocks\": self.n_blocks, \"n_embd\": self.n_embd, \"n_heads\": self.n_heads, \"scale\": self.scale })\n\t\treturn 
config","repo_name":"reeWorlds/ECG_Test_Resnet_Transformer","sub_path":"Transformers/C1DCT_2_64_4_2/C1DCT_2_64_4_2/C1DCT.py","file_name":"C1DCT.py","file_ext":"py","file_size_in_byte":6813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"916951730","text":"# nizan mandelblit, 313485468, eldad horvitz, 314964438\nimport base64\nimport socket, os, datetime, random, sys\nimport hashlib\nimport struct\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization, hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\n\n\ndef main():\n print(\"hi from mix\")\n Y = \"sk\" + sys.argv[1] + \".pem\"\n port = sys.argv[2]\n with open(Y, \"rb\") as key_file:\n private_key = serialization.load_pem_private_key(key_file.read(), password=None, backend=default_backend())\n\n s = socket.socket() # Create a socket object\n s.bind((\"127.0.0.1\", int(port))) # Bind to the port\n s.listen() # Now wait for client connection.\n while True:\n c, addr = s.accept() # Establish connection with client.\n print('Got connection from', addr)\n encryptedMsg = \"\"\n msg = \"\"\n while True:\n msg = c.recv(8192)\n encryptedMsg += str(msg)\n if not msg:\n break\n decryptedMsg = private_key.decrypt(encryptedMsg, padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(), label=None))\n ipSend = socket.inet_ntoa(decryptedMsg[0:4])\n portSend = struct.unpack('>h', decryptedMsg[4:6])[0]\n send = socket.socket() # Create a socket object\n send.connect((ipSend, portSend))\n send.sendall(decryptedMsg[6:])\n s.close() # Close the socket when done\n\n print(decryptedMsg)\n # c.close() # Close the connection\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"NizanMandelblit/ex2SecureNetworks-Mixnet-servers","sub_path":"mix.py","file_name":"mix.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10782096420","text":"from collections import deque\nimport sys\ninput =sys.stdin.readline\nqueue = deque()\nopers = int(input().rstrip('\\n'))\n\ndef dopop(queue):\n if queue:\n result = queue[0]\n queue.popleft()\n return result\n else:\n return -1\n\ndef is_empty(queue):\n if queue:\n return 0\n else:\n return 1\n\ndef front(queue):\n if queue:\n return queue[0]\n else:\n return -1\n\ndef back(queue):\n if queue:\n return queue[-1]\n else:\n return -1\n\nfor _ in range(opers):\n calc = input().rstrip('\\n')\n if calc[:4] == \"push\":\n num = int(calc.split()[1])\n queue.append(num)\n elif calc == \"pop\":\n print(dopop(queue))\n elif calc == \"front\":\n print(front(queue))\n elif calc == \"back\":\n print(back(queue))\n elif calc == \"empty\":\n print(int(len(queue) == 0))\n elif calc == \"size\":\n print(len(queue))","repo_name":"entrekid/algorithms","sub_path":"basic/10845/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4821357702","text":"def only_floats(a, b):\n if type(a) == float and type(b) == float: return 2\n if type(a) == float or type(b) == float: return 1\n return 0\n\ndef word_index(l):\n j = 0\n r = 0\n for i in l:\n if len(i) > len(l[r]):\n r = j\n j+=1\n return r\n \na = only_floats(2.1, 0) \nb = only_floats(2.4, 3118.2)\nc = only_floats(123, 123807)\nwords1 = [\"Hate\", \"remorse\", \"vengeance\"]\nworse2 = [\"Love\", 
\"Hate\", \"Fate\", \"Gate\", \"Bate\"]\nprint(word_index(words1))\nprint(word_index(worse2))\nprint(a,b,c)","repo_name":"geeklay/50-Days-of-Python","sub_path":"Day1/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73641514969","text":"from selenium import webdriver\r\nURL = 'http://localhost:4444/wd/hub'\r\n\r\nbrowser = webdriver.Remote(\r\n command_executor='http://127.0.0.1:4444/wd/hub',\r\n options=webdriver.ChromeOptions()\r\n)\r\nbrowser.get(\"http://naver.com\")\r\nprint(browser.title)\r\nbrowser.close()\r\n","repo_name":"KHyeon9/crawling_study_and_mini_project","sub_path":"Dynamic_Crawling_Mini_Project/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6780059590","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def middleNode(self, head: ListNode) -> ListNode:\n mlist = head\n count = 0 \n \n while True:\n nextnode = mlist.next\n count = count + 1\n if nextnode == None:\n break\n mlist = nextnode\n \n mlist = head\n for i in range(count//2):\n mlist = mlist.next\n \n return mlist\n \n'''\n1. problem is\n- 중복되는 숫자가 들어갈 수 있나요, 수열인가요, 정렬여부 (궁금하지만, 문제와 그닥 상관은 없다) no, 문제를 푸는데 고려사항이 아님 위치만 고려해야함\n- 노드의 개수는 100개 이하\n- 예제를 보면 head노드가 가리키는 노드는 1인건가요 yes\n\n{val: 1, next: ListNode{val: 2, next: ListNode{val: 3, next: ListNode{val: 4, next: ListNode{val: 5, next: None}}}}} \n \n2. TC\nTC1) [1,2,3,4,5] :: head -> 1 -> 2 -> 3 -> 4 -> 5 -> none\n \n head is pointing\n \n List : ListNode{val: 1, next: ListNode{val: 2, next: ListNode{val: 3, next: ListNode{val: 4, next: ListNode{val: 5, next: None}}}}} \n val : 1\n next : ListNode{val: 2, next: ListNode{val: 3, next: ListNode{val: 4, next: ListNode{val: 5, next: None}}}}\n \n \n head.next (val = 1) is pointing \n \n List : ListNode{val: 2, next: ListNode{val: 3, next: ListNode{val: 4, next: ListNode{val: 5, next: None}}}}\n val : 2\n next : ListNode{val: 3, next: ListNode{val: 4, next: ListNode{val: 5, next: None}\n \n head.next.next (val = 2) is pointing \n \n List : ListNode{val: 3, next: ListNode{val: 4, next: ListNode{val: 5, next: None}}}\n val : 3\n next : ListNode{val: 4, next: ListNode{val: 5, next: None}}\n \n head.next.next.next (val = 3) is pointing \n \n List : ListNode{val: 4, next: ListNode{val: 5, next: None}}\n val : 4\n next : ListNode{val: 5, next: None}\n \n head.next.next.next.next (val = 4) is pointing \n \n List : ListNode{val: 5, next: None}\n val : 5\n next : None\n \n \n ====================================================================\n\n until next is None, 5 visits (h,1,2,3,4), same as list's length\n \n middle node is 3 node, at third visit's node is pointing 3 node \n \n so, at second visit's node's next (2 node's next) shoould be returned\n \n \nTC2) [1,2] :: head -> 1 -> 2 -> none\n \n middle node is 2 node, at second visit's node is pointing 2 node \n \n so, at first visit's node's next (1 node's next) shoould be returned\n\n \n \n3. 
brain stroming\n a) cal linked list's length\n until next node is none, keep visit the next node and count the visits (from head to last node)\n so, if the list's length is N, the count is N + 1\n \n list = head\n count = 0 \n for val, nextnode in list:\n count = count + 1 # representing the node index (starts 1) and last index would be same as list's length\n if nextnode == None:\n break\n list = nextnode\n \n \n \n b) visit middle node\n when linked list's length (N)\n -> even {middle node is that (N//2) visit's node is pointing }\n -> else {middle node is that (N//2)+1 visit's node is pointing }\n\n4. summarize\n\n mlist = head\n count = 0 \n visit = 0\n for val, nextnode in mlist:\n count = count + 1 # representing the node index (starts 1) and last index would be same as list's length\n if nextnode == None:\n break\n mlist = nextnode\n \n if count%2:\n visit = count//2 + 1\n else:\n visit = count//2 \n \n mlist = head\n for i in range(visit):\n mlist = mlist.next\n \n return mlist\n \n''' \n\n","repo_name":"sery270/Algorithm","sub_path":"DataStructure/LinkedList/[Leetcode] 876. Middle of the Linked List.py","file_name":"[Leetcode] 876. Middle of the Linked List.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"72715089047","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('latest-products/', views.LatestProduct.as_view()),\n path('category/', views.ProductCategory.as_view()),\n path('latest-products//', views.ProductDetail.as_view()),\n path('latest-products//', views.ProductDetail.as_view()),\n path('category//', views.CategoryDetail.as_view()),\n path('user-list/', views.UserList.as_view()),\n path('user-list//', views.UserDetails.as_view()),\n path('my-account/activate///', views.ActivateUser.as_view({'get':'activation'})),\n path('newsletter-subscription/', views.newsletter_email),\n path('newsletter-emails/', views.NewsletterEmails.as_view())\n]","repo_name":"SammyStonique/Gee-Collections","sub_path":"product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14335134498","text":"# -*- coding: utf-8 -*-\n\"\"\"\nri_rev1.pyプログラム\n乱数による数値積分プログラム\n擬似乱数を使って数値積分を行う\nグラフを描画\n\"\"\"\n\n# モジュールのインポート\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 定数\nSEED = 1 # 乱数の種\n\n# メイン実行部\n# 試行回数nの入力\nn = int(input(\"試行回数nを入力してください:\"))\n# 乱数の初期化\nrandom.seed(SEED)\nxin_list = []\nyin_list = []\nxout_list = []\nyout_list = []\nintegral = 0\n# 積分値の計算\nfor i in range(n):\n x = random.random()\n y = random.random()\n if (x * x + y * y) <= 1: # 円の内部 \n integral += 1\n xin_list.append(x)\n yin_list.append(y)\n else:\n xout_list.append(x)\n yout_list.append(y)\n\n# 結果の出力\nres = float(integral) / n\nprint(\"積分値I = {0}, 4I = {1}\".format(res, 4*res))\n\nx_list = np.linspace(0, 1, 100)\ny_list = [ np.sqrt(1 - x**2) for x in x_list ]\n\nfig = plt.figure(figsize=(8,8))\nax = fig.add_subplot(111, xlabel='$x$', ylabel='$y$', xlim=(0.0, 1.2), ylim=(0.0, 1.2))\nax.grid(visible=True, which='major', color='#666666', linestyle='--')\nplt.scatter(xin_list, yin_list, c='red', s=5)\nplt.scatter(xout_list, yout_list, c='blue', s=5)\nplt.plot(x_list, y_list, lw=1, c='black')\n\nresult_text = \"I = {0}, 4I = {1}\".format(res, 4*res)\nfig.text(0.6, 0.8, result_text)\n\nsavefile = \"./png/ri_rev1.png\"\nplt.savefig(savefile, 
format=\"png\", dpi=300)\n\nplt.show()\n\n# ri_rev1.pyの終わり\n","repo_name":"knakaji1210/book_numerical","sub_path":"chapter05/ri_rev1.py","file_name":"ri_rev1.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2502714804","text":"import sys\nimport math\ninput = sys.stdin.readline\n\nN = int(input())\nropes = []\n\nfor i in range(N):\n ropes.append(int(input()))\nropes = sorted(ropes, reverse=True)\n\nresult = []\nfor idx, rope in enumerate(ropes):\n x = rope * (idx+1)\n result.append(x)\nprint(max(result))\n","repo_name":"Sensol2/DailyBOJ","sub_path":"백준/Silver/2217. 로프/로프.py","file_name":"로프.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3985746770","text":"# -*- coding: utf-8 -*-\n# Use unicode source code to make test character string writing easier\nfrom csvimport.tests.testcase import CommandTestCase\nfrom csvimport.tests.models import Country\nimport sys\nimport timeit\nfrom django.core.exceptions import ObjectDoesNotExist\n\npyversion = sys.version_info[0] # python 2 or 3\n\n\nclass PerformanceTest(CommandTestCase):\n \"\"\" Run test of file parsing \"\"\"\n\n def test_time_load(self, filename=\"countries.csv\"):\n \"\"\"Time the upload of a country file\"\"\"\n errs = [\"Imported 246 rows to Country\"]\n bulk_time = self.command(\n csvfile=filename,\n modelname=\"csvimport.Country\",\n defaults=\"\",\n expected_errs=errs,\n clean=False,\n bulk=True,\n time=True,\n )\n self.assertTrue(bulk_time < 0.4)\n self.assertTrue(Country.objects.count() > 240)\n Country.objects.all().delete()\n single_time = self.command(\n csvfile=filename,\n modelname=\"csvimport.Country\",\n defaults=\"\",\n expected_errs=errs,\n clean=False,\n bulk=False,\n time=True,\n )\n self.assertTrue(single_time > bulk_time)\n print(\n \"Time to run bulk countries import was %s faster than %s\"\n % (bulk_time, single_time,)\n )\n","repo_name":"edcrewe/django-csvimport","sub_path":"csvimport/tests/performance_tests.py","file_name":"performance_tests.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":341,"dataset":"github-code","pt":"31"} +{"seq_id":"72838389528","text":"#!/usr/bin/python3\n# coding=utf-8\n\n####################\n# SZÓFAJ SZABÁLYOK #\n####################\n\nPOS_RULES = {\n '/Adj': ('A', 'f'),\n '/Adj|Abbr': ('A', 'f'),\n '/Adj|Attr': ('A', 'f'),\n '/Adj|Attr|Abbr': ('A', 'f'),\n '/Adj|Attr|Pro': ('P', 'd'),\n '/Adj|Pred': ('A', 'f'),\n '/Adj|Pro': ('P', 'd'),\n '/Adj|Pro|Int': ('P', 'q'),\n '/Adj|Pro|Rel': ('P', 'r'),\n '/Adj|Unit': ('A', 'f'),\n '/Adj|col': ('A', 'f'),\n '/Adj|nat': ('A', 'f'),\n\n '/Adv': ('R', 'x'),\n '/Adv|(Adj)': ('R', 'x'),\n '/Adv|(Num)': ('R', 'x'),\n '/Adv|Abbr': ('R', 'x'),\n '/Adv|Acronx': ('R', 'x'),\n '/Adv|AdjMod': ('R', 'x'),\n '/Adv|Pro': ('R', 'd'),\n '/Adv|Pro|Int': ('R', 'q'),\n '/Adv|Pro|Rel': ('R', 'r'),\n\n '/Cnj': ('C', 'c'),\n '/Cnj|Abbr': ('C', 'c'),\n\n '/Det': ('R', 'x'),\n '/Det|Art.Def': ('T', 'f'),\n '/Det|Art.NDef': ('T', 'i'),\n '/Det|Pro': ('P', 'd'),\n '/Det|Pro|(Post)': ('P', 'd'),\n '/Det|Pro|Int': ('P', 'q'),\n '/Det|Pro|Rel': ('P', 'r'),\n '/Det|Pro|def': ('P', 'd'),\n '/Det|Q.NDef': ('P', 'g'),\n\t'/Det|Q|indef': ('P', 'g'),\n '/Det|Q': ('P', 'i'),\n\n '/Inj-Utt': ('I', 'o'),\n\n '/N': ('N', 'n'),\n '/N|Abbr': ('N', 'n'),\n '/N|Abbr|ChemSym': ('N', 'n'),\n '/N|Acron': ('N', 'n'),\n 
'/N|Acronx': ('N', 'n'),\n '/N|Ltr': ('N', 'n'),\n '/N|Pro': ('P', 'p'),\n '/N|Pro|(Post)': ('P', 'd'),\n '/N|Pro|Abbr': ('P', 'p'),\n '/N|Pro|Int': ('P', 'q'),\n '/N|Pro|Rel': ('P', 'r'),\n '/N|Unit': ('N', 'n'),\n '/N|Unit|Abbr': ('N', 'n'),\n '/N|lat': ('N', 'n'),\n '/N|mat': ('N', 'n'),\n\n '/Num': ('M', 'c'),\n '/Num|Abbr': ('M', 'c'),\n '/Num|Attr': ('M', 'c'),\n '/Num|Digit': ('M', 'c'),\n '/Num|Pro': ('P', 'd'),\n '/Num|Pro|Int': ('P', 'q'),\n '/Num|Pro|Rel': ('P', 'r'),\n '/Num|Roman': ('M', 'c'),\n\n '/Post': ('S', 't'),\n '/Post|(Abl)': ('S', 't'),\n '/Post|(All)': ('S', 't'),\n '/Post|(Ela)': ('S', 't'),\n '/Post|(Ins)': ('S', 't'),\n '/Post|(N0)': ('S', 't'),\n '/Post|(Poss)': ('S', 't'),\n '/Post|(Subl)': ('S', 't'),\n '/Post|(Supe)': ('S', 't'),\n '/Post|(Ter)': ('S', 't'),\n\n '/Prep': ('C', 'c'),\n '/Prev': ('R', 'p'),\n '/QPtcl': ('R', 'q'),\n\n '/V': ('V', 'm'),\n\n '/S|Abbr': ('X', '_'),\n '/X': ('X', '_'),\n '/X|Abbr': ('X', '_')\n}\n\n###################\n# DERIV SZABÁLYOK #\n###################\n\nDERIV_RULES = {\n '_Abe/Adj': ('A', 'f'), # \"-A?tlAn, -tAlAn\" abessivus = melléknévképző (fosztóképző)\n '_AdjVbz_Ntr/V': ('V', 'm'), # \"-Vs?Odik, -Ul\" denominális (melléknévből) intranzitívige-képző\n '_AdjVbz_Tr/V': ('V', 'm'), # \"-ít\" denominális (melléknévből) tranzitívige-képző\n '_Adjz:i/Adj': ('A', 'f'), # \"-i\" melléknévképző\n '_VAdjz:nivaló/Adj': ('A', 'f'), # \"-nivaló\" melléknévképző\n '_Adjz:s/Adj': ('A', 'f'), # \"-Vs\" melléknévképző\n '_Adjz:Ó/Adj': ('A', 'f'), # \" -Ó\" melléknévképző, mély hangrendű magánhangzók ragozott alakjaiban\n '_Adjz:Ú/Adj': ('A', 'f'), # \" -Ú\" melléknévképző\n '_Adjz_Hab/Adj': ('A', 'f'), # \"-Ós\" melléknévképző: habituális\n '_Adjz_Loc:beli/Adj': ('A', 'f'), # \"-beli\" melléknévképző (helyjelölő)\n '_Adjz_Ord:VdlAgOs/Adj': ('A', 'f'), # \"-VdlAgOs\" melléknévképző (számnévből)\n '_Adjz_Quant/Adj': ('A', 'f'), # \"-nyi\" mennyiségnévképző\n '_Adjz_Type:fajta/Adj': ('A', 'f'), # \"-fajta\" melléknévképző (típusjelölő)\n '_Adjz_Type:forma/Adj': ('A', 'f'), # \"-forma\" melléknévképző (típusjelölő)\n '_Adjz_Type:féle/Adj': ('A', 'f'), # \"-féle\" melléknévképző (típusjelölő)\n '_Adjz_Type:szerű/Adj': ('A', 'f'), # \"-szerű\" melléknévképző (típusjelölő)\n '_AdvPerfPtcp/Adv': ('R', 'v'), # \"-vÁn\" határozói igenév\n '_AdvPtcp/Adv': ('R', 'v'), # \"-vA\" határozói igenév\n '_AdvPtcp:ttOn/Adv': ('R', 'v'), # \"-ttOn\" határozói igenév\n '_AdvPtcp:vÁst/Adv': ('R', 'v'), # \" -vÁst\" határozói igenév\n '_Advz:rét/Adv': ('R', 'x'), # \"-rét\" számnévi határozóképző\n '_Advz_LocDistr:szerte/Adv': ('R', 'x'), # \"-szerte\" határozóképző (térbeli fedés)\n '_Advz_Quant:szám/Adv': ('R', 'x'), # \"-szám\" határozóképző mennyiségekre\n '_Aggreg/Adv': ('M', 'c'), # \"-An\" csoportszámosság-határozó\n '_Caus/V': ('V', 's'), # \"-t?At\" műveltetőige-képző\n # '_Com:stUl/Adv' # \"-stUl\" comitativusi (társhatározói) esetrag # nem módosít szófajt\n # '_Comp/Adj': ('A', 'f'), # \"-bb\" középfok # nem módosít szófajt\n # '_Comp/Adv': ('R', 'x'), # \"-bb\" középfok # nem módosít szófajt\n # '_Comp/Adv|Pro': ('P', 'd'), # \"-bb\" középfok # nem módosít szófajt\n # '_Comp/Num': ('M', 'c'), # \"-bb\" középfok # nem módosít szófajt\n # '_Comp/Post|(Abl)': ('R', 'x'), # \"-bb\" középfok # nem módosít szófajt\n # '_Design/Adj': ('A', 'f'), # \"-(bb)ik\" kijelölő # nem módosít szófajt\n '_Des/N': (('N', 'n')), # \"-hatnék\" desiderativus\n # '_Dim:cskA/Adj': ('A', 'f'), # \"-VcskA\" kicsinyítő képző # nem módosít szófajt\n # '_Dim:cskA/N': (('N', 
'n')), # \"-VcskA\" kicsinyítő képző # nem módosít szófajt\n '_Distr:nként/Adv': ('R', 'x'), # \"-Vnként\" disztributív\n '_DistrFrq:ntA/Adv': ('R', 'x'), # \"-VntA\" gyakorisághatározó\n '_Frac/Num': ('M', 'f'), # \"-Vd\" törtszámnév\n '_Freq/V': ('V', 'f'), # \"-O?gAt\" gyakorítóképző\n '_FutPtcp/Adj': ('A', 'u'), # \"-AndÓ\" „beálló\" melléknévi igenév\n '_Ger/N': (('N', 'n')), # \"-Ás\" nomen actionis igenominalizáló\n '_Ger:tA/N': (('N', 'n')), # \"-tA\" birtokos igenominalizáló\n '_ImpfPtcp/Adj': ('A', 'p'), # \"-Ó\" folyamatos melléknévi igenév\n # '_Manner/Adv', # \"-An, -Ul\" határozóképző: módhatározó # nem módosít szófajt\n # '_Manner:0/Adv', # határozóképző: módhatározó (zéró) # nem módosít szófajt\n # '_MedPass/V': ('V', 'm'), # \"-Ódik\" mediális ige # nem módosít szófajt\n # '_Mlt-Iter/Adv', # \"-szOr\" multiplikatív/iteratív # nem módosít szófajt\n # '_MltComp/Adv', # \"-szOrtA\" összehasonlító multiplikatív # nem módosít szófajt\n '_Mod/V': ('V', 'o'), # -hAt\" modális („ható\") igeképző\n '_ModPtcp/Adj': ('A', 'p'), # \"-hAtÓ\" modális melléknévi igenév\n # '_Mrs/N': ('N', 'p'), # \"-né\" asszonynévképző # nem módosít szófajt\n '_NAdvz:ilAg/Adv': ('A', 'f'), # \"-ilAg\" denominális (főnévből) határozóképző\n '_NVbz_Ntr:zik/V': ('V', 'm'), # \"-zik\" intranzitív igeképző\n '_NVbz_Tr:z/V': ('V', 'm'), # \"-z\" denominális (főnévből) tranzitívige-képző\n '_NegModPtcp/Adj': ('A', 'f'), # \"-hAtAtlAn\" tagadó modális melléknévi igenév\n '_NegPtcp/Adj': ('A', 'f'), # \"-AtlAn\" tagadó passzív melléknévi igenév (igei fosztóképző)\n '_Nz:s/N': (('N', 'n')), # \"-Vs\" főnévképző\n '_Nz_Abstr/N': (('N', 'n')), # \"-sÁg\" főnévképző absztraktfőnév-képző\n # '_Nz_Type:féleség/N': (('N', 'n')), # \"-féleség\" főnévképző (típusjelölő) # nem módosít szófajt\n # '_Nz_Type:szerűség/N': (('N', 'n')), # \"-szerűség\" főnévképző (típusjelölő) # nem módosít szófajt\n '_Ord/Adj': ('M', 'o'), # \"-Vdik\" sorszámnév\n '_OrdDate/N': (('N', 'n')), # \"-Vdika\" dátumokban a nap sorszámnévképzője\n # '_Pass/V': ('V', 'm'), # \"-t?Atik\" passzív # nem módosít szófajt\n '_PerfPtcp/Adj': ('A', 's'), # \"-O?tt\" befejezett melléknévi igenév\n '_PerfPtcp_Subj=tA/Adj': ('A', 's'), # \"-tA\" befejezett melléknévi igenév\n '_Tmp_Ante/Adv': (('N', 'n')), # \"-jA\" időbeli megelőzés\n '_Tmp_Loc/Adv': ('R', 'x'), # \"-vAl, -0\" időhatározói végződés\n '_VAdvz:ÓlAg/Adv': ('R', 'x'), # \"-ÓlAg\" határozóképző: igéből\n '_VNz:nivaló/N': (('N', 'n')), # \"-nivaló\" főnévképző\n '_Vbz:kOd/V': ('V', 'm') # \"-s?kOdik\" denominális igeképző\n}\n\n#########################\n# POS helyett INFL jegy #\n#########################\n\nPOS_TO_INFL_FEAT = ('/Supl',\n '/Num|Digit',\n '/Num|Roman',\n '/CmpdPfx' # ez a jegy elnyelődik (önállóan nem élő összetételi előtag (almenü)\n)\n\n###############\n# SUPERLATIVE #\n###############\n\nSUPERLATIVE = ('/Supl', )\n\n###########################\n# DERIV helyett INFL jegy #\n###########################\n\nDERIV_TO_INFL_FEAT = (\n '_Comp/Adj', # \"-bb\" középfok\n '_Comp/Adv', # \"-bb\" középfok\n '_Comp/Adv|Pro', # \"-bb\" középfok\n '_Comp/Post|(Abl)' # \"-bb\" középfok \n '_Design/Adj', # \"-(bb)ik\" kijelölő\n\n '_Aggreg/Adv', # \"-An\" csoportszámosság-határozó\n '_Com:stUl/Adv', # \"-stUl\" comitativusi (társhatározói) esetrag\n '_Distr:nként/Adv', # \"-Vnként\" disztributív\n '_Manner/Adv', # \"-An, -Ul\" határozóképző: módhatározó\n '_Manner:0/Adv', # határozóképző: módhat ha zéró toldalékként realizálódik a \"-A?tlAn/-tAlAn\" után\n '_Mlt-Iter/Adv', # \"-szOr\" 
multiplikatív/iteratív\n '_MltComp/Adv' # \"-szOrtA\" összehasonlító multiplikatív\n)\n\n##########################\n# inflexiós jegyek: igei #\n##########################\n\nVERBAL_INFL_RULES = {\n 'Inf': ['VForm=n'],\n \n 'Sbjv': ['VForm=m', 'Tense=p'],\n 'Cond': ['VForm=c', 'Tense=p'],\n 'Prs': ['VForm=i', 'Tense=p'],\n 'Pst': ['VForm=i', 'Tense=s'],\n\n '1Sg': ['Number=s', 'Person=1'],\n '1Sg›2': ['Definiteness=2', 'Number=s', 'Person=1'],\n '2Sg': ['Number=s', 'Person=2'],\n '3Sg': ['Number=s', 'Person=3'],\n '1Pl': ['Number=p', 'Person=1'],\n '1Pl*': ['Number=p', 'Person=1'],\n '2Pl': ['Number=p', 'Person=2'],\n '3Pl': ['Number=p', 'Person=3'],\n '3Pl*': ['Number=p', 'Person=3'],\n 'Def': ['Definiteness=y'],\n 'NDef': ['Definiteness=n']\n}\n\n#############################\n# inflexiós jegyek: névszói #\n#############################\n\nNOMINAL_INFL_RULES = {\n '/Num|Digit': ['Form=d'],\n '/Num|Roman': ['Form=r'],\n '/Supl': ['Degree=s'],\n\n '_Comp/Adj': ['Degree=c'],\n '_Comp/Adv|Pro': ['Degree=c'],\n '_Comp/Adv': ['Degree=c'],\n '_Comp/Post|(Abl)': ['Degree=c'],\n '_Design/Adj': ['Degree=c'],\n\n '_Aggreg/Adv': ['Case=w'],\n '_Com:stUl/Adv': ['Case=q'],\n '_Distr:nként/Adv': ['Case=u'],\n '_Manner/Adv': ['Case=w'], # Cas=w lehet A, N, M, P\n '_Manner:0/Adv': ['Case=w'],\n '_Mlt-Iter/Adv': ['Case=6'],\n '_MltComp/Adv': ['Case=6'],\n\n 'Nom': ['Case=n'],\n 'Acc': ['Case=a'],\n 'Dat': ['Case=d'],\n 'Ins': ['Case=i'],\n 'Cau': ['Case=c'],\n 'Ine': ['Case=2'],\n 'Supe': ['Case=p'],\n 'Ade': ['Case=3'],\n 'Ill': ['Case=x'],\n 'Ela': ['Case=e'],\n 'Del': ['Case=h'],\n 'Subl': ['Case=s'],\n 'Abl': ['Case=b'],\n 'All': ['Case=t'],\n 'Ter': ['Case=9'],\n 'Temp': ['Case=m'],\n 'Loc': ['Case=l'],\n 'Transl': ['Case=y'],\n 'Ess': ['Case=w'],\n 'EssFor:ként': ['Case=f'],\n 'EssFor:képp': ['Case=f'],\n 'EssFor:képpen': ['Case=f'],\n\n 'Pl': ['Number=p'],\n 'Fam.Pl': ['Number=p'],\n '1Sg': ['Number=s', 'Person=1'],\n '2Sg': ['Number=s', 'Person=2'],\n '3Sg': ['Number=s', 'Person=3'],\n '1Pl': ['Number=p', 'Person=1'],\n '2Pl': ['Number=p', 'Person=2'],\n '3Pl': ['Number=p', 'Person=3'],\n\n 'Poss.1Sg': ['OwnerNumber=s', 'OwnerPerson=1'],\n 'Poss.2Sg': ['OwnerNumber=s', 'OwnerPerson=2'],\n 'Poss.3Sg': ['OwnerNumber=s', 'OwnerPerson=3'],\n 'Poss.1Pl': ['OwnerNumber=p', 'OwnerPerson=1'],\n 'Poss.2Pl': ['OwnerNumber=p', 'OwnerPerson=2'],\n 'Poss.3Pl': ['OwnerNumber=p', 'OwnerPerson=3'],\n\n 'Pl.Poss.1Sg': ['Number=p', 'OwnerNumber=s', 'OwnerPerson=1'],\n 'Pl.Poss.2Sg': ['Number=p', 'OwnerNumber=s', 'OwnerPerson=2'],\n 'Pl.Poss.3Sg': ['Number=p', 'OwnerNumber=s', 'OwnerPerson=3'],\n 'Pl.Poss.1Pl': ['Number=p', 'OwnerNumber=p', 'OwnerPerson=1'],\n 'Pl.Poss.2Pl': ['Number=p', 'OwnerNumber=p', 'OwnerPerson=2'],\n 'Pl.Poss.3Pl': ['Number=p', 'OwnerNumber=p', 'OwnerPerson=3'],\n\n 'AnP': ['OwnedNumber=s'],\n 'AnP.Pl': ['OwnedNumber=p']\n}\n\n","repo_name":"vadno/emmorph2msd","sub_path":"converterdata/mappings.py","file_name":"mappings.py","file_ext":"py","file_size_in_byte":13642,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8116128883","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2023/3/9 15:29\r\nimport sys\r\nimport os\r\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),'..')))\r\n\r\nimport torch\r\nfrom deep_training.data_helper import ModelArguments\r\nfrom transformers import HfArgumentParser\r\nfrom data_utils import train_info_args, NN_DataHelper, get_deepspeed_config\r\nfrom aigc_zoo.model_zoo.chatglm.llm_model 
import MyTransformer,ChatGLMTokenizer,setup_model_profile, ChatGLMConfig,PetlArguments\r\n\r\ndeep_config = get_deepspeed_config()\r\n\r\n\r\nif __name__ == '__main__':\r\n train_info_args['seed'] = None\r\n train_info_args['model_name_or_path'] = None\r\n\r\n train_info_args['seed'] = None\r\n parser = HfArgumentParser((ModelArguments,))\r\n (model_args,) = parser.parse_dict(train_info_args, allow_extra_keys=True)\r\n\r\n setup_model_profile()\r\n\r\n dataHelper = NN_DataHelper(model_args)\r\n tokenizer: ChatGLMTokenizer\r\n tokenizer, _, _, _ = dataHelper.load_tokenizer_and_config(\r\n tokenizer_class_name=ChatGLMTokenizer, config_class_name=ChatGLMConfig)\r\n\r\n ###################### 注意 选最新权重\r\n #选择最新的权重 , 根据时间排序 选最新的\r\n config = ChatGLMConfig.from_pretrained('../scripts/best_ckpt')\r\n config.initializer_weight = False\r\n pl_model = MyTransformer(config=config, model_args=model_args, torch_dtype=torch.float16,)\r\n if deep_config is None:\r\n train_weight = '../scripts/best_ckpt/last-v3.ckpt'\r\n else:\r\n #使用转换脚本命令 生成 ./best_ckpt/last/best.pt 权重文件\r\n # cd best_ckpt/last\r\n # python zero_to_fp32.py . best.pt\r\n train_weight = '../scripts/best_ckpt/last/best.pt'\r\n\r\n #加载微调权重\r\n pl_model.load_sft_weight(train_weight,strict=False)\r\n\r\n model = pl_model.get_llm_model()\r\n #保存hf权重\r\n #config.save_pretrained('convert/')\r\n\r\n # 保存sft p-tuning-v2 权重\r\n # pl_model.save_sft_weight('convert/pytorch_model_sft_ptv2.bin')\r\n\r\n #保存sft权重\r\n # pl_model.save_sft_weight('convert/pytorch_model_sft.bin')\r\n\r\n\r\n\r\n if not model.quantized:\r\n # 按需修改,目前只支持 4/8 bit 量化 , 可以保存量化模型\r\n model.half().quantize(4).cuda()\r\n else:\r\n #已经量化,已经保存微调后的量化模型可以 直接加载\r\n model.half().cuda()\r\n model = model.eval()\r\n\r\n text_list = [\r\n \"写一个诗歌,关于冬天\",\r\n \"晚上睡不着应该怎么办\",\r\n ]\r\n for input in text_list:\r\n response, history = model.chat(tokenizer, input, history=[],max_length=2048,\r\n eos_token_id=config.eos_token_id,\r\n do_sample=True, top_p=0.7, temperature=0.95,)\r\n print(\"input\",input)\r\n print(\"response\", response)\r\n\r\n","repo_name":"ssbuild/chatglm_finetuning","sub_path":"infer/infer_finetuning.py","file_name":"infer_finetuning.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","stars":1490,"dataset":"github-code","pt":"31"} +{"seq_id":"24632137078","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter.ttk import Combobox,Treeview\nfrom tkcalendar import DateEntry\nimport pymysql\n\nclass Customer:\n tstamp=\"\"\n stock=0\n def __init__(self,mywindow):\n self.window=Toplevel(mywindow)\n self.window.title(\"Pharmacy Manager/Customer\")\n self.w = self.window.winfo_screenwidth()\n self.h = self.window.winfo_screenheight()\n w1 = self.w -100\n h1 = self.h -150\n self.window.geometry(\"%dx%d+%d+%d\" % (w1, h1, 50,50)) # (width,height,x,y)\n self.window.minsize(w1, h1)\n\n mycolor1 = \"#ffbb99\"\n mycolor2 = \"#ff5500\"\n mycolor3 = \"white\"\n myfont1 = (\"Cambria\",15)\n from PIL import Image,ImageTk\n self.bkimg1 = Image.open(\"images//bg.jpg\").resize((w1,h1))\n self.bkimg2 = ImageTk.PhotoImage(self.bkimg1)\n self.bkimglbl = Label(self.window, image=self.bkimg2)\n self.bkimglbl.place(x=0,y=0)\n\n\n self.headlbl =Label(self.window,text=\"Customers\",background=mycolor2,font=(\"Cambria\",35),\n foreground=mycolor3,borderwidth=10,relief=\"groove\")\n\n self.L1 =Label(self.window,text=\"Cust. Name\",background=mycolor3,font=myfont1)\n self.L2 =Label(self.window,text=\"Cust. Mob. 
No.\",background=mycolor3,font=myfont1)\n self.L3 =Label(self.window,text=\"Med. Name\",background=mycolor3,font=myfont1)\n self.L4 =Label(self.window,text=\"Med. Quantity\",background=mycolor3,font=myfont1)\n self.L5 =Label(self.window,text=\"Date\",background=mycolor3,font=myfont1)\n\n self.t1 = Entry(self.window, font=myfont1, relief=\"solid\")\n self.t2 = Entry(self.window, font=myfont1, relief=\"solid\")\n self.v3=StringVar()\n self.t3 = Combobox(self.window,textvariable=self.v3,state=\"readonly\",font=myfont1,width=19)\n self.t4 = Entry(self.window,font=myfont1,relief=\"solid\")\n self.t5 = DateEntry(self.window, background=\"#ff5500\",\n foreground='white', borderwidth=2, year=2023,date_pattern='y-mm-dd',font=myfont1,width=19)\n\n #TABLE\n\n self.mytable1 = Treeview(self.window, columns=['c1', 'c2', 'c3', 'c4', 'c5', 'c6'], height=20)\n\n self.mytable1.heading(\"c1\", text=\"Time Stamp\")\n self.mytable1.heading(\"c2\", text=\"Cust. Name\")\n self.mytable1.heading(\"c3\", text=\"Cust. Mob. No.\")\n self.mytable1.heading(\"c4\", text=\"Med. Name\")\n self.mytable1.heading(\"c5\", text=\"Med. Quantity\")\n self.mytable1.heading(\"c6\", text=\"Date\")\n\n self.mytable1['show'] = 'headings'\n self.mytable1.column(\"#1\", width=90, anchor=\"center\")\n self.mytable1.column(\"#2\", width=90, anchor=\"center\")\n self.mytable1.column(\"#3\", width=90, anchor=\"center\")\n self.mytable1.column(\"#4\", width=90, anchor=\"center\")\n self.mytable1.column(\"#5\", width=90, anchor=\"center\")\n self.mytable1.column(\"#6\", width=90, anchor=\"center\")\n self.mytable1.bind(\"\", lambda e: self.getvalue())\n\n #BUTTONS\n\n self.b1 = Button(self.window,text=\"Save\",foreground=mycolor3,\n background=mycolor2,font=myfont1,command=self.saveData)\n self.b2 = Button(self.window,text=\"update\",foreground=mycolor3,\n background=mycolor2,font=myfont1,command=self.updateData)\n self.b3 = Button(self.window,text=\"Delete\",foreground=mycolor3,\n background=mycolor2,font=myfont1,command=self.deleteData)\n self.b4 = Button(self.window, text=\"Search\", foreground=mycolor3,\n background=mycolor2, font=myfont1, command=self.searchData)\n self.b5 = Button(self.window, text=\"Clear\", foreground=mycolor3,\n background=mycolor2, font=myfont1, command=self.clearPage)\n\n #PLACEMENTS\n\n self.headlbl.place(x=0,y=0,width=w1,height=80)\n x1 = 50\n y1=100\n h_diff=150\n v_diff=50\n self.mytable1.place(x=x1 + h_diff + 380, y=y1,height=250)\n self.b4.place(x=x1 + h_diff + 250, y=y1,height=40,width=100)\n self.L1.place(x=x1,y=y1)\n self.t1.place(x=x1+h_diff,y=y1)\n y1+=v_diff\n self.L2.place(x=x1,y=y1)\n self.t2.place(x=x1+h_diff,y=y1)\n y1+=v_diff\n self.L3.place(x=x1,y=y1)\n self.t3.place(x=x1+h_diff,y=y1)\n y1+=v_diff\n self.L4.place(x=x1,y=y1)\n self.t4.place(x=x1+h_diff,y=y1)\n y1+=v_diff\n self.L5.place(x=x1,y=y1)\n self.t5.place(x=x1+h_diff,y=y1)\n y1+=v_diff\n self.b1.place(x=x1,y=y1,height=40,width=100)\n self.b2.place(x=x1+110,y=y1,height=40,width=100)\n self.b3.place(x=x1+220,y=y1,height=40,width=100)\n self.b5.place(x=x1+330,y=y1,height=40,width=100)\n self.databaseconnection()\n self.clearPage()\n self.getdata()\n self.window.mainloop()\n\n def databaseconnection(self):\n myhost=\"localhost\"\n mydb=\"pharmacy\"\n myuser=\"root\"\n mypassword=\"\"\n try:\n self.conn = pymysql.connect(host=myhost,db=mydb,user=myuser,password=mypassword)\n self.curr = self.conn.cursor()\n except Exception as e:\n messagebox.showerror(\"Database Error \",\"Error in Database Connection: \\n\"+str(e),parent=self.window)\n\n def 
saveData(self):\n if(self.validation()==False):\n return\n try:\n qry1=\"select stock from medi where medname=%s\"\n self.curr.execute(qry1,self.v3.get())\n data=self.curr.fetchone()\n self.conn.commit()\n diff=int(data[0])-int(self.t4.get())\n print(diff)\n if(diff>0):\n qry2=\"update medi set stock=%s where medname=%s\"\n self.curr.execute(qry2,(diff,self.v3.get()))\n self.conn.commit()\n qry = \"insert into bill values(%s,%s,%s,%s,%s,%s)\"\n\n import time\n self.tstamp=str(int(time.time()))\n rowcount = self.curr.execute(qry,(self.tstamp,self.t1.get(),self.t2.get(),self.v3.get(),\n self.t4.get(),self.t5.get_date()))\n self.conn.commit()\n if rowcount==1:\n messagebox.showinfo(\"Success \",\"Customer Added Successfully\",parent=self.window)\n self.clearPage()\n self.searchData()\n else:\n messagebox.showerror(\"Stock Error \", \"Please Add stock to \"+self.v3.get()+\" Current stock is \"+data[0], parent=self.window)\n except Exception as e:\n messagebox.showerror(\"Query Error \",\"Error in Query: \\n\"+str(e),parent=self.window)\n\n def updateData(self):\n if self.validation() == False:\n return\n\n try:\n qry1 = \"select stock from medi where medname=%s\"\n self.curr.execute(qry1, self.v3.get())\n data = self.curr.fetchone()\n total=int(data[0])+int(self.stock)\n self.conn.commit()\n diff = total - int(self.t4.get())\n if (diff > 0):\n qry2 = \"update medi set stock=%s where medname=%s\"\n self.curr.execute(qry2, (diff, self.v3.get()))\n self.conn.commit()\n qry = \"update bill set cname=%s , cphone=%s , mname=%s, mqnt=%s, \" \\\n \"date=%s where tstamp=%s\"\n rowcount = self.curr.execute(qry,(self.t1.get(),self.t2.get(),\n self.v3.get(),self.t4.get(),self.t5.get(),self.tstamp))\n self.conn.commit()\n if rowcount==1:\n messagebox.showinfo(\"Success \",\"Customer Updated Successfully\",parent=self.window)\n self.clearPage()\n self.searchData()\n else:\n messagebox.showerror(\"Stock Error \", \"Please Add stock to \"+self.v3.get()+\" Current stock is \"+data[0], parent=self.window)\n except Exception as e:\n messagebox.showerror(\"Query Error \",\"Error in Query: \\n\"+str(e),parent=self.window)\n\n def getvalue(self):\n try:\n rowID = self.mytable1.focus()\n data = self.mytable1.item(rowID)\n mycontent = data['values']\n value = mycontent[0]\n self.fetchData(value)\n except Exception as e:\n messagebox.showerror(\"Value Error\", \"Error in Values: \\n\" + str(e), parent=self.window)\n\n def deleteData(self):\n ans = messagebox.askquestion(\"Confirmation\",\"Are you ready to delete ?\",parent=self.window)\n if (ans==\"yes\"):\n try:\n qry1 = \"select stock from medi where medname=%s\"\n self.curr.execute(qry1, self.v3.get())\n data = self.curr.fetchone()\n self.conn.commit()\n qry2 = \"update medi set stock=%s where medname=%s\"\n self.curr.execute(qry2, (int(self.t4.get()) + int(data[0]), self.v3.get()))\n self.conn.commit()\n qry = \"delete from bill where tstamp=%s\"\n rowcount = self.curr.execute(qry,(self.tstamp))\n self.conn.commit()\n if rowcount==1:\n messagebox.showinfo(\"Success \",\"Medicine deleted Successfully\",parent=self.window)\n self.clearPage()\n self.searchData()\n\n except Exception as e:\n messagebox.showerror(\"Query Error \",\"Error in Query: \\n\"+str(e),parent=self.window)\n\n def fetchData(self,ref=None):\n try:\n if(ref==None):\n cap=self.t1.get()\n else:\n cap=ref\n qry = \"select * from bill where tstamp = %s\"\n rowcount = self.curr.execute(qry,cap)\n data = self.curr.fetchone()\n self.clearPage()\n\n if data:\n self.t1.insert(0,data[1])\n 
self.t2.insert(0,data[2])\n                self.v3.set(data[3])\n                self.t4.insert(0,data[4])\n                self.t5.set_date(data[5])\n                self.tstamp=data[0]\n                self.stock=self.t4.get()\n            else:\n                messagebox.showinfo(\"Failure\",\"No Record Found\",parent=self.window)\n\n        except Exception as e:\n            messagebox.showerror(\"Query Error \",\"Error in Query: \\n\"+str(e),parent=self.window)\n\n    def searchData(self):\n        try:\n            qry = \"select * from bill where cname like %s\"\n            rowcount = self.curr.execute(qry, (self.t1.get() + \"%\"))\n            data = self.curr.fetchall()\n            self.mytable1.delete(*self.mytable1.get_children())\n            if data:\n                for row in data:\n                    self.mytable1.insert(\"\", END, values=row)\n\n            else:\n                messagebox.showinfo(\"Failure\", \"No Record Found\", parent=self.window)\n\n        except Exception as e:\n            messagebox.showerror(\"Query Error \", \"Error in Query: \\n\" + str(e), parent=self.window)\n\n    def clearPage(self):\n        self.t1.delete(0,END)\n        self.t2.delete(0,END)\n        self.v3.set(None)\n        self.t4.delete(0, END)\n        self.t5.delete(0, END)\n        self.tstamp=\"\"\n\n    def validation(self):\n        if(len(self.t1.get())<1 or self.t1.get().strip()==\"\"):\n            messagebox.showerror(\"Cust. Name Error\", \"Please enter Name\",parent=self.window)\n            return False\n        elif(len(self.t2.get())<1 or self.t2.get().strip()==\"\"):\n            messagebox.showerror(\"Cust. Phone Error\", \"Please enter Phone No.\",parent=self.window)\n            return False\n        elif (self.v3.get()==\"None\"):\n            messagebox.showerror(\"Medicine Name Error\", \"Please enter Medicine Name\", parent=self.window)\n            return False\n        elif(len(self.t4.get()) < 1 or self.t4.get().strip() == \"\"):\n            messagebox.showerror(\"Quantity Error\", \"Please enter Quantity\", parent=self.window)\n            return False\n        elif (self.t5.get().strip() == \"\"):\n            messagebox.showerror(\"Date Error\", \"Please enter Date\", parent=self.window)\n            return False\n\n        return True\n    def getdata(self):\n        try:\n            qry = \"select medname from medi\"\n            rowcount = self.curr.execute(qry)\n            data = self.curr.fetchall()\n            self.l=[]\n            for i in data:\n                self.l.append(i)\n            self.t3.config(values=self.l)\n\n        except Exception as e:\n            messagebox.showerror(\"Query Error \",\"Error in Query: \\n\"+str(e),parent=self.window)\n\nif __name__ == '__main__':\n    Customer(Tk()) # Customer builds a Toplevel, so it needs a Tk root\n","repo_name":"devanshmarwaha/Pharmacy-Management-System","sub_path":"Project/Customer.py","file_name":"Customer.py","file_ext":"py","file_size_in_byte":12565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5041746619","text":"import os\nfrom setuptools import setup\n\ntry:\n    from ovos_config import LocalConf\n    from ovos_config.locations import USER_CONFIG\n    config = LocalConf(USER_CONFIG)\nexcept ImportError:\n    config = None\n\n\nBASEDIR = os.path.abspath(os.path.dirname(__file__))\nPLUGIN_ENTRY_POINT = 'hivemind_nodered_plug = hivemind_PHAL_plugin_nodered.node:NodeRedMind'\nSKILL_ENTYRY_POINT = 'hivemind_nodered_skill = hivemind_PHAL_plugin_nodered.skill:NodeRedSkill'\n\n\ndef get_version():\n    \"\"\" Find the version of the package\"\"\"\n    version = None\n    version_file = os.path.join(BASEDIR, 'hivemind_PHAL_plugin_nodered', 'version.py')\n    major, minor, build, alpha = (None, None, None, None)\n    with open(version_file) as f:\n        for line in f:\n            if 'VERSION_MAJOR' in line:\n                major = line.split('=')[1].strip()\n            elif 'VERSION_MINOR' in line:\n                minor = line.split('=')[1].strip()\n            elif 'VERSION_BUILD' in line:\n                build = line.split('=')[1].strip()\n            elif 'VERSION_ALPHA' in line:\n                alpha = line.split('=')[1].strip()\n\n            if ((major and minor and 
build and alpha) or\n '# END_VERSION_BLOCK' in line):\n break\n version = f\"{major}.{minor}.{build}\"\n if alpha and int(alpha) > 0:\n version += f\"a{alpha}\"\n return version\n\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\ndef required(requirements_file):\n \"\"\" Read requirements file and remove comments and empty lines. \"\"\"\n with open(os.path.join(BASEDIR, requirements_file), 'r') as f:\n requirements = f.read().splitlines()\n if 'MYCROFT_LOOSE_REQUIREMENTS' in os.environ:\n print('USING LOOSE REQUIREMENTS!')\n requirements = [r.replace('==', '>=').replace('~=', '>=') for r in requirements]\n return [pkg for pkg in requirements\n if pkg.strip() and not pkg.startswith(\"#\")]\n\n\nsetup(\n name='hivemind_PHAL_plugin_nodered',\n version=get_version(),\n description='OVOS hivemind PHAL plugin for Node-Red',\n url='https://github.com/emphasize/hivemind_PHAL_plugin_nodered',\n author='emphasize',\n author_email='',\n license='Apache-2.0',\n packages=['hivemind_PHAL_plugin_nodered'],\n install_requires=required(\"requirements/requirements.txt\"),\n package_data={'': package_files('hivemind_PHAL_plugin_nodered')},\n include_package_data=True,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Topic :: Text Processing :: Linguistic',\n 'License :: OSI Approved :: Apache Software License',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n ],\n entry_points={\n 'ovos.plugin.phal': PLUGIN_ENTRY_POINT,\n 'ovos.plugin.skill': SKILL_ENTYRY_POINT\n }\n)\n\n\nif config is not None:\n config.merge(\n {\n \"PHAL\": {\n \"hivemind-PHAL-plugin-nodered\": {\n \"ssl\": False,\n \"blacklist\" : {\n \"messages\": [],\n \"skills\": [],\n \"intents\": []\n }\n }\n }\n }\n )\n config.store()\n","repo_name":"emphasize/hivemind_PHAL_plugin_nodered","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74543144089","text":"# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nimport os\nimport sys\nimport shutil\nimport glob\n\nsys.path.insert(0, os.path.abspath('../../compositionspace/'))\n\ndef skip(app, what, name, obj, would_skip, options):\n if name in ( '__init__',):\n return False\n return would_skip\ndef setup(app):\n app.connect('autodoc-skip-member', skip)\n\nif os.path.exists(\"example\"):\n shutil.rmtree(\"example\")\nshutil.copytree(\"../../example\", \"example\")\n\nproject = 'compositionspace'\ncopyright = '2022, Alaukik Saxena, Sarath Menon'\nauthor = 'Alaukik Saxena, Sarath Menon'\n\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.napoleon',\n 'm2r2',\n 'sphinx_markdown_tables',\n 'nbsphinx',\n]\n\nhtml_theme = 
'furo'\n\nhtml_theme_options = {\n #'logo_only' : True,\n #'canonical_url' : 'https://calphy.readthedocs.io/',\n}\n\nhtml_extra_path = ['../_static' ]\n\nsource_suffix = ['.rst', '.md']\n\nexclude_patterns = []","repo_name":"Alaukiksaxena/APT_Machine_learning","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"18501737368","text":"from gensim.models.doc2vec import TaggedLineDocument, TaggedDocument, Doc2Vec\nfrom gensim.models.word2vec import LineSentence, Word2Vec\nimport fasttext\nimport time\n\n\nfrom preprocess import data_load, pre_process, text_process\n\n\ninput = '../dat/data/18000*.json'\nfilename = '../dat/cleaned_text_plus_wp.txt'\ndoc2vec_model = '../trained_models/dm_win4.model'\nword2vec_model = '../trained_models/word2vec_win1.model'\nfasttext_model = '../trained_models/ft_win3'\ntrained_model_full = '../trained_models/dm.model_full'\n\n# train original word2vec\nsentences = LineSentence(filename)\nmodel = Word2Vec(sentences, size=300, window=3, min_count=5, workers=6, sg=1)\nmodel.save(word2vec_model)\n\n\n# train fasttext\nmodel = fasttext.skipgram(filename, fasttext_model , dim=300, ws=5, word_ngrams=3)\n\n\n# train doc2vec\n# build TaggedLineDocument object\ndocuments = TaggedLineDocument(filename)\ndm_model = Doc2Vec(size=300, window=4, min_count=5, workers=6)\n\n# build vocab for model\ndm_model.build_vocab(documents)\n\n# train model using corpus\ndm_model.train(documents, total_examples=dm_model.corpus_count, epochs=10)\n\n# save model\ndm_model.save(doc2vec_model)\n\n\n\n# # learn from corpus + titles data\n# # build list of TaggedDocument objects from corpus\n# alldocs = []\n# with open(filename) as alldata:\n# for line_no, line in enumerate(alldata):\n# words = line.split()\n# alldocs.append(TaggedDocument(words, ['c_%s' % line_no]))\n#\n# # load raw_data\n# raw_data = data_load(input)\n#\n# # pre_process data\n# df, fts = pre_process(raw_data)\n#\n# # build list of TaggedDocument objects from titles\n# titles = df.title.values\n# docs = [text_process(title, trained_model_full) for title in titles]\n#\n# for i, row in enumerate(docs):\n# alldocs.append(TaggedDocument(row, ['t_%s' % i]))\n#\n# # set model parameters\n# dm_model_full = Doc2Vec(size=300, window=5, min_count=5, workers=6)\n#\n# # build vocab for model\n# dm_model_full.build_vocab(alldocs)\n#\n# # train model using corpus\n# start = time.time()\n# dm_model_full.train(alldocs, total_examples=dm_model_full.corpus_count, epochs=10)\n# print(time.time()-start)\n#\n# # save model\n# dm_model_full.save(trained_model_full)","repo_name":"zhz46/system2","sub_path":"furniture/pretrain.py","file_name":"pretrain.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26549932061","text":"import math # To use sqrt function\r\ndef points_distance(point1, point2, point3, point4):\r\n '''points_distance(a, b, c, d)\r\n Returns distance between two coordinate points.''' # Documentation\r\n horz_dist = (point2 - point1)**2\r\n vert_dist = (point4 - point3)**2\r\n length = math.sqrt(horz_dist + vert_dist) # To be returned value\r\n return length\r\nX1 = float(input('Enter X1: ')) # Taking Inputs\r\nY1 = float(input('Enter Y1: '))\r\nX2 = float(input('Enter X2: '))\r\nY2 = float(input('Enter Y2: '))\r\ndistance = points_distance(X1, X2, Y1, Y2) # Caliing 
Function\r\nprint(f'Distance between ({X1}, {Y1}) & ({X2}, {Y2}) is {distance:.5}')\r\n","repo_name":"saifsafsf/Semester-1-Assignments","sub_path":"Lab 06/task 2.py","file_name":"task 2.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11070157584","text":"import numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn.parallel\nimport torch.utils.data\nfrom openpyxl import Workbook\nimport math\n\ndataset=pd.read_csv('AI-DataTrain.csv')\nfinaltest=pd.read_csv('AI-DataTest.csv')\n\ntrain=dataset.iloc[:900,1:51]\ntest=dataset.iloc[900:,1:51]\n\nnb_users=900\nnb_ques=50\n\ntraining_set=train.values.tolist()\n\ntest_set=test.values.tolist()\n\ntraining_set= torch.FloatTensor(training_set)\ntest_set= torch.FloatTensor(test_set)\n\nclass RBM():\n    def __init__(self,nv,nh):\n        self.W = torch.randn(nh,nv)\n        self.a = torch.randn(1,nh)\n        self.b = torch.randn(1,nv)\n\n    def sample_h(self,x):\n        wx = torch.mm(x, self.W.t())\n        activation = wx + self.a.expand_as(wx)\n        p_h_given_v = torch.sigmoid(activation)\n        return p_h_given_v, torch.bernoulli(p_h_given_v)\n\n    def sample_v(self,y):\n        wy = torch.mm(y, self.W)\n        activation = wy + self.b.expand_as(wy)\n        p_v_given_h = torch.sigmoid(activation)\n        return p_v_given_h, torch.bernoulli(p_v_given_h)\n\n    def train(self,v0,vk,ph0,phk):\n        self.W += torch.mm(v0.t(), ph0) - torch.mm(vk.t(),phk)\n        self.b += torch.sum((v0 - vk),0)\n        self.a += torch.sum((ph0 - phk),0)\n\n\nnv = len(training_set[0])\nnh = 50\nbatch_size = 75\nrbm = RBM(nv,nh)\n\n\n#training the RBM\nnb_epoch=10\nfor epoch in range(1 , nb_epoch + 1):\n    train_loss = 0\n    s = 0.\n    for id_user in range(0, nb_users - batch_size, batch_size):\n        vk = training_set[id_user:id_user+batch_size]\n        v0 = training_set[id_user:id_user+batch_size]\n        ph0,_ = rbm.sample_h(v0)\n        for k in range(10):\n            _,hk = rbm.sample_h(vk)\n            _,vk = rbm.sample_v(hk)\n            vk[v0<0] = v0[v0<0]\n        phk,_ = rbm.sample_h(vk)\n        rbm.train(v0, vk, ph0, phk)\n        train_loss += torch.mean(torch.abs(v0[v0>=0] - vk[v0>=0]))\n        s += 1.\n\n#testing the RBM\ntest_loss = 0\ns = 0.\nfor id_user in range(nb_users):\n    v = training_set[id_user:id_user+1]\n    vt = test_set[id_user:id_user+1]\n    if len(vt[vt>=0]) > 0:\n        _,h = rbm.sample_h(v)\n        _,v = rbm.sample_v(h)\n        test_loss += torch.mean(torch.abs(vt[vt>=0] - v[vt>=0]))\n        s += 1.\n#print('test_loss: ' +str(test_loss/s))\n\nresult=rbm.W\n\nresult=result.tolist()\n\nfinal_result=[]\nfor i in result:\n    k=0\n    add=0\n    for j in i:\n        add=j+add\n        k+=1\n    avg=add/k\n    final_result.append(avg)\n\nmylist=[]\nfor i in final_result:\n    if i<0:\n        mylist.append(float(\"-\"+str(math.sqrt(math.sqrt(abs(i))))))\n    else:\n        mylist.append(math.sqrt(math.sqrt(i)))\n\nminimum=min(mylist)\nscale=abs(minimum)+1\n\nnewlist=[]\nfor i in mylist:\n    newlist.append(i+scale)\n\nwb = Workbook()\n\nws = wb.active\nws.title = \"Final Weights\"\n\n\nc1 = ws.cell(row = 1, column = 1 )\nc1.value = ('Questions')\nc2 = ws.cell(row = 1, column = 2 )\nc2.value =('Weightage')\nj=2\nfor i in newlist:\n    c1 = ws.cell(row = j, column = 1 )\n    c1.value = ('Q'+str(j))\n    c2 = ws.cell(row = j, column = 2 )\n    c2.value = newlist[j-2]\n    j += 1\n\nwb.save(filename = 'output.xlsx')","repo_name":"Shubhamkakade06/CAT-Using-Neural-Network","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37730825765","text":"T = int(input())\nfor _ in 
range(T):\n sound = input().split()\n while True:\n case = input()\n if case == 'what does the fox say?': break\n case = case.split()\n if case[2] in sound:\n while case[2] in sound: sound.remove(case[2])\n print(*sound)","repo_name":"ruidazeng/online-judge","sub_path":"Kattis/whatdoesthefoxsay.py","file_name":"whatdoesthefoxsay.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7704028777","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 1 16:35:40 2020\n\n@author: YuJeong\n\"\"\"\nimport os \nimport random\nfrom pathlib import Path\n\ndef readRepresentGraph():\n path = str(Path(__file__).parent.parent) \n files = path + '\\\\datasets\\\\structure_fsm\\\\rep2.txt'\n print(files) \n adMatrix = []\n with open(files, 'r') as f:\n for line in f: \n l = []\n for num in line.split(' '):\n if num.isdigit(): \n l.append(int(num))\n adMatrix.append(l)\n \n return adMatrix\n #inp = np.loadtxt(files, dtype='i', delimiter=' ')\n #print(inp)\n\nclass Graph(object):\n\n # Initialize the matrix\n def __init__(self, size, index):\n self.adjMatrix = []\n self.index = index\n for i in range(size):\n self.adjMatrix.append([0 for i in range(size)])\n self.size = size\n\n # Add edges\n def add_edge(self, v1, v2, weight):\n if v1 == v2:\n print(\"Same vertex %d and %d\" % (v1, v2))\n self.adjMatrix[v1][v2] = weight\n self.adjMatrix[v2][v1] = weight\n\n # Remove edges\n def remove_edge(self, v1, v2):\n if self.adjMatrix[v1][v2] == 0:\n print(\"No edge between %d and %d\" % (v1, v2))\n return\n self.adjMatrix[v1][v2] = 0\n self.adjMatrix[v2][v1] = 0\n\n def __len__(self):\n return self.size\n\n def writeFile(self):\n ind = '0 '\n ind0 = '0 '\n for i in range(self.size):\n ind = ind + chr(ord('A') + i) + ' '\n ind0 = ind0 + '0 '\n \n filename = \"\\\\datasets\\\\structure_fsm\\\\random\" + \"\\\\2graph\" + str(self.index) + '.txt'\n path = str(Path(__file__).parent.parent) + filename\n \n f = open(path, 'w')\n for row in self.adjMatrix:\n line = ''\n for val in row:\n line = line + str(val) + ' '\n f.write(line+'\\n')\n f.close()\n #f.write()\n\n\ndef createRandomGraph(rangeN):\n for i in range(100):\n if rangeN == 1: # weight range 0-10\n #sumVal = random.randint(0, 5)\n sumVal = 0\n elif rangeN == 2: # weight range 30-50\n sumVal = random.randint(30, 50)\n elif rangeN == 3: # weight range 80-100\n sumVal = random.randint(80, 100)\n randG = Graph(len(originMat), i)\n randEdge = random.randint(0, len(originMat))\n for row in range(len(originMat)):\n for val in range(row):\n if originMat[row][val] != 0:\n #randG.add_edge(row, val, sumVal+round(originMat[row][val]*random.uniform(0, 1), 3))\n randG.add_edge(row, val, round((originMat[row][val]+random.uniform(0, 1)), 4))\n # if val == randEdge:\n # break\n randG.writeFile()\n\n \nif __name__ == \"__main__\":\n originMat = readRepresentGraph() \n rangeN = int(input(\"Random value range(1: 0-10, 2: 30-50, 3: 80-100): \"))\n createRandomGraph(rangeN) \n\n \n \n \n \n \n \n \n \n ","repo_name":"YYuJeong/Frequent-Subgraph-Mining-Using-Deep-Learning","sub_path":"create/createRandomGraph.py","file_name":"createRandomGraph.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"39150693850","text":"import openpyxl\nimport sys\nfrom openpyxl.utils import get_column_letter\n\nfilename = sys.argv[1]\nwb = openpyxl.load_workbook(filename)\nsheet = wb.active\n\nmax_row 
= sheet.max_row\nmax_col = sheet.max_column\n\ndata = []\n\nfor i in range(1,max_row+1):\n data_in_row = []\n for j in range(1,max_col+1):\n data_in_row.append(sheet.cell(i,j).value)\n data.append(data_in_row)\n\nfor i in range(1,max_row+1):\n for j in range(1,max_col+1): \n letter = get_column_letter(j)\n sheet[letter+str(i)] = None\n\nfor i in range(1,max_col+1):\n for j in range(1,max_row+1):\n letter = get_column_letter(j)\n sheet[letter+str(i)] = data[j-1][i-1]\n\nwb.save('new'+filename)\n\n","repo_name":"Yakouseihaizu/learning_repository","sub_path":"autopy/cha13/project3/rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34504353604","text":"import collections\nfrom concurrent import futures\n\nimport requests\nimport tqdm\n\nfrom flags2_common import main, HTTPStatus\nfrom flags2_sequential import download_one\n\nDEFAULT_CONCUR_REQ = 30 # 默认并发请求数的最大值\nMAX_CONCUR_REQ = 1000 # 限制最大的并发请求数,一项安全措施\n\n\ndef download_many(cc_list, base_url, verbose, concur_req):\n counter = collections.Counter()\n with futures.ThreadPoolExecutor(max_workers=concur_req) as executor: # main 函数中会选取 concur_req:MAX_CONCUR_REQ, len(cc_list), -m/--max_req 命令行选项的值,如此,能避免创建超过所需的线程\n to_do_map = {} # 把各个 Futute 实例(表示一次下载)映射到相应的国家代码上,在处理错误时使用\n for cc in sorted(cc_list):\n future = executor.submit(download_one, cc, base_url, verbose) # 排定一个可调用对象的执行时间,然后返回一个 Future 实例。第一个参数是可调用对象,其余参数是传递给可调用对象的参数\n to_do_map[future] = cc\n done_iter = futures.as_completed(to_do_map) # 返回迭代器,在 future 运行结束后产出 future\n if not verbose:\n done_iter = tqdm.tqdm(done_iter, total=len(cc_list)) # done_iter 没有 len 函数,需要通过 totoal 参数,借助 len(cc_list) 来指定\n for future in done_iter:\n try:\n res = future.result() # 要么返回结果,要么抛出可调用对象在执行过程中抛出的异常。 在此示例中,不会发生阻塞,因为 as_completed 方法只返回已经运行结速的 future\n except requests.exceptions.HTTPError as exc: # 处理 get_flag 函数抛出的与 HTTP 有关的且 download_one 没有处理的异常\n error_msg = 'HTTP error {res.status_code} - {res.reason}'\n error_msg = error_msg.format(res=exc.response)\n except requests.exceptions.ConnectionError as exc:\n error_msg = 'Connection error'\n else:\n error_msg = ''\n status = res.status\n\n if error_msg:\n status = HTTPStatus.error\n counter[status] += 1 # 以 HTTPStatus(一个 Enum) 中的值为键,增加计数器\n if verbose and error_msg: # 如果是详细模式,则有错误,则显示带有当前国家代码的错误信息\n cc = to_do_map[future] # 以当前 future 为键,从 to_do_map 中获取国家代码\n print('*** Error for {}:{}'.format(cc, error_msg))\n\n return counter\n\n\nif __name__ == '__main__':\n main(download_many, DEFAULT_CONCUR_REQ, MAX_CONCUR_REQ)\n","repo_name":"sbwcwso/fluent_python_notes","sub_path":"ch17/flags2_threadpool.py","file_name":"flags2_threadpool.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"71159040408","text":"###############################################################################################\n#Aim: Hyper-parameter search\n#Description: To search the optimal parameters for NSCLC-specific machine learning models.\n#\n#Run command, e.g.: python 08_1.NSCLC_20Models_HyperParams_Search.py DecisionTree 1\n###############################################################################################\n\n\nimport time\nimport sys\nimport pandas as pd\nfrom collections import Counter\nimport utils2\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n\n ############################################## 0. 
Parameters setting ##############################################\n MLM_list1=['RF6', 'DecisionTree', 'RandomForest'] # data scaling: None\n MLM_list2=['LogisticRegression','GBoost', 'AdaBoost', 'HGBoost', 'XGBoost', 'CatBoost', 'LightGBM',\n 'SupportVectorMachineRadial','kNearestNeighbourhood','NeuralNetwork1','NeuralNetwork2','NeuralNetwork3',\n 'NeuralNetwork4','GaussianProcess'] # StandardScaler\n MLM = sys.argv[1]\n if MLM in MLM_list1:\n SCALE = 'None'\n elif MLM in MLM_list2:\n SCALE = 'StandardScaler'\n else:\n raise Exception('MLM not recognized!')\n try:\n randomSeed = int(sys.argv[2])\n except:\n randomSeed = 1\n CPU_num = -1\n N_repeat_KFold = 1\n info_shown = 1\n Kfold = 5\n randomSearchNumber = 10000\n\n phenoNA = 'Response'\n model_hyperParas_fn = '../../03.Results/NSCLC_Chowell_ModelParaSearchResult_' + MLM + '_Scaler(' + SCALE + ')_CV' + str(\n Kfold) + 'Rep' + str(N_repeat_KFold) + '_random' + str(randomSeed) + '.txt'\n if MLM not in ['RF6']:\n featuresNA = ['TMB', 'PDL1_TPS(%)', 'Chemo_before_IO', 'Albumin', 'FCNA', 'NLR', 'Age', 'Drug', 'Sex', 'MSI',\n 'Stage', 'HLA_LOH', 'HED', 'Platelets', 'HGB', 'BMI']\n else:\n featuresNA = ['TMB', 'PDL1_TPS(%)', 'Chemo_before_IO', 'Albumin', 'NLR', 'Age']\n dataALL_fn = '../../02.Input/features_phenotype_allDatasets.xlsx'\n data_train1 = pd.read_excel(dataALL_fn, sheet_name='Chowell2015-2017', index_col=0)\n data_train2 = pd.read_excel(dataALL_fn, sheet_name='Chowell2018', index_col=0)\n data_train = pd.concat([data_train1,data_train2],axis=0)\n data_train = data_train.loc[data_train['CancerType']=='NSCLC',]\n\n if MLM == 'RF6':\n MLM = 'RandomForest'\n xy_colNAs = featuresNA + [phenoNA]\n data_train = data_train[xy_colNAs].dropna()\n cat_features = []\n\n ################################################# 1. Data read in #################################################\n model_hyperParas_fh = open(model_hyperParas_fn, 'w')\n print('Raw data processing ...', file=model_hyperParas_fh)\n\n # Data truncation\n TMB_upper = 50\n Age_upper = 85\n NLR_upper = 25\n data_train['TMB'] = [c if c < TMB_upper else TMB_upper for c in data_train['TMB']]\n data_train['Age'] = [c if c < Age_upper else Age_upper for c in data_train['Age']]\n data_train['NLR'] = [c if c < NLR_upper else NLR_upper for c in data_train['NLR']]\n counter = Counter(data_train[phenoNA]) # count examples in each class\n pos_weight = counter[0] / counter[1] # estimate scale_pos_weight value\n print(' Number of all features: ', len(featuresNA), '\\n Their names: ', featuresNA, file=model_hyperParas_fh)\n print(' Phenotype name: ', phenoNA, file=model_hyperParas_fh)\n print(' Negative/Positive samples in training set: ', pos_weight, file=model_hyperParas_fh)\n print('Data size: ', data_train.shape[0], file=model_hyperParas_fh)\n\n scoring_dict = 'roc_auc'\n ############################### 2. Optimal model hyperparameter combination search ################################\n search_cv = utils2.optimalHyperParaSearcher(MLM, SCALE, data_train, featuresNA, phenoNA,scoring_dict, \\\n randomSeed, CPU_num, N_repeat_KFold, info_shown,Kfold,cat_features, randomSearchNumber)\n print('Best params on CV sets: ', search_cv.best_params_, file=model_hyperParas_fh)\n print('Best score on CV sets: ', search_cv.best_score_, file=model_hyperParas_fh)\n print('Hyperparameter screening done! 
Time used: ',time.time()-start_time, file=model_hyperParas_fh)\n","repo_name":"rootchang/ICBpredictor","sub_path":"code/08_1.NSCLC_20Models_HyperParams_Search.py","file_name":"08_1.NSCLC_20Models_HyperParams_Search.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74708768087","text":"from tkinter import *\n\ndef calculate():\n total = int(e1.get()) + int(e2.get())\n resultText = \"Sum of 2 numbers: {}\".format(total)\n resultLabel.config(text=resultText)\n\nmaster = Tk() # define canvas\nmaster.geometry(\"300x300\")\nmaster.config(bg='lightblue')\nl1 = Label(master, text='First Number:')\nl2 = Label(master, text='Second Number:')\nl1.grid(row=0, column=0)\nl2.grid(row=1, column=0)\ne1= Entry(master)\ne2= Entry(master)\ne1.grid(row=0, column=1)\ne2.grid(row=1, column=1)\nButton(master, text=\"Calculate\", command=calculate).grid(row=2, column=0)\nresultLabel = Label(master, text=\"Answer:\")\nresultLabel.grid(row=2, column=1)\n\nmaster.mainloop()\n\n\n\n","repo_name":"itenabler-python/PythonDay2","sub_path":"tkinter-2.py","file_name":"tkinter-2.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7050157201","text":"import numpy as np\n\naa=[[1,2,3],[4,5,6],[7,8,9]]\nb=np.array(aa)\n\n\n'''\nfor a in range(1,10):\n file1='d:/m/text'+str(a)+'.txt'\n fp=open(file1,'a+')\n print(aa,file=fp)\n fp.close()\n'''\n\n\n\nnp.savetxt('d:/m/aa.csv', b, delimiter = ',')","repo_name":"jamdou/jam","sub_path":"jam/test_ip.py","file_name":"test_ip.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34103376739","text":"a, b, c, d, e, f, g = \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\"\n\n\nvar_string1 = \"Alphabet_Is_Key\"\nvar_string2 = \"string\\n10winewine\"\nvar_string3 = \"Čenda sere sračky\"\nvar_string4 = \"Tom\"\nvar_int = 10\nvar_float = 10.555\nvar_bool = True\nvar_list = [a, b, c]\nvar_tuple = (a, b, c)\nvar_set1 = {a, b, c}\nvar_set2 = {a, b, c, d, e, f, g}\nvar_dict = {\"d\": \"ddd\", \"e\": \"eee\", \"f\": \"fff\"}\n\nx = var_set1\nprint(x)\ndef my_function():\n return 1\n\n\nclass Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n\nperson1 = Person(\"Tom\", 25)\nperson2 = Person(\"Lada\", 20)\n\n\n# Function\n# def max(num1, num2):\n# function()\nprint(max(1, 2))\n\n# Method\n# is object oriented - through class\n# object.Method()\nshit = \"shit\"\nprint(\" \".join(shit))\n","repo_name":"Ismaril/training","sub_path":"Python/old/programming beginnings (ignore)/s_syntax.py","file_name":"s_syntax.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2860673186","text":"# Import the necessary libraries\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\n\n# Generate RSA key pair\nprivate_key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048\n)\n\n# Serialize and save the private key to a file\nprivate_key_pem = private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n 
encryption_algorithm=serialization.NoEncryption()\n)\nwith open(\"private_key.pem\", \"wb\") as f:\n f.write(private_key_pem)\n\n# Extract the public key from the private key\npublic_key = private_key.public_key()\n\n# Serialize and save the public key to a file\npublic_key_pem = public_key.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n)\nwith open(\"public_key.pem\", \"wb\") as f:\n f.write(public_key_pem)\n\n# Encrypt a message using the public key\nmessage = b\"Hello, World!\"\nencrypted_message = public_key.encrypt(\n message,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n)\nprint(\"Encrypted Message:\", encrypted_message.hex())\n\n# Decrypt the message using the private key\ndecrypted_message = private_key.decrypt(\n encrypted_message,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n)\nprint(\"Decrypted Message:\", decrypted_message.decode())\n","repo_name":"Divesh-Kumar-Chordia/Archives-of-2023","sub_path":"May/BOSCH Cryptography/pki_example.py","file_name":"pki_example.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7109517426","text":"from flask import Flask, render_template, flash, redirect, url_for, session, request, logging\nfrom ast import literal_eval\nfrom wtforms import Form, StringField, TextAreaField, PasswordField, validators\nfrom passlib.hash import sha256_crypt\nfrom functools import wraps\nfrom tinydb import TinyDB, Query\n\nusers = TinyDB('users.json')\ncharacters = TinyDB('characters.json')\ntraits = TinyDB('traits.json')\nbaseStats = TinyDB('baseStats.json') # Never update base traits. 
This is for read only\napp = Flask(__name__)\napp.config['SESSION_TYPE'] = 'filesystem'\napp.config['SECRET_KEY'] = 'super secret key'\n\n# Config MySQL\n# app.config['MYSQL_HOST'] = 'wandrade1.mysql.pythonanywhere-services.com'\n# app.config['MYSQL_USER'] = 'wandrade1'\n# app.config['MYSQL_PASSWORD'] = 'lmaolmao1'\n# app.config['MYSQL_DB'] = 'wandrade1$myflaskapp'\n# app.config['MYSQL_CURSORCLASS'] = 'DictCursor'\n# init MYSQL\n# Index\n@app.route('/')\ndef index():\n return render_template('home.html')\n\n\n# About\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n\n# Register Form Class\nclass RegisterForm(Form):\n email = StringField('Email', [validators.Length(min=4, max=25)])\n username = StringField('Username', [validators.Length(min=4, max=25)])\n password = PasswordField('Password', [\n validators.DataRequired(),\n validators.EqualTo('confirm', message='Passwords do not match')\n ])\n confirm = PasswordField('Confirm Password')\n\n\n# User Register\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n form = RegisterForm(request.form)\n if request.method == 'POST' and form.validate():\n username = form.username.data\n # password = sha256_crypt.encrypt(str(form.password.data))\n password = form.password.data\n email = form.email.data\n \n users.insert({'username': username, 'password': password, 'email': email})\n\n flash('You are now registered and can log in', 'success')\n\n return redirect(url_for('login'))\n return render_template('register.html', form=form)\n\ndef getFieldData(fieldName, query):\n result = [str(r[fieldName]) for r in query]\n return result\n\ndef getIds(query):\n result = [r.eid for r in query]\n return result\n\n# User login\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n # Get Form Fields\n username = request.form['username']\n password_candidate = request.form['password']\n\n # Get user by username\n User = Query()\n result = users.search(User.username == username)\n print(result)\n if len(result) > 0:\n # Get stored hash\n password = getFieldData(\"password\", result)[0]\n # Compare Passwords\n # if sha256_crypt.verify(password_candidate, password):\n if password == password_candidate:\n # Passed\n session['logged_in'] = True\n session['username'] = username\n\n flash('You are now logged in', 'success')\n return redirect(url_for('dashboard'))\n else:\n error = 'Invalid login'\n return render_template('login.html', error=error)\n else:\n error = 'Username not found'\n return render_template('login.html', error=error)\n\n return render_template('login.html')\n\n# Check if user logged in\ndef is_logged_in(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if 'logged_in' in session:\n return f(*args, **kwargs)\n else:\n flash('Unauthorized, Please login', 'danger')\n return redirect(url_for('login'))\n return wrap\n\n# Logout \n@app.route('/logout')\n@is_logged_in\ndef logout():\n session.clear()\n flash('You are now logged out', 'success')\n return redirect(url_for('login'))\n\n\n\n# Dashboard\n@app.route('/dashboard')\n@is_logged_in\ndef dashboard():\n Characters = Query()\n result = characters.search(Characters.username == session[\"username\"])\n # characters.insert({'name': 'wescules', 'sex': \"Male\", 'race': 'asian', 'username': 'wescules'})\n\n names = getFieldData(\"name\", result)\n ids = getIds(result)\n data = zip(names, ids)\n\n if len(result) > 0:\n return render_template('dashboard.html', data=data)\n else:\n msg = 'No Articles Found'\n return 
render_template('dashboard.html', msg=msg)\n\n# Article Form Class\nclass ArticleForm(Form):\n name = StringField('Name', [validators.Length(min=1, max=200)])\n sex = StringField('Sex', [validators.Length(min=1, max=200)])\n age = StringField('Age', [validators.Length(min=1, max=200)])\n height = StringField('Height', [validators.Length(min=1, max=200)])\n race = StringField('Race', [validators.Length(min=1, max=200)])\n weight = StringField('Weight', [validators.Length(min=1, max=200)])\n eyeColor = StringField('Eye Color', [validators.Length(min=1, max=200)])\n hairColor = StringField('Hair Color', [validators.Length(min=1, max=200)])\n backgroundInfo = StringField('Background Info', [validators.Length(min=1, max=200)])\n\n# Add Character\n@app.route('/add_character/', methods=['GET', 'POST'])\n@is_logged_in\ndef add_article():\n form = ArticleForm(request.form)\n if request.method == 'POST' and form.validate():\n name = form.name.data\n sex = form.sex.data\n age = form.age.data\n height = form.height.data\n race = form.race.data\n weight = form.weight.data\n eyeColor = form.eyeColor.data\n hairColor = form.hairColor.data\n backgroundInfo = form.backgroundInfo.data\n stats = getDocumentById(1, baseStats.all())[0]\n characters.insert({'name': name, 'stats':stats, 'sex': sex, 'race': race, 'username': session['username'], 'age':age, 'height': height,'weight':weight, 'eyeColor':eyeColor, \"hairColor\":hairColor, \"backgroundInfo\": backgroundInfo})\n flash('Character Created', 'success')\n return redirect(url_for('dashboard'))\n\n return render_template('add_article.html', form=form)\n\ndef getDocumentById(id, query):\n # linear scan; compare eids by value, since 'is' tests object identity and fails for most ints\n result = []\n for doc in query:\n if doc.eid == int(id):\n result.append(doc)\n return result\n\n# Edit Character based on Id\n@app.route('/edit_character/', methods=['GET', 'POST'])\n@is_logged_in\ndef edit_character(id):\n form = ArticleForm(request.form)\n\n # populate page on GET\n # TinyDB's Query API offers no lookup by eid, so scan all documents manually\n result = getDocumentById(id, characters.all())\n\n form.name.data = getFieldData(\"name\", result)[0]\n form.sex.data = getFieldData(\"sex\", result)[0]\n form.age.data = getFieldData(\"age\", result)[0]\n form.height.data = getFieldData(\"height\", result)[0]\n form.race.data = getFieldData(\"race\", result)[0]\n form.weight.data = getFieldData(\"weight\", result)[0]\n form.eyeColor.data = getFieldData(\"eyeColor\", result)[0]\n form.hairColor.data = getFieldData(\"hairColor\", result)[0]\n form.backgroundInfo.data = getFieldData(\"backgroundInfo\", result)[0]\n\n if request.method == 'POST' and form.validate():\n name = request.form['name']\n sex = request.form['sex']\n age = request.form['age']\n height = request.form['height']\n race = request.form['race']\n weight = request.form['weight']\n eyeColor = request.form['eyeColor']\n hairColor = request.form['hairColor']\n backgroundInfo = request.form['backgroundInfo']\n\n # write the modified documents straight back to the JSON store; TinyDB's update() does not handle these nested records well\n for res in result:\n res['name'] = name\n res['sex'] = sex\n res['age'] = age\n res['height'] = height\n res['weight'] = weight\n res['eyeColor'] = eyeColor\n res['hairColor'] = hairColor\n res['backgroundInfo'] = backgroundInfo\n res['race'] = race\n characters.write_back(result)\n flash('Character Updated', 'success')\n return redirect(url_for('dashboard'))\n\n return render_template('edit_character.html', form=form, id=id)\n\n# Add traits to character\n# TODO on GET load quantity\n@app.route('/add_traits/', methods=['GET', 'POST'])\n@is_logged_in\ndef 
add_traits(id):\n \n # traits.insert({'ap': 3, 'description': 'some description', 'name': 'some name'})\n character = getDocumentById(id, characters.all())[0]\n\n result = traits.all()\n ap = getFieldData(\"ap\", result)\n description = getFieldData(\"description\", result)\n name = getFieldData(\"name\", result)\n effect = getFieldData('effect', result)\n\n columns = zip(ap, description, name, effect)\n if request.method == 'POST':\n quantity = int(request.form['text']) # form values arrive as strings; the multiplier must be numeric\n effects = literal_eval(request.form['effect']) # turn unicode to dict\n\n # update the character based on quantity of trait allocated\n for key in effects:\n character['stats'][key] = quantity * effects[key]\n characters.write_back([character])\n\n flash('Added Traits', 'success')\n flash(character, 'success')\n\n return render_template('traits.html', columns=columns, id=id)\n\n@app.route('/character_sheet/', methods=['GET', 'POST'])\n@is_logged_in\ndef character_sheet(id):\n \n # traits.insert({'ap': 3, 'description': 'some description', 'name': 'some name'})\n character = getDocumentById(id, characters.all())[0]\n\n stats = character['stats']\n\n keys = []\n values = []\n\n for key in stats:\n keys.append(key)\n values.append(stats[key])\n\n columns = zip(keys, values)\n\n return render_template('character.html', columns=columns, id=id)\n\nif __name__ == '__main__':\n app.run(debug=True)\n \n#sudo apt-get install mysql-server\n#apt-get install python-dev\n#sudo apt-get install libmysqlclient-dev\n#pip install flask_mysqldb\n#pip install wtforms\n#pip install passlib\n\n###################\n#***create database***(set password to 'root')\n# mysql -u root -p\n# create database myflaskapp;\n# mysql -u root -proot myflaskapp < admin_backup.sql\n","repo_name":"wescules/DnD-Stat-Tracker-","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"16582863530","text":"import requests\nimport jieba\nimport jieba.analyse\nfrom lxml import etree\nimport os\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# TF-IDF implementation modes: mode 1 uses jieba alone, mode 2 uses jieba + sklearn\n# Mode 2 applies a custom stop-word list, so the two modes give somewhat different results\nMODE=1\n\n# stop-word list for tokenization\nSTOP_WORDS = os.path.join(os.getcwd(), \"stop_words.txt\")\n# keyword to compare against\nKEY_WORD=\"国科大\"\n# number of article pages to crawl\nMAX_PAGE_NUM = 1\n# base URL\nBASE_URL = \"http://www.ucas.edu.cn/site/26?pn=\"\nHEADERS = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36 Edg/94.0.992.38\"\n }\n\nreq = requests\n# add the keyword to jieba's dictionary\njieba.add_word(KEY_WORD)\n\n# article class\nclass Article():\n def __init__(self,title,link):\n self.title = title\n self.link = link\n # top-25 keywords and their scores from the TF-IDF algorithm\n self.top_25 = {}\n \n # fetch the article body\n def GetText(self):\n content=\"\"\n html = etree.HTML(req.get(url=self.link, headers=HEADERS,verify=False).text)\n texts_list = html.xpath(\n '/html/body/div[4]/div[1]/div/div[1]/div[2]/div[2]/p/span/text()')\n\n for text in texts_list:\n if(text!=''):\n content+=text+'\\n'\n return content\n\n # extract keywords with sklearn\n def GetTFIDFBySklearn(self):\n text = self.GetText()\n words_list = []\n stopwords = [line.strip() for line in open(\n STOP_WORDS, 'r', encoding='utf-8').readlines()]\n \n # tokenize, dropping stop words\n for word in jieba.cut(text):\n if word not in stopwords and word.strip() != '':\n words_list.append(word.strip())\n \n # tf-idf\n data = [' '.join(words_list)]\n tfidf = TfidfVectorizer()\n res = tfidf.fit_transform(data)\n res=res.toarray()\n voc=tfidf.vocabulary_\n\n details = {str(key): float(res[0,voc[key]]) for key in voc.keys()}\n\n # keywords = sorted(details.items(),key=lambda x: x[1], reverse=True)\n print(\"Article: %s keyword extraction done...\" % (self.title))\n \n # select the 25 highest-scoring words, one per pass\n for i in range(25):\n max_temp=0\n best_key=None\n for key in details.keys():\n if(details[key]>=max_temp and key not in self.top_25.keys()):\n best_key = key\n max_temp = details[key]\n if best_key is not None:\n self.top_25[best_key] = max_temp\n \n print(\"=======================================\")\n print(\"top 10 keywords:\")\n count=0\n for key in self.top_25.keys():\n count+=1\n print(key+\"\\t\"+str(self.top_25[key]))\n if(count==10):\n break\n print(\"=======================================\\n\")\n\n # extract keywords directly with jieba\n def GetTFIDFByJieba(self):\n text = self.GetText()\n \n # jieba keyword extraction\n keywords=jieba.analyse.extract_tags(\n text, topK=25, withWeight=True)\n \n print(\"Article: %s keyword extraction done...\" % (self.title))\n\n print(\"=======================================\")\n print(\"top 10 keywords:\")\n for i in range(25):\n if(i<10):\n print(keywords[i][0]+\"\\t\"+str(keywords[i][1]))\n self.top_25[keywords[i][0]] = keywords[i][1]\n print(\"=======================================\\n\")\n\n\narticles_list=[]\n\n# collect titles and links from the first MAX_PAGE_NUM pages of articles\ndef CreateArticles():\n for i in range(1,MAX_PAGE_NUM+1):\n url=BASE_URL+str(i)\n html=etree.HTML(req.get(url=url,headers=HEADERS).text)\n titles = html.xpath('//html/body/div[4]/div[2]/div[2]/div[3]/p/a/@title')\n links = html.xpath('//html/body/div[4]/div[2]/div[2]/div[3]/p/a/@href')\n\n for j in range(len(titles)):\n articles_list.append(Article(titles[j],links[j]))\n print(\"Page %d articles collected...\"%i)\n\n# find the article most relevant to the keyword\ndef GetBestMatchedArticle(mode):\n tf_idf=0\n best_matched_article=Article\n\n for article in articles_list:\n if(mode==1):\n article.GetTFIDFByJieba()\n else:\n article.GetTFIDFBySklearn()\n if KEY_WORD in article.top_25.keys():\n if(article.top_25[KEY_WORD]>tf_idf):\n best_matched_article=article\n tf_idf = article.top_25[KEY_WORD]\n \n if(tf_idf==0):\n print(\"No articles related to %s found... try again after raising MAX_PAGE_NUM...\"%KEY_WORD)\n else:\n print(\"Found the article best matching the keyword:\")\n print(\"*********************************\")\n print(\"Keyword: %s\\nTitle: %s\\nLink: %s\\ntf-idf: %f\" %\n (KEY_WORD, best_matched_article.title, best_matched_article.link,tf_idf))\n print(\"*********************************\")\n\n\n\ndef main():\n CreateArticles()\n \n GetBestMatchedArticle(MODE)\n \nmain()\n","repo_name":"xiaoyu2018/HW-MobileInternetTechnology","sub_path":"4/题目2.py","file_name":"题目2.py","file_ext":"py","file_size_in_byte":5077,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"}
{"seq_id":"7105767395","text":"import openai\r\nimport requests\r\nimport nbformat\r\nimport chardet\r\nimport re\r\nimport streamlit as st\r\nimport os\r\nfrom config import api_key, github_token\r\nimport json\r\n# from langchain import PromptTemplate\r\n\r\n\r\n# Set up your OpenAI API credentials\r\nopenai.api_key = api_key\r\ngithub_token = github_token\r\n\r\ndef get_user_repositories(github_url):\r\n # Extract the username from the GitHub URL\r\n global username \r\n username = github_url.split(\"/\")[-1]\r\n\r\n # Make the API request to retrieve the user's repositories\r\n url = f\"https://api.github.com/users/{username}/repos\"\r\n headers = {\"Accept\": \"application/vnd.github.v3+json\"}\r\n response = requests.get(url, headers=headers)\r\n\r\n if response.status_code == 200:\r\n # Parse the JSON response and 
extract the repository names and URLs\r\n repositories = []\r\n data = response.json()\r\n for repo in data:\r\n repo_name = repo[\"name\"]\r\n repo_url = repo[\"html_url\"]\r\n repositories.append({\"name\": repo_name, \"url\": repo_url})\r\n\r\n return repositories\r\n else:\r\n # Handle API request errors\r\n print(\"Error: Failed to fetch user repositories.\")\r\n return []\r\n\r\ndef preprocess_code(repository):\r\n processed_files = set()\r\n contents = preprocess_files(repository)\r\n preprocessed_contents = []\r\n for file in contents:\r\n file_type = file[\"type\"]\r\n content = file[\"content\"]\r\n if file_type == \"jupyter_notebook\":\r\n preprocessed_contents.extend(preprocess_jupyter_notebook(content))\r\n elif file_type == \"package_file\":\r\n preprocessed_contents.extend(preprocess_package_file(content))\r\n elif file_type == \"regular_file\":\r\n preprocessed_contents.extend(preprocess_regular_file(content))\r\n\r\n #convert preprocessed_contents to a string\r\n preprocessed_contents = [str(content) for content in preprocessed_contents]\r\n\r\n # Combine preprocessed contents into a single string\r\n preprocessed_content = \" \".join(preprocessed_contents)\r\n\r\n # Limit the token count to 2000\r\n if len(preprocessed_content.split()) > 2000:\r\n preprocessed_content = \" \".join(preprocessed_content.split()[:2000])\r\n\r\n return preprocessed_content\r\n\r\n\r\ndef preprocess_files(repository):\r\n repository_url = repository[\"url\"]\r\n files = fetch_repository_files(repository_url, github_token)\r\n contents = []\r\n for file in files:\r\n file_path = file[\"name\"]\r\n content = fetch_file_content(file[\"download_url\"])\r\n contents.append({\"name\": file_path, \"type\": file[\"type\"], \"content\": content})\r\n\r\n return contents\r\n\r\ndef fetch_repository_files(repo_url, github_token):\r\n # Construct the API endpoint to fetch repository contents\r\n api_url = repo_url.replace(\"https://github.com/\", \"https://api.github.com/repos/\") + \"/contents\"\r\n \r\n headers = {\r\n \"Authorization\": \"token \" + github_token,\r\n \"Accept\": \"application/vnd.github.v3+json\"\r\n }\r\n\r\n response = requests.get(api_url, headers=headers, timeout=10)\r\n if response.status_code == 200:\r\n try:\r\n data = response.json()\r\n files = []\r\n fetch_files_recursive(data, files)\r\n return files\r\n except json.JSONDecodeError as e:\r\n print(f\"Error: Failed to parse API response as JSON - {e}\")\r\n return []\r\n else:\r\n print(f\"Error: Failed to fetch files in the repository.\")\r\n return []\r\n\r\ndef fetch_files_recursive(data, files):\r\n for item in data:\r\n if item[\"type\"] == \"file\":\r\n file_name = item[\"name\"]\r\n file_extension = file_name.split(\".\")[-1].lower()\r\n if file_extension not in [\"jpg\", \"jpeg\", \"png\", \"gif\", \"ico\", \"h5\", \"pkl\", \"gitignore\", \"json\", \"node\"]:\r\n file_type = determine_file_type(file_name)\r\n files.append({\"name\": file_name, \"type\": file_type, \"download_url\": item[\"download_url\"]})\r\n elif item[\"type\"] == \"dir\":\r\n url = item[\"url\"]\r\n headers = {\"Accept\": \"application/vnd.github.v3+json\"}\r\n response = requests.get(url, headers=headers, timeout=10)\r\n if response.status_code == 200:\r\n subdir_data = response.json()\r\n fetch_files_recursive(subdir_data, files)\r\n else:\r\n print(f\"Error: Failed to fetch files in directory {item['name']}.\")\r\n\r\ndef determine_file_type(file_name):\r\n if file_name.endswith(\".ipynb\"):\r\n return \"jupyter_notebook\"\r\n elif 
file_name.endswith(\".py\"):\r\n return \"package_file\"\r\n elif file_name.endswith(\".h5\") or file_name.endswith(\".pkl\"):\r\n return \"binary_file\"\r\n else:\r\n return \"regular_file\"\r\n\r\ndef fetch_file_content(download_url):\r\n response = requests.get(download_url)\r\n\r\n if response.status_code == 200:\r\n return response.content\r\n else:\r\n print(f\"Error: Failed to fetch file content from {download_url}.\")\r\n return None\r\n\r\n# def preprocess_jupyter_notebook(content):\r\n# notebook = nbformat.reads(content, nbformat.NO_CONVERT)\r\n# preprocessed_cells = []\r\n# for cell in notebook.cells:\r\n# if cell.cell_type == \"code\":\r\n# preprocessed_cells.append(preprocess_code_cell(cell))\r\n\r\n# # Flatten the preprocessed cells into a single string\r\n# preprocessed_content = \" \".join(preprocessed_cells)\r\n\r\n# # Limit the token count to 500\r\n# if len(preprocessed_content.split()) > 500:\r\n# preprocessed_content = \" \".join(preprocessed_content.split()[:500])\r\n\r\n# return preprocessed_content\r\n\r\ndef preprocess_jupyter_notebook(content):\r\n notebook = nbformat.reads(content, nbformat.NO_CONVERT)\r\n preprocessed_cells = []\r\n for cell in notebook.cells:\r\n if cell.cell_type == \"code\":\r\n preprocessed_cells.append(preprocess_code_cell(cell))\r\n\r\n return preprocessed_cells\r\n\r\ndef preprocess_package_file(content):\r\n # You can limit the token count or chunk the file as necessary\r\n # Example: Limit the token count to 1000\r\n if len(content.split()) > 500:\r\n content = \" \".join(content.split()[:500])\r\n\r\n return [content]\r\n\r\ndef preprocess_regular_file(content):\r\n result = chardet.detect(content)\r\n encoding = result[\"encoding\"]\r\n\r\n if encoding is None:\r\n encoding = \"utf-8\"\r\n\r\n try:\r\n decoded_content = content.decode(encoding, errors=\"ignore\")\r\n if len(decoded_content.split()) > 200:\r\n decoded_content = \" \".join(decoded_content.split()[:200])\r\n\r\n return [decoded_content]\r\n except UnicodeDecodeError:\r\n print(\"Error: Failed to decode file content.\")\r\n\r\ndef preprocess_code_cell(cell):\r\n # Implement your preprocessing logic for code cells within Jupyter notebooks\r\n # You can limit the token count or handle large code cells as necessary\r\n # Example: Limit the token count to 200\r\n if len(cell[\"source\"].split()) > 100:\r\n cell[\"source\"] = \" \".join(cell[\"source\"].split()[:100])\r\n\r\n return cell[\"source\"]\r\n\r\ndef generate_prompt(repository, code):\r\n #the models max token count is 2048, so we need to limit the token count to 2000\r\n if len(code.split()) > 100:\r\n code = \" \".join(code.split()[:100])\r\n \r\n prompt = f\"\"\"\r\n Generate a code complexity score for the following code snippet:\r\n --------------------------------------------------\r\n Code:\r\n {code}\r\n --------------------------------------------------\r\n\r\n \"\"\"\r\n\r\n return prompt\r\n\r\n# def generate_prompt(repository, code):\r\n# #the models max token count is 2048, so we need to limit the token count to 2000\r\n# if len(code.split()) > 100:\r\n# code = \" \".join(code.split()[:100])\r\n \r\n# Prompt = PromptTemplate.from_template(\r\n# \"\"\"Generate a code complexity score for the following code snippet:\r\n# --------------------------------------------------\r\n# Code:\r\n# {code}\r\n# --------------------------------------------------\r\n# \"\"\"\r\n# )\r\n# prompt = Prompt.render(code=code)\r\n# return prompt\r\n\r\n# Use GPT-3 to analyze the code\r\ndef analyze_code(prompts):\r\n 
complexity_scores = []\r\n for prompt in prompts:\r\n response = openai.Completion.create(\r\n engine=\"ada\",\r\n prompt=prompt,\r\n max_tokens=100,\r\n temperature=0.7,\r\n n=1,\r\n stop=None\r\n )\r\n score = extract_complexity_score(response.choices[0].text)\r\n complexity_scores.append(score if score is not None else 0)\r\n return complexity_scores\r\n\r\ndef extract_complexity_score(text):\r\n # Extract the complexity score using a regular expression\r\n pattern = r\"Complexity Score: (\\d+)\"\r\n match = re.search(pattern, text)\r\n if match:\r\n complexity_score = int(match.group(1))\r\n return complexity_score\r\n else:\r\n return None\r\n\r\n\r\n\r\ndef identify_most_complex_repository(repositories):\r\n complexity_scores = {}\r\n\r\n for repository in repositories:\r\n preprocessed_contents = preprocess_code(repository)\r\n if preprocessed_contents:\r\n prompts = []\r\n for content in preprocessed_contents:\r\n prompt = generate_prompt(repository, content)\r\n prompts.append(prompt)\r\n\r\n scores = analyze_code(prompts)\r\n complexity_score = sum(scores) / len(scores)\r\n complexity_scores[repository[\"name\"]] = complexity_score\r\n\r\n if complexity_scores:\r\n most_complex_repository = max(complexity_scores, key=complexity_scores.get)\r\n justification = generate_justification(most_complex_repository)\r\n most_complex_repo_url = None\r\n for repo in repositories:\r\n if repo[\"name\"] == most_complex_repository:\r\n most_complex_repo_url = repo[\"url\"]\r\n break\r\n return most_complex_repository, complexity_scores[most_complex_repository], justification, most_complex_repo_url\r\n else:\r\n return None, 0, \"\", None\r\n\r\n\r\ndef generate_justification(repository):\r\n #the models max token count is 2048, so we need to limit the token count to 2000\r\n if len(repository.split()) > 2000:\r\n repository = \" \".join(repository.split()[:2000])\r\n prompt = f\"Justify why the repository '{repository}' is considered the most complex:\"\r\n\r\n response = openai.Completion.create(\r\n engine=\"babbage\",\r\n prompt=prompt,\r\n max_tokens=200,\r\n temperature=0.7,\r\n n=1,\r\n stop=None\r\n )\r\n\r\n if response.choices:\r\n justification = response.choices[0].text.strip()\r\n return justification\r\n else:\r\n return \"\"\r\n\r\n# def generate_justification(repository):\r\n# #the models max token count is 2048, so we need to limit the token count to 2000\r\n# if len(repository.split()) > 2000:\r\n# repository = \" \".join(repository.split()[:2000])\r\n# Prompt = PromptTemplate.from_template(\r\n# \"\"\"Justify why the repository '{repository}' is considered the most complex:\r\n# \"\"\"\r\n# )\r\n# prompt = Prompt.render(repository=repository)\r\n\r\n# response = openai.Completion.create(\r\n# engine=\"babbage\",\r\n# prompt=prompt,\r\n# max_tokens=200,\r\n# temperature=0.7,\r\n# n=1,\r\n# stop=None\r\n# )\r\n\r\n# if response.choices:\r\n# justification = response.choices[0].text.strip()\r\n# return justification\r\n# else:\r\n# return \"\"\r\n\r\n# Streamlit app\r\ndef main():\r\n st.set_page_config(page_title=\"Code Complexity Analyzer\")\r\n st.title(\"Code Complexity Analyzer\")\r\n #title on the tab\r\n \r\n\r\n github_url = st.text_input(\"Enter GitHub URL:\")\r\n if st.button(\"Analyze\"):\r\n repositories = get_user_repositories(github_url)\r\n if repositories:\r\n most_complex_repository, complexity_score, justification, most_complex_repo_url = identify_most_complex_repository(repositories)\r\n if most_complex_repository:\r\n st.success(f\"The most complex repository 
is {most_complex_repository}.\")\r\n st.markdown(f\"**Repository URL**: {most_complex_repo_url}\")\r\n st.markdown(f\"**Justification**: {justification}\")\r\n else:\r\n st.warning(\"No code files found in the user's repositories.\")\r\n else: \r\n st.error(\"No repositories found for the given GitHub URL.\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"suhasml/GitHub-Complexity-Analyser","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36218427066","text":"def uppercase_decorator(function):\n def wrapper():\n func = function()\n make_upper = func.upper()\n return make_upper\n \n return wrapper\n\n\n\ndef split(function):\n def wrapper():\n func = function()\n split_string = func.split()\n return split_string\n return wrapper\n\n\n\ndef timeit(function):\n import time\n def wrapper():\n start = time.time()\n func = function()\n end = time.time()\n print(f\"Elapsed time is ${end-start}\")\n return 0\n return wrapper\n\n@uppercase_decorator\ndef printHelloWorld():\n return(\"Hello world!\")\n\n\n@split\n@uppercase_decorator\ndef printUpperSplit():\n return(\"Hello world!\")\n\n\n\n@timeit\ndef timingLoop():\n i = 0\n i_end = 100000\n while(i None:\n _dir = safe_long_path(_dir)\n if not _dir.exists():\n return\n\n test_data = os.environ.get(\"TEST_SAVE_DATA\")\n if test_data:\n shutil.move(_dir, test_data)\n return\n\n try:\n for path, folders, filenames in os.walk(_dir):\n dirpath = normalized_path(path)\n for folder in folders:\n unset_path_readonly(dirpath / folder)\n for filename in filenames:\n unset_path_readonly(dirpath / filename)\n shutil.rmtree(_dir)\n except Exception:\n if retry < max_retries:\n sleep(2)\n clean_dir(_dir, retry=retry + 1)\n\n\ndef random_png(filename: Path = None, size: int = 0) -> Union[None, bytes]:\n \"\"\"Generate a random PNG file.\n\n :param filename: The output file name. 
If None, returns\n the picture content.\n :param size: The number of black pixels of the picture.\n :return: None if given filename else bytes\n \"\"\"\n\n if not size:\n size = random.randint(1, 42)\n else:\n size = max(1, size)\n\n pack = struct.pack\n\n def chunk(header, data):\n return (\n pack(\">I\", len(data))\n + header\n + data\n + pack(\">I\", zlib.crc32(header + data) & 0xFFFFFFFF)\n )\n\n magic = pack(\">8B\", 137, 80, 78, 71, 13, 10, 26, 10)\n png_filter = pack(\">B\", 0)\n scanline = pack(\">{}B\".format(size * 3), *[0] * (size * 3))\n content = [png_filter + scanline for _ in range(size)]\n png = (\n magic\n + chunk(b\"IHDR\", pack(\">2I5B\", size, size, 8, 2, 0, 0, 0))\n + chunk(b\"IDAT\", zlib.compress(b\"\".join(content)))\n + chunk(b\"IEND\", b\"\")\n )\n\n if not filename:\n return png\n\n filename.write_bytes(png)\n\n\ndef salt(text: str, prefix: str = \"ndt-\", with_suffix: bool = True) -> str:\n \"\"\"\n Add some salt to the given text to ensure no collisions.\n To use for workspace titles, usernames, groups names ...\n \"\"\"\n suffix = random.randint(1, 99999) if with_suffix else \"\"\n return f\"{prefix}{text}-{suffix}\"\n","repo_name":"nuxeo/nuxeo-drive","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"31"} +{"seq_id":"39736820465","text":"from flask import Flask, render_template, request, redirect\n\n\napp = Flask(__name__)\n\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/sendmsg\", methods=['POST'])\ndef sendmsg():\n number = request.form['phone']\n if (len(number) > 12) or (len(number) <= 10):\n error = \"Invalid Format, Format: 91948111xxxx\"\n return render_template(\"index.html\", error=error)\n else:\n url = \"https://wa.me/\"+number\n return redirect(url)\n\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"v-senthil/whats-app-messages","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32370616097","text":"n = int( input() )\np = tuple( map( int, input().split() ) )\nq = tuple( map( int, input().split() ) )\n\nimport itertools\npermutations = list( itertools.permutations( range( 1, n + 1 ) ) )\np_order = permutations.index( p )\nq_order = permutations.index( q )\n\nprint( abs( p_order - q_order ) )\n\n# kaijo = [ 1 ]\n# for k in range( n ):\n# kaijo.append( kaijo[ -1 ] * ( k + 1 ) )\n\n# print( kaijo )\n\n# import copy\n\n# def make_list( s, used, length ):\n# if length == n:\n# seq_list.append( s )\n# return\n# else:\n# for k in range( n ):\n# print( s, used, length )\n# if used[ k ] == 1:\n# continue\n# else:\n# next_used = copy.deepcopy( used )\n# next_used[ k ] = 1\n# make_list( s + str( k + 1 ), next_used, length + 1 )\n\n# make_list( \"\", [ 0 for _ in range( n ) ], 0 )\n\n# print( seq_list )","repo_name":"tsukasa2/AtCoder","sub_path":"practice/ABC/150/abc150-c.py","file_name":"abc150-c.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34660209600","text":"import json\nimport requests\n\n\nclass CoinPaprika():\n \"\"\"Get data from CoinPaprika api.\n \"\"\"\n def __init__(self):\n self.api_url = 'https://api.coinpaprika.com/v1/'\n self.bitcoin_id = 'btc-bitcoin'\n\n async def get_data(self):\n 
\"\"\"Get current price of btc.\n \"\"\"\n url = f'{self.api_url}coins/btc-bitcoin/ohlcv/today'\n response = requests.get(url)\n return [\n {\n 'coin': 'bitcoin',\n 'price': json.loads(response.text)[0]['close']\n }\n ]\n","repo_name":"ddusek/everyday_sheets","sub_path":"data_apis/coinpaprika.py","file_name":"coinpaprika.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25134634376","text":"import re\nimport sys\nimport time\nfrom os import *\nfrom os.path import *\nimport urllib\n\nfrom papersearch import PaperSearch\n\nclass AcmSearch(PaperSearch):\n\n SEARCH_BASE=\"http://portal.acm.org/\"\n PAPER_BASE=\"http://portal.acm.org/\"\n\n def get_paper_id(self, url):\n tmp = basename(url).split('id=', 1)[1]\n id = basename(tmp).split('&', 1)[0]\n return \"acm.\"+id\n\n def get_search_url(self, query):\n return \"http://portal.acm.org/results.cfm/?\"+\\\n urllib.urlencode({'query': query, 'querydisp':query, 'start':1,\n 'slide':1, 'srt':'score dsc', 'short':0,\n 'parser':'Internet', 'source_parser':'Internet',\n 'source_disp':'', 'source_query':'',\n 'coll':'Portal', 'dl':'GUIDE'})\n\n def get_paper_filename(self, url):\n tmp = url.split('?',1)[0]\n return basename(tmp)\n\n #\n # Override\n #\n\n bibtex_2_regex = re.compile('
([^<]*)
', re.M|re.S)\n\n def get_bibtex_entry(self, page):\n try:\n url = PaperSearch.get_bibtex_entry(self, page)\n f = urllib.urlopen(self.SEARCH_BASE+url)\n bibtex = f.read()\n match = self.bibtex_2_regex.search(bibtex)\n if match:\n bibtex = match.group(1)\n else:\n bibtex = None\n f.close()\n except IOError:\n bibtex = None\n\n return bibtex\n\n #\n # Regular expressions\n #\n\n page_url_regex = re.compile(\\\n '\\s*.*?', re.M|re.S)\n\n descr_regex = re.compile(\\\n '\\s*(.*?)', re.M|re.S)\n\n paper_url_regex = re.compile(\\\n '(?<=).*?'\\\n '^',\\\n re.M|re.S)\n\n items_regex = re.compile(\\\n '(?<=
)'\\\n '(.*?(?= )|'\\\n '.*?^)',\\\n re.M|re.S)\n\n bibtex_regex = re.compile(\"popBibTex.cfm[^']*(?=')\", re.M|re.S)\n\n abstract_regex = re.compile(\\\n '>ABSTRACT\\s*

\\s*

(.*?)

',\n re.M|re.S)\n\n author_regex = re.compile(\\\n '^\\s*author\\s*=\\s*[\"{](.*?)[\"}]', re.M|re.S)\n\n title_regex = re.compile(\\\n '^\\s*title\\s*=\\s*[\"{](.*?)[\"}]', re.M|re.S)\n\n next_page_regex = re.compile(\\\n '(?<=\\s*next)', re.M|re.S)\n\n # ACM does not do reverse reverences (or I havent found them)\n citations_regex = re.compile('^ABCDE$')\n cite_count_regex = re.compile('^EDCBA$')\n\n # no alternative download locations either\n papers_regex = re.compile(\"^$\")\n paper_item_regex = re.compile(\"^$\")\n\n # no known busy page for ACM\n busy_page_regex = re.compile(\"^$\")\n\n\nif __name__ == '__main__':\n search = AcmSearch()\n query = ' '.join(sys.argv[1:])\n search.retrieve_papers(query)\n\n","repo_name":"brainsmoke/papercrawler","sub_path":"acm.py","file_name":"acm.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43284607749","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom functions import Jaffe_theory, ks_initial_IonTracks\nimport itertools\n\n# set parameters\ndata_dict = dict(\n electrode_gap_cm=[0.1],\n particle=[\"proton\", \"carbon\"], # proton, helium, carbon, argon, iron\n voltage_V=[200, 300],\n E_MeV_u=np.linspace(1, 250, 100),\n)\n\n# create a data frame with all the variables\ndata_df = pd.DataFrame.from_records(data=itertools.product(*data_dict.values()), columns=data_dict.keys())\n\n# use the Jaffe theory for initial recombination for these parameters\nresult_df = pd.DataFrame()\nfor idx, data in data_df.iterrows():\n Jaffe_df = Jaffe_theory(data.E_MeV_u, data.voltage_V,\n data.electrode_gap_cm, particle=data.particle, input_is_LET=False)\n result_df = pd.concat([result_df, Jaffe_df], ignore_index=True)\n\n# plot the results\nfig, ax = plt.subplots()\nsns.lineplot(ax=ax, data=result_df, x=\"E_MeV_u\", y=\"ks_Jaffe\", hue=\"voltage_V\", style=\"particle\")\nax.set_xlabel(\"Energy (MeV/u)\")\nax.set_ylabel(\"$k_s$ Jaffe\")\nfig.savefig(\"Jaffe_example.pdf\", bbox_inches=\"tight\")\n\nprint(\"... 
Jaffe theory finished\")\n\n# APPLY IONTRACKS:\n# reduce the number of parameters\ndata_dict[\"E_MeV_u\"] = [60, 250]\ndata_df_shorter = pd.DataFrame.from_records(data=itertools.product(*data_dict.values()), columns=data_dict.keys())\n\n# calculate the recombination with the IonTracks code\nIonTracks_df = pd.DataFrame()\nfor idx, data in data_df_shorter.iterrows():\n temp_df = ks_initial_IonTracks(E_MeV_u=data.E_MeV_u,\n voltage_V=data.voltage_V,\n electrode_gap_cm=data.electrode_gap_cm,\n particle=data.particle,\n RDD_model=\"Gauss\")\n\n IonTracks_df = pd.concat([IonTracks_df, temp_df], ignore_index=True)\n print(IonTracks_df)\n\n# add to the plot\nsns.scatterplot(data=IonTracks_df, ax=ax, x=\"E_MeV_u\", y=\"ks\", label=\"IonTracks\")\nax.set_ylabel(\"$k_s$\")\nfig.savefig(\"Jaffe_theory_and_IonTracks.pdf\", bbox_inches=\"tight\")\n\n# save ion tracks results\nIonTracks_df.to_csv('IonTracks.csv')\n","repo_name":"jbrage/IonTracks","sub_path":"hadrons/example_single_track.py","file_name":"example_single_track.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"18205869650","text":"from pyglet.window.key import *\nfrom pyglet.image import load\nfrom pyglet.graphics import OrderedGroup\n\nWINDOW_SIZE = {\n 'width': 1280,\n 'height': 720\n}\n\nKEYS = {\n 'up': UP,\n 'down': DOWN,\n 'left': LEFT,\n 'right': RIGHT,\n 'focus': LSHIFT,\n 'shoot': Z,\n 'bomb': X,\n}\n\nKEY_PRESSED = 2\n\nIMAGES = {\n 'ui': {\n 'borders': load('resources/graphics/ui/borders.png'),\n 'mainmenu_bg': load('resources/graphics/ui/mainmenu_bg.png'),\n 'mainmenu_bg2': load('resources/graphics/ui/mainmenu_bg2.png'),\n 'mainmenu_left': load('resources/graphics/ui/mainmenu_left.png')\n },\n 'bullets': {\n 'round': load('resources/graphics/bullets/round.png')\n },\n 'player': {\n 'player1': load('resources/graphics/player/player1.png')\n },\n 'enemies': {\n\n },\n 'backgrounds': {\n 'clouds': load('resources/graphics/backgrounds/clouds.png'),\n 'grass': load('resources/graphics/backgrounds/grass.png')\n }\n}\n\nMENU_FONT = 'Calibri'\n\n# Layers\nG_BACKGROUND_0 = OrderedGroup(5)\nG_BACKGROUND_1 = OrderedGroup(6)\nG_BACKGROUND_2 = OrderedGroup(7)\nG_BACKGROUND_3 = OrderedGroup(8)\n\nG_MAIN_0 = OrderedGroup(10)\nG_MAIN_1 = OrderedGroup(11)\nG_MAIN_2 = OrderedGroup(12)\nG_MAIN_3 = OrderedGroup(13)\n\nG_FOREGROUND_0 = OrderedGroup(15)\nG_FOREGROUND_1 = OrderedGroup(16)\nG_FOREGROUND_2 = OrderedGroup(17)\nG_FOREGROUND_3 = OrderedGroup(18)\n\n\n# Set anchor points\nIMAGES['player']['player1'].anchor_x, IMAGES['player']['player1'].anchor_y = IMAGES['player']['player1'].width // 2, IMAGES['player']['player1'].height // 2\n\nIMAGES['bullets']['round'].anchor_x, IMAGES['bullets']['round'].anchor_y = IMAGES['bullets']['round'].width // 2, IMAGES['bullets']['round'].height // 2","repo_name":"JoaquinLeonArg/bullet-hell","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32321434829","text":"\"\"\"\nDefinition of ParentTreeNode:\nclass ParentTreeNode:\n def __init__(self, val):\n self.val = val\n self.parent, self.left, self.right = None, None, None\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param: root: The root of the tree\n @param: A: node in the tree\n @param: B: node in the tree\n @return: The lowest common ancestor of A and B\n \"\"\"\n def lowestCommonAncestorII(self, root, A, B):\n 
# write your code here\n s = set([])\n while A:\n s.add(A)\n A = A.parent\n while B:\n if B in s:\n return B\n B = B.parent\n return None\n\n# easy: https://www.lintcode.com/problem/lowest-common-ancestor-ii/\n","repo_name":"yingl/LintCodeInPython","sub_path":"lowest-common-ancestor-ii.py","file_name":"lowest-common-ancestor-ii.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"31"} +{"seq_id":"10078446133","text":"\nfrom fastapi import APIRouter\nfrom apps.models import User\nrouter = APIRouter()\n\n\n@router.get(\"/\")\ndef read_root():\n users = User.objects.last()\n user_name = 'Guest'\n if users:\n user_name = users.username\n return {\"Hello\": user_name}\n","repo_name":"tolgahanuzun/fastapi-django-orm-layout","sub_path":"apps/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"18034801707","text":"import os\nimport sys\nimport yara\nfrom config.config import yaraRuleFile\nfrom db.db import Maildatabase\n \ndef fileScan(scanfile, md5Hash, msg_id):\n\tyaraRules = yara.compile(yaraRuleFile)\n\tmatches = []\n\tif os.path.getsize(scanfile) > 0:\n\t\tfor match in yaraRules.match(scanfile):\n\t\t\tmatches.append({\"name\" : match.rule, \"meta\" : match.meta})\n\tdb = Maildatabase()\n\tfor m in matches:\n\t\tyaraRule = m[\"name\"]\n\t\ttry:\n\t\t\tyaraDesc = m[\"meta\"][\"maltype\"]\n\t\texcept:\n\t\t\tyaraDesc = None\n\t\tsqlYara = (msg_id, md5Hash, yaraRule, yaraDesc)\n\t\tdb.storeYara(sqlYara)\n\treturn matches\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\t\n","repo_name":"kevthehermit/Maildb","sub_path":"core/yarascan.py","file_name":"yarascan.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"31"} +{"seq_id":"31047865002","text":"import os\n\nimport dotenv\nimport hydra\nfrom hydra.core.config_store import ConfigStore\nfrom omegaconf import DictConfig\nfrom rich.traceback import install\n\n# load environment variables from `.env` file if it exists\n# recursively searches for `.env` in all folders starting from work dir\n\ndotenv.load_dotenv(override=True, verbose=True)\ninstall(show_locals=False, extra_lines=1, word_wrap=True, width=350)\n\n\ndef collect_config_store():\n from capit.configs.config_tree import Config, base_callbacks, wandb_callbacks\n from capit.configs.datamodules import InstagramImageTextMultiModalDataModuleConfig\n from capit.configs.hydra import add_hydra_configs\n from capit.configs.loggers import (\n TensorboardLoggerConfig,\n WeightsAndBiasesLoggerConfig,\n )\n from capit.configs.mode import BaseMode\n from capit.configs.models import (\n CLIPImageTextMultiModalDatasetConfig,\n CLIPWithPostProcessingImageTextModelConfig,\n )\n from capit.configs.optimizers import AdamWOptimizerConfig\n from capit.configs.trainers import BaseTrainer, DDPTrainer, DPTrainer, MPSTrainer\n\n config_store = ConfigStore.instance()\n ###################################################################################\n config_store.store(name=\"config\", node=Config)\n ###################################################################################\n config_store.store(\n group=\"callbacks\",\n name=\"base\",\n node=base_callbacks,\n )\n\n config_store.store(\n group=\"callbacks\",\n name=\"wandb\",\n node=wandb_callbacks,\n )\n 
###################################################################################\n config_store.store(\n group=\"logger\",\n name=\"wandb\",\n node=dict(wandb=WeightsAndBiasesLoggerConfig()),\n )\n\n config_store.store(\n group=\"logger\",\n name=\"tb\",\n node=dict(tensorboard_logger=TensorboardLoggerConfig()),\n )\n\n config_store.store(\n group=\"logger\",\n name=\"wandb+tb\",\n node=dict(\n tensorboard=TensorboardLoggerConfig(),\n wandb=WeightsAndBiasesLoggerConfig(),\n ),\n )\n ###################################################################################\n config_store.store(\n group=\"model\",\n name=\"clip\",\n node=CLIPImageTextMultiModalDatasetConfig,\n )\n\n config_store.store(\n group=\"model\",\n name=\"clip-with-pp\",\n node=CLIPWithPostProcessingImageTextModelConfig,\n )\n ###################################################################################\n config_store.store(\n group=\"datamodule\",\n name=\"InstagramImageTextMultiModal\",\n node=InstagramImageTextMultiModalDataModuleConfig,\n )\n ###################################################################################\n config_store.store(group=\"trainer\", name=\"base\", node=BaseTrainer)\n config_store.store(group=\"trainer\", name=\"gpu-dp\", node=DPTrainer)\n config_store.store(group=\"trainer\", name=\"gpu-ddp\", node=DDPTrainer)\n config_store.store(group=\"trainer\", name=\"mps\", node=MPSTrainer)\n ###################################################################################\n config_store = add_hydra_configs(config_store)\n ###################################################################################\n config_store.store(\n group=\"mode\",\n name=\"base\",\n node=BaseMode(),\n )\n ###################################################################################\n config_store.store(group=\"optimizer\", name=\"AdamW\", node=AdamWOptimizerConfig)\n\n return config_store\n\n\nconfig_store = collect_config_store()\n\n\n@hydra.main(version_base=None, config_name=\"config\")\ndef main(config: DictConfig):\n\n # Imports can be nested inside @hydra.main to optimize tab completion\n # https://github.com/facebookresearch/hydra/issues/934\n from capit.base import utils\n from capit.train_eval import train_eval\n\n # A couple of optional utilities:\n # - disabling python warnings\n # - forcing debug-friendly configuration\n # - verifying experiment name is set when running in experiment mode\n # You can safely get rid of this line if you don't want those\n utils.extras(config)\n os.environ[\"WANDB_PROGRAM\"] = config.code_dir\n\n return train_eval(config)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AntreasAntoniou/CAPIT","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"12621176617","text":"import db\nimport os, types, json, glob\n\n\nclass TextDbWrapper(object):\n\n def __init__(self, app=None, config=None):\n if app:\n self.init_app(app)\n elif config:\n self._dbname = config['DB_NAME']\n self._db = os.path.join(os.path.dirname(os.path.abspath(__file__)), config['DB_NAME'])\n self._ns = config['ENV']\n self._tokenmaps = db.get_tokenmaps(self)\n else:\n raise Exception(\"app or config please!\")\n\n def init_app(self, app):\n app.extensions['data_wrapper'] = app.extensions.get('data_wrapper', {})\n app.extensions['data_wrapper']['db'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), app.config['DB_NAME'])\n 
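# reuse a namespace registered by an earlier init if present, otherwise fall back to the app's ENV setting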
app.extensions['data_wrapper']['ns'] = app.extensions['data_wrapper'].get('ns', app.config['ENV'])\n\n self._dbname = app.config['DB_NAME']\n self._db = app.extensions['data_wrapper']['db']\n self._ns = app.extensions['data_wrapper']['ns']\n\n app.extensions['data_wrapper']['tokenmaps'] = db.get_tokenmaps(self)\n self._tokenmaps = app.extensions['data_wrapper']['tokenmaps']\n\n def drop_db(self, db_name):\n import shutil\n try:\n shutil.rmtree(os.path.join(os.path.dirname(os.path.abspath(__file__)), db_name))\n except OSError:\n pass\n\n def drop_namespace(self, namespace = None):\n db.drop_namespace(self, namespace)\n\n def refresh_tokenmaps(self):\n self._tokenmaps = db.get_tokenmaps(self)\n\n def get_token_map(self, table, type):\n try:\n return self._tokenmaps[type][table]\n except:\n return {}\n\n def get_collection_names(self):\n ns_collections = glob.glob(\"%s/%s.*\" % (self._db, self.ns))\n return [collection.replace(\"%s/%s.\" % (self._db, self.ns), '', 1) for collection in ns_collections]\n\n def init_replication(self, destination_hostname, replication_id = None):\n return db.init_replication(self, destination_hostname, replication_id = replication_id)\n\n def accept_replicated_batch(self, data):\n return db.accept_replicated_batch(self, data)\n\n @property\n def config(self):\n return db.get_config(self)\n\n @property\n def ns(self):\n return self._ns\n\n def get_collection(self, table):\n collection_path = \"%s/%s.%s\" % (self._db, self.ns, table)\n return collection_path\n\n @db.unflattener\n @db.flattener\n def get(self, table, properties):\n document = None\n collectionpath = self.get_collection(table)\n try:\n if '_id' not in properties:\n result = self.find(table, properties, limit = 1, _flattened = True)\n document = list(result)[0]\n else:\n with open(\"%s/%s\" % (collectionpath, properties[\"_id\"])) as file:\n result = file.read()\n document = json.loads(result)\n except:\n pass\n return document\n\n @db.strong_consistency_option\n @db.flattener\n def put(self, table, document, replace = False, **kwargs):\n if document is None or table is None:\n return\n collectionpath = self.get_collection(table)\n if not replace and os.path.exists(collectionpath + '/' + document['_id']):\n raise self.DuplicateKeyError\n try:\n with open(collectionpath + '/' + document['_id'], 'w') as file:\n file.write(json.dumps(document))\n except:\n if not os.path.exists(collectionpath):\n os.makedirs(collectionpath)\n return self.put(table, document, _flattened = True)\n raise\n return document['_id']\n\n def put_multi(self, table, documents):\n results = []\n for document in documents:\n results.append(self.put(table, document))\n return results\n\n def remove(self, table, properties):\n keys_to_delete = self.find(table, properties, keys_only = True)\n for filename in keys_to_delete:\n try:\n os.remove(filename)\n except:\n pass\n\n @db.flattener\n def find(self, table, properties, _range = None, sort = [], limit = None, keys_only = False, **kwargs):\n matches = 0\n collectionpath = self.get_collection(table)\n documents = []\n document_paths = glob.glob(collectionpath + '/*')\n for document_path in document_paths:\n try:\n with open(document_path, 'r') as document_json:\n document = json.loads(document_json.read())\n if _range:\n if 'stop' in _range:\n assert document[_range['prop']] < _range['stop']\n if 'start' in _range:\n assert _range['start'] <= document[_range['prop']]\n for prop_key in properties:\n assert properties[prop_key] == document[prop_key]\n except (AssertionError, 
KeyError):\n continue\n matches += 1\n if keys_only:\n document = document_path\n documents.append(document)\n if matches == limit and not sort:\n break\n if keys_only:\n return documents\n else:\n if sort:\n def key_function(item):\n str_key = ''\n for sort_info in sort:\n if sort_info[0] in item:\n sort_val = item[sort_info[0]]\n if sort_info[1] == -1:\n # Hacky to reflect characters to other side of spectrum.\n sort_val = \"\".join(chr(127 - (ord(letter) - 128)) for letter in sort_val)\n str_key = \"%s_%s\" % (str_key, sort_val)\n return str_key\n documents = sorted(documents, key = key_function)\n if limit:\n documents = documents[:limit]\n return self.TextDbCursorWrapper(iter(documents), table = table, token_map = self.get_token_map(table, 'decode'))\n\n\n def update(self, table, key, properties, upsert = False, replace = False):\n document = self.get(table, {'_id' : key})\n if replace:\n document = {'_id' : key}\n if upsert and document is None:\n document = {'_id' : key}\n updated_document = merge_dicts(document, properties)\n self.put(table, updated_document, replace = True)\n\n def startswith(self, table, starts_with):\n collectionpath = self.get_collection(table)\n document_paths = glob.glob('%s/%s*' % (collectionpath, starts_with))\n documents = []\n for document_path in document_paths:\n with open(document_path, 'r') as document_json:\n document = json.loads(document_json.read())\n if document:\n documents.append(document)\n return documents\n\n class DuplicateKeyError(Exception):\n \"\"\" To pass dup exception through to wrapper.\n \"\"\"\n\n from collections import Iterator\n class TextDbCursorWrapper(Iterator):\n \"\"\" Mostly for proof-of-concept. Needed in GaeDatastoreWrapper.\n \"\"\"\n def __init__(self, wrapped, table, token_map):\n self._wrapped = wrapped\n self._table = table\n self._token_map = token_map\n def next(self):\n result = self._wrapped.next()\n if self._table not in ['metadata','tokenmaps']:\n result = db.unflatten(result, self._token_map)\n return result\n def __getattr__(self, attr):\n return getattr(self._wrapped, attr)\n\ndef merge_dicts(x,y):\n merged = dict(x,**y)\n xkeys = x.keys()\n for key in xkeys:\n if type(x[key]) is types.DictType and y.has_key(key):\n merged[key] = merge_dicts(x[key],y[key])\n return merged\n","repo_name":"eliwjones/heroku-gae-bridge","sub_path":"db/textdb_wrapper/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"25256335269","text":"from pathlib import Path\nfrom pydicom import dcmread\n\n\n# source directory with .dcm files\nsource_dir = 'src'\n\n# file with full filepaths old .dcm file and its matches new file with anonymous PatientName attribute\nmatch = 'match_file.txt'\n\nsrc_dir = Path(source_dir)\n\n# get only .dcm files from src/ directory\ndcm_files = [i for i in src_dir.iterdir() if i.is_file() and i.suffix == '.dcm']\n\n# write statistic of old and new .dcm filepaths\nwith open(match, 'w') as match_file:\n\n for dcm_file in dcm_files:\n dcm_file = str(dcm_file)\n ds = dcmread(dcm_file) # get metadata from .dcm file\n if hasattr(ds, 'PatientName'):\n ds.PatientName = '' # Clear patient name value if name exist\n\n # write this dicom file with anonymous PatientName to new directory \n if hasattr(ds, 'StudyInstanceUID') and hasattr(ds, 'SeriesInstanceUID') and hasattr(ds, 'SOPInstanceUID'):\n path = Path(ds.StudyInstanceUID, ds.SeriesInstanceUID) # merge attr values to create 
new paths\n path.mkdir(parents=True, exist_ok=True) # confirm create this path\n new_dcm_name = ds.SOPInstanceUID + '.dcm'\n new_dcm_file = str(Path(path, new_dcm_name)) # new file with path\n ds.save_as(new_dcm_file) # create new .dcm file in new directory\n\n # write line with full paths of old and new dcm files to match_files.txt\n line = dcm_file + '\\t' + new_dcm_file + '\\n'\n match_file.write(line)\n","repo_name":"bonifazy/dicom","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19680097079","text":"from PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\nimport random\nimport sys\n\n#main class with methods needed\nclass UI(QMainWindow):\n #init method which loads UI and gets all buttons\n def __init__(self):\n super(UI, self).__init__()\n\n uic.loadUi(\"TTTUI.ui\", self)\n\n #counter for deciding whose turn is\n self.counter=0\n\n #all UI objects\n self.button1=self.findChild(QPushButton, \"pushButton_1\")\n self.button2=self.findChild(QPushButton, \"pushButton_2\")\n self.button3=self.findChild(QPushButton, \"pushButton_3\")\n self.button4=self.findChild(QPushButton, \"pushButton_4\")\n self.button5=self.findChild(QPushButton, \"pushButton_5\")\n self.button6=self.findChild(QPushButton, \"pushButton_6\")\n self.button7=self.findChild(QPushButton, \"pushButton_7\")\n self.button8=self.findChild(QPushButton, \"pushButton_8\")\n self.button9=self.findChild(QPushButton, \"pushButton_9\")\n self.pve=self.findChild(QCheckBox, \"checkBox\")\n self.start=self.findChild(QPushButton, \"start\")\n self.label=self.findChild(QLabel, \"label\")\n\n\n\n\n #all button clicks\n self.button1.clicked.connect(lambda: self.action(self.button1))\n self.button2.clicked.connect(lambda: self.action(self.button2))\n self.button3.clicked.connect(lambda: self.action(self.button3))\n self.button4.clicked.connect(lambda: self.action(self.button4))\n self.button5.clicked.connect(lambda: self.action(self.button5))\n self.button6.clicked.connect(lambda: self.action(self.button6))\n self.button7.clicked.connect(lambda: self.action(self.button7))\n self.button8.clicked.connect(lambda: self.action(self.button8))\n self.button9.clicked.connect(lambda: self.action(self.button9))\n self.start.clicked.connect(lambda: self.reset())\n \n\n self.button_list=[\n self.button1,\n self.button2,\n self.button3,\n self.button4,\n self.button5,\n self.button6,\n self.button7,\n self.button8,\n self.button9,\n ]\n self.list_for_ai = [\n self.button1,\n self.button2,\n self.button3,\n self.button4,\n self.button5,\n self.button6,\n self.button7,\n self.button8,\n self.button9,\n ]\n #code for UI style\n self.setWindowTitle(\"Tic-Tac-Toe\")\n self.setStyleSheet(\"background-color: white;\")\n self.label.setText(\"X starts\")\n self.label.setStyleSheet('QLabel {color: black;}')\n self.start.setStyleSheet('QPushButton {color: black;}')\n for b in self.button_list:\n b.setStyleSheet('QPushButton {border: none;}')\n b.setFont(QFont('Quicksliver', 36))\n\n\n self.show()\n\n #button triggers this method\n def action(self, btn):\n if self.pve.isChecked() == False:\n if self.counter%2 == 0:\n btn.setText('X')\n btn.setStyleSheet('QPushButton {background-color: red; color: white;}')\n self.label.setText(\"O's Turn\")\n btn.setEnabled(False)\n else:\n btn.setText('O')\n btn.setStyleSheet('QPushButton {background-color: blue; color: white;}')\n self.label.setText(\"X's 
Turn\")\n btn.setEnabled(False)\n self.counter += 1\n else:\n \n btn.setText('X')\n btn.setStyleSheet('QPushButton {background-color: red; color: white;}')\n self.label.setText(\"You're playing against AI\")\n btn.setEnabled(False)\n self.ai()\n\n\n\n self.win()\n\n def ai(self):\n\n for b in self.list_for_ai:\n if b.text() == 'X':\n self.list_for_ai.remove(b)\n\n if self.list_for_ai != [] and self.label.text()!= 'X Wins':\n button = random.choice(self.list_for_ai)\n button.setText('O')\n button.setStyleSheet('QPushButton {background-color: blue; color: white;}')\n button.setEnabled(False)\n self.list_for_ai.remove(button)\n\n \n #disables all buttons after game ends\n def disableAll(self):\n for b in self.button_list:\n b.setEnabled(False)\n \n #checks if game is ending at each step\n def win(self):\n if (self.button1.text() != '' and self.button1.text() == \n self.button4.text() == self.button7.text()):\n\n self.label.setText(f\"{self.button1.text()} Wins\")\n self.disableAll()\n\n elif (self.button2.text() != '' and self.button2.text() == \n self.button5.text() == self.button8.text()):\n\n self.label.setText(f\"{self.button2.text()} Wins\")\n self.disableAll()\n\n elif (self.button3.text() != '' and self.button3.text() == \n self.button6.text() == self.button9.text()):\n\n self.label.setText(f\"{self.button3.text()} Wins\")\n self.disableAll()\n\n elif (self.button1.text() != '' and self.button1.text() == \n self.button2.text() == self.button3.text()):\n\n self.label.setText(f\"{self.button1.text()} Wins\")\n self.disableAll()\n\n elif (self.button4.text() != '' and self.button4.text() == \n self.button5.text() == self.button6.text()):\n\n self.label.setText(f\"{self.button4.text()} Wins\")\n self.disableAll()\n\n elif (self.button7.text() != '' and self.button7.text() == \n self.button8.text() == self.button9.text()):\n\n self.label.setText(f\"{self.button7.text()} Wins\")\n self.disableAll()\n\n elif (self.button1.text() != '' and self.button1.text() == \n self.button5.text() == self.button9.text()):\n\n self.label.setText(f\"{self.button1.text()} Wins\")\n self.disableAll()\n\n elif (self.button7.text() != '' and self.button7.text() == \n self.button5.text() == self.button3.text()):\n\n self.label.setText(f\"{self.button7.text()} Wins\")\n self.disableAll()\n\n elif (self.button1.text() != '' and self.button2.text() != '' and\n self.button3.text() != '' and self.button4.text() != '' and\n self.button5.text() != '' and self.button6.text() != '' and\n self.button7.text() != '' and self.button8.text() != '' and\n self.button9.text() != ''):\n\n self.label.setText(\"Draw!\")\n\n #clears the field for starting new game\n def reset(self):\n self.list_for_ai = [\n self.button1,\n self.button2,\n self.button3,\n self.button4,\n self.button5,\n self.button6,\n self.button7,\n self.button8,\n self.button9,\n ]\n for b in self.button_list:\n b.setText('')\n b.setEnabled(True)\n b.setStyleSheet('QPushButton {background-color: white; border: none;}')\n self.label.setText(\"X Starts\")\n self.counter = 0\n\n\n#code for app execution\napp = QApplication(sys.argv)\nwindow = UI()\n\napp.exec_()","repo_name":"norayr1231/Lab-Project","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":7598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36893331842","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nfrom 
tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base\nfrom tensorflow.python.data.experimental.ops import batching\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass MapAndBatchDatasetSerializationTest(\n dataset_serialization_test_base.DatasetSerializationTestBase):\n\n def testNumParallelBatches(self):\n range_size = 11\n num_repeats = 2\n batch_size = 5\n total_outputs = range_size * num_repeats\n num_outputs_drop_remainder = total_outputs // batch_size\n num_outputs_keep_remainder = int(math.ceil(total_outputs / batch_size))\n num_parallel_batches = 2\n\n def build_ds(range_start, drop_remainder=False):\n\n def _map_fn(x):\n return math_ops.square(x)\n\n return dataset_ops.Dataset.range(\n range_start, range_start + range_size).repeat(num_repeats).apply(\n batching.map_and_batch(\n map_func=_map_fn,\n batch_size=batch_size,\n num_parallel_batches=num_parallel_batches,\n drop_remainder=drop_remainder))\n\n self.run_core_tests(lambda: build_ds(10), num_outputs_keep_remainder)\n self.run_core_tests(lambda: build_ds(10, True), num_outputs_drop_remainder)\n\n def testNumParallelCalls(self):\n range_size = 11\n num_repeats = 2\n batch_size = 5\n total_outputs = range_size * num_repeats\n num_outputs_drop_remainder = total_outputs // batch_size\n num_outputs_keep_remainder = int(math.ceil(total_outputs / batch_size))\n num_parallel_calls = 7\n\n def build_ds(range_start, drop_remainder=False):\n\n def _map_fn(x):\n return math_ops.square(x)\n\n return dataset_ops.Dataset.range(\n range_start, range_start + range_size).repeat(num_repeats).apply(\n batching.map_and_batch(\n map_func=_map_fn,\n batch_size=batch_size,\n num_parallel_calls=num_parallel_calls,\n drop_remainder=drop_remainder))\n\n self.run_core_tests(lambda: build_ds(10), num_outputs_keep_remainder)\n self.run_core_tests(lambda: build_ds(10, True), num_outputs_drop_remainder)\n\n def testSparse(self):\n\n def build_dataset():\n\n def map_fn(i):\n return sparse_tensor.SparseTensorValue(\n indices=[[0]], values=(i * [1]), dense_shape=[1])\n\n return dataset_ops.Dataset.range(10).apply(\n batching.map_and_batch(map_fn, 5))\n\n self.run_core_tests(build_dataset, 2)\n\n\nif __name__ == \"__main__\":\n test.main()\n","repo_name":"DeepRec-AI/DeepRec","sub_path":"tensorflow/python/data/experimental/kernel_tests/serialization/map_and_batch_dataset_serialization_test.py","file_name":"map_and_batch_dataset_serialization_test.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","stars":895,"dataset":"github-code","pt":"31"} +{"seq_id":"11963767691","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport tensorflow as tf\n\nfrom tools import visualize, dataset\n\nNUMBER_CLASSES = 10\nIMAGE_WIDTH = 32\nIMAGE_HEIGHT = 32\nIMAGE_CHANNELS = 3\nDATASET_PATH = '/home/workspace/cifar10/pythonver'\nLEARNING_RATE = 0.001\nMOMENTUM = 0.9\nbatch_size = 128\nepoches = 5\nKEEP_PROB = 0.6\nLOG_DIR = 'Log--' + visualize.get_time()\n\n\ndef convolution(data, kernel, strides, name, bias, padding='SAME'):\n with tf.name_scope(name):\n conv = tf.nn.conv2d(input=data, filter=kernel, strides=[1, strides, strides, 1], padding=padding, name=name)\n with tf.name_scope(name+'_relu'):\n conv = tf.nn.bias_add(conv, bias)\n relu = tf.nn.relu(conv, name=name+'_relu')\n return relu\n return conv\n\n\ndef pooling(data, 
ksize, strides, name, padding='SAME'):\n with tf.name_scope(name):\n return tf.nn.max_pool(value=data, ksize=[1, ksize, ksize, 1], strides=[1, strides, strides, 1],\n padding=padding, name=name)\n\n\ndef fullconnection(data, weights, biases, name, relu_bias, relu=False):\n with tf.name_scope(name):\n fc = tf.nn.bias_add(tf.matmul(data, weights), biases, name=name)\n if not relu:\n return fc\n with tf.name_scope(name+'_relu'):\n fc = tf.nn.bias_add(fc, relu_bias)\n relu = tf.nn.relu(fc, name=(name+'_relu'))\n return relu\n\n\ndef model(input, model_param):\n conv1 = convolution(data=input, kernel=model_param['conv1_kernel'], strides=model_param['conv1_strides'], bias=model_param['conv1_biases'], name=model_param['conv1_name'], padding=model_param['conv1_padding'])\n conv2 = convolution(data=conv1, kernel=model_param['conv2_kernel'], strides=model_param['conv2_strides'], bias=model_param['conv2_biases'], name=model_param['conv2_name'], padding=model_param['conv2_padding'])\n\n pool1 = pooling(data=conv2, ksize=model_param['pool1_ksize'], strides=model_param['pool1_strides'], name=model_param['pool1_name'], padding=model_param['pool1_padding'])\n\n conv3 = convolution(data=pool1, kernel=model_param['conv3_kernel'], strides=model_param['conv3_strides'], bias=model_param['conv3_biases'], name=model_param['conv3_name'], padding=model_param['conv3_padding'])\n conv4 = convolution(data=conv3, kernel=model_param['conv4_kernel'], strides=model_param['conv4_strides'], bias=model_param['conv4_biases'], name=model_param['conv4_name'], padding=model_param['conv4_padding'])\n\n pool2 = pooling(data=conv4, ksize=model_param['pool2_ksize'], strides=model_param['pool2_strides'], name=model_param['pool2_name'], padding=model_param['pool2_padding'])\n\n conv5 = convolution(data=pool2, kernel=model_param['conv5_kernel'], strides=model_param['conv5_strides'], bias=model_param['conv5_biases'], name=model_param['conv5_name'], padding=model_param['conv5_padding'])\n conv6 = convolution(data=conv5, kernel=model_param['conv6_kernel'], strides=model_param['conv6_strides'], bias=model_param['conv6_biases'], name=model_param['conv6_name'], padding=model_param['conv6_padding'])\n conv7 = convolution(data=conv6, kernel=model_param['conv7_kernel'], strides=model_param['conv7_strides'], bias=model_param['conv7_biases'], name=model_param['conv7_name'], padding=model_param['conv7_padding'])\n\n pool3 = pooling(data=conv7, ksize=model_param['pool3_ksize'], strides=model_param['pool3_strides'], name=model_param['pool3_name'], padding=model_param['pool3_padding'])\n\n conv8 = convolution(data=pool3, kernel=model_param['conv8_kernel'], strides=model_param['conv8_strides'], bias=model_param['conv8_biases'], name=model_param['conv8_name'], padding=model_param['conv8_padding'])\n conv9 = convolution(data=conv8, kernel=model_param['conv9_kernel'], strides=model_param['conv9_strides'], bias=model_param['conv9_biases'], name=model_param['conv9_name'], padding=model_param['conv9_padding'])\n conv10 = convolution(data=conv9, kernel=model_param['conv10_kernel'], strides=model_param['conv10_strides'], bias=model_param['conv10_biases'], name=model_param['conv10_name'], padding=model_param['conv10_padding'])\n\n pool4 = pooling(data=conv10, ksize=model_param['pool4_ksize'], strides=model_param['pool4_strides'], name=model_param['pool4_name'], padding=model_param['pool4_padding'])\n\n conv11 = convolution(data=pool4, kernel=model_param['conv11_kernel'], strides=model_param['conv11_strides'], bias=model_param['conv11_biases'], 
name=model_param['conv11_name'], padding=model_param['conv11_padding'])\n conv12 = convolution(data=conv11, kernel=model_param['conv12_kernel'], strides=model_param['conv12_strides'], bias=model_param['conv12_biases'], name=model_param['conv12_name'], padding=model_param['conv12_padding'])\n conv13 = convolution(data=conv12, kernel=model_param['conv13_kernel'], strides=model_param['conv13_strides'], bias=model_param['conv13_biases'], name=model_param['conv13_name'], padding=model_param['conv13_padding'])\n\n pool5 = pooling(data=conv13, ksize=model_param['pool5_ksize'], strides=model_param['pool5_strides'], name=model_param['pool5_name'], padding=model_param['pool5_padding'])\n pool5 = tf.reshape(pool5, shape=[-1, model_param['fc1_weights'].get_shape()[0].value])\n\n fc1 = fullconnection(data=pool5, weights=model_param['fc1_weights'], biases=model_param['fc1_biases'], relu_bias=model_param['fc1_relu_biases'], name=model_param['fc1_name'], relu=model_param['fc1_relu'])\n fc2 = fullconnection(data=fc1, weights=model_param['fc2_weights'], biases=model_param['fc2_biases'], relu_bias=model_param['fc2_relu_biases'], name=model_param['fc2_name'], relu=model_param['fc2_relu'])\n drop = tf.nn.dropout(fc2, keep_prob=KEEP_PROB)\n fc3 = fullconnection(data=drop, weights=model_param['fc3_weights'], biases=model_param['fc3_biases'], relu_bias=model_param['fc3_relu_biases'], name=model_param['fc3_name'])\n return fc3\n\n\ndef init_param():\n model_param = {\n 'conv1_kernel': tf.Variable(tf.truncated_normal(shape=[3, 3, 3, 64], stddev=1e-2, dtype=tf.float32)), 'conv1_strides': 1, 'conv1_biases': tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32)), 'conv1_name': 'conv1', 'conv1_padding': 'SAME',\n 'conv2_kernel': tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 64], stddev=1e-2, dtype=tf.float32)), 'conv2_strides': 1, 'conv2_biases': tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32)), 'conv2_name': 'conv2', 'conv2_padding': 'SAME',\n 'pool1_ksize': 2, 'pool1_strides': 2, 'pool1_name': 'pool1', 'pool1_padding': 'SAME',\n 'conv3_kernel': tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 128], stddev=1e-2, dtype=tf.float32)), 'conv3_strides': 1, 'conv3_biases': tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32)), 'conv3_name': 'conv3', 'conv3_padding': 'SAME',\n 'conv4_kernel': tf.Variable(tf.truncated_normal(shape=[3, 3, 128, 128], stddev=1e-2, dtype=tf.float32)), 'conv4_strides': 1, 'conv4_biases': tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32)), 'conv4_name': 'conv4', 'conv4_padding': 'SAME',\n 'pool2_ksize': 2, 'pool2_strides': 2, 'pool2_name': 'pool2', 'pool2_padding': 'SAME',\n 'conv5_kernel': tf.Variable(tf.truncated_normal(shape=[3, 3, 128, 256], stddev=1e-2, dtype=tf.float32)), 'conv5_strides': 1, 'conv5_biases': tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32)), 'conv5_name': 'conv5', 'conv5_padding': 'SAME',\n 'conv6_kernel': tf.Variable(tf.truncated_normal(shape=[3, 3, 256, 256], stddev=1e-2, dtype=tf.float32)), 'conv6_strides': 1, 'conv6_biases': tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32)), 'conv6_name': 'conv6', 'conv6_padding': 'SAME',\n 'conv7_kernel': tf.Variable(tf.truncated_normal(shape=[3, 3, 256, 256], stddev=1e-2, dtype=tf.float32)), 'conv7_strides': 1, 'conv7_biases': tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32)), 'conv7_name': 'conv7', 'conv7_padding': 'SAME',\n 'pool3_ksize': 2, 'pool3_strides': 2, 'pool3_name': 'pool3', 'pool3_padding': 'SAME',\n 'conv8_kernel': tf.Variable(tf.truncated_normal(shape=[3, 
3, 256, 512], stddev=1e-2, dtype=tf.float32)), 'conv8_strides': 1, 'conv8_biases': tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32)), 'conv8_name': 'conv8', 'conv8_padding': 'SAME',\n 'conv9_kernel': tf.Variable(tf.truncated_normal(shape=[3, 3, 512, 512], stddev=1e-2, dtype=tf.float32)), 'conv9_strides': 1, 'conv9_biases': tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32)), 'conv9_name': 'conv9', 'conv9_padding': 'SAME',\n 'conv10_kernel': tf.Variable(tf.truncated_normal(shape=[3, 3, 512, 512], stddev=1e-2, dtype=tf.float32)), 'conv10_strides': 1, 'conv10_biases': tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32)), 'conv10_name': 'conv10', 'conv10_padding': 'SAME',\n 'pool4_ksize': 2, 'pool4_strides': 2, 'pool4_name': 'pool4', 'pool4_padding': 'SAME',\n 'conv11_kernel': tf.Variable(tf.truncated_normal(shape=[3, 3, 512, 512], stddev=1e-2, dtype=tf.float32)), 'conv11_strides': 1, 'conv11_biases': tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32)), 'conv11_name': 'conv11', 'conv11_padding': 'SAME',\n 'conv12_kernel': tf.Variable(tf.truncated_normal(shape=[3, 3, 512, 512], stddev=1e-2, dtype=tf.float32)), 'conv12_strides': 1, 'conv12_biases': tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32)), 'conv12_name': 'conv12', 'conv12_padding': 'SAME',\n 'conv13_kernel': tf.Variable(tf.truncated_normal(shape=[3, 3, 512, 512], stddev=1e-2, dtype=tf.float32)), 'conv13_strides': 1, 'conv13_biases': tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32)), 'conv13_name': 'conv13', 'conv13_padding': 'SAME',\n 'pool5_ksize': 2, 'pool5_strides': 2, 'pool5_name': 'pool5', 'pool5_padding': 'SAME',\n 'fc1_weights': tf.Variable(tf.truncated_normal(shape=[512, 4096], stddev=1e-1, dtype=tf.float32)), 'fc1_biases': tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32)), 'fc1_relu_biases': tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32)), 'fc1_name': 'fc1', 'fc1_relu': True,\n 'fc2_weights': tf.Variable(tf.truncated_normal(shape=[4096, 4096], stddev=1e-1, dtype=tf.float32)), 'fc2_biases': tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32)), 'fc2_relu_biases': tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32)), 'fc2_name': 'fc2', 'fc2_relu': True,\n 'fc3_weights': tf.Variable(tf.truncated_normal(shape=[4096, NUMBER_CLASSES], stddev=1e-1, dtype=tf.float32)), 'fc3_biases': tf.Variable(tf.constant(1.0, shape=[NUMBER_CLASSES], dtype=tf.float32)), 'fc3_relu_biases': tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32)), 'fc3_name': 'fc3'\n }\n return model_param\n\n\ndef train_act(features_train, labels_train, features_test, labels_test):\n sess = tf.InteractiveSession()\n\n x = tf.placeholder(dtype=tf.float32, shape=[None, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS], name='features')\n y = tf.placeholder(dtype=tf.float32, shape=[None, NUMBER_CLASSES], name='labels')\n pred = model(x, init_param())\n with tf.name_scope('loss'):\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n tf.summary.scalar('loss', loss)\n tf.summary.histogram('loss', loss)\n with tf.name_scope('train'):\n train = tf.train.MomentumOptimizer(learning_rate=LEARNING_RATE, momentum=MOMENTUM).minimize(loss)\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)), tf.float32))\n tf.summary.scalar('accuracy', accuracy)\n tf.summary.histogram('accuracy', accuracy)\n\n merge = tf.summary.merge_all()\n logwriter = tf.summary.FileWriter(LOG_DIR, sess.graph)\n 
initial = tf.global_variables_initializer()\n\n sess.run(initial)\n data_size = features_train.shape[0]\n iterations = int(data_size/batch_size)\n for _ in range(epoches):\n for i in range(iterations):\n data = []\n labels = []\n if i == iterations-1:\n data = features_train[i * batch_size: data_size, :, :, :]\n labels = labels_train[i * batch_size: data_size]\n else:\n data = features_train[i * batch_size: (i + 1) * batch_size, :, :, :]\n labels = labels_train[i * batch_size: (i + 1) * batch_size]\n sess.run(train, feed_dict={x: data, y: labels})\n if i % 10 == 0:\n summary, accuracy_res = sess.run([merge, accuracy], feed_dict={x: features_test, y: labels_test})\n logwriter.add_summary(summary, i)\n print(visualize.get_time() + ' epoch %d, train_iteration at %d, test score: %f ' % (_, i, accuracy_res))\n sess.close()\n logwriter.close()\n\n\ndef main():\n features_train, labels_train, features_test, labels_test = dataset.load_cifar10(DATASET_PATH,\n width=IMAGE_WIDTH,\n height=IMAGE_HEIGHT,\n one_hot=True)\n train_act(features_train, labels_train, features_test, labels_test)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gu-yan/mlAlgorithms","sub_path":"tensorflow/VGG_impl/VGG16_tensorflow.py","file_name":"VGG16_tensorflow.py","file_ext":"py","file_size_in_byte":13267,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"73440629527","text":"\"\"\"Defines the templaters.\"\"\"\nimport logging\nimport os.path\nimport pkgutil\nfrom functools import reduce\nfrom typing import Callable, Dict, List, Optional, Tuple\n\nimport jinja2.nodes\nfrom jinja2 import (\n Environment,\n FileSystemLoader,\n TemplateError,\n TemplateSyntaxError,\n meta,\n)\nfrom jinja2.environment import Template\nfrom jinja2.sandbox import SandboxedEnvironment\n\nfrom sqlfluff.core.config import FluffConfig\nfrom sqlfluff.core.errors import SQLTemplaterError\nfrom sqlfluff.core.templaters.base import (\n RawFileSlice,\n TemplatedFile,\n TemplatedFileSlice,\n)\nfrom sqlfluff.core.templaters.python import PythonTemplater\nfrom sqlfluff.core.templaters.slicers.tracer import JinjaTracer\n\n\n# Instantiate the templater logger\ntemplater_logger = logging.getLogger(\"sqlfluff.templater\")\n\n\nclass JinjaTemplater(PythonTemplater):\n \"\"\"A templater using the jinja2 library.\n\n See: https://jinja.palletsprojects.com/\n \"\"\"\n\n name = \"jinja\"\n\n class Libraries:\n \"\"\"Mock namespace for user-defined Jinja library.\"\"\"\n\n pass\n\n @staticmethod\n def _extract_macros_from_template(template, env, ctx):\n \"\"\"Take a template string and extract any macros from it.\n\n Lovingly inspired by http://codyaray.com/2015/05/auto-load-jinja2-macros\n \"\"\"\n from jinja2.runtime import Macro # noqa\n\n # Iterate through keys exported from the loaded template string\n context = {}\n macro_template = env.from_string(template, globals=ctx)\n # This is kind of low level and hacky but it works\n for k in macro_template.module.__dict__:\n attr = getattr(macro_template.module, k)\n # Is it a macro? If so install it at the name of the macro\n if isinstance(attr, Macro):\n context[k] = attr\n # Return the context\n return context\n\n @classmethod\n def _extract_macros_from_path(cls, path: List[str], env: Environment, ctx: Dict):\n \"\"\"Take a path and extract macros from it.\"\"\"\n for path_entry in path:\n # Does it exist? 
It should as this check was done on config load.\n if not os.path.exists(path_entry):\n raise ValueError(f\"Path does not exist: {path_entry}\")\n\n macro_ctx = {}\n if os.path.isfile(path_entry):\n # It's a file. Extract macros from it.\n with open(path_entry) as opened_file:\n template = opened_file.read()\n # Update the context with macros from the file.\n macro_ctx.update(\n cls._extract_macros_from_template(template, env=env, ctx=ctx)\n )\n else:\n # It's a directory. Iterate through files in it and extract from them.\n for dirpath, _, files in os.walk(path_entry):\n for fname in files:\n if fname.endswith(\".sql\"):\n macro_ctx.update(\n cls._extract_macros_from_path(\n [os.path.join(dirpath, fname)], env=env, ctx=ctx\n )\n )\n return macro_ctx\n\n def _extract_macros_from_config(self, config, env, ctx):\n \"\"\"Take a config and load any macros from it.\"\"\"\n if config:\n # This is now a nested section\n loaded_context = (\n config.get_section((self.templater_selector, self.name, \"macros\")) or {}\n )\n else: # pragma: no cover TODO?\n loaded_context = {}\n\n # Iterate to load macros\n macro_ctx = {}\n for value in loaded_context.values():\n macro_ctx.update(\n self._extract_macros_from_template(value, env=env, ctx=ctx)\n )\n return macro_ctx\n\n def _extract_libraries_from_config(self, config):\n library_path = config.get_section(\n (self.templater_selector, self.name, \"library_path\")\n )\n if not library_path:\n return {}\n\n libraries = JinjaTemplater.Libraries()\n\n # If library_path has __init__.py we parse it as one module, else we parse it\n # a set of modules\n is_library_module = os.path.exists(os.path.join(library_path, \"__init__.py\"))\n library_module_name = os.path.basename(library_path)\n\n # Need to go one level up to parse as a module correctly\n walk_path = (\n os.path.join(library_path, \"..\") if is_library_module else library_path\n )\n\n for loader, module_name, is_pkg in pkgutil.walk_packages([walk_path]):\n # skip other modules that can be near module_dir\n if is_library_module and not module_name.startswith(library_module_name):\n continue\n\n module = loader.find_module(module_name).load_module(module_name)\n\n if \".\" in module_name: # nested modules have `.` in module_name\n *module_path, last_module_name = module_name.split(\".\")\n # find parent module recursively\n parent_module = reduce(\n lambda res, path_part: getattr(res, path_part),\n module_path,\n libraries,\n )\n\n # set attribute on module object to make jinja working correctly\n setattr(parent_module, last_module_name, module)\n else:\n # set attr on `libraries` obj to make it work in jinja nicely\n setattr(libraries, module_name, module)\n\n if is_library_module:\n # when library is module we have one more root module in hierarchy and we\n # remove it\n libraries = getattr(libraries, library_module_name)\n\n # remove magic methods from result\n return {k: v for k, v in libraries.__dict__.items() if not k.startswith(\"__\")}\n\n @staticmethod\n def _generate_dbt_builtins():\n \"\"\"Generate the dbt builtins which are injected in the context.\"\"\"\n # This feels a bit wrong defining these here, they should probably\n # be configurable somewhere sensible. 
But for now they're not.\n # TODO: Come up with a better solution.\n\n class ThisEmulator:\n \"\"\"A class which emulates the `this` class from dbt.\"\"\"\n\n name = \"this_model\"\n schema = \"this_schema\"\n database = \"this_database\"\n\n def __str__(self): # pragma: no cover TODO?\n return self.name\n\n dbt_builtins = {\n # `is_incremental()` renders as False, always in this case.\n # TODO: This means we'll never parse the other part of the query,\n # so we should find a solution to that. Perhaps forcing the file\n # to be parsed TWICE if it uses this variable.\n \"is_incremental\": lambda: False,\n \"this\": ThisEmulator(),\n }\n return dbt_builtins\n\n @classmethod\n def _crawl_tree(cls, tree, variable_names, raw):\n \"\"\"Crawl the tree looking for occurrences of the undeclared values.\"\"\"\n # First iterate through children\n for elem in tree.iter_child_nodes():\n yield from cls._crawl_tree(elem, variable_names, raw)\n # Then assess self\n if isinstance(tree, jinja2.nodes.Name) and tree.name in variable_names:\n line_no = tree.lineno\n line = raw.split(\"\\n\")[line_no - 1]\n pos = line.index(tree.name) + 1\n yield SQLTemplaterError(\n f\"Undefined jinja template variable: {tree.name!r}\",\n line_no=line_no,\n line_pos=pos,\n )\n\n def _get_jinja_env(self, config=None):\n \"\"\"Get a properly configured jinja environment.\"\"\"\n # We explicitly want to preserve newlines.\n macros_path = self._get_macros_path(config)\n return SandboxedEnvironment(\n keep_trailing_newline=True,\n # The do extension allows the \"do\" directive\n autoescape=False,\n extensions=[\"jinja2.ext.do\"],\n loader=FileSystemLoader(macros_path) if macros_path else None,\n )\n\n def _get_macros_path(self, config: FluffConfig) -> Optional[List[str]]:\n if config:\n macros_path = config.get_section(\n (self.templater_selector, self.name, \"load_macros_from_path\")\n )\n if macros_path:\n result = [s.strip() for s in macros_path.split(\",\") if s.strip()]\n if result:\n return result\n return None\n\n def get_context(self, fname=None, config=None, **kw) -> Dict:\n \"\"\"Get the templating context from the config.\"\"\"\n # Load the context\n env = kw.pop(\"env\")\n live_context = super().get_context(fname=fname, config=config)\n # Apply dbt builtin functions if we're allowed.\n if config:\n apply_dbt_builtins = config.get_section(\n (self.templater_selector, self.name, \"apply_dbt_builtins\")\n )\n if apply_dbt_builtins:\n # This feels a bit wrong defining these here, they should probably\n # be configurable somewhere sensible. 
But for now they're not.\n # TODO: Come up with a better solution.\n dbt_builtins = self._generate_dbt_builtins()\n for name in dbt_builtins:\n # Only apply if it hasn't already been set at this stage.\n if name not in live_context:\n live_context[name] = dbt_builtins[name]\n\n # Load macros from path (if applicable)\n if config:\n macros_path = self._get_macros_path(config)\n if macros_path:\n live_context.update(\n self._extract_macros_from_path(\n macros_path, env=env, ctx=live_context\n )\n )\n\n # Load config macros, these will take precedence over macros from the path\n live_context.update(\n self._extract_macros_from_config(\n config=config, env=env, ctx=live_context\n )\n )\n\n live_context.update(self._extract_libraries_from_config(config=config))\n return live_context\n\n def template_builder(\n self, fname=None, config=None\n ) -> Tuple[Environment, dict, Callable[[str], Template]]:\n \"\"\"Builds and returns objects needed to create and run templates.\"\"\"\n # Load the context\n env = self._get_jinja_env(config)\n live_context = self.get_context(fname=fname, config=config, env=env)\n\n def make_template(in_str):\n \"\"\"Used by JinjaTracer to instantiate templates.\n\n This function is a closure capturing internal state from process().\n Note that creating templates involves quite a bit of state known to\n _this_ function but not to JinjaTracer.\n\n https://www.programiz.com/python-programming/closure\n \"\"\"\n return env.from_string(in_str, globals=live_context)\n\n return env, live_context, make_template\n\n def process(\n self, *, in_str: str, fname: str, config=None, formatter=None\n ) -> Tuple[Optional[TemplatedFile], list]:\n \"\"\"Process a string and return the new string.\n\n Note that the arguments are enforced as keywords\n because Templaters can have differences in their\n `process` method signature.\n A Templater that only supports reading from a file\n would need the following signature:\n process(*, fname, in_str=None, config=None)\n (arguments are swapped)\n\n Args:\n in_str (:obj:`str`): The input string.\n fname (:obj:`str`, optional): The filename of this string. This is\n mostly for loading config files at runtime.\n config (:obj:`FluffConfig`): A specific config to use for this\n templating operation. Only necessary for some templaters.\n formatter (:obj:`CallbackFormatter`): Optional object for output.\n\n \"\"\"\n if not config: # pragma: no cover\n raise ValueError(\n \"For the jinja templater, the `process()` method requires a config \"\n \"object.\"\n )\n\n env, live_context, make_template = self.template_builder(\n fname=fname, config=config\n )\n\n # Load the template, passing the global context.\n try:\n template = make_template(in_str)\n except TemplateSyntaxError as err:\n # Something in the template didn't parse, return the original\n # and a violation around what happened.\n return (\n TemplatedFile(source_str=in_str, fname=fname),\n [\n SQLTemplaterError(\n f\"Failure to parse jinja template: {err}.\",\n line_no=err.lineno,\n )\n ],\n )\n\n violations = []\n\n # Attempt to identify any undeclared variables. 
The majority\n # will be found during the _crawl_tree step rather than this\n # first Exception which serves only to catch catastrophic errors.\n try:\n syntax_tree = env.parse(in_str)\n undefined_variables = meta.find_undeclared_variables(syntax_tree)\n except Exception as err: # pragma: no cover\n # TODO: Add a url here so people can get more help.\n raise SQLTemplaterError(f\"Failure in identifying Jinja variables: {err}.\")\n\n # Get rid of any that *are* actually defined.\n for val in live_context:\n if val in undefined_variables:\n undefined_variables.remove(val)\n\n if undefined_variables:\n # Lets go through and find out where they are:\n for val in self._crawl_tree(syntax_tree, undefined_variables, in_str):\n violations.append(val)\n\n try:\n # NB: Passing no context. Everything is loaded when the template is loaded.\n out_str = template.render()\n # Slice the file once rendered.\n raw_sliced, sliced_file, out_str = self.slice_file(\n in_str,\n out_str,\n config=config,\n make_template=make_template,\n )\n return (\n TemplatedFile(\n source_str=in_str,\n templated_str=out_str,\n fname=fname,\n sliced_file=sliced_file,\n raw_sliced=raw_sliced,\n ),\n violations,\n )\n except (TemplateError, TypeError) as err:\n templater_logger.info(\"Unrecoverable Jinja Error: %s\", err)\n violations.append(\n SQLTemplaterError(\n (\n \"Unrecoverable failure in Jinja templating: {}. Have you \"\n \"configured your variables? \"\n \"https://docs.sqlfluff.com/en/latest/configuration.html\"\n ).format(err),\n # We don't have actual line number information, but specify\n # line 1 so users can ignore with \"noqa\" if they want. (The\n # default is line 0, which can't be ignored because it's not\n # a valid line number.)\n line_no=1,\n line_pos=1,\n )\n )\n return None, violations\n\n def slice_file(\n self, raw_str: str, templated_str: str, config=None, **kwargs\n ) -> Tuple[List[RawFileSlice], List[TemplatedFileSlice], str]:\n \"\"\"Slice the file to determine regions where we can fix.\"\"\"\n # The JinjaTracer slicing algorithm is more robust, but it requires\n # us to create and render a second template (not raw_str) and is only\n # enabled if the caller passes a make_template() function.\n make_template = kwargs.pop(\"make_template\", None)\n if make_template is None:\n # make_template() was not provided. 
Use the base class\n # implementation instead.\n return super().slice_file(raw_str, templated_str, config, **kwargs)\n\n templater_logger.info(\"Slicing File Template\")\n templater_logger.debug(\" Raw String: %r\", raw_str)\n templater_logger.debug(\" Templated String: %r\", templated_str)\n tracer = JinjaTracer(raw_str, self._get_jinja_env(), make_template)\n trace = tracer.trace()\n return trace.raw_sliced, trace.sliced_file, trace.templated_str\n","repo_name":"kevingao-twg/dbt-learn","sub_path":"work_env/Lib/site-packages/sqlfluff/core/templaters/jinja.py","file_name":"jinja.py","file_ext":"py","file_size_in_byte":16742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25053748758","text":"n = int(input())\narr = map(int, input().split())\n\nnums = [1]\n\nfor i in arr:\n nums.append(i - nums[-1])\n\nn1 = [nums[i] for i in range(n) if i % 2 == 0]\nn2 = [nums[i] for i in range(n) if i % 2 == 1]\n\nans = min(n2) - max(0, 1 - min(n1))\n\nprint(max(ans, 0))\n","repo_name":"suoeh/cp-","sub_path":"dmopc21c9p1.py","file_name":"dmopc21c9p1.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43099100055","text":"import random\nimport numpy as np\nimport smtplib\nimport email\nimport time \nimport os \n\ndef number_of_players_func():\n number_of_players = int(input(\"Enter Number of players: \"))\n if number_of_players <= 2 or number_of_players >= 12: # minimum 3 players and max 12 players\n number_of_players_func()\n else:\n print(f\"{str(number_of_players)} player(s) joined the game!\")\n print(\" \")\n print(\" \")\n return number_of_players\n\n\ndef player_names(x_players): # x_players is number of players from prev func\n player_list = []\n for i in range(0, x_players):\n name = input(f\"Enter player {str(i+1)}'s name: \").capitalize()\n player_list.append(name)\n return player_list\n\n\ndef characters(x_players):\n character_list = [\"Doppelgänger\",\n \"Werewolf\",\n \"Werewolf\",\n \"Minion\",\n \"Mason\",\n \"Mason\",\n \"Seer\",\n \"Robber\",\n \"Troublemaker\",\n \"Drunk\",\n \"Insomniac\",\n \"Hunter\",\n \"Tanner\",\n \"Villager\",\n \"Villager\",\n \"Villager\"]\n number_of_characters = x_players+4 # the number of players + 4 additional characters\n choosen_characters_for_game = []\n character_num = int(input(f\"\"\"\nChoose {str(number_of_characters)} different characters. Hit return after each choice!\n\n1. Doppelgänger\n2. Werewolf\n3. Werewolf\n4. Minion\n5. Mason\n6. Mason\n7. Seer\n8. Robber\n9. Troublemaker\n10. Drunk\n11. Insomniac\n12. Hunter\n13. Tanner\n14. Villager\n15. Villager\n16. 
Villager\n\nFirst character: \"\"\"))\n\n    while character_num > 16 or character_num < 1: # Make sure the first character is a valid number\n        print(\" \")\n        print(\"Invalid character!\")\n        character_num = int(input(\"Choose another character: \"))\n\n    choosen_characters_for_game.append(character_num)\n    print(\" \")\n\n    for i in range(0, number_of_characters-1): # Choose the remaining characters, rejecting repeats and invalid numbers (outside 1-16)\n        character_num = int(input(\"Next character: \"))\n        while character_num in choosen_characters_for_game or character_num > 16 or character_num < 1:\n            print(\" \")\n            print(\"Invalid character or character is already chosen!\")\n            character_num = int(input(\"Choose another character: \"))\n\n        choosen_characters_for_game.append(character_num)\n        print(\" \")\n\n    choosen_characters_for_game.sort()\n    choosen_characters_for_game = [x-1 for x in choosen_characters_for_game]\n    all_chosen_characters = [character_list[i] for i in choosen_characters_for_game] # characters chosen to play with\n    random_character = random.sample(range(0, number_of_characters), x_players)\n    playing_characters = [all_chosen_characters[i] for i in random_character]\n\n    hidden_characters_num = [] # find the characters left hidden (not dealt to players)\n    for i in range(0, len(all_chosen_characters)):\n        if i not in random_character:\n            hidden_characters_num.append(i)\n\n    hidden_characters_num.sort()\n    hidden_characters = [all_chosen_characters[i] for i in hidden_characters_num]\n\n    return all_chosen_characters, playing_characters, hidden_characters\n\n\n\ndef send_mail(mail_list, character):\n    sender_email = \"dannydasan@hotmail.com\" # enter the address to send from here\n    password = input(str(\"Please enter password:\")) # enter the password here\n\n    for i in range(len(mail_list)):\n\n        msg = email.message_from_string('You are a:' + ' ' + character[i])\n        msg['From'] = sender_email\n        msg['To'] = mail_list[i]\n        msg['Subject'] = \"Werewolf game\"\n\n        server = smtplib.SMTP('smtp.live.com', 587)\n        server.ehlo()\n        server.starttls()\n        server.ehlo()\n        server.login(sender_email, password)\n        print(\"Login success\")\n        server.sendmail(sender_email, mail_list[i], msg.as_string())\n        print(\"Email has been sent to:\", mail_list[i])\n\n    print('The characters have been sent to your mails, please check them')\n    server.quit()\n\ndef reveal_sys(players, character): \n    mail_list = []\n    reveal = input(str(\"Do you want to reveal the characters on the screen or by mail? (screen/mail):\"))\n\n    if reveal == \"screen\": \n        for i in range(0, len(players)): \n            print(f\"Show the screen to {players[i]}\")\n            time.sleep(2)\n            #print(f\"{players[i], are you ready? [y/n]?}\")\n            ans_temp = input(str(f\"{players[i]}, are you ready? (y/n)?:\"))\n\n            while ans_temp == \"n\": \n                ans_temp = input(str(f\"{players[i]}, are you ready now? 
(y/n)?:\"))\n \n if ans_temp == \"y\":\n \n print(f\"{players[i]}, you are {character[i]}\")\n time.sleep(2)\n os.system('clear')\n \n \n elif reveal == \"mail\":\n for i in range(0,len(players)):\n mail_temp = input(str(f\"{players[i]}, enter your mail:\"))\n mail_list.append(mail_temp)\n \n send_mail(mail_list, character)\n \n #return mail_list\n \n else: \n print('Invalid syntax, please try again')\n reveal_sys(players, character)\n \n return mail_list\n \n\n","repo_name":"kevincasipillai/WerewolfDK","sub_path":"Utility.py","file_name":"Utility.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18149468249","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\nimport sys\ntotal_topics_fetched=0\ntotal_question_fetched=0\n\nerror_question=dict()\n\nch=input(\"do you want to enter sanfoundry site manually (yes/y/no/n) ?\").lower()\nfetchall=input(\"do you want to fetch all next topics to this (yes/y/no/n) ?\").lower()#fetch specific page or list of pages\n\nif(ch==\"yes\" or ch=='y'):\n next_link=input(\"enter site \").strip()\nelse:\n next_link=\"https://www.sanfoundry.com/computer-networks-mcqs-basics/\" #sample site added\n\ntry:\n if('mcq.txt' in os.listdir()):\n print(\"already exists file with filename mcq.txt. Try renaming/deleting that file first\")\n sys.exit()\n f=open('mcq.txt','wb')\n \n while True:\n data=requests.get(next_link)\n soup=BeautifulSoup(data.content,'html5lib')\n topic=soup.find('h1',attrs={'class':'entry-title'})\n if(topic is None):\n print(\"something went wrong\")\n break\n heading=topic.text\n print(\"Topic:\",heading)\n #print(soup.prettify())\n list_of_div=soup.find('div',attrs={'class':'entry-content'})\n #print(len(list_of_div))\n #questions\n questions=list_of_div.findAll('p')[1:-3]\n #print(questions)\n questions_total=len(questions)\n print(\"total q \",questions_total)\n '''for q in questions:\n print(q.text)'''\n\n list_of_ans=soup.findAll('span',attrs={'class':'collapseomatic'})\n\n #answers\n list_of_ans=soup.findAll('div',attrs={'class':'collapseomatic_content'})[:questions_total]\n\n '''for i in list_of_ans:\n print(i)'''\n \n f.write(\"\\n\\n\\t\\t\".encode()+heading.encode()+'\\n\\n'.encode())\n print(heading)\n for i in range(questions_total):\n total_question_fetched+=1\n currQuestion=questions[i].text\n currAns=list_of_ans[i].text\n #print(currQuestion,currAns)\n f.write(currQuestion.encode()+'\\n'.encode()+currAns.encode()+'\\n\\n'.encode())\n #print(currQuestion)\n #print(currAns)\n\n #increment topic fetched counter\n total_topics_fetched+=1\n if(fetchall=='no' or fetchall=='n'):\n break\n links=soup.findAll('div',attrs={'class':'sf-nav-bottom'})\n next_link_a=links[1]\n #print(next_link)\n link_tag=next_link_a.find('a')\n #print(link_tag)\n if(link_tag is None):\n print(\"End\")\n break\n if('Next' in link_tag.text):#next link found\n next_link=link_tag['href']\n print(next_link)\n #break\n else:\n print(\"End\")\n break\n\n \n print(f'Total topics fetched are {total_topics_fetched} with total questions {total_question_fetched}')\n\nexcept requests.exceptions.ConnectionError:\n print(\"Network Error\")\n","repo_name":"pratikbarve09/sanfoundry-mcq-fetcher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"25668034560","text":"# This file access each document in each category 
and\n# make a preprocessed text file which contain all the term each document have.\nimport pandas as pd\n\n\ndef make_vector(needStopword):\n\n labels = []\n stories = []\n\n if needStopword=='Yes':\n file_name = 'data/input_withStop.xlsx'\n else:\n file_name = 'data/input_withoutStop.xlsx'\n\n news_data_frame = pd.read_excel(io=file_name, sheetname='sheet 1')\n labels = news_data_frame['Label'].tolist()\n stories = news_data_frame['News'].tolist()\n\n return stories, labels\n","repo_name":"pankaj512/Document-Clustering-and-title-Generation","sub_path":"classification/processing/vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"8674085904","text":"from TravelAgent import TravelAgent\n\n\ndef main():\n trav_agent = TravelAgent(\"Jao Pakhi\")\n trip_info1 = trav_agent.set_trip_one_city_one_way(\n 'HEA', 'WKM', \"02,05,2025\")\n print(f\"Price :{trip_info1.cost} \\nAirCraft : {trip_info1.air_craft}\")\n cities = ['Dha', \"nyk\", 'agg', 'europe']\n trav_agent.set_trip_multi_city_flexible(cities, '3/2/2021')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ruhulaminjr/Phitron-Cse-Fundamental-Course","sub_path":"python/oop/flight-scheduler/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19463799726","text":"import urllib.request\ndef get_bet_html(url):\n request = urllib.request.Request(url)\n request.add_header(\"User-Agent\",\n \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36\")\n response = urllib.request.urlopen(request)\n html = response.read()\n return html\n\ndef find_tag(url):\n html = get_bet_html(url)\n html_encoded = html.decode('utf-8')\n with open('bet_page', 'w', encoding='utf-8') as f:\n f.write(html_encoded)\n\nif __name__ == '__main__':\n url = \"https://www.bet365.com/#/AS/B1/\"\n find_tag(url)","repo_name":"waitingFat/pythonNotes","sub_path":"getimagelist/getbet365footpage.py","file_name":"getbet365footpage.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"28226003804","text":"import numpy as np\nimport re\nimport csv\nimport glob\nclass AG_Data(object):\n \"\"\"\n Class to handle loading and processing of raw datasets.\n \"\"\"\n def __init__(self, data_source,\n alphabet=\"abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\\\"/\\\\|_@#$%^&*~`+-=<>()[]{}\",\n input_size=1014, num_of_classes=4):\n \"\"\"\n Initialization of a Data object.\n\n Args:\n data_source (str): Raw data file path\n alphabet (str): Alphabet of characters to index\n input_size (int): Size of input features\n num_of_classes (int): Number of classes in data\n \"\"\"\n self.alphabet = alphabet\n self.alphabet_size = len(self.alphabet)\n self.dict = {} # Maps each character to an integer\n self.no_of_classes = num_of_classes\n for idx, char in enumerate(self.alphabet):\n self.dict[char] = idx + 1\n self.length = input_size\n self.data_source = data_source\n\n def load_data(self):\n \"\"\"\n Load raw data from the source file into data variable.\n\n Returns: None\n\n \"\"\"\n data = []\n with open(self.data_source, 'r', encoding='utf-8') as f:\n rdr = csv.reader(f, delimiter=',', quotechar='\"')\n for row in rdr:\n txt = \"\"\n for s in row[1:]:\n txt = txt + \" \" + 
re.sub(r\"^\\s*(.*?)\\s*$\", r\"\\1\", s).replace(\"\\\\n\", \"\\n\")\n                data.append((int(row[0]), txt)) # format: (label, text)\n        self.data = np.array(data)\n        print(\"Data loaded from \" + self.data_source)\n\n    def get_all_data(self):\n        \"\"\"\n        Return all loaded data from data variable.\n\n        Returns:\n            (np.ndarray) Data transformed from raw to indexed form with associated one-hot label.\n\n        \"\"\"\n        data_size = len(self.data)\n        start_index = 0\n        end_index = data_size\n        batch_texts = self.data[start_index:end_index]\n        batch_indices = []\n        one_hot = np.eye(self.no_of_classes, dtype='int64')\n        classes = []\n        for c, s in batch_texts:\n            batch_indices.append(self.str_to_indexes(s))\n            c = int(c) - 1\n            classes.append(one_hot[c])\n        return np.asarray(batch_indices, dtype='int64'), np.asarray(classes)\n\n    def str_to_indexes(self, s):\n        \"\"\"\n        Convert a string to character indexes based on character dictionary.\n\n        Args:\n            s (str): String to be converted to indexes\n\n        Returns:\n            str2idx (np.ndarray): Indexes of characters in s\n\n        \"\"\"\n        s = s.lower()\n        max_length = min(len(s), self.length)\n        str2idx = np.zeros(self.length, dtype='int64')\n        for i in range(1, max_length + 1):\n            c = s[-i]\n            if c in self.dict:\n                str2idx[i - 1] = self.dict[c]\n        return str2idx\n\n\ndef read_IMDB_dataset():\n\tpath_IMDB = '../aclImdb/train/'\n\trawDatas = []\n\tlabels = []\n\t# print('we are here')\n\tposFileNames = glob.glob(path_IMDB+'pos/*.txt')\n\tnegFileNames = glob.glob(path_IMDB+'neg/*.txt')\n\tfor fileIndex in range(len(posFileNames)):\n\t\twith open(posFileNames[fileIndex], 'r', encoding=\"utf-8\") as file:\n\t\t\tdata = file.read()\n\t\t\t# the square brackets are important!\n\t\t\trawDatas += [data]\n\t\t\tlabels += [0]\n\tfor fileIndex in range(len(negFileNames)):\n\t\twith open(negFileNames[fileIndex], 'r', encoding=\"utf-8\") as file:\n\t\t\tdata = file.read()\n\t\t\trawDatas += [data]\n\t\t\tlabels += [1]\n\n\treturn rawDatas, labels\n\t# print('Maximum length of the string is: ', MAX_SEQUENCE_LENGTH)\n\ndef read_AG_dataset():\n\ttraining_data = AG_Data(data_source = \"../data/ag_news_csv/train.csv\",\n\t\t\talphabet = \"abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\\\"/\\\\|_@#$%^&*~`+-=<>()[]{}\",\n\t\t\tinput_size = 1014,\n\t\t\tnum_of_classes = 4)\n\n\ttraining_data.load_data()\n\n\tdata_set, label_set = training_data.get_all_data()","repo_name":"HaolongEdward/textClassifier","sub_path":"myModel/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3547817467","text":"import os, sys\nimport subprocess, shlex\nimport h5py\nimport multiprocessing as mp\nimport numpy as np\n\ndef read_h5(args):\n    h5_path, delete = args[:]\n    mode = 'r'\n    with h5py.File(h5_path, mode=mode) as f:\n        num_samples = int(len(list(f.items()))//2)\n    return (num_samples, h5_path)\n\n\ndef main(file_dir, delete=False):\n    files = os.listdir(file_dir)\n    paths = [os.path.join(file_dir, path) for path in files]\n    processes = min(mp.cpu_count(), len(files))\n    pool = mp.Pool(processes=processes)\n    tasks = [(path, delete) for path in paths]\n    chunk = max(int(np.floor(len(files) / processes)), 1)  # np.int was removed from NumPy; use the builtin\n    jobs = pool.imap(read_h5, tasks, chunksize=chunk)\n    tally = [j for j in jobs]\n    pool.close()\n    tally = np.array(tally, dtype=[('num_samples', 'i4'), ('filepath', np.dtype('U100'))])\n    np.save('tally_%s.npy' % file_dir.split('/')[-1], tally)\n    def get_paths(mode='_train_'):\n        mask = 
np.array([itm.find(mode) for itm in tally['filepath']])\n        mask[mask >= 0] = 1\n        mask[mask < 0] = 0\n        mask = mask.astype(bool)  # np.bool was removed from NumPy; use the builtin\n        mode_files = tally[mask]\n        return mode_files\n    train_files = get_paths()\n    test_files = get_paths(mode='_test_')\n    dev_files = get_paths(mode='_dev_')\n    for name, arr_files in zip(['train', 'test', 'dev'], [train_files, test_files, dev_files]): \n        if arr_files['num_samples'].size != 0:\n            print(\"stats of %s samples (total, min, max, mean): %d, %2.2f, %2.2f, %2.2f\" %(name, arr_files['num_samples'].sum(), \n                arr_files['num_samples'].min(), arr_files['num_samples'].max(), arr_files['num_samples'].mean()))\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) == 3:\n        main(sys.argv[-2], delete=bool(int(sys.argv[-1])))\n    else:\n        main(sys.argv[-1])\n    print('DONE')\n","repo_name":"nlaanait/namsa","sub_path":"scripts/summit_scripts/tally_h5.py","file_name":"tally_h5.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23176739478","text":"'''\nCreated on Oct 23, 2015\n\n@author: MUKUND\n'''\nfrom PyQt5.QtCore import (QFile, QFileInfo, QPoint, QSettings, QSignalMapper, QSize, QTextStream, Qt, )\nfrom PyQt5.QtWidgets import (QAction, QApplication, QFileDialog, QMainWindow, QMdiArea, QMessageBox, QTextEdit, QWidget, )\nfrom PyQt5.QtGui import (QIcon, QKeySequence, QFont, QColor)\nfrom PyQt5.Qsci import (QsciScintilla, QsciLexerPython)\nfrom PyQt5 import QtCore, QtGui, Qsci, QtWidgets\nfrom PyQt5.uic import loadUi\nimport os\n\nclass PyOneScript(QsciScintilla):\n    sequenceNumber = 1\n    ARROW_MARKER_NUM = 8\n\n    def __init__(self):\n        super(PyOneScript, self).__init__()\n\n        self.setAttribute(Qt.WA_DeleteOnClose)\n        self.isUntitled = True\n\n        font = QFont()\n        font.setFamily('Courier')\n        font.setFixedPitch(True)\n        font.setPointSize(10)\n        self.setFont(font)\n        self.setMarginsFont(font)\n\n        self.setMarginSensitivity(1, True)\n        self.marginClicked.connect(self.on_margin_clicked)\n        self.markerDefine(QsciScintilla.RightArrow, self.ARROW_MARKER_NUM)\n        self.setMarkerBackgroundColor(QColor(\"#ee1111\"), self.ARROW_MARKER_NUM)\n        self.setBraceMatching(QsciScintilla.SloppyBraceMatch)\n        self.setEolMode(Qsci.QsciScintilla.EolUnix)\n        self.setAutoCompletionSource(Qsci.QsciScintilla.AcsAll)\n        self.setAutoCompletionThreshold(1)\n        self.setAutoIndent(True)\n        self.setIndentationsUseTabs(True)\n        self.setTabWidth(4)\n        self.setAutoCompletionFillupsEnabled(True)\n        self.setBraceMatching(Qsci.QsciScintilla.StrictBraceMatch)\n        self.setMarginLineNumbers(1, 1)\n        self.setMarginWidth(1, 35)\n        self.setUtf8(True)\n        self.setEolVisibility(False)\n        self.setMinimumSize(600, 450)\n        self.setCaretLineVisible(True)\n        self.setCaretLineBackgroundColor(QColor(\"#ffe4e4\"))\n\n        self.lexer = QsciLexerPython()\n        self.lexer.setDefaultFont(font)\n\n        self.setLexer(self.lexer)\n        self.sense = Qsci.QsciAPIs(self.lexer)\n        self.sense.prepare()\n\n        self.SendScintilla(QsciScintilla.SCI_SETHSCROLLBAR, 0)\n\n    def on_margin_clicked(self, nmargin, nline, modifiers):\n        # Toggle marker for the line the margin was clicked on\n        if self.markersAtLine(nline) != 0:\n            self.markerDelete(nline, self.ARROW_MARKER_NUM)\n        else:\n            self.markerAdd(nline, self.ARROW_MARKER_NUM)\n\n    def newFile(self):\n        self.isUntitled = True\n        self.curFile = \"PyOne%d.py\" % PyOneScript.sequenceNumber\n        PyOneScript.sequenceNumber += 1\n        self.setWindowTitle(self.curFile + '[*]')\n\n        self.modificationChanged.connect(self.documentWasModified)\n\n    def loadFile(self, 
fileName):\n        file = QFile(fileName)\n        if not file.open(QFile.ReadOnly | QFile.Text):\n            QMessageBox.warning(self, \"PyOne\",\n                    \"Cannot read file %s:\\n%s.\" % (fileName, file.errorString()))\n            return False\n\n        instr = QTextStream(file)\n        QApplication.setOverrideCursor(Qt.WaitCursor)\n        self.setText(instr.readAll())\n        QApplication.restoreOverrideCursor()\n\n        self.setCurrentFile(fileName)\n\n        self.modificationChanged.connect(self.documentWasModified)\n\n        return True\n\n    def save(self):\n        if self.isUntitled:\n            return self.saveAs()\n        else:\n            return self.saveFile(self.curFile)\n\n    def saveAs(self):\n        scpt = self.scriptsPath\n        scpt = scpt if os.path.exists(scpt) else 'C:'\n        options = QFileDialog.Options()\n        #options |= QFileDialog.DontUseNativeDialog\n        fileName, _ = QFileDialog.getSaveFileName(self, 'Save python script file...', scpt, 'Python(*.py);;All Files (*)', options=options)\n        #fileName, _ = QFileDialog.getSaveFileName(self, \"Save As\", self.curFile)\n        if not fileName:\n            return False\n\n        return self.saveFile(fileName)\n\n    def saveFile(self, fileName):\n        file = QFile(fileName)\n\n        if not file.open(QFile.WriteOnly | QFile.Text):\n            QMessageBox.warning(self, \"PyOne\",\n                    \"Cannot write file %s:\\n%s.\" % (fileName, file.errorString()))\n            return False\n\n        outstr = QTextStream(file)\n        QApplication.setOverrideCursor(Qt.WaitCursor)\n        outstr << self.text()\n        QApplication.restoreOverrideCursor()\n\n        self.setCurrentFile(fileName)\n        return True\n\n    def userFriendlyCurrentFile(self):\n        return self.strippedName(self.curFile)\n\n    def currentFile(self):\n        return self.curFile\n\n    def closeEvent(self, event):\n        if self.maybeSave():\n            event.accept()\n        else:\n            event.ignore()\n\n    def documentWasModified(self):\n        self.setWindowModified(self.isModified())\n\n    def maybeSave(self):\n        if self.isModified():\n            ret = QMessageBox.warning(self, \"PyOne\",\n                    \"'%s' has been modified.\\nDo you want to save your \"\n                    \"changes?\" % self.userFriendlyCurrentFile(),\n                    QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)\n\n            if ret == QMessageBox.Save:\n                return self.save()\n\n            if ret == QMessageBox.Cancel:\n                return False\n\n        return True\n\n    def setCurrentFile(self, fileName):\n        self.curFile = QFileInfo(fileName).canonicalFilePath()\n        self.isUntitled = False\n        self.setModified(False)\n        self.setWindowModified(False)\n        self.setWindowTitle(self.userFriendlyCurrentFile() + \"[*]\")\n\n    def strippedName(self, fullFileName):\n        return QFileInfo(fullFileName).fileName()","repo_name":"kaymatrix/our-py-lib","sub_path":"PyOneApplication/src/core/pyoneScriptWindow.py","file_name":"pyoneScriptWindow.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"1737861636","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/3/23 9:42\n# @Author : CBkozou\n# @File : app.py\n# @Software : PyCharm\n\nfrom flask import Flask,render_template\n\nimport hotFind\nimport eventFind\nimport func\n\napp=Flask(__name__)\n\nyqlist=func.getYQList()\nlylist=func.getLYList()\nsjlist=func.getSJList()\nqglist=func.getQGList()\nidlist=func.getIDList()\n\npositiveCountList=[]\nnegativeCountList=[]\ngroupByTime = func.groupByElement(sjlist)  # group opinion items that share the same date\n# store every crawl date\nTimes=[]\nfor i in range(len(groupByTime)):\n    Times.append(groupByTime[i][0])\n# replace each date entry with its sentiment label\nid=0\nfor yuqing in groupByTime:\n    for j in range(len(yuqing)):\n        yuqing[j]=qglist[id]\n        id+=1\nfor i in groupByTime:\n    positiveCountList.append(i.count(\"positive\"))\n    
negativeCountList.append(i.count(\"negative\"))\n\n\n\nyqCount=len(yqlist)  # total number of crawled opinion items\n# count the items from each source\nbaiduCount=lylist.count(\"百度\")\nweiboCount=lylist.count(\"微博\")\n\n# count the positive and negative items in this hot topic\npositiveCount = qglist.count(\"positive\")\nnegativeCount = qglist.count(\"negative\")\n\n# build the list of dates on which crawls were run\nsjlistCut=[]  # timestamps truncated to the date part only\nfor yuqing in range(len(sjlist)):\n    temp=sjlist[yuqing]\n    sjlistCut.append(temp[0:10])\nsjlistCut=sorted(set(sjlistCut),key = sjlistCut.index)  # keep one of each date, preserving order\ntimeCount=len(sjlistCut)\n\n# build the dict list for all opinion items\nyqAllDataDic=[]\nfor i in range(len(yqlist)):\n    temp={'id':idlist[i],'yq':yqlist[i],'ly':lylist[i],'sj':sjlist[i],'qg':qglist[i]}\n    yqAllDataDic.append(temp)\n\n# build the dict list for positive opinion items\nyqPosiDataDic=[]\nfor i in range(len(yqlist)):\n    if qglist[i]=='positive':\n        temp={'id':idlist[i],'yq':yqlist[i],'ly':lylist[i],'sj':sjlist[i],'qg':qglist[i]}\n        yqPosiDataDic.append(temp)\n\n# build the dict list for negative opinion items\nyqNegaDataDic=[]\nfor i in range(len(yqlist)):\n    if qglist[i]=='negative':\n        temp={'id':idlist[i],'yq':yqlist[i],'ly':lylist[i],'sj':sjlist[i],'qg':qglist[i]}\n        yqNegaDataDic.append(temp)\n\n# build the dict list for opinion events\neventsDic=[]\nfor i in range(len(sjlistCut)):\n    temp={'data':sjlistCut[i],'keywords':','.join(eventFind.eventKeywordList[i]),'events':','.join(eventFind.eventsList[i]),\n          'id1':r'#collapseCard%d'%i,'id2':r'collapseCard%d'%i}\n    eventsDic.append(temp)\n\n@app.route('/')\ndef index():\n    return render_template(\"index.html\",yqCount=yqCount,\n                           positiveCount=positiveCount,negativeCount=negativeCount,\n                           hotCount=hotFind.hotCount,timeCount=timeCount,\n                           sjlistCut=sjlistCut,\n                           positiveCountList=positiveCountList,negativeCountList=negativeCountList,\n                           eventKeywordCount=eventFind.eventKeywordCount)\n\n@app.route('/hots')\ndef hots():\n    return render_template(\"hots.html\",countList=hotFind.countList,positiveCountList=hotFind.positiveCountList,\n                           negativeCountList=hotFind.negativeCountList,warningLevelList=hotFind.warningLevelList,\n                           negativeRateList=hotFind.negativeRateList,\n                           hotCount=hotFind.hotCount,topicsCount=hotFind.topicsCount)\n\n@app.route('/events')\ndef events():\n    return render_template(\"events.html\",eventKeywordCount=eventFind.eventKeywordCount,\n                           eventsCount=eventFind.eventsCount,eventKeywordCountList=eventFind.eventKeywordCountList,\n                           eventsCountList=eventFind.eventsCountList,sjlistCut=sjlistCut,\n                           eventsDic=eventsDic)\n\n@app.route('/website')\ndef website():\n    return render_template(\"tables1.html\",weiboCount=weiboCount,baiduCount=baiduCount)\n\n@app.route('/yqAllData')\ndef yqAllData():\n    return render_template(\"tables2.html\",yqAllDataDic=yqAllDataDic)\n\n@app.route('/yqPosiData')\ndef yqPosiData():\n    return render_template(\"tables3.html\",yqPosiDataDic=yqPosiDataDic)\n\n@app.route('/yqNegaData')\ndef yqNegaData():\n    return render_template(\"tables4.html\",yqNegaDataDic=yqNegaDataDic)\n\nif __name__=='__main__':\n    app.run(debug=True)  # debug on: code and template changes reload automatically\n","repo_name":"CBkozou/China-Public-Opinion-Visualization-System","sub_path":"China-Public-Opinion-Visualization-System/visualization_dsz/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4028698092","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the database.\"\"\"\n\n__author__ = 'AleksNeStu'\n__copyright__ = \"The GNU General Public License v3.0\"\n\nfrom random import randrange\n\nimport pytest\n\nfrom constants import messages\nfrom model.group import Group\n\n@pytest.mark.smoke_tests\ndef 
test_db_matches_ui_for_group_list(app, db):\n    \"\"\"Check that database and UI data match on the groups page.\"\"\"\n    if app.group.count_of_groups_groups() == 0:\n        app.group.create_group_groups(Group())\n    db_groups = db.list_of_groups_db()\n    ui_groups = app.group.list_of_groups_groups()\n    # assert the comparison directly: asserting a (comparison, message) tuple\n    # is always truthy, so the original test could never fail\n    assert (sorted(db_groups, key=Group.id_or_max) ==\n            sorted(ui_groups, key=Group.id_or_max)), \\\n        messages.COMPARE_DB_VS_UI.format(db_groups, ui_groups)","repo_name":"AleksNeStu/Testing_Automation_Framework__web-app","sub_path":"framework/test/test_db_vs_ui.py","file_name":"test_db_vs_ui.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"558140699","text":"from imaplib import IMAP4, IMAP4_SSL\nfrom unittest.mock import patch\n\nimport pytest\nfrom pytest import raises\n\nfrom ggmail.account import Account\nfrom ggmail.authentication import Google, GoogleOAuth2, Outlook\nfrom ggmail.exception import AlreadyConnected, LoginFailed\n\n\nclass TestAuthenticationLogin:\n    google = Google(username=\"test@gmail.com\", password=\"secret\")\n    google_oauth2 = GoogleOAuth2(username=\"test@gmail.com\", token=\"secret\")\n    outlook = Outlook(username=\"test@gmail.com\", password=\"secret\")\n\n    @patch.object(IMAP4_SSL, \"authenticate\")\n    @patch.object(IMAP4_SSL, \"login\")\n    @pytest.mark.parametrize(\n        \"authentication\",\n        [\n            pytest.param(\n                google,\n                id=\"google\",\n            ),\n            pytest.param(google_oauth2, id=\"google_oauth2\"),\n            pytest.param(outlook, id=\"outlook\"),\n        ],\n    )\n    def test_login_success(\n        self, imap_login_mock, imap_authenticate_mock, authentication\n    ):\n        account = Account(authentication=authentication)\n        account.login()\n        assert account.is_connected is True\n\n    @patch.object(IMAP4_SSL, \"authenticate\")\n    @patch.object(IMAP4_SSL, \"login\")\n    @pytest.mark.parametrize(\n        \"authentication\",\n        [\n            pytest.param(\n                google,\n                id=\"google\",\n            ),\n            pytest.param(google_oauth2, id=\"google_oauth2\"),\n            pytest.param(outlook, id=\"outlook\"),\n        ],\n    )\n    def test_login_already_connected(\n        self, imap_login_mock, imap_authenticate_mock, authentication\n    ):\n        account = Account(authentication=authentication)\n        account.login()\n\n        with raises(AlreadyConnected):\n            account.login()\n\n    @patch.object(IMAP4_SSL, \"authenticate\")\n    @patch.object(IMAP4_SSL, \"login\")\n    @pytest.mark.parametrize(\n        \"authentication\",\n        [\n            pytest.param(\n                google,\n                id=\"google\",\n            ),\n            pytest.param(google_oauth2, id=\"google_oauth2\"),\n            pytest.param(outlook, id=\"outlook\"),\n        ],\n    )\n    def test_login_imap_error(\n        self, imap_login_mock, imap_authenticate_mock, authentication\n    ):\n        imap_login_mock.side_effect = IMAP4.error()\n        imap_authenticate_mock.side_effect = IMAP4.error()\n\n        account = Account(authentication=authentication)\n\n        with raises(LoginFailed):\n            account.login()\n","repo_name":"dylandoamaral/ggmail","sub_path":"tests/test_authentification.py","file_name":"test_authentification.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"21721344570","text":"import pickle\n\nimport numpy as np\nimport torch\n\nfrom vidar.arch.networks.layers.selffsm.implemented_dataset_cfg import IMPLEMENTED_DATASET2FRONT_CAM\nfrom vidar.geometry.pose_utils import invert_pose\nfrom vidar.utils.distributed import print0\n\n\nclass ExtGtLoader(object):\n    \"\"\"Ground-truth interface for extrinsics that is once generated and converted to .pkl\"\"\"\n\n    def 
__init__(self, dumped_path_name: str, load_camera_id: tuple, mode=\"ddad\"):\n        # load pkl file\n        f = open(dumped_path_name, 'rb')\n        self.__scenario_and_camera2extrinsics = pickle.load(f)\n        f.close()\n\n        # load fundamental information based on the dataset mode\n        print0('[INFO] Extrinsics Evaluation on : {}'.format(mode))\n        self.main_cam = IMPLEMENTED_DATASET2FRONT_CAM[mode]\n        self.camera_ids = load_camera_id\n        self.__all_scenario = self._get_all_scenarios()\n\n    def __call__(self, scenario_list: list, batch_info_giver=None):\n        \"\"\"Ground-truth extrinsic tensor of shape (B x cam x 4 x 4), sampled based on the `scenario_list`\"\"\"\n        return self._get_multi_scenario_tensor(scenario_list, batch_info_giver)\n\n    def _get_all_scenarios(self) -> list:\n        \"\"\"\n        Return scenario indices such as ['000150', '000151', '000152', ...] from the generated extrinsics' keys like\n        dict_keys([('000169', 'CAMERA_08'), ('000189', 'CAMERA_07'), ('000171', 'CAMERA_09'), ...])\n        \"\"\"\n        return sorted(list(\n            set([scenario_cam[0] for scenario_cam in list(self.__scenario_and_camera2extrinsics.keys()) if\n                 scenario_cam[1] == self.main_cam])))\n\n    def _get_multi_scenario_tensor(self, scenario_list: list, batch_info_giver: torch.Tensor):\n        \"\"\"Core implementation for __call__()\"\"\"\n        out = torch.stack([self._scenario2multicam_tensor(scenario=key) for key in scenario_list], 0)\n        return out if batch_info_giver is None else out.to(batch_info_giver.device)\n\n    def _scenario2multicam_tensor(self, scenario: str) -> torch.Tensor:\n        \"\"\"\n        Load the extrinsic tensor from the .pkl file given the scenario str, where Tensor[0] is the identity\n        and the others (Tensor[1:]) are rebased from the canonical camera, Tensor[0]\n        \"\"\"\n        from_tensor = torch.tensor(\n            # ndarray -> torch.Tensor is better since creating a tensor from a list of ndarrays is extremely slow.\n            np.array([self.__scenario_and_camera2extrinsics[(scenario, self.camera_ids[0])]])) # (1,4,4)\n        to_tensors = [\n            torch.tensor(self.__scenario_and_camera2extrinsics[(scenario, self.camera_ids[to_id])]) @ invert_pose(\n                from_tensor) for to_id in range(1, len(self.camera_ids))] # [(1,4,4), (1,4,4), ... 
]\n        return torch.concat([torch.eye(4).unsqueeze(0)] + to_tensors, 0)\n\n    @property\n    def scenarios(self):\n        return self.__all_scenario\n","repo_name":"TRI-ML/vidar","sub_path":"vidar/arch/networks/extrinsics/ExtGtWrapper.py","file_name":"ExtGtWrapper.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":404,"dataset":"github-code","pt":"31"} +{"seq_id":"34327241309","text":"import tensorflow as tf\nimport torch\n\n\nclass TFToTorchGenerator(torch.utils.data.IterableDataset):\n    def __init__(self, tf_dataset):\n        super().__init__()\n        self.tf_dataset = tf_dataset\n\n    def __iter__(self):\n        for ex in self.tf_dataset.take(-1):\n            x, y = ex\n            # separately handle benign/adversarial data formats\n            if isinstance(x, tuple):\n                x_torch = (\n                    torch.from_numpy(x[0].numpy()),\n                    torch.from_numpy(x[1].numpy()),\n                )\n            else:\n                x_torch = torch.from_numpy(x.numpy())\n\n            # separately handle tensor/object detection label formats\n            if isinstance(y, dict):\n                y_torch = {}\n                for k, v in y.items():\n                    if isinstance(v, tf.Tensor):\n                        y_torch[k] = torch.from_numpy(v.numpy())\n                    else:\n                        raise ValueError(\n                            f\"Expected all values to be of type tf.Tensor, but value at key {k} is of type {type(v)}\"\n                        )\n            else:\n                y_torch = torch.from_numpy(y.numpy())\n\n            yield x_torch, y_torch\n\n\ndef get_pytorch_data_loader(ds):\n    torch_ds = TFToTorchGenerator(ds)\n    return torch.utils.data.DataLoader(\n        torch_ds, batch_size=None, collate_fn=lambda x: x, num_workers=0\n    )\n","repo_name":"twosixlabs/armory","sub_path":"armory/data/pytorch_loader.py","file_name":"pytorch_loader.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"31"} +{"seq_id":"12269349002","text":"\nfrom flask import Flask, jsonify, request, render_template, redirect, url_for\nimport os\nimport json\n\n'''\n    This is a demo of a Library application that uses REST API calls to manage its books and borrowers.\n    It is a toy program that does things that should not be done in a real application.\n    \n    The Demo is to run on a single server that hosts this library_REST_API.py module. The app must be continuously\n    running for the Demo to work.\n    A persistent data store for the Library's Books and Patrons is simulated with a pair of globals:\n        Book Dictionary  book_dict()  indexed by bookName\n        Patron Dictionary  patron_dict()  indexed by patronName\n    \n    Persistence is simulated by using this pair of globals, hence the requirement that the python app must \n    run continuously for the Demo to work. Shut down the server and all the Books and Patrons disappear.\n    \n    Here are a few REST calls to manipulate the Books\n    \n\n    Here are a few REST calls to manipulate the Patrons\n    \n    \n    Between the Books API and the Patrons API there are NO constraint guarantees.\n    It is assumed that the Library app will take care of reconciling Books and Patrons,\n    e.g. 
there is only a single copy of each book so a single book cannot be checked out by \n    more than one patron at a time.\n    In a real physical library with physical books and patrons and a checkout line, it's not so bad to\n    have the Library be in charge of constraints since people operations are so slow that any race conditions\n    between exchanges of books and patrons can be ignored (for the demo only, not in real life).\n    \n    Here are a few commands:\n    \n    Books\n    \n    GET all books\n    http://127.0.0.1:5000/books/\n    \n    POST add new book 'Book C'\n    http://127.0.0.1:5000/books/\n    body:\n    {\n\t \"Book\": {\n\t\t \"bookName\" : \"Book C\",\n\t\t \"patronName\" : \"\"\n\t }\n    }\n    \n    PUT assign 'Book C' to 'Patron 6'\n    http://127.0.0.1:5000/books/Book C\n    body:\n    {\n\t \"Book\": {\n\t\t \"bookName\" : \"Book C\",\n\t\t \"patronName\" : \"Patron 6\"\n\t }\n    } \n    \n    DELETE remove 'Book C'\n    http://127.0.0.1:5000/books/Book C\n    \n    \n    \n    Patrons\n    \n    GET all patrons\n    http://127.0.0.1:5000/patrons/\n    \n    POST add new patron 'Patron 1'\n    http://127.0.0.1:5000/patrons/\n    body:\n    {\n\t \"Patron\": {\n\t\t \"patronName\" : \"Patron 1\",\n\t\t \"checked_out_books\" : {}\n\t }\n    } \n    \n    PATCH add 'Book A' to 'Patron 1'\n    http://127.0.0.1:5000/patrons/Patron 1\n    body:\n    { \n     \"op\": \"add\", \n     \"path\": \"checked_out_books\", \n     \"value\": \"Book A\"\n    }\n    \n    \n    \n    PATCH remove 'Book B' from 'Patron 1'\n    http://127.0.0.1:5000/patrons/Patron 1\n    body:\n    { \n     \"op\": \"remove\", \n     \"path\": \"checked_out_books\", \n     \"value\": \"Book B\"\n    } \n    \n    DELETE 'Patron 1'\n    http://127.0.0.1:5000/patrons/Patron 1 \n\n\n\nLibrary OPS NYI\n\n\n'''\napp = Flask(__name__)\napp.config['SECRET_KEY'] = os.urandom(24) # required for Flask client session writing\n\nbook_dict = {} # this will be used as a class global to simulate a persistent store\n               # for as long as the library demo service is continuously running\npatron_dict = {} # likewise for the patrons\n\n\n@app.route('/books', methods=['GET'])\n@app.route('/books/', methods=['GET'])\ndef allBooks():\n    global book_dict\n    print(\"we have entered allBooks \")\n    allBooksJSON = bookDictToJSON()\n    _response = jsonify(allBooksJSON, 200)\n    # do we need to add a JSON Content-Type header ?\n    return _response\n\n\n# DOESN'T work with POST @app.route('/books', methods=['POST'])\n@app.route('/books/', methods=['POST'])\ndef doBookPost():\n    global book_dict\n    print(\"doPost we have entered /books/ method=\"+request.method)\n    try:\n        print(\"POST now parse input JSON data data=\"+str(request.data))\n        _book = Book.fromJSON(request.data)\n        print(\"POST parse complete\")\n    except Exception:\n        print(\"got exception attempting to parse exc=\"+str(Exception))\n        return jsonify(\"could not parse Book from \"+str(request.data), 404)\n\n    bookname = _book.bookName\n    if bookname in book_dict:\n        return jsonify(\"book with bookName '\"+bookname+\"', already exists. 
ignore request\", 409)\n    book_dict[bookname] = _book\n    return jsonify(\"new book \"+bookname+\" added\", 200)\n\n\n@app.route('/books/<bookname>', methods=['GET', 'PUT', 'DELETE'])\ndef handleBook(bookname):\n    global book_dict\n    global DEBUG\n    print(\"we have entered books(bookname) bookname=\"+bookname+\", method=\"+request.method)\n    if request.method == 'GET':\n        if isinstance(bookname, str):\n            try:\n                book = book_dict[bookname]\n            except Exception:\n                return jsonify(\"\", 404)\n            if isinstance(book, Book):\n                bookJSON = Book.toJSON(book)\n                return jsonify(bookJSON, 200)\n            else:\n                return jsonify(\"\", 404)\n        else:\n            print(\"GET we have entered allBooks \")\n            allBooksJSON = bookDictToJSON()\n            _response = jsonify(allBooksJSON, 200)\n            return _response\n    elif request.method == 'PUT':\n        try:\n            print(\"PUT now parse input JSON data data=\"+str(request.data))\n            _book = Book.fromJSON(request.data)\n            print(\"PUT parse complete\")\n        except Exception:\n            print(\"PUT got exception attempting to parse exc=\"+str(Exception))\n            return jsonify(\"\", 404)\n        patronname = _book.patronName\n        try:\n            # PUT we assume that the book already exists\n            book = book_dict[bookname]\n        except KeyError:\n            print(\"PUT book \"+bookname+\" not found so now add it.\")\n            book_dict[bookname] = _book # RFC-2616 if book not there then add it to the library\n            print(\"PUT added book. complete\")\n            return jsonify(\"\", 200)\n        except Exception:\n            return jsonify(\"unable to process request\", 400)\n        book.patronName = patronname # UPDATE\n        return jsonify(\"\", 200)\n    elif request.method == 'DELETE':\n        try:\n            print(\"entered DELETE bookname='\"+bookname+\"'\")\n            if bookname in book_dict:\n                print(\"found bookname \"+bookname+\" in book_dict now do pop()\")\n                result = book_dict.pop(bookname, None)\n                print(\" pop complete popped entry is \"+str(result))\n                return jsonify(\"\", 200)\n            return jsonify(\"Book \"+bookname+\" not found\", 404)\n        except Exception:\n            return jsonify(\"Book not found\", 404)\n    else:\n        return jsonify(\"command \"+str(request.method)+\" NYI\", 404)\n\n\ndef bookDictToJSON():\n    global book_dict\n    print(\"we have entered bookDictToJSON\")\n    keys = book_dict.keys()\n    result = '{ \"bookList\" : ['\n    isFirst = True\n    for bookName in keys:\n        if not isFirst:\n            result = result + ', '\n        else:\n            pass\n        try:\n            _book = book_dict[bookName]\n            if isinstance(_book, Book):\n                result = result + _book.toJSON()\n                isFirst = False\n        except Exception:\n            pass\n    result = result + ' ] }'\n    return result\n\n\n@app.route('/patrons', methods=['GET'])\n@app.route('/patrons/', methods=['GET'])\ndef allPatrons():\n    global patron_dict\n    print(\"we have entered allPatrons \")\n    allPatronsJSON = patronDictToJSON()\n    _response = jsonify(allPatronsJSON, 200)\n    # do we need to add a JSON Content-Type header ?\n    return _response\n\n\n# DOESN'T work with POST @app.route('/patrons', methods=['POST'])\n@app.route('/patrons/', methods=['POST'])\ndef doPatronPost():\n    global patron_dict\n    print(\"doPost we have entered /patrons/ method=\"+request.method)\n    try:\n        print(\"POST now parse input JSON data data=\"+str(request.data))\n        _patron = Patron.fromJSON(request.data)\n        print(\"POST parse complete\")\n    except Exception:\n        print(\"got exception attempting to parse exc=\"+str(Exception))\n        return jsonify(\"could not parse patron from \"+str(request.data), 404)\n\n    patronname = _patron.patronName\n    if patronname in patron_dict:\n        return jsonify(\"patron with patronName '\"+patronname+\"', already exists. ignore request\", 409)\n    patron_dict[patronname] = _patron\n    print(\"POST return normal new patron added to global dictionary. 
verify read back dictionary now: \"+str(patronDictToJSON()))\n    return jsonify(\"new patron \"+patronname+\" added\", 200)\n\n\n@app.route('/patrons/<patronname>', methods=['GET', 'DELETE', 'PATCH'])\ndef handlepatron(patronname):\n    global patron_dict\n    global DEBUG\n    print(\"we have entered patrons(patronname) patronname=\"+patronname+\", method=\"+request.method)\n    if request.method == 'GET':\n        if isinstance(patronname, str):\n            try:\n                patron = patron_dict[patronname]\n            except Exception:\n                return jsonify(\"\", 404)\n            if isinstance(patron, Patron):\n                patronJSON = patron.toJSON()\n                return jsonify(patronJSON, 200)\n            else:\n                return jsonify(\"\", 404)\n        else:\n            print(\"GET we have entered allPatrons \")\n            allPatronsJSON = patronDictToJSON()\n            _response = jsonify(allPatronsJSON, 200)\n            return _response\n    elif request.method == 'PUT': # PUT currently has no use in Patron but that could change\n        try:\n            print(\"PUT now parse input JSON data data=\"+str(request.data))\n            _patron = Patron.fromJSON(request.data)\n            print(\"PUT parse complete\")\n        except Exception:\n            print(\"PUT got exception attempting to parse exc=\"+str(Exception))\n            return jsonify(\"\", 404)\n        patronname = _patron.patronName\n        try:\n            # PUT we assume that the patron already exists\n            patron = patron_dict[patronname]\n        except KeyError:\n            print(\"PUT patron \"+patronname+\" not found so now add it.\")\n            patron_dict[patronname] = _patron # RFC-2616 if patron not there then add it to the library\n            print(\"PUT added patron. complete\")\n            return jsonify(\"\", 200)\n        except Exception:\n            return jsonify(\"unable to process request\", 400)\n        patron.patronName = patronname # UPDATE\n        return jsonify(\"\", 200)\n    elif request.method == 'DELETE':\n        try:\n            print(\"entered DELETE patronname='\"+patronname+\"'\")\n            if patronname in patron_dict:\n                print(\"found patronname \"+patronname+\" in patron_dict now do pop()\")\n                result = patron_dict.pop(patronname, None)\n                print(\" pop complete popped entry is \"+str(result))\n                return jsonify(\"\", 200)\n            return jsonify(\"patron \"+patronname+\" not found\", 404)\n        except Exception:\n            return jsonify(\"patron not found\", 404)\n    elif request.method == 'PATCH':\n        try:\n            print(\"entered PATCH patronname='\"+patronname+\"'\")\n            if patronname in patron_dict:\n                print(patronname+\" is in the patron_dict\")\n                _patron = patron_dict[patronname]\n                _command_dict = json.loads(request.data)\n                print(\" PATCH the command is \"+str(_command_dict))\n                _path = _command_dict['path']\n                print(\" PATCH _path is \"+str(_path))\n                if isinstance(_path, str):\n                    if _path == 'checked_out_books':\n                        _value = _command_dict['value']\n                        print(\" PATCH _value is \"+str(_value))\n                        # _book = book_dict[_value]\n                        # if isinstance(_book, Book):\n                        _command = _command_dict['op']\n                        if isinstance(_command, str):\n                            if _command == 'add':\n                                _patron.checked_out_books[_value] = 'X'\n                                return jsonify(\"\", 200)\n                            elif _command == 'remove':\n                                _patron.checked_out_books.pop(_value, None)\n                                return jsonify(\"\", 200)\n                            else:\n                                return jsonify(\"cannot handle PATCH command '\"+str(_command)+\"'\", 400)\n                    else:\n                        return jsonify(\"cannot handle PATCH path '\"+str(_path)+\"'\", 400)\n                else:\n                    return jsonify(\"cannot handle PATCH path '\"+str(_path)+\"'\", 400)\n            else:\n                return jsonify(\"there is no patron named '\"+str(patronname)+\"'\", 400)\n        except Exception:\n            return jsonify(\"error attempting PATCH using data: \"+str(request.data), 404)\n    else:\n        return jsonify(\"command \"+str(request.method)+\" NYI\", 404)\n\n\ndef patronDictToJSON():\n    global patron_dict\n    print(\"we have 
entered patronDictToJSON\")\n    keys = patron_dict.keys()\n    print(\"patron_dict keys = \"+str(keys)+\", len=\"+str(len(keys)))\n    result = '{ \"patronList\" : ['\n    isFirst = True\n    for patronName in keys:\n        if not isFirst:\n            result = result + ', '\n        else:\n            pass\n        try:\n            _patron = patron_dict[patronName]\n            if isinstance(_patron, Patron):\n                result = result + _patron.toJSON()\n                isFirst = False\n        except Exception:\n            pass\n    result = result + ' ] }'\n    return result\n\n\nclass Book():\n\n    def __init__(self, bookname, patronname = None):\n        self.bookName = bookname\n        self.patronName = patronname\n\n    def get_name(self):\n        return self.bookName\n\n    def checkoutBook(self, patronname):\n        _patron = patron_dict[patronname]\n        if isinstance(_patron, Patron):\n            self.patronName = patronname\n            _book = _patron.book_checkout(self.bookName)\n            if isinstance(_book, Book):\n                return _patron\n        return None\n\n    def returnBook(self):\n        if isinstance(self.patronName, str):\n            _patron = patron_dict[self.patronName]\n            if isinstance(_patron, Patron):\n                _patron.book_return(self)\n        self.patronName = None # no matter what, the book returns\n\n    def get_checkedout(self):\n        return self.patronName\n\n    def isBook(self, other):\n        if isinstance(other, Book):\n            return True\n        else:\n            return False\n\n    @staticmethod\n    def fromJSON(jsonString):\n        try:\n            _book = json.loads(jsonString)\n            _bookDict = _book['Book']\n            _bookname = _bookDict['bookName']\n            _patronname = _bookDict['patronName']\n            return Book(_bookname, _patronname)\n        except Exception:\n            print(\" could not parse into Book, JSON string: \"+jsonString)\n\n    def toJSON(self):\n        return '{ \"Book\" : { \"bookName\" : \"' + str(self.bookName) + '\", \"patronName\" : \"' + str(self.patronName) + '\" } }'\n\n\nclass Patron():\n\n    def __init__(self, name=None, checked_out_books=None):\n        self.patronName = name\n        # default to an empty dict so a bare Patron() is usable\n        self.checked_out_books = checked_out_books if checked_out_books is not None else {}\n\n    def book_checkout(self, book):\n        _book = book_dict[book.bookName]\n        if isinstance(_book, Book):\n            self.checked_out_books[book.bookName] = \"X\"\n            _book.patronName = self.patronName\n            return _book\n        return None\n\n    def book_return(self, book):\n        _book = book_dict[book.bookName]\n        if isinstance(_book, Book):\n            _bookName = self.checked_out_books.pop(book.bookName, None)\n            return _bookName\n        return None\n\n    @staticmethod\n    def fromJSON(jsonString):\n        try:\n            _patron = json.loads(jsonString)\n            _patronDict = _patron['Patron']\n            print(\" got patronDict \"+str(_patronDict))\n            _patronname = _patronDict['patronName']\n            print(\" got patronName = \"+str(_patronname))\n            _temp = _patronDict['checked_out_books']\n            print(\" got checked_out_books = \"+str(_temp))\n            _checked_out_books = {}\n            print(\" here is [*_temp] \"+str([*_temp]) + \" len=\"+str(len([*_temp])))\n            # _test = { 1: 1}\n            # _testKeys = [*_test]\n            # print(\" here is [*_test] \"+str([*_test])+\" len=\"+str(len([*_test])))\n            _tempKeys = [*_temp]\n            print(\" here is _tempKeys \"+str(_tempKeys))\n            if len(_tempKeys) > 0:\n                print(\" length of keys in _temp is \"+str(len(_tempKeys)))\n                print(\" here is _tempKeys \"+str(_tempKeys))\n                for _key in _tempKeys:\n                    print(\" process first key = '\"+str(_key))\n                    # _value = _temp[_key]\n                    # if isinstance(_value, str):\n                    _checked_out_books[_key] = \"X\"\n                    print(\"_checked_out_books[\"+str(_key)+\"] = \"+str( _checked_out_books[_key]))\n            print(\" complete _checked_out_books = \"+str(_checked_out_books))\n            print(\"now construct new Patron and return\")\n            # patron = Patron(_patronname)\n            patron = Patron()\n            print(\"new Patron() complete\")\n            patron.patronName = _patronname\n            
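# copy over the reconstructed checkout map (keys are book names, values are placeholder \"X\" flags)\n            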
patron.checked_out_books = _checked_out_books\n            print(\"new Patron() toJSON() \"+str(patron.toJSON()))\n            return patron\n            # return Patron(_patronname, _checked_out_books)\n        except Exception:\n            print(\" could not parse into Patron, JSON string: \"+jsonString)\n\n    def toJSON(self):\n        _json = '{ \"Patron\" : {'\n        _json += ' \"patronName\" : \"' + str(self.patronName) + '\", '\n        _json += ' \"checked_out_books\" : {'\n        first = True\n        for _key in [*self.checked_out_books]:\n            _value = self.checked_out_books[_key]\n            if isinstance(_value, str):\n                if not first:\n                    _json += ', '\n                _json += ' \"' + str(_key) + '\" : \"' + str(_value) + '\"'\n                first = False\n        _json += '}'\n        _json += '}}'\n        return _json\n\nkey = \"Book A\"\nbookA = Book(key)\nbook_dict[key] = bookA\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n\n","repo_name":"thorick/examples","sub_path":"rootPython/library_REST_demo/sandbox/library_REST_API.py","file_name":"library_REST_API.py","file_ext":"py","file_size_in_byte":18476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10302501531","text":"\"\"\"\nImage preprocessing module.\nMain functionality:\n    * Resize images according to given input dimensions.\n    * Convert images to grayscale.\n\"\"\"\n# pylint: disable=no-member\nimport os\nimport argparse\nimport glob\nimport numpy as np\nimport cv2\nfrom skimage import img_as_float32\n\n\ndef get_images_helper(args_opt, folder_name):\n    \"\"\"Helper function for get_images.\"\"\"\n    video_file = []\n    images_ = glob.glob(folder_name + \"/\" + args_opt.img_format)\n    for img_ in images_:\n        img = cv2.imread(img_) # pylint: disable=E1101\n        if args_opt.gray_scale:\n            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # pylint: disable=E1101\n        if args_opt.resize:\n            img = cv2.resize(\n                img, (args_opt.img_width, args_opt.img_height)\n            ) # pylint: disable=E1101\n        video_file.append(img_as_float32(img))\n    return video_file\n\n\ndef get_images(args_opt):\n    \"\"\"\n    Read images from a given dir, using the specified image format.\n    Additionally it allows for resizing the images.\n    Parameters (fields of args_opt):\n        - dir: the directory containing the images.\n        - img_format: the image format, a string such as \"*.xxx\".\n        - resize: a boolean, set True if the images need resizing.\n        - img_width, img_height: the required image dimensions.\n    Returns:\n        -- frames_in_videos: a padded array of size\n            (num_videos, max_frames, height, width, no_channels)\n        -- labels: the folder names, used as labels.\n    \"\"\"\n    img_folders = [x[0] for x in os.walk(args_opt.dir)]\n    all_videos = []\n    labels = []\n    for folder_name in img_folders:\n        video_file = get_images_helper(args_opt, folder_name)\n        if len(video_file) != 0:\n            # convert video_file to numpy array\n            video_file = np.array(video_file)\n            all_videos.append(video_file)\n            labels.append(folder_name.split(\"/\")[-1])\n\n    assert len(all_videos) != 0, \"The given images folder does not contain any frames\"\n    # find the maximum length of the videos (number of frames) in a video\n    max_frames = max(len(x) for x in all_videos)\n    # find the maximum shape of the arrays\n    # max_shape = max([arr.shape for arr in all_images])\n    # create a new array with the maximum shape\n    # specify the desired shape of the padded arrays\n    frame_dim = all_videos[0][0].shape\n    frames_in_videos_dim = (len(all_videos), max_frames) + frame_dim\n    frames_in_videos = np.zeros(frames_in_videos_dim, dtype=np.float64)\n\n    # pad the 
shorter videos with zeros at the end to make them all the same length\n    for index_, video_ in enumerate(all_videos):\n        frames_in_videos[index_][0 : len(video_)] = video_\n    # save frames_in_videos to a file\n    np.savez_compressed(args_opt.output, frames_in_videos)\n    # save labels to frames_labels.txt file\n    with open(\"frames_labels.txt\", \"w\", encoding=\"utf-8\") as my_file:\n        for label in labels:\n            my_file.write(label + \"\\n\")\n    return frames_in_videos, labels\n\n\ndef create_videos(folder_path, output_path, image_format):\n    \"\"\"Create videos from the extracted frames.\"\"\"\n    img_array = []\n    # check if output path has .avi extension\n    if output_path[-4:] != \".avi\":\n        output_path = output_path + \"new_video.avi\"\n    for filename in sorted(glob.glob(folder_path + \"/*.\" + image_format)):\n        img = cv2.imread(filename) # pylint: disable=E1101\n        height, width, _ = img.shape\n        size = (width, height)\n        img_array.append(img)\n    # pylint: disable=E1101\n    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*\"DIVX\"), 10, size)\n    for _, img in enumerate(img_array):\n        out.write(img)\n    out.release()\n\n\nif __name__ == \"__main__\":\n    parser_ = argparse.ArgumentParser()\n    parser_.add_argument(\"--dir\", default=\"./images/\")\n    parser_.add_argument(\"--img_format\", default=\"*.jpg\")\n    parser_.add_argument(\"--resize\", default=False)\n    parser_.add_argument(\"--img_width\", default=224, type=int)\n    parser_.add_argument(\"--img_height\", default=224, type=int)\n    parser_.add_argument(\"--gray_scale\", default=False)\n    parser_.add_argument(\"--output\", default=\"./results/all_images.npz\")\n    args_ = parser_.parse_args()\n\n    _images, file_names = get_images(args_)\n\n    print(\"Images saved in array of array of size\", str(_images.shape))\n","repo_name":"simulamet-host/video_analytics","sub_path":"e2evideo/image_preprocessing.py","file_name":"image_preprocessing.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"25442671649","text":"import sys\r\ninput = sys.stdin.readline\r\nn, m, r = map(int, input().split())\r\n# Exclude items beyond the search range (m).\r\nitems = list(map(int, input().split()))\r\nINF = 1e9\r\nground = [[INF] * n for _ in range(n)]\r\n\r\nfor a in range(n):\r\n    for b in range(n):\r\n        if a == b:\r\n            ground[a][b] = 0\r\n\r\nfor _ in range(r):\r\n    a, b, c = map(int, input().split())\r\n    a, b = a - 1, b - 1\r\n    ground[a][b] = c\r\n    ground[b][a] = c\r\n\r\nfor k in range(n):\r\n    for a in range(n):\r\n        for b in range(n):\r\n            ground[a][b] = min(ground[a][b], ground[a][k] + ground[k][b])\r\n# print(ground)\r\nlst = []\r\nfor a in range(n):\r\n    res = 0\r\n    for b in range(n):\r\n        if ground[a][b] <= m:\r\n            res += items[b]\r\n    lst.append(res)\r\n# print(lst)\r\nprint(max(lst))\r\n","repo_name":"hany0147/algorithm","sub_path":"백준/Gold/14938. 
서강그라운드/서강그라운드.py","file_name":"서강그라운드.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17635196818","text":"'''\nDescription: four pointers\nversion: \nAuthor: Data Designer\nDate: 2021-05-28 09:26:23\nLastEditors: Data Designer\nLastEditTime: 2021-05-28 09:54:22\n'''\n#\n# @lc app=leetcode.cn id=117 lang=python3\n#\n# [117] 填充每个节点的下一个右侧节点指针 II\n#\n\n# @lc code=start\n\"\"\"\n# Definition for a Node.\nclass Node:\n    def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n        self.val = val\n        self.left = left\n        self.right = right\n        self.next = next\n\"\"\"\n\nclass Solution:\n    def connect(self, root: 'Node') -> 'Node':\n        if not root:\n            return root\n        pre = root\n        while pre:\n            cur = pre # points at the head node of the current level\n            tail = Node(None) # dummy tail used to link the next level's nodes\n            head = None # head node of the next level\n            find = False\n            while cur: # iterate over the current level's nodes\n                if cur.left:\n                    tail.next = cur.left\n                    tail = tail.next\n                    if not find:\n                        head = cur.left\n                        find = True\n                if cur.right:\n                    tail.next = cur.right\n                    tail = tail.next\n                    if not find:\n                        head = cur.right\n                        find = True\n                cur = cur.next\n            pre = head\n        return root\n\n\n    \n# @lc code=end\n\n","repo_name":"Data-Designer/Leetcode-Travel","sub_path":"leetcode/117.填充每个节点的下一个右侧节点指针-ii.py","file_name":"117.填充每个节点的下一个右侧节点指针-ii.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"29474040148","text":"import argparse\nimport os\nimport glob\nimport yaml\n\n\ndef count_tweets(filename):\n    items = []\n\n    with open(filename, 'r') as input_file:\n        items = yaml.safe_load(input_file)\n\n    print(f'{filename}: {len(items)} tweets')\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('directory', type=str, nargs='+')\n    args = parser.parse_args()\n\n    for directory in args.directory:\n        if os.path.isdir(directory):\n            for filename in glob.glob(f'{directory}/*.yaml'):\n                count_tweets(filename)\n        else:\n            count_tweets(directory)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"clitetailor/community-detection","sub_path":"src/count_tweets.py","file_name":"count_tweets.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11896488099","text":"import logging\nimport warnings\n\nimport pandas as pd\nimport numpy as np\n\nfrom ..algorithms import Recommender\nfrom .. import util\nfrom ..sharing import PersistedModel\n\n_logger = logging.getLogger(__name__)\n\n\ndef _recommend_user(algo, req):\n    user, n, candidates = req\n\n    _logger.debug('generating recommendations for %s', user)\n    watch = util.Stopwatch()\n    res = algo.recommend(user, n, candidates)\n    _logger.debug('%s recommended %d/%s items for %s in %s',\n                  str(algo), len(res), n, user, watch)\n\n    res['user'] = user\n    res['rank'] = np.arange(1, len(res) + 1)\n\n    return res.reset_index(drop=True)\n\n\ndef __standard_cand_fun(candidates):\n    \"\"\"\n    Convert candidates from the forms accepted by :py:func:`recommend` into\n    a standard form, a function that takes a user and returns a candidate\n    list.\n    \"\"\"\n    if isinstance(candidates, dict):\n        return candidates.get\n    elif candidates is None:\n        return lambda u: None\n    else:\n        return candidates\n\n\ndef recommend(algo, users, n, candidates=None, *, n_jobs=None, **kwargs):\n    \"\"\"\n    Batch-recommend for multiple users. 
The provided algorithm should be a\n :py:class:`algorithms.Recommender`.\n\n Args:\n algo: the algorithm\n users(array-like): the users to recommend for\n n(int): the number of recommendations to generate (None for unlimited)\n candidates:\n the users' candidate sets. This can be a function, in which case it will\n be passed each user ID; it can also be a dictionary, in which case user\n IDs will be looked up in it. Pass ``None`` to use the recommender's\n built-in candidate selector (usually recommended).\n n_jobs(int):\n The number of processes to use for parallel recommendations. Passed to\n :func:`lenskit.util.parallel.invoker`.\n\n Returns:\n A frame with at least the columns ``user``, ``rank``, and ``item``; possibly also\n ``score``, and any other columns returned by the recommender.\n \"\"\"\n\n if n_jobs is None and 'nprocs' in kwargs:\n n_jobs = kwargs['nprocs']\n warnings.warn('nprocs is deprecated, use n_jobs', DeprecationWarning)\n\n if not isinstance(algo, PersistedModel):\n rec_algo = Recommender.adapt(algo)\n if candidates is None and rec_algo is not algo:\n warnings.warn('no candidates provided and algo is not a recommender, unlikely to work')\n algo = rec_algo\n del rec_algo\n\n if 'ratings' in kwargs:\n warnings.warn('Providing ratings to recommend is not supported', DeprecationWarning)\n\n candidates = __standard_cand_fun(candidates)\n\n with util.parallel.invoker(algo, _recommend_user, n_jobs=n_jobs) as worker:\n _logger.info('recommending with %s for %d users (n_jobs=%s)',\n str(algo), len(users), n_jobs)\n del algo\n timer = util.Stopwatch()\n results = worker.map((user, n, candidates(user)) for user in users)\n results = pd.concat(results, ignore_index=True, copy=False)\n _logger.info('recommended for %d users in %s', len(users), timer)\n\n return results\n","repo_name":"lenskit/lkpy","sub_path":"lenskit/batch/_recommend.py","file_name":"_recommend.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","stars":248,"dataset":"github-code","pt":"31"} +{"seq_id":"25346929090","text":"from bs4 import BeautifulSoup\r\nimport urllib3\r\n\r\ndef get_art_text(art_url):\r\n manager=urllib3.PoolManager()\r\n site=manager.request('GET',art_url)\r\n B=BeautifulSoup(site.data.decode('utf-8'),'lxml')\r\n Btmp=B.find('div',{'id':'storytext'})\r\n t=Btmp.get_text()\r\n return t\r\n\r\ndef scrapeNPR():\r\n manager=urllib3.PoolManager()\r\n npr_url=\"https://www.npr.org/sections/news/\"\r\n site=manager.request('GET',npr_url)\r\n B=BeautifulSoup(site.data.decode('utf-8'),'lxml')\r\n #print(B.prettify())\r\n articles=B.find_all('h2',attrs={'class':'title'})\r\n for article in articles:\r\n title=article.find('a').string\r\n art_url=article.find('a')['href']\r\n print(get_art_text(art_url))\r\n \r\ndef main():\r\n scrapeNPR()\r\n\r\nmain()\r\n","repo_name":"kuzbijc/UIC-CS-Code","sub_path":"nprsent.py","file_name":"nprsent.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40160673007","text":"import pandas as pd\nimport numpy as np\nimport random as rd\nfrom sklearn.decomposition import PCA\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.pyplot as plt\n\n\ndef generate_data():\n genes = ['gene'+str(i) for i in range(1, 101)]\n\n wt = ['wt'+str(i) for i in range(1, 6)]\n ko = ['ko'+str(i) for i in range(1, 6)]\n data = pd.DataFrame(columns=[*wt, *ko], index=genes)\n for gene in 
data.index:\n data.loc[gene, 'wt1':'wt5'] = np.random.poisson(lam=rd.randrange(10, 15), size=5)\n data.loc[gene, 'ko1':'ko5'] = np.random.poisson(lam=rd.randrange(10, 15), size=5)\n\n return data\n\ndef scale_data(data):\n scaled_data = preprocessing.scale(data.T) # mean = 0, std_dev = 1\n return scaled_data\n\nif __name__ == '__main__':\n df = generate_data()\n scaled_data = scale_data(df)\n pca = PCA()\n pca.fit(scaled_data)\n pca_data = pca.transform(scaled_data) # Data reduction\n\n per_var = np.round(pca.explained_variance_ratio_*100, decimals=1)\n labels = ['PC'+str(x) for x in range(1, len(per_var)+1)]\n\n plt.bar(x=range(1, len(per_var)+1), height=per_var, tick_label=labels)\n plt.ylabel('Percentage of Explained Variance')\n plt.xlabel('Principal Component')\n plt.title('Scree Plot')\n plt.show()\n\n pca_df = pd.DataFrame(pca_data, columns=labels)\n\n plt.scatter(pca_df.PC1, pca_df.PC2)\n plt.title('My PCA graph')\n plt.xlabel('PC1 - {0}%'.format(per_var[0]))\n plt.ylabel('PC2 - {0}%'.format(per_var[1]))\n\n for sample in pca_df.index:\n plt.annotate(sample, (pca_df.PC1.loc[sample], pca_df.PC2.loc[sample]))\n \n plt.show()","repo_name":"Misantonio/deep_learning","sub_path":"PCA/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5751618709","text":"import csv\nimport sys\n\n\ndef main():\n\n # TODO: Check for command-line usage\n if len(sys.argv) != 3:\n sys.exit(\"Usage: python dna.py data.csv sequence.txt\")\n\n # TODO: Read database file into a variable\n people_db = list()\n if (sys.argv[1] == \"databases/small.csv\"):\n with open(sys.argv[1]) as file:\n reader = csv.DictReader(file)\n for line in reader:\n line[\"AGATC\"] = int(line[\"AGATC\"])\n line[\"AATG\"] = int(line[\"AATG\"])\n line[\"TATC\"] = int(line[\"TATC\"])\n people_db.append(line)\n # print(people_db)\n else:\n # print(\"BIG\")\n with open(sys.argv[1]) as file:\n reader = csv.DictReader(file)\n for line in reader:\n line[\"AGATC\"] = int(line[\"AGATC\"])\n line[\"TTTTTTCT\"] = int(line[\"TTTTTTCT\"])\n line[\"AATG\"] = int(line[\"AATG\"])\n line[\"TATC\"] = int(line[\"TATC\"])\n line[\"TCTAG\"] = int(line[\"TCTAG\"])\n line[\"GATA\"] = int(line[\"GATA\"])\n line[\"GAAA\"] = int(line[\"GAAA\"])\n line[\"TCTG\"] = int(line[\"TCTG\"])\n people_db.append(line)\n # TODO: Read DNA sequence file into a variable\n with open(sys.argv[2], 'r') as file_txt:\n # rstrip() method removes any trailing characters\n dna_sequence = file_txt.read().rstrip()\n # print(dna_sequence)\n\n # TODO: Find longest match of each STR in DNA sequence\n agatc_longest = longest_match(dna_sequence, \"AGATC\")\n # print(agatc_longest)\n aatg_longest = longest_match(dna_sequence, \"AATG\")\n # print(aatg_longest)\n tatc_longest = longest_match(dna_sequence, \"TATC\")\n # print(tatc_longest)\n ttt_longest = longest_match(dna_sequence, \"TTTTTTCT\")\n tctag_longest = longest_match(dna_sequence, \"TCTAG\")\n gata_longest = longest_match(dna_sequence, \"GATA\")\n gaa_longest = longest_match(dna_sequence, \"GAAA\")\n tctg_longest = longest_match(dna_sequence, \"TCTG\")\n\n\n # TODO: Check database for matching profiles\n # print(len(people_db))\n match = \"No\"\n if (sys.argv[1] == \"databases/small.csv\"):\n for i in range(len(people_db)):\n if (people_db[i][\"AGATC\"] == agatc_longest and people_db[i][\"AATG\"] == aatg_longest and people_db[i][\"TATC\"] == tatc_longest):\n #print(people_db[i][\"name\"])\n match = 
people_db[i][\"name\"]\n                break\n        if (match == \"No\"):\n            match = \"No match\"\n    else:\n        for i in range(len(people_db)):\n            if (people_db[i][\"AGATC\"] == agatc_longest and people_db[i][\"AATG\"] == aatg_longest and\n                    people_db[i][\"TATC\"] == tatc_longest and people_db[i][\"TTTTTTCT\"] == ttt_longest and\n                    tctag_longest == people_db[i][\"TCTAG\"] and gata_longest == people_db[i][\"GATA\"] and\n                    gaa_longest == people_db[i][\"GAAA\"] and tctg_longest == people_db[i][\"TCTG\"]):\n                #print(people_db[i][\"name\"])\n                match = people_db[i][\"name\"]\n                break\n        if (match == \"No\"):\n            match = \"No match\"\n    print(match)\n\n    return\n\n\ndef longest_match(sequence, subsequence):\n    \"\"\"Returns length of longest run of subsequence in sequence.\"\"\"\n\n    # Initialize variables\n    longest_run = 0\n    subsequence_length = len(subsequence)\n    sequence_length = len(sequence)\n\n    # Check each character in sequence for most consecutive runs of subsequence\n    for i in range(sequence_length):\n\n        # Initialize count of consecutive runs\n        count = 0\n\n        # Check for a subsequence match in a \"substring\" (a subset of characters) within sequence\n        # If a match, move substring to next potential match in sequence\n        # Continue moving substring and checking for matches until out of consecutive matches\n        while True:\n\n            # Adjust substring start and end\n            start = i + count * subsequence_length\n            end = start + subsequence_length\n\n            # If there is a match in the substring\n            if sequence[start:end] == subsequence:\n                count += 1\n\n            # If there is no match in the substring\n            else:\n                break\n\n        # Update most consecutive matches found\n        longest_run = max(longest_run, count)\n\n    # After checking for runs at each character in sequence, return longest run found\n    return longest_run\n\n\nmain()\n","repo_name":"tlukanie/CS50","sub_path":"Week_6/dna/dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43581971197","text":"'''\nGeneralV1.py\nThe purpose of this very basic example is to test some general purpose APIs\n\n'''\nimport requests\nimport json\nimport tmconfig # to get the token and region url\nregion = tmconfig.region['us']\nappname = 'GeneralV1.py'\nheader = {'Authorization': 'Bearer ' + tmconfig.xdr_token, 'Content-Type': 'application/json;charset=utf-8',\n          'User-Agent': appname}\n\n# wrapper on requests get\ndef get(url_path, query_params):\n    r = requests.get(region + url_path, params=query_params, headers=header)\n    print(r.status_code)\n    if 'application/json' in r.headers.get('Content-Type', ''):\n        return json.dumps(r.json(), indent=4)\n    raise RuntimeError(f'Request unsuccessful (GET {url_path}):'\n                       f' {r.status_code} {r.text}')\n\n# wrapper on requests post\ndef post(url_path, query_params, body):\n    r = requests.post(region + url_path, params=query_params, headers=header, data=body)\n    if ((200 == r.status_code) and ('application/json' in r.headers.get('Content-Type', ''))):\n        return r.json()\n    raise RuntimeError(f'Request unsuccessful (POST {url_path}):'\n                       f' {r.status_code} {r.text}')\n\n# Wrapper for requests delete\ndef delete(url_path, query_params):\n    r = requests.delete(region + url_path, params=query_params, headers=header)\n    print(r.status_code)\n    if 'application/json' in r.headers.get('Content-Type', ''):\n        return r.json()\n    raise RuntimeError(f'Request unsuccessful (DELETE {url_path}):'\n                       f' {r.status_code} {r.text}')\n\n# wrapper on requests put\ndef put(url_path, query_params, body):\n    r = 
requests.put(region + url_path, params=query_params, headers=header, data=body)\n    if ((200 == r.status_code) and ('application/json' in r.headers.get('Content-Type', ''))):\n        return r.json()\n    raise RuntimeError(f'Request unsuccessful (PUT {url_path}):'\n                       f' {r.status_code} {r.text}')\n\n##### Vision One version 2.0 API\n\n#Deletes an account from the XDR service platform\ndef deleteaccount(email):\n    url_path = '/v2.0/xdr/portal/accounts/{email}'\n    url_path = url_path.format(**{'email': email})\n    query_params = {}\n    return delete(url_path, query_params)\n\n# Creates a SAML or local account\ndef createaccount(email, firstName, lastName):\n    # note that we don't test other roles than Analyst and we hardcoded type 0 = local\n    # and we hardcoded authorization 3 (UI + API), this is just an example\n    url_path = '/v2.0/xdr/portal/accounts/{email}'\n    url_path = url_path.format(**{'email': email})\n    query_params = {}\n    data = {'type': 0,\n            'firstName': firstName,\n            'lastName': lastName,\n            'enabled': True,\n            'description': 'Test account',\n            'token': tmconfig.xdr_token,\n            'authorization': 3,\n            'role': 'Analyst'\n            }\n    body = json.dumps(data)\n    return post(url_path, query_params, body)\n\n#Generates an authentication token for an account with API access\ndef generateToken(email):\n    url_path = '/v2.0/xdr/portal/accounts/{email}/tokens'\n    url_path = url_path.format(**{'email': email})\n    query_params = {}\n    return get(url_path, query_params)\n\n#Deletes the authentication token of an account with API access\ndef deleteToken(email):\n    url_path = '/v2.0/xdr/portal/accounts/{email}/tokens'\n    url_path = url_path.format(**{'email': email})\n    query_params = {}\n    return delete(url_path, query_params)\n\n#Configures the account password using the link in the verification message\ndef setPassword(email, password):\n    url_path = '/v2.0/xdr/portal/accounts/{email}/passwords'\n    url_path = url_path.format(**{'email': email})\n    query_params = {}\n    data = {'type': 0,\n            'password': password,\n            'token': tmconfig.xdr_token\n            }\n    body = json.dumps(data)\n    return put(url_path, query_params, body)\n\n#Sends a verification message with a link to a password configuration screen\ndef SendAccountVerif(email):\n    url_path = '/v2.0/xdr/portal/accounts/{email}/passwords/sendEmails'\n    url_path = url_path.format(**{'email': email})\n    query_params = {}\n    body = ''\n    return post(url_path, query_params, body)\n\n#Retrieves the status (enabled or disabled) of two-factor authentication for local accounts\ndef get2faStatus():\n    url_path = '/v2.0/xdr/portal/accounts/mfa'\n    query_params = {}\n    return get(url_path, query_params)\n\n#Retrieves a list of roles that users can select when configuring accounts\ndef listRoles():\n    url_path = '/v2.0/xdr/portal/accounts/roles'\n    query_params = {}\n    return get(url_path, query_params)\n\n#Retrieves a list of roles used in company accounts and their corresponding permissions\ndef listUsedRoles():\n    url_path = '/v2.0/xdr/portal/roles'\n    query_params = {}\n    return get(url_path, query_params)\n\n#Retrieves the permissions of a specific role for all XDR features\ndef getPermissions(role):\n    url_path = '/v2.0/xdr/portal/roles/{role}/permissions'\n    url_path = url_path.format(**{'role': role})\n    query_params = {}\n    return get(url_path, query_params)\n\n#Retrieves the permissions of all roles for all XDR features\ndef getPermissionsAll():\n    url_path = '/v2.0/xdr/portal/roles/permissions'\n    query_params = {}\n    return get(url_path, query_params)\n\n\n#Retrieves a list of log entries that match specified criteria\ndef 
searchAuditLogs(pageIndex=1, pageSize=20, period=30, accessType=0,\n                    categories=\"01\", detail='',sort='desc'):\n    #categories = \"01|02|03|04|05|06|07|08|09|0c|0d|11\"\n    url_path = '/v2.0/xdr/portal/auditLog/search'\n\n    data = {'pageIndex': pageIndex,\n            'pageSize': pageSize,\n            'period': period,\n            'accessType': accessType,\n            'categories': categories,\n            'detail': detail,\n            'sort': sort\n            }\n\n    return get(url_path, data)\n\n#Exports the log entries that match specified criteria as a CSV file\ndef exportAuditLogs(pageIndex=1, pageSize=20, period=30, accessType=0,\n                    categories='11', detail='david',sort='desc'):\n    #categories = \"01|02|03|04|05|06|07|08|09|0c|0d|11\"\n    url_path = '/v2.0/xdr/portal/auditLog/exportCsv'\n    data = {'pageIndex': pageIndex,\n            'pageSize': pageSize,\n            'period': period,\n            'accessType': accessType,\n            'categories': categories,\n            'detail': detail,\n            'sort': sort\n            }\n    \n    #Since the return is not JSON, we will not use the get wrapper function and call requests.get directly\n    r = requests.get(region + url_path, params=data, headers=header)\n    print(r.status_code)\n    if r.status_code != 200:\n        raise Exception(str(r.status_code) + \" \" + r.text)\n    else:\n        return r.content\n\nemail = 'email@email.com' # replace with an email that has never been used with Vision One\n#print(createaccount(email, 'test first name', 'test last name'))\n#print(deleteaccount(email))\n#print(generateToken(email))\n#print(listRoles())\n#print(listUsedRoles())\n#print(getPermissions('Analyst'))\n#print(getPermissionsAll())\n#print(get2faStatus())\nprint(searchAuditLogs())\nprint(exportAuditLogs(1,20,30, 0,categories='11', detail='david',sort='ASC'))\n","repo_name":"girdav01/V1APITraining","sub_path":"GeneralPurpose/GeneralV1.py","file_name":"GeneralV1.py","file_ext":"py","file_size_in_byte":7197,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"27539639549","text":"import pickle\nimport pandas as pd\n\n\nmodel = pickle.load(open('model', 'rb'))\nvector = pickle.load(open('vector', 'rb'))\n\ndf = pd.read_csv('news.csv')\n\ntest = df.loc[40:41, 'text']\nlabel = df.loc[40:41, 'label']\ntfidf_test = vector.transform(test)\nprint(model.predict(tfidf_test))\nprint(label)","repo_name":"Tuanyk/FakeNewsDetections","sub_path":"load_model.py","file_name":"load_model.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27981859578","text":"##made by enrique\nprint('filename?:')\nfilename = input()\nprint('# of lines?:')\nnumOfLines = input()\nlist1 = []\nprint('opening file >:')\nwith open(filename, 'r') as f:\n    for r in range(int(numOfLines)):\n        temp = f.readline()\n        print(temp)\n        list1.append(temp)\n\nprint('printing complete >:')\nprint('extracting data >:')\n##turns data from file into LONG list of numbers.\nlist2 = []\nfor i in range(int(numOfLines)):\n    temp = list1[i]\n    temp2 = temp.split()\n    # convert this line's tokens to ints and collect them all\n    for j in range(len(temp2)):\n        list2.append(int(temp2[j]))\n\nquit()\n","repo_name":"Flower-fist/CORE-WARS-pinkcode-dredcode-version","sub_path":"loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24879159779","text":"from odoo import models, fields, api, tools\n\nclass property_dld_comparison_usage(models.Model):\n    _name = 'property_dld_comparison_usage'\n\n    
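# _auto = False: no database table is created for this model; it is backed by the SQL view built in init() below\n    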
_auto = False\n avg_price = fields.Float()\n usage = fields.Char()\n sqft_rate = fields.Float()\n pr_value = fields.Integer()\n emirate = fields.Char()\n\n #@api.cr # cr\n def init(self):\n tools.drop_view_if_exists(self.env.cr, self._table)\n self._cr.execute(\"\"\"create or replace view property_dld_comparison_usage as \n (SELECT row_number() OVER () as id, sum(transaction_price)/sum(transaction_size) avg_price , usage , sum(transaction_size) sqft_rate, sum(transaction_price) pr_value, emirate from property_property where origin='dubailand' group by usage , emirate)\"\"\")","repo_name":"DeuceAceTrey/odoo_scraping_property","sub_path":"dld/dld_comparison_usage.py","file_name":"dld_comparison_usage.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31409659325","text":"#RPi camera mock code for taking a still image and then processing with the petri dish code\n#Written by Brendon Madison on 2nd October 2021\n\nfrom picamera import PiCamera\nfrom time import sleep\nimport time\nimport os\n\ncamera = PiCamera()\n\n#time between each image\ndelay_time = 0.5*60\n\n#time images are taken, below is 48 hours\ncamera_time = (60*48*60.0)\n\nsleep(5)\n\n#Take pictures for 48 hours\nfor i in range(int(camera_time/delay_time) + 1):\n #filename = str('LabTest%s.jpg' % i)\n filename = str(str(int(time.time()))+'.jpg')\n camera.resolution = (1280,720)\n camera.capture(filename)\n \n \n #sleep for 10 seconds to ensure the image is saved correctly before the processing script is called\n sleep(10)\n \n print(\"Captured: \" + filename)\n #Calls the processor code \"FindPetriDish.py\"\n os.system(str('python3 FindPetriDish.py ') + str(filename))\n \n #sleep for 60 seconds to ensure the above code is run with no \n sleep(60)\n \n #sleep for (delay_time) seconds . 
Default is 30 seconds\n    sleep(delay_time)\n\ncamera.close()\n","repo_name":"BrendonMadison/KUbeSatPiCam","sub_path":"RpiCameraMockCode.py","file_name":"RpiCameraMockCode.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23063694749","text":"#!/usr/bin/python3\nimport csv\nimport datetime\nimport sys\n\n# hadoop jar /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar wordcount s3a://inputfilesassignment4/file-input1.csv s3a://inputfilesassignment4/wordcount\n\n# Unique URLs per Hour\nfor line_in in sys.stdin:\n\n    try:\n        line = line_in.split(\",\")\n\n        # TRY THIS\n        # for col in line\n\n        # timestamp = line[1]\n        # url = line[2]\n\n        timestamp = line[1]\n        url = line[2]\n\n        date = datetime.time()\n        \n        # Prepare a key in the form of <year-month-day>:<hour>\n        try:\n            date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')\n        except ValueError: \n            date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')\n\n        day = date.day\n        if len(str(day)) == 1:\n            day = '0{}'.format(day)\n        month = date.month\n        if len(str(month)) == 1:\n            month = '0{}'.format(month)\n        hour = date.hour\n        if len(str(hour)) == 1:\n            hour = '0{}'.format(hour)\n\n        timestamp_hour = '%s-%s-%s:%s' % (date.year, month, day, hour)\n\n        # Prepare a key\n        timestamp_hour_url = '%s-%s' % (timestamp_hour, url)\n\n        # Output, stdout\n        # print('%s\\t%d' % (timestamp_hour_url, 1))\n        print('%s\\t%s' % (timestamp_hour, url))\n\n\n    except ValueError as e:\n        continue\n\n\n# print(\"\\nMapper-1 Process Completed\")\n\n","repo_name":"thinkocapo/big-data-processing","sub_path":"assignment4/historical/old.mapper-1.py","file_name":"old.mapper-1.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22568097392","text":"from ontobot.utils.onto import OP\nfrom ontobot.utils.rules import nary\nfrom ontobot.model.output import Error, Response\nfrom ontobot.utils.rules import custom\nfrom ontobot.utils.rules import role\nfrom ontobot.utils.rules import collective\nfrom ontobot.services import firestore_connect\nfrom ontobot.utils import cmethod\n\n# from ontobot.db.taxonomy import Taxonomy\n\n\ndef get_op_structure(parsed_json):\n    op = OP()\n    # taxonomy_obj = Taxonomy()\n    sessionID = parsed_json['sessionId']\n    print(sessionID)\n\n    try:\n        relationship_list = parsed_json['subrelationships']\n        op_struct = op.get_stack(relationship_list)\n        # taxonomy_result = taxonomy_obj.taxonomy_result # get taxonomy result from the db\n        db_taxonomy_result = firestore_connect.get_document(\n            session_id=sessionID)\n\n        if db_taxonomy_result is not None:\n\n            invalid_custom_concepts = custom.get_relational_pattern(db_taxonomy_result['msg'], op_struct) # check relational pattern\n            invalid_role_concepts = role.get_role_pattern(db_taxonomy_result['msg'], op_struct) # check role pattern\n            invalid_collective_concepts_stage_01 = collective.get_collective_pattern_stage_01(db_taxonomy_result['msg'], op_struct) # check collective pattern\n            invalid_collective_concepts_stage_02 = collective.get_collective_pattern_stage_02(op_struct) # check collective pattern\n\n\n            if len(invalid_custom_concepts) > 0:\n                return Error.next(err=invalid_custom_concepts, type=\"op_relational\")\n\n            if len(invalid_role_concepts) > 0:\n                return Error.next(err=invalid_role_concepts, type=\"role\")\n\n            if len(invalid_collective_concepts_stage_01) > 0:\n                return Error.next(err=invalid_collective_concepts_stage_01, 
type=\"collective-01\")\n\n if len(invalid_collective_concepts_stage_02) > 0:\n return Error.next(err=invalid_collective_concepts_stage_02, type=\"collective-02\")\n\n # get n-ary pattern\n db_owlTaxonomy_result = firestore_connect.get_OwlTaxo_document(\n session_id=sessionID)\n final_op_result = nary.get_nary_structure(\n op_struct, db_owlTaxonomy_result['concepts'], sessionID)\n\n relationships = []\n for op in final_op_result:\n relationships.append(op['op_name'])\n relationships.append(op['op_inverse'])\n if len(op[\"op_equal\"]) > 0:\n relationships.append(op['op_equal'])\n\n owl_complete = {\n\n \"sessionID\" : sessionID,\n \"taxonomy\" : db_owlTaxonomy_result['taxonomy'],\n \"concepts\" : list(set(db_owlTaxonomy_result['concepts'])),\n \"op\" : final_op_result,\n \"relationships\" : list(set(relationships))\n\n }\n\n firestore_connect.create_new_owlComplete_document(\n session_id=sessionID, obj=owl_complete)\n return Response.next(cmethod.convertFromTaxonomyContent(owl_complete))\n\n else:\n return Error.next(err=\"No session data is found\", type=\"sww\")\n\n except Exception as err:\n return Error.next(err=err, type=\"sww\")\n\n\ndef test_data_structure(parsed_json):\n op = OP()\n relationship_list = parsed_json['subrelationships']\n op_struct = op.get_stack(relationship_list)\n return Response.send_response(op_struct)\n","repo_name":"OntoBot-org/ontobot-app","sub_path":"flask-server/ontobot/services/op_service.py","file_name":"op_service.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"3897003587","text":"from indykite_sdk.authorization import AuthorizationClient\nfrom indykite_sdk.indykite.authorization.v1beta1 import authorization_service_pb2 as pb2\nfrom indykite_sdk.model.who_authorized import WhoAuthorizedResource, WhoAuthorizedResponse\n\n\ndef test_who_authorized_wrong(capsys):\n client = AuthorizationClient()\n assert client is not None\n\n actions = [12, 13]\n resources = [WhoAuthorizedResource(\"resourceID\", \"TypeName\", actions),\n WhoAuthorizedResource(\"resource2ID\", \"TypeName\", actions)]\n input_params = {}\n response = client.who_authorized(resources, input_params, [])\n captured = capsys.readouterr()\n assert \"bad argument type for built-in operation\" in captured.err\n\n\ndef test_who_authorized_success():\n client = AuthorizationClient()\n assert client is not None\n\n actions = [\"ACTION1\", \"ACTION2\"]\n resources = [WhoAuthorizedResource(\"resourceID\", \"TypeName\", actions),\n WhoAuthorizedResource(\"resource2ID\", \"TypeName\", actions)]\n input_params = {\"age\": \"21\"}\n policy_tags = [\"Car\", \"Rental\", \"Sharing\"]\n response = client.who_authorized(resources, input_params, policy_tags)\n assert response is not None\n assert isinstance(response, WhoAuthorizedResponse)\n\n\ndef test_who_authorized_empty():\n client = AuthorizationClient()\n assert client is not None\n\n actions = [\"ACTION1\", \"ACTION2\"]\n resources = [WhoAuthorizedResource(\"resourceID\", \"TypeName\", actions), WhoAuthorizedResource(\"resource2ID\", \"TypeName\", actions)]\n input_params = {}\n\n def mocked_who_authorized(request: pb2.WhoAuthorizedRequest):\n return None\n\n client.stub.WhoAuthorized = mocked_who_authorized\n response = client.who_authorized(resources, input_params, [])\n assert response is 
None\n","repo_name":"indykite/indykite-sdk-python","sub_path":"tests/test_who_authorized.py","file_name":"test_who_authorized.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72331206168","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def __init__(self):\n self.stack = list()\n def postorderTraversal(self, root):\n def postOrder(root):\n if root is not None:\n\n postOrder(root.left)\n postOrder(root.right)\n self.stack.append(root.val)\n\n postOrder(root)\n return self.stack\n\nroot = TreeNode(1)\nroot.left = TreeNode(2)\nroot.right = TreeNode(3)\nroot.left.left = TreeNode(4)\nroot.left.right = TreeNode(5)\n\nprint(Solution().postorderTraversal(root))\n","repo_name":"galethegreat/LeetCode","sub_path":"Amazon/review/postOrder.py","file_name":"postOrder.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36291613811","text":"import pandas as pd\nfrom sql_app.database import SessionLocal, engine\nfrom sql_app import crud, schemas, models, database\nfrom datetime import datetime\n\nimport os\n\ndb = SessionLocal()\nmodels.Base.metadata.drop_all(engine)\nmodels.Base.metadata.create_all(bind=engine)\n\ndef insert_weather_file(df, station):\n total_count = len(df)\n data = []\n for row in df.itertuples(index=False):\n payload = {\n \"station\": station,\n \"date\": datetime.strptime(str(row.date), '%Y%m%d') if row.date != -9999 else None,\n \"max_temprature\": int(row.max_temprature) if row.max_temprature != -9999 else None,\n \"min_temprature\": int(row.min_temprature) if row.min_temprature != -9999 else None,\n \"precipitation_amount\": int(row.precipitation_amount) if row.precipitation_amount != -9999 else None\n }\n data.append(schemas.Weather.parse_obj(payload))\n crud.insert_weather(db, data)\n print(f\"records {total_count} for station {station} is inserted.\")\n\ndef weather_data_ingestion():\n directory = 'wx_data'\n for filename in os.listdir(directory):\n station = filename.split('.')[0]\n file = os.path.join(directory, filename)\n df = pd.read_csv(file, delimiter='\\t', names=('date', 'max_temprature', 'min_temprature', 'precipitation_amount'))\n print(f\"file {file} is being inserted.\")\n insert_weather_file(df, station)\n\n\ndef insert_yield_file(df):\n total_count = len(df)\n data = []\n for row in df.itertuples(index=False):\n payload = {\n \"year\": int(row.year) if row.year != -9999 else None,\n \"grain_yield\": int(row.grain_yield) if row.grain_yield != -9999 else None\n }\n data.append(schemas.Yield.parse_obj(payload))\n crud.insert_yield(db, data)\n print(f\"records {total_count} for yield is inserted.\")\n\ndef yield_data_ingestion():\n directory = 'yld_data'\n for filename in os.listdir(directory):\n file = os.path.join(directory, filename)\n df = pd.read_csv(file, delimiter='\\t', names=('year', 'grain_yield'))\n print(f\"file {file} is being inserted.\")\n insert_yield_file(df)\n\nif __name__ == \"__main__\":\n weather_data_ingestion()\n yield_data_ingestion()","repo_name":"nagaarju/weather","sub_path":"data_ingestion.py","file_name":"data_ingestion.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
+{"seq_id":"8591839238","text":"from math import *\nimport sys\nimport numpy as np\nfrom ASRCAISim1.libCore import *\nfrom OriginalModelSample.libOriginalModelSample import *\n\nclass R4PyRewardSample01(TeamReward):\n\t\"\"\"An example reward implementation based on several criteria:\n\t(1) Bonus for a Bite (the missile seeker locks onto its target)\n\t(2) Penalty when a missile's target drops to memory tracking\n\t(3) Bonus for enemy detection (what percentage of surviving enemies is being tracked)\n\t(4) Penalty for excessive maneuvering\n\t(5) Additional bonus/penalty for advancing/retreating\n\t(6) Bonus/penalty for changes in the stored mechanical energy (excluding rotation)\n\t\"\"\"\n\tdef __init__(self,modelConfig,instanceConfig):\n\t\tsuper().__init__(modelConfig,instanceConfig)\n\t\tif(self.isDummy):\n\t\t\treturn\n\t\tself.pBite=getValueFromJsonKRD(self.modelConfig,\"pBite\",self.randomGen,0.0)\n\t\tself.pMemT=getValueFromJsonKRD(self.modelConfig,\"pMemT\",self.randomGen,0.2)\n\t\tself.pDetect=getValueFromJsonKRD(self.modelConfig,\"pDetect\",self.randomGen,0.0)\n\t\tself.pVel=getValueFromJsonKRD(self.modelConfig,\"pVel\",self.randomGen,0.0)\n\t\tself.pOmega=getValueFromJsonKRD(self.modelConfig,\"pOmega\",self.randomGen,0.0)\n\t\tself.pLine=getValueFromJsonKRD(self.modelConfig,\"pLine\",self.randomGen,0.0)\n\t\tself.pEnergy=getValueFromJsonKRD(self.modelConfig,\"pEnergy\",self.randomGen,0.0)\n\t\tself.pLineAsPeak=getValueFromJsonKRD(self.modelConfig,\"pLineAsPeak\",self.randomGen,True)\n\tdef onEpisodeBegin(self):#Initialization\n\t\tself.j_target=\"All\"#Force the reward target regardless of the individual config\n\t\tsuper().onEpisodeBegin()\n\t\to=self.manager.getRuler()().observables\n\t\tself.westSider=o[\"westSider\"]()\n\t\tself.eastSider=o[\"eastSider\"]()\n\t\tself.forwardAx=o[\"forwardAx\"]()\n\t\tself.dLine=o[\"dLine\"]()\n\t\tself.friends={\n\t\t\tteam:[\n\t\t\t\tf for f in [f() for f in self.manager.getAssets(lambda a:a.getTeam()==team and isinstance(a,Fighter))]\n\t\t\t]\n\t\t\tfor team in self.reward\n\t\t}\n\t\tself.totalEnergy={\n\t\t\tteam:sum([\n\t\t\t\tnp.linalg.norm(f.velI())**2/2-gravity*f.posI()[2] for f in [f() for f in self.manager.getAssets(lambda a:a.getTeam()==team and isinstance(a,Fighter))]\n\t\t\t])\n\t\t\tfor team in self.reward\n\t\t}\n\t\tself.leadRangePrev={\n\t\t\tteam:max(-self.dLine,max([\n\t\t\t\tnp.dot(self.forwardAx[team],f.posI()[0:2]) for f in [f() for f in self.manager.getAssets(lambda a:a.getTeam()==team and isinstance(a,Fighter))]\n\t\t\t]))\n\t\t\tfor team in self.reward\n\t\t}\n\t\tself.leadRange={key:value for key,value in self.leadRangePrev.items()}\n\t\tself.enemies={\n\t\t\tteam:[\n\t\t\t\tf for f in [f() for f in self.manager.getAssets(lambda a:a.getTeam()!=team and isinstance(a,Fighter))]\n\t\t\t]\n\t\t\tfor team in self.reward\n\t\t}\n\t\tself.friendMsls={\n\t\t\tteam:[\n\t\t\t\tf for f in [f() for f in self.manager.getAssets(lambda a:a.getTeam()==team and isinstance(a,Missile))]\n\t\t\t]\n\t\t\tfor team in self.reward\n\t\t}\n\t\tself.numMissiles={team:len(self.friendMsls[team]) for team in self.reward}\n\t\tself.biteFlag={team:np.full(self.numMissiles[team],False)\n\t\t\tfor team in self.reward}\n\t\tself.memoryTrackFlag={team:np.full(self.numMissiles[team],False)\n\t\t\tfor team in self.reward}\n\tdef onStepEnd(self):\n\t\tdelta={t:0.0 for t in self.reward}\n\t\tfor team in self.reward:\n\t\t\t#(1) Bonus for a Bite, (2) penalty for dropping to memory tracking\n\t\t\tfor i,m in enumerate(self.friendMsls[team]):\n\t\t\t\tif(m.hasLaunched and m.isAlive):\n\t\t\t\t\tif(m.mode==Missile.Mode.SELF and not self.biteFlag[team][i]):\n\t\t\t\t\t\tself.reward[team]+=self.pBite\n\t\t\t\t\t\tdelta[team]+=self.pBite\n\t\t\t\t\t\tself.biteFlag[team][i]=True\n\t\t\t\t\tif(m.mode==Missile.Mode.MEMORY and not 
self.memoryTrackFlag[team][i]):\n\t\t\t\t\t\tself.reward[team]-=self.pMemT\n\t\t\t\t\t\tdelta[team]-=self.pMemT\n\t\t\t\t\t\tself.memoryTrackFlag[team][i]=True\n\t\t\t#(3) Bonus for enemy detection: what percentage of surviving enemies is being tracked (assumes a datalink)\n\t\t\ttrack=[]\n\t\t\tfor f in self.friends[team]:\n\t\t\t\tif(f.isAlive()):\n\t\t\t\t\ttrack=[Track3D(t) for t in f.observables[\"sensor\"][\"track\"]]\n\t\t\t\t\tbreak\n\t\t\tnumAlive=0\n\t\t\tnumTracked=0\n\t\t\tfor f in self.enemies[team]:\n\t\t\t\tif(f.isAlive()):\n\t\t\t\t\tnumAlive+=1\n\t\t\t\t\tfor t in track:\n\t\t\t\t\t\tif(t.isSame(f)):\n\t\t\t\t\t\t\tnumTracked+=1\n\t\t\t\t\t\t\tbreak\n\t\t\tif(numAlive>0):\n\t\t\t\tself.reward[team]+=(1.0*numTracked/numAlive)*self.pDetect\n\t\t\t\tdelta[team]+=(1.0*numTracked/numAlive)*self.pDetect\n\t\t\tene=0.0\n\t\t\ttmp=-self.dLine\n\t\t\tfor f in self.friends[team]:\n\t\t\t\tpos=f.posI()\n\t\t\t\tvel=f.velI()\n\t\t\t\tomega=f.omegaI()\n\t\t\t\tif(f.isAlive()):\n\t\t\t\t\t#(4) Penalty for excessive maneuvering (L2 regularization on the angular velocity norm, L1 on the vertical velocity)\n\t\t\t\t\tself.reward[team]+=-self.pVel*abs(vel[2])-(np.linalg.norm(omega)**2)*self.pOmega\n\t\t\t\t\t#(5) Additional bonus/penalty for advancing/retreating\n\t\t\t\t\ttmp=max(tmp,np.dot(self.forwardAx[team],f.posI()[0:2]))\n\t\t\t\t#(6) Bonus/penalty from the change in stored mechanical energy (excluding rotation)\n\t\t\t\tene+=np.linalg.norm(vel)**2/2-gravity*pos[2]\n\t\t\tself.leadRange[team]=tmp\n\t\t\tif(self.pLineAsPeak):\n\t\t\t\t#Grant the advance bonus only when a new furthest point is reached\n\t\t\t\tif(self.leadRange[team]>self.leadRangePrev[team]):\n\t\t\t\t\tself.reward[team]+=(self.leadRange[team]-self.leadRangePrev[team])*self.pLine\n\t\t\t\t\tself.leadRangePrev[team]=self.leadRange[team]\n\t\t\telse:\n\t\t\t\t#Apply the advance/retreat bonus/penalty at every step\n\t\t\t\tself.reward[team]+=(self.leadRange[team]-self.leadRangePrev[team])*self.pLine\n\t\t\t\tself.leadRangePrev[team]=self.leadRange[team]\n\t\t\tself.reward[team]+=(ene-self.totalEnergy[team])*self.pEnergy\n\t\t\tdelta[team]+=(ene-self.totalEnergy[team])*self.pEnergy\n\t\t\tself.totalEnergy[team]=ene\n\t\tsuper().onStepEnd()\n","repo_name":"AMay-eime/Airforce_AI","sub_path":"simulator_dist/root/sample/OriginalModelSample/OriginalModelSample/R4PyRewardSample01.py","file_name":"R4PyRewardSample01.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"16104683748","text":"# All pair shortest path\n# A Dynamic Programming solution\n# The Floyd-Warshall Algorithm\n\n\n# optimal substructure: Let P be the shortest cycle free i-j path with all internal nodes in V(k)\n# Case1: If k not internal to P, then P is the shortest cycle free i-j path with all internal nodes in V(k-1)\n# Case2: If k is internal to P, then P1 = shortest (cycle free) i-k path, P2 = shortest k-j path in V(k-1)\n\n# Initialization:\n# Let A be a 3-D array (indexed by i,j,k)\n# base case: for all i, j in V\n# 1. A[i,j,0] = 0 if i=j\n# 2. A[i,j,0] = cost_ij if (i,j) belongs to E\n# 3. A[i,j,0] = inf if i!=j and (i,j) is not an edge\n\n# Loop through k, i, j in n, n, n --> A[i,j,k] = min{A[i,j,k-1],A[i,k,k-1] + A[k,j,k-1]}\n# How to detect negative cycles? 
Scan the Diagonal\n# if a negative cycle exists, at least one i in V will have A[i,i,n] < 0 at the end of the algorithm\n\nimport numpy as np\nfrom tqdm import tqdm\n\n\ndef read_edges(file_name):\n    \"\"\"n = number of nodes, m = number of edges\n    returns the initialized 3-D array A\"\"\"\n\n    with open(file_name) as f:\n        first_row = list(map(int, f.readline().split(' ')))\n        n, m = first_row[0], first_row[1]\n\n        A = np.zeros((n, n, n))\n        for i in range(n):\n            for j in range(n):\n                A[i, j, 0] = 0 if i == j else np.inf\n        for position, line in enumerate(f):\n            if position != 0:\n                item = list(map(int, line.split(' ')))\n                A[item[0]-1, item[1]-1, 0] = item[2]\n\n    return n, A\n\n\ndef floyd_warshall(A, n):\n    \"\"\"Run the Floyd-Warshall algorithm to get the shortest path.\n    Returns the shortest path value if no negative cycle exists,\n    or the string \"Negative Cycles Exist\" if a negative cycle exists\"\"\"\n\n    for k in tqdm(range(1, n)):\n        for i in range(n):\n            for j in range(n):\n                A[i, j, k] = min(A[i, j, k-1], A[i, k-1, k-1] + A[k-1, j, k-1])\n\n    for i in range(n):\n        if A[i, i, n-1] < 0:\n            return \"Negative Cycles Exist\"\n\n    return np.min(A[:, :, n-1])\n\n\nif __name__ == \"__main__\":\n\n    n, A = read_edges(\n        'Shortest Paths Revisited, NP-Complete Problems/1/ShortestPathRevisit/g3.txt')\n\n    min_path = floyd_warshall(A, n)\n    print(min_path)\n","repo_name":"annacx0123/Algorithms-Specialization","sub_path":"Shortest Paths Revisited, NP-Complete Problems/1/ShortestPathRevisit/all_pair_shortest_path.py","file_name":"all_pair_shortest_path.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"37275481316","text":"import numbers\nimport numpy as np\nfrom vayesta.core.util import AbstractMethodError, brange, dot, einsum, fix_orbital_sign, hstack, time_string, timer\nfrom vayesta.core import spinalg\nfrom vayesta.core.types import Cluster\nfrom vayesta.core.bath import helper\nfrom vayesta.core.bath.bath import Bath\n\n\nclass BNO_Threshold:\n    def __init__(self, type, threshold):\n        \"\"\"\n        number: Fixed number of BNOs\n        occupation: Occupation threshold for BNOs (\"eta\")\n        truncation: Maximum number of electrons to be ignored\n        electron-percent: Add BNOs until 100-x% of the total number of all electrons is captured\n        excited-percent: Add BNOs until 100-x% of the total number of excited electrons is captured\n        \"\"\"\n        if type not in (\"number\", \"occupation\", \"truncation\", \"electron-percent\", \"excited-percent\"):\n            raise ValueError()\n        self.type = type\n        self.threshold = threshold\n\n    def __repr__(self):\n        return \"%s(type=%s, threshold=%g)\" % (self.__class__.__name__, self.type, self.threshold)\n\n    def get_number(self, bno_occup, electron_total=None):\n        \"\"\"Get number of BNOs.\"\"\"\n        nbno = len(bno_occup)\n        if nbno == 0:\n            return 0\n        if self.type == \"number\":\n            return self.threshold\n        if self.type in (\"truncation\", \"electron-percent\", \"excited-percent\"):\n            npos = np.clip(bno_occup, 0.0, None)\n            nexcited = np.sum(npos)\n            nelec0 = 0\n            if self.type == \"truncation\":\n                ntarget = nexcited - self.threshold\n            elif self.type == \"electron-percent\":\n                assert electron_total is not None\n                ntarget = (1.0 - self.threshold) * electron_total\n                nelec0 = electron_total - nexcited\n            elif self.type == \"excited-percent\":\n                ntarget = (1.0 - self.threshold) * nexcited\n            for bno_number in range(nbno + 1):\n                nelec = nelec0 + np.sum(npos[:bno_number])\n                if nelec >= ntarget:\n                    return bno_number\n            raise 
RuntimeError()\n if self.type == \"occupation\":\n return np.count_nonzero(bno_occup >= self.threshold)\n raise RuntimeError()\n\n\nclass BNO_Bath(Bath):\n \"\"\"Bath natural orbital (BNO) bath, requires DMET bath.\"\"\"\n\n def __init__(self, fragment, dmet_bath, occtype, *args, c_buffer=None, canonicalize=True, **kwargs):\n super().__init__(fragment, *args, **kwargs)\n self.dmet_bath = dmet_bath\n if occtype not in (\"occupied\", \"virtual\"):\n raise ValueError(\"Invalid occtype: %s\" % occtype)\n self.occtype = occtype\n self.c_buffer = c_buffer\n # Canonicalization can be set separately for occupied and virtual:\n if np.ndim(canonicalize) == 0:\n canonicalize = (canonicalize, canonicalize)\n self.canonicalize = canonicalize\n # Coefficients, occupations, and correlation energy:\n self.coeff, self.occup, self.ecorr = self.kernel()\n\n @property\n def c_cluster_occ(self):\n \"\"\"Occupied DMET cluster orbitals.\"\"\"\n return self.dmet_bath.c_cluster_occ\n\n @property\n def c_cluster_vir(self):\n \"\"\"Virtual DMET cluster orbitals.\"\"\"\n return self.dmet_bath.c_cluster_vir\n\n def make_bno_coeff(self, *args, **kwargs):\n raise AbstractMethodError()\n\n @property\n def c_env(self):\n if self.occtype == \"occupied\":\n return self.dmet_bath.c_env_occ\n if self.occtype == \"virtual\":\n return self.dmet_bath.c_env_vir\n\n @property\n def ncluster(self):\n if self.occtype == \"occupied\":\n return self.dmet_bath.c_cluster_occ.shape[-1]\n if self.occtype == \"virtual\":\n return self.dmet_bath.c_cluster_vir.shape[-1]\n\n def kernel(self):\n c_env = self.c_env\n if self.spin_restricted and (c_env.shape[-1] == 0):\n return c_env, np.zeros(0), None\n if self.spin_unrestricted and (c_env[0].shape[-1] + c_env[1].shape[-1] == 0):\n return c_env, tuple(2 * [np.zeros(0)]), None\n self.log.info(\"Making %s BNOs\", self.occtype.capitalize())\n self.log.info(\"-------%s-----\", len(self.occtype) * \"-\")\n self.log.changeIndentLevel(1)\n coeff, occup, ecorr = self.make_bno_coeff()\n self.log_histogram(occup)\n self.log.changeIndentLevel(-1)\n self.coeff = coeff\n self.occup = occup\n return coeff, occup, ecorr\n\n def log_histogram(self, n_bno):\n if len(n_bno) == 0:\n return\n self.log.info(\"%s BNO histogram:\", self.occtype.capitalize())\n bins = np.hstack([-np.inf, np.logspace(-3, -10, 8)[::-1], np.inf])\n labels = \" \" + \"\".join(\"{:{w}}\".format(\"E-%d\" % d, w=5) for d in range(3, 11))\n self.log.info(helper.make_histogram(n_bno, bins=bins, labels=labels))\n\n def get_bath(self, bno_threshold=None, **kwargs):\n return self.truncate_bno(self.coeff, self.occup, bno_threshold=bno_threshold, **kwargs)\n\n @staticmethod\n def _has_frozen(c_frozen):\n return c_frozen.shape[-1] > 0\n\n def get_finite_bath_correction(self, c_active, c_frozen):\n if not self._has_frozen(c_frozen):\n return 0\n e1 = self.ecorr\n actspace = self.get_active_space(c_active=c_active)\n # --- Canonicalization\n fock = self.base.get_fock_for_bath()\n if self.canonicalize[0]:\n self.log.debugv(\"Canonicalizing occupied orbitals\")\n c_active_occ = self.fragment.canonicalize_mo(actspace.c_active_occ, fock=fock)[0]\n else:\n c_active_occ = actspace.c_active_occ\n if self.canonicalize[1]:\n self.log.debugv(\"Canonicalizing virtual orbitals\")\n c_active_vir = self.fragment.canonicalize_mo(actspace.c_active_vir, fock=fock)[0]\n else:\n c_active_vir = actspace.c_active_vir\n actspace = Cluster.from_coeffs(c_active_occ, c_active_vir, actspace.c_frozen_occ, actspace.c_frozen_vir)\n e0 = self._make_t2(actspace, fock, 
energy_only=True)[1]\n e_fbc = e1 - e0\n return e_fbc\n\n def truncate_bno(self, coeff, occup, bno_threshold=None, verbose=True):\n \"\"\"Split natural orbitals (NO) into bath and rest.\"\"\"\n\n header = \"%s BNOs:\" % self.occtype\n\n if isinstance(bno_threshold, numbers.Number):\n bno_threshold = BNO_Threshold(\"occupation\", bno_threshold)\n nelec_cluster = self.dmet_bath.get_cluster_electrons()\n bno_number = bno_threshold.get_number(occup, electron_total=nelec_cluster)\n\n # Logging\n if verbose:\n if header:\n self.log.info(header.capitalize())\n fmt = \" %4s: N= %4d max= % 9.3g min= % 9.3g sum= % 9.3g ( %7.3f %%)\"\n\n def log_space(name, n_part):\n if len(n_part) == 0:\n self.log.info(fmt[: fmt.index(\"max\")].rstrip(), name, 0)\n return\n with np.errstate(invalid=\"ignore\"): # supress 0/0 warning\n self.log.info(\n fmt,\n name,\n len(n_part),\n max(n_part),\n min(n_part),\n np.sum(n_part),\n 100 * np.sum(n_part) / np.sum(occup),\n )\n\n log_space(\"Bath\", occup[:bno_number])\n log_space(\"Rest\", occup[bno_number:])\n\n c_bath, c_rest = np.hsplit(coeff, [bno_number])\n return c_bath, c_rest\n\n def get_active_space(self, c_active=None):\n dmet_bath = self.dmet_bath\n nao = self.mol.nao\n empty = np.zeros((nao, 0)) if self.spin_restricted else np.zeros((2, nao, 0))\n if self.occtype == \"occupied\":\n if c_active is None:\n c_active_occ = spinalg.hstack_matrices(dmet_bath.c_cluster_occ, self.c_env)\n else:\n c_active_occ = c_active\n c_frozen_occ = empty\n if self.c_buffer is not None:\n raise NotImplementedError\n c_active_vir = dmet_bath.c_cluster_vir\n c_frozen_vir = dmet_bath.c_env_vir\n elif self.occtype == \"virtual\":\n if c_active is None:\n c_active_vir = spinalg.hstack_matrices(dmet_bath.c_cluster_vir, self.c_env)\n else:\n c_active_vir = c_active\n c_frozen_vir = empty\n if self.c_buffer is None:\n c_active_occ = dmet_bath.c_cluster_occ\n c_frozen_occ = dmet_bath.c_env_occ\n else:\n c_active_occ = spinalg.hstack_matrices(dmet_bath.c_cluster_occ, self.c_buffer)\n ovlp = self.fragment.base.get_ovlp()\n r = dot(self.c_buffer.T, ovlp, dmet_bath.c_env_occ)\n dm_frozen = np.eye(dmet_bath.c_env_occ.shape[-1]) - np.dot(r.T, r)\n e, r = np.linalg.eigh(dm_frozen)\n c_frozen_occ = np.dot(dmet_bath.c_env_occ, r[:, e > 0.5])\n\n actspace = Cluster.from_coeffs(c_active_occ, c_active_vir, c_frozen_occ, c_frozen_vir)\n return actspace\n\n def _rotate_dm(self, dm, rot):\n return dot(rot, dm, rot.T)\n\n def _dm_take_env(self, dm):\n ncluster = self.ncluster\n self.log.debugv(\"n(cluster)= %d\", ncluster)\n self.log.debugv(\"tr(D)= %g\", np.trace(dm))\n dm = dm[ncluster:, ncluster:]\n self.log.debugv(\"tr(D[env,env])= %g\", np.trace(dm))\n return dm\n\n def _diagonalize_dm(self, dm):\n n_bno, r_bno = np.linalg.eigh(dm)\n sort = np.s_[::-1]\n n_bno = n_bno[sort]\n r_bno = r_bno[:, sort]\n return r_bno, n_bno\n\n\nclass BNO_Bath_UHF(BNO_Bath):\n def _rotate_dm(self, dm, rot):\n return (super()._rotate_dm(dm[0], rot[0]), super()._rotate_dm(dm[1], rot[1]))\n\n @property\n def ncluster(self):\n if self.occtype == \"occupied\":\n return (self.dmet_bath.c_cluster_occ[0].shape[-1], self.dmet_bath.c_cluster_occ[1].shape[-1])\n if self.occtype == \"virtual\":\n return (self.dmet_bath.c_cluster_vir[0].shape[-1], self.dmet_bath.c_cluster_vir[1].shape[-1])\n\n def _dm_take_env(self, dm):\n ncluster = self.ncluster\n self.log.debugv(\"n(cluster)= (%d, %d)\", ncluster[0], ncluster[1])\n self.log.debugv(\"tr(alpha-D)= %g\", np.trace(dm[0]))\n self.log.debugv(\"tr( beta-D)= %g\", np.trace(dm[1]))\n dm = 
(dm[0][ncluster[0] :, ncluster[0] :], dm[1][ncluster[1] :, ncluster[1] :])\n self.log.debugv(\"tr(alpha-D[env,env])= %g\", np.trace(dm[0]))\n self.log.debugv(\"tr( beta-D[env,env])= %g\", np.trace(dm[1]))\n return dm\n\n def _diagonalize_dm(self, dm):\n r_bno_a, n_bno_a = super()._diagonalize_dm(dm[0])\n r_bno_b, n_bno_b = super()._diagonalize_dm(dm[1])\n return (r_bno_a, r_bno_b), (n_bno_a, n_bno_b)\n\n def log_histogram(self, n_bno):\n if len(n_bno[0]) == len(n_bno[0]) == 0:\n return\n self.log.info(\"%s BNO histogram (alpha/beta):\", self.occtype.capitalize())\n bins = np.hstack([-np.inf, np.logspace(-3, -10, 8)[::-1], np.inf])\n labels = \" \" + \"\".join(\"{:{w}}\".format(\"E-%d\" % d, w=5) for d in range(3, 11))\n ha = helper.make_histogram(n_bno[0], bins=bins, labels=labels, rstrip=False).split(\"\\n\")\n hb = helper.make_histogram(n_bno[1], bins=bins, labels=labels).split(\"\\n\")\n for i in range(len(ha)):\n self.log.info(ha[i] + \" \" + hb[i])\n\n def truncate_bno(self, coeff, occup, *args, **kwargs):\n c_bath_a, c_rest_a = super().truncate_bno(coeff[0], occup[0], *args, **kwargs)\n c_bath_b, c_rest_b = super().truncate_bno(coeff[1], occup[1], *args, **kwargs)\n return (c_bath_a, c_bath_b), (c_rest_a, c_rest_b)\n\n @staticmethod\n def _has_frozen(c_frozen):\n return (c_frozen[0].shape[-1] + c_frozen[1].shape[-1]) > 0\n\n\nclass MP2_BNO_Bath(BNO_Bath):\n def __init__(self, *args, project_dmet_order=0, project_dmet_mode=\"full\", project_dmet=None, **kwargs):\n # Backwards compatibility:\n if project_dmet:\n project_dmet_order = 1\n project_dmet_mode = project_dmet\n self.project_dmet_order = project_dmet_order\n self.project_dmet_mode = project_dmet_mode\n super().__init__(*args, **kwargs)\n if project_dmet:\n # Log isn't set at the top of the function\n self.log.warning(\"project_dmet is deprecated; use project_dmet_order and project_dmet_mode.\")\n\n def _make_t2(self, actspace, fock, eris=None, max_memory=None, blksize=None, energy_only=False):\n \"\"\"Make T2 amplitudes and pair correlation energies.\"\"\"\n\n if eris is None:\n eris, cderi, cderi_neg = self.get_eris_or_cderi(actspace)\n # (ov|ov)\n if eris is not None:\n self.log.debugv(\"Making T2 amplitudes from ERIs\")\n assert eris.ndim == 4\n nocc, nvir = eris.shape[:2]\n # (L|ov)\n elif cderi is not None:\n self.log.debugv(\"Making T2 amplitudes from CD-ERIs\")\n assert cderi.ndim == 3\n assert cderi_neg is None or cderi_neg.ndim == 3\n nocc, nvir = cderi.shape[1:]\n else:\n raise ValueError()\n\n # Fragment projector:\n ovlp = self.base.get_ovlp()\n rfrag = dot(actspace.c_active_occ.T, ovlp, self.c_frag)\n\n t2 = np.empty((nocc, nocc, nvir, nvir)) if not energy_only else None\n mo_energy = self._get_mo_energy(fock, actspace)\n eia = mo_energy[:nocc, None] - mo_energy[None, nocc:]\n max_memory = max_memory or int(1e9)\n if blksize is None:\n blksize = int(max_memory / max(nocc * nvir * nvir * 8, 1))\n nenv = nocc if self.occtype == \"occupied\" else nvir\n ecorr = 0\n for blk in brange(0, nocc, blksize):\n if eris is not None:\n gijab = eris[blk].transpose(0, 2, 1, 3)\n else:\n gijab = einsum(\"Lia,Ljb->ijab\", cderi[:, blk], cderi)\n if cderi_neg is not None:\n gijab -= einsum(\"Lia,Ljb->ijab\", cderi_neg[:, blk], cderi_neg)\n eijab = eia[blk][:, None, :, None] + eia[None, :, None, :]\n t2blk = gijab / eijab\n if not energy_only:\n t2[blk] = t2blk\n # Projected correlation energy:\n tp = einsum(\"ix,i...->x...\", rfrag[blk], t2blk)\n gp = einsum(\"ix,i...->x...\", rfrag[blk], gijab)\n ecorr += 2 * 
einsum(\"ijab,ijab->\", tp, gp) - einsum(\"ijab,ijba->\", tp, gp)\n\n return t2, ecorr\n\n def _get_mo_energy(self, fock, actspace):\n c_act = actspace.c_active\n mo_energy = einsum(\"ai,ab,bi->i\", c_act, fock, c_act)\n return mo_energy\n\n def _get_eris(self, actspace):\n # We only need the (ov|ov) block for MP2:\n mo_coeff = 2 * [actspace.c_active_occ, actspace.c_active_vir]\n eris = self.base.get_eris_array(mo_coeff)\n return eris\n\n def _get_cderi(self, actspace):\n # We only need the (L|ov) block for MP2:\n mo_coeff = (actspace.c_active_occ, actspace.c_active_vir)\n cderi, cderi_neg = self.base.get_cderi(mo_coeff)\n return cderi, cderi_neg\n\n def get_eris_or_cderi(self, actspace):\n eris = cderi = cderi_neg = None\n t0 = timer()\n if self.fragment.base.has_df:\n cderi, cderi_neg = self._get_cderi(actspace)\n else:\n eris = self._get_eris(actspace)\n self.log.timingv(\"Time for AO->MO transformation: %s\", time_string(timer() - t0))\n # TODO: Reuse previously obtained integral transformation into N^2 sized quantity (rather than N^4)\n # else:\n # self.log.debug(\"Transforming previous eris.\")\n # eris = transform_mp2_eris(eris, actspace.c_active_occ, actspace.c_active_vir, ovlp=self.base.get_ovlp())\n return eris, cderi, cderi_neg\n\n def _get_dmet_projector_weights(self, eig):\n assert np.all(eig > -1e-10)\n assert np.all(eig - 1 < 1e-10)\n eig = np.clip(eig, 0, 1)\n mode = self.project_dmet_mode\n if mode == \"full\":\n weights = np.zeros(len(eig))\n elif mode == \"half\":\n weights = np.full(len(eig), 0.5)\n elif mode == \"linear\":\n weights = 2 * abs(np.fmin(eig, 1 - eig))\n elif mode == \"cosine\":\n weights = (1 - np.cos(2 * eig * np.pi)) / 2\n elif mode == \"cosine-half\":\n weights = (1 - np.cos(2 * eig * np.pi)) / 4\n elif mode == \"entropy\":\n weights = 4 * eig * (1 - eig)\n elif mode == \"sqrt-entropy\":\n weights = 2 * np.sqrt(eig * (1 - eig))\n elif mode == \"squared-entropy\":\n weights = (4 * eig * (1 - eig)) ** 2\n else:\n raise ValueError(\"Invalid value for project_dmet_mode: %s\" % mode)\n assert np.all(weights > -1e-14)\n assert np.all(weights - 1 < 1e-14)\n weights = np.clip(weights, 0, 1)\n return weights\n\n def _project_t2(self, t2, actspace):\n \"\"\"Project and symmetrize T2 amplitudes\"\"\"\n self.log.info(\n \"Projecting DMET space for MP2 bath (mode= %s, order= %d).\", self.project_dmet_mode, self.project_dmet_order\n )\n weights = self._get_dmet_projector_weights(self.dmet_bath.n_dmet)\n weights = hstack(self.fragment.n_frag * [1], weights)\n ovlp = self.fragment.base.get_ovlp()\n c_fragdmet = hstack(self.fragment.c_frag, self.dmet_bath.c_dmet)\n if self.occtype == \"occupied\":\n rot = dot(actspace.c_active_vir.T, ovlp, c_fragdmet)\n proj = einsum(\"ix,x,jx->ij\", rot, weights, rot)\n if self.project_dmet_order == 1:\n t2 = einsum(\"xa,ijab->ijxb\", proj, t2)\n elif self.project_dmet_order == 2:\n t2 = einsum(\"xa,yb,ijab->ijxy\", proj, proj, t2)\n else:\n raise ValueError\n elif self.occtype == \"virtual\":\n rot = dot(actspace.c_active_occ.T, ovlp, c_fragdmet)\n proj = einsum(\"ix,x,jx->ij\", rot, weights, rot)\n if self.project_dmet_order == 1:\n t2 = einsum(\"xi,i...->x...\", proj, t2)\n elif self.project_dmet_order == 2:\n t2 = einsum(\"xi,yj,ij...->xy...\", proj, proj, t2)\n else:\n raise ValueError\n t2 = (t2 + t2.transpose(1, 0, 3, 2)) / 2\n return t2\n\n def make_delta_dm1(self, t2, actspace):\n \"\"\"Delta MP2 density matrix\"\"\"\n\n if self.project_dmet_order > 0:\n t2 = self._project_t2(t2, actspace)\n\n # This is equivalent to:\n # do, 
dv = pyscf.mp.mp2._gamma1_intermediates(mp2, eris=eris)\n # do, dv = -2*do, 2*dv\n if self.occtype == \"occupied\":\n dm = 2 * einsum(\"ikab,jkab->ij\", t2, t2) - einsum(\"ikab,jkba->ij\", t2, t2)\n elif self.occtype == \"virtual\":\n dm = 2 * einsum(\"ijac,ijbc->ab\", t2, t2) - einsum(\"ijac,ijcb->ab\", t2, t2)\n assert np.allclose(dm, dm.T)\n return dm\n\n def make_bno_coeff(self, eris=None):\n \"\"\"Construct MP2 bath natural orbital coefficients and occupation numbers.\n\n This routine works for both for spin-restricted and unrestricted.\n\n Parameters\n ----------\n eris: mp2._ChemistERIs\n\n Returns\n -------\n c_bno: (n(AO), n(BNO)) array\n Bath natural orbital coefficients.\n n_bno: (n(BNO)) array\n Bath natural orbital occupation numbers.\n \"\"\"\n t_init = timer()\n\n actspace_orig = self.get_active_space()\n fock = self.base.get_fock_for_bath()\n\n # --- Canonicalization [optional]\n if self.canonicalize[0]:\n self.log.debugv(\"Canonicalizing occupied orbitals\")\n c_active_occ, r_occ = self.fragment.canonicalize_mo(actspace_orig.c_active_occ, fock=fock)\n else:\n c_active_occ = actspace_orig.c_active_occ\n r_occ = None\n if self.canonicalize[1]:\n self.log.debugv(\"Canonicalizing virtual orbitals\")\n c_active_vir, r_vir = self.fragment.canonicalize_mo(actspace_orig.c_active_vir, fock=fock)\n else:\n c_active_vir = actspace_orig.c_active_vir\n r_vir = None\n actspace = Cluster.from_coeffs(\n c_active_occ, c_active_vir, actspace_orig.c_frozen_occ, actspace_orig.c_frozen_vir\n )\n\n t0 = timer()\n t2, ecorr = self._make_t2(actspace, fock, eris=eris)\n t_amps = timer() - t0\n\n dm = self.make_delta_dm1(t2, actspace)\n\n # --- Undo canonicalization\n if self.occtype == \"occupied\" and r_occ is not None:\n dm = self._rotate_dm(dm, r_occ)\n elif self.occtype == \"virtual\" and r_vir is not None:\n dm = self._rotate_dm(dm, r_vir)\n # --- Diagonalize environment-environment block\n dm = self._dm_take_env(dm)\n t0 = timer()\n r_bno, n_bno = self._diagonalize_dm(dm)\n t_diag = timer() - t0\n c_bno = spinalg.dot(self.c_env, r_bno)\n c_bno = fix_orbital_sign(c_bno)[0]\n\n self.log.timing(\n \"Time MP2 bath: amplitudes= %s diagonal.= %s total= %s\",\n *map(time_string, (t_amps, t_diag, (timer() - t_init))),\n )\n\n return c_bno, n_bno, ecorr\n\n\nclass UMP2_BNO_Bath(MP2_BNO_Bath, BNO_Bath_UHF):\n def _get_mo_energy(self, fock, actspace):\n c_act_a, c_act_b = actspace.c_active\n mo_energy_a = einsum(\"ai,ab,bi->i\", c_act_a, fock[0], c_act_a)\n mo_energy_b = einsum(\"ai,ab,bi->i\", c_act_b, fock[1], c_act_b)\n return (mo_energy_a, mo_energy_b)\n\n def _get_eris(self, actspace):\n # We only need the (ov|ov) block for MP2:\n return self.base.get_eris_array_uhf(actspace.c_active_occ, mo_coeff2=actspace.c_active_vir)\n\n def _get_cderi(self, actspace):\n # We only need the (ov|ov) block for MP2:\n mo_a = [actspace.c_active_occ[0], actspace.c_active_vir[0]]\n mo_b = [actspace.c_active_occ[1], actspace.c_active_vir[1]]\n cderi_a, cderi_neg_a = self.base.get_cderi(mo_a)\n cderi_b, cderi_neg_b = self.base.get_cderi(mo_b)\n return (cderi_a, cderi_b), (cderi_neg_a, cderi_neg_b)\n\n def _make_t2(self, actspace, fock, eris=None, max_memory=None, blksize=None, energy_only=False):\n \"\"\"Make T2 amplitudes\"\"\"\n\n if eris is None:\n eris, cderi, cderi_neg = self.get_eris_or_cderi(actspace)\n # (ov|ov)\n if eris is not None:\n assert len(eris) == 3\n assert eris[0].ndim == 4\n assert eris[1].ndim == 4\n assert eris[2].ndim == 4\n nocca, nvira = eris[0].shape[:2]\n noccb, nvirb = eris[2].shape[:2]\n # 
(L|ov)\n elif cderi is not None:\n assert len(cderi) == 2\n assert cderi[0].ndim == 3\n assert cderi[1].ndim == 3\n nocca, nvira = cderi[0].shape[1:]\n noccb, nvirb = cderi[1].shape[1:]\n else:\n raise ValueError()\n\n # Fragment projector:\n ovlp = self.base.get_ovlp()\n rfrag = spinalg.dot(spinalg.T(actspace.c_active_occ), ovlp, self.c_frag)\n\n if not energy_only:\n t2aa = np.empty((nocca, nocca, nvira, nvira))\n t2ab = np.empty((nocca, noccb, nvira, nvirb))\n t2bb = np.empty((noccb, noccb, nvirb, nvirb))\n else:\n t2aa = t2ab = t2bb = None\n mo_energy = self._get_mo_energy(fock, actspace)\n eia_a = mo_energy[0][:nocca, None] - mo_energy[0][None, nocca:]\n eia_b = mo_energy[1][:noccb, None] - mo_energy[1][None, noccb:]\n\n # Alpha-alpha and Alpha-beta:\n max_memory = max_memory or int(1e9)\n if blksize is None:\n blksize_a = int(max_memory / max(nocca * nvira * nvira * 8, 1))\n else:\n blksize_a = blksize\n ecorr = 0\n for blk in brange(0, nocca, blksize_a):\n # Alpha-alpha\n if eris is not None:\n gijab = eris[0][blk].transpose(0, 2, 1, 3)\n else:\n gijab = einsum(\"Lia,Ljb->ijab\", cderi[0][:, blk], cderi[0])\n if cderi_neg[0] is not None:\n gijab -= einsum(\"Lia,Ljb->ijab\", cderi_neg[0][:, blk], cderi_neg[0])\n eijab = eia_a[blk][:, None, :, None] + eia_a[None, :, None, :]\n t2blk = gijab / eijab\n t2blk -= t2blk.transpose(0, 1, 3, 2)\n if not energy_only:\n t2aa[blk] = t2blk\n # Projected correlation energy:\n tp = einsum(\"ix,i...->x...\", rfrag[0][blk], t2blk)\n gp = einsum(\"ix,i...->x...\", rfrag[0][blk], gijab)\n ecorr += (einsum(\"ijab,ijab->\", tp, gp) - einsum(\"ijab,ijba->\", tp, gp)) / 4\n # Alpha-beta\n if eris is not None:\n gijab = eris[1][blk].transpose(0, 2, 1, 3)\n else:\n gijab = einsum(\"Lia,Ljb->ijab\", cderi[0][:, blk], cderi[1])\n if cderi_neg[0] is not None:\n gijab -= einsum(\"Lia,Ljb->ijab\", cderi_neg[0][:, blk], cderi_neg[1])\n eijab = eia_a[blk][:, None, :, None] + eia_b[None, :, None, :]\n t2blk = gijab / eijab\n if not energy_only:\n t2ab[blk] = t2blk\n # Projected correlation energy:\n # Alpha projected:\n tp = einsum(\"ix,i...->x...\", rfrag[0][blk], t2blk)\n gp = einsum(\"ix,i...->x...\", rfrag[0][blk], gijab)\n ecorr += einsum(\"ijab,ijab->\", tp, gp) / 2\n # Beta projected:\n tp = einsum(\"jx,ij...->ix...\", rfrag[1], t2blk)\n gp = einsum(\"jx,ij...->ix...\", rfrag[1], gijab)\n ecorr += einsum(\"ijab,ijab->\", tp, gp) / 2\n\n # Beta-beta:\n if blksize is None:\n blksize_b = int(max_memory / max(noccb * nvirb * nvirb * 8, 1))\n else:\n blksize_b = blksize\n for blk in brange(0, noccb, blksize_b):\n if eris is not None:\n gijab = eris[2][blk].transpose(0, 2, 1, 3)\n else:\n gijab = einsum(\"Lia,Ljb->ijab\", cderi[1][:, blk], cderi[1])\n if cderi_neg[0] is not None:\n gijab -= einsum(\"Lia,Ljb->ijab\", cderi_neg[1][:, blk], cderi_neg[1])\n eijab = eia_b[blk][:, None, :, None] + eia_b[None, :, None, :]\n t2blk = gijab / eijab\n t2blk -= t2blk.transpose(0, 1, 3, 2)\n if not energy_only:\n t2bb[blk] = t2blk\n # Projected correlation energy:\n tp = einsum(\"ix,i...->x...\", rfrag[1][blk], t2blk)\n gp = einsum(\"ix,i...->x...\", rfrag[1][blk], gijab)\n ecorr += (einsum(\"ijab,ijab->\", tp, gp) - einsum(\"ijab,ijba->\", tp, gp)) / 4\n\n return (t2aa, t2ab, t2bb), ecorr\n\n def _project_t2(self, t2, actspace):\n \"\"\"Project and symmetrize T2 amplitudes\"\"\"\n self.log.info(\n \"Projecting DMET space for MP2 bath (mode= %s, order= %d).\", self.project_dmet_mode, self.project_dmet_order\n )\n weightsa = 
self._get_dmet_projector_weights(self.dmet_bath.n_dmet[0])\n weightsb = self._get_dmet_projector_weights(self.dmet_bath.n_dmet[1])\n weightsa = hstack(self.fragment.n_frag[0] * [1], weightsa)\n weightsb = hstack(self.fragment.n_frag[1] * [1], weightsb)\n\n # Project and symmetrize:\n t2aa, t2ab, t2bb = t2\n ovlp = self.fragment.base.get_ovlp()\n c_fragdmet_a = hstack(self.fragment.c_frag[0], self.dmet_bath.c_dmet[0])\n c_fragdmet_b = hstack(self.fragment.c_frag[1], self.dmet_bath.c_dmet[1])\n if self.occtype == \"occupied\":\n rota = dot(actspace.c_active_vir[0].T, ovlp, c_fragdmet_a)\n rotb = dot(actspace.c_active_vir[1].T, ovlp, c_fragdmet_b)\n proja = einsum(\"ix,x,jx->ij\", rota, weightsa, rota)\n projb = einsum(\"ix,x,jx->ij\", rotb, weightsb, rotb)\n if self.project_dmet_order == 1:\n t2aa = einsum(\"xa,ijab->ijxb\", proja, t2aa)\n t2bb = einsum(\"xa,ijab->ijxb\", projb, t2bb)\n t2ab = (einsum(\"xa,ijab->ijxb\", proja, t2ab) + einsum(\"xb,ijab->ijax\", projb, t2ab)) / 2\n # Not tested:\n elif self.project_dmet_order == 2:\n t2aa = einsum(\"xa,yb,ijab->ijxy\", proja, proja, t2aa)\n t2bb = einsum(\"xa,yb,ijab->ijxy\", projb, projb, t2bb)\n t2ab = (\n einsum(\"xa,yb,ijab->ijxy\", proja, projb, t2ab) + einsum(\"xb,ya,ijab->ijyx\", projb, proja, t2ab)\n ) / 2\n else:\n raise ValueError\n elif self.occtype == \"virtual\":\n rota = dot(actspace.c_active_occ[0].T, ovlp, c_fragdmet_a)\n rotb = dot(actspace.c_active_occ[1].T, ovlp, c_fragdmet_b)\n proja = einsum(\"ix,x,jx->ij\", rota, weightsa, rota)\n projb = einsum(\"ix,x,jx->ij\", rotb, weightsb, rotb)\n if self.project_dmet_order == 1:\n t2aa = einsum(\"xi,i...->x...\", proja, t2aa)\n t2bb = einsum(\"xi,i...->x...\", projb, t2bb)\n t2ab = (einsum(\"xi,i...->x...\", proja, t2ab) + einsum(\"xj,ij...->ix...\", projb, t2ab)) / 2\n # Not tested:\n elif self.project_dmet_order == 2:\n t2aa = einsum(\"xi,yj,ij...->xy...\", proja, proja, t2aa)\n t2bb = einsum(\"xi,yj,ij...->xy...\", projb, projb, t2bb)\n t2ab = (\n einsum(\"xi,yj,ij...->xy...\", proja, projb, t2ab) + einsum(\"xj,yi,ij...->yx...\", projb, proja, t2ab)\n ) / 2\n else:\n raise ValueError\n t2aa = (t2aa + t2aa.transpose(1, 0, 3, 2)) / 2\n t2bb = (t2bb + t2bb.transpose(1, 0, 3, 2)) / 2\n return (t2aa, t2ab, t2bb)\n\n def make_delta_dm1(self, t2, actspace):\n \"\"\"Delta MP2 density matrix\"\"\"\n\n if self.project_dmet_order > 0:\n t2 = self._project_t2(t2, actspace)\n\n t2aa, t2ab, t2bb = t2\n # Construct occupied-occupied DM\n if self.occtype == \"occupied\":\n dma = einsum(\"imef,jmef->ij\", t2aa.conj(), t2aa) / 2 + einsum(\"imef,jmef->ij\", t2ab.conj(), t2ab)\n dmb = einsum(\"imef,jmef->ij\", t2bb.conj(), t2bb) / 2 + einsum(\"mief,mjef->ij\", t2ab.conj(), t2ab)\n # Construct virtual-virtual DM\n elif self.occtype == \"virtual\":\n dma = einsum(\"mnae,mnbe->ba\", t2aa.conj(), t2aa) / 2 + einsum(\"mnae,mnbe->ba\", t2ab.conj(), t2ab)\n dmb = einsum(\"mnae,mnbe->ba\", t2bb.conj(), t2bb) / 2 + einsum(\"mnea,mneb->ba\", t2ab.conj(), t2ab)\n assert np.allclose(dma, dma.T)\n assert np.allclose(dmb, dmb.T)\n return (dma, dmb)\n\n\n# ================================================================================================ #\n\n# if self.opts.plot_orbitals:\n# #bins = np.hstack((-np.inf, np.self.logspace(-9, -3, 9-3+1), np.inf))\n# bins = np.hstack((1, np.self.logspace(-3, -9, 9-3+1), -1))\n# for idx, upper in enumerate(bins[:-1]):\n# lower = bins[idx+1]\n# mask = np.self.logical_and((dm_occ > lower), (dm_occ <= upper))\n# if np.any(mask):\n# coeff = c_rot[:,mask]\n# 
self.log.info(\"Plotting MP2 bath density between %.0e and %.0e containing %d orbitals.\" % (upper, lower, coeff.shape[-1]))\n# dm = np.dot(coeff, coeff.T)\n# dset_idx = (4001 if kind == \"occ\" else 5001) + idx\n# self.cubefile.add_density(dm, dset_idx=dset_idx)\n","repo_name":"BoothGroup/Vayesta","sub_path":"vayesta/core/bath/bno.py","file_name":"bno.py","file_ext":"py","file_size_in_byte":31599,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"31"} +{"seq_id":"14746736605","text":"from pymatchseries import matchseries as ms\nimport os\nimport gc\nimport shutil\nimport pytest\nfrom hyperspy.signals import Signal2D, EDSTEMSpectrum\nimport dask.array as da\nimport numpy as np\n\n\n# numpy dataset\nnp.random.seed(1001)\nimageds = np.random.randint(1, 5, size=(3, 32, 32))\nimageds_da = da.from_array(imageds)\nhsimage = Signal2D(imageds)\nhsimage_da = Signal2D(imageds_da).as_lazy()\nhsimage.metadata.General.original_filename = \"non/existant/path.emd\"\nhsimage.metadata.General.title = \"dummy\"\nhsimage_da.metadata.General.original_filename = \"non/existant/path.emd\"\nhsimage_da.metadata.General.title = \"dummy\"\n\n# spectrum maps\nspecmap = np.random.randint(1, 5, size=(3, 32, 32, 3))\nspecmap_da = da.from_array(specmap)\nhsspecmap = EDSTEMSpectrum(specmap)\nhsspecmap_da = EDSTEMSpectrum(specmap_da).as_lazy()\n\n\n@pytest.mark.parametrize(\"data\", [imageds, imageds_da, hsimage, hsimage_da])\ndef test_match_series_create(data):\n mso = ms.MatchSeries(data)\n assert not mso.completed\n assert type(mso.data) == type(data)\n\n\n@pytest.mark.xfail(raises=ValueError)\ndef test_match_series_fail():\n ms.MatchSeries()\n\n\n@pytest.mark.xfail(raises=NotImplementedError)\ndef test_match_series_typefail():\n ms.MatchSeries(data=\"won't work\")\n\n\n@pytest.mark.xfail(raises=ValueError)\ndef test_match_series_dimfail():\n ms.MatchSeries(data=np.ones((4, 4, 4, 4)))\n\n\n@pytest.mark.xfail(raises=ValueError)\ndef test_match_series_sizefail():\n ms.MatchSeries(data=np.ones((4, 6, 6)))\n\n\n@pytest.mark.parametrize(\n \"data, f\",\n [\n (imageds, 1),\n (imageds_da, 2),\n (hsimage, 3),\n (hsimage_da, 4),\n ],\n)\ndef test_match_series_save_load(data, f):\n mso = ms.MatchSeries(data, path=f\"test{f}\")\n mso._MatchSeries__prepare_calculation()\n mso.save_data(mso.input_data_file)\n msl = ms.MatchSeries.load(mso.path)\n assert mso.metadata == msl.metadata\n assert mso.configuration == msl.configuration\n assert type(mso.data) == type(msl.data)\n path = mso.path\n del mso\n del msl\n gc.collect()\n shutil.rmtree(path)\n\n\n@pytest.fixture(scope=\"module\")\ndef match_series_dummy():\n mso = ms.MatchSeries(imageds)\n mso.run()\n yield mso\n shutil.rmtree(mso.path)\n\n\n@pytest.mark.parametrize(\"data\", [None, imageds, imageds_da, hsimage, hsimage_da])\ndef test_match_series_apply_images(data, match_series_dummy):\n defdat = match_series_dummy.get_deformed_images(data)\n if data is None:\n data = match_series_dummy.data\n assert type(data) == type(defdat)\n\n\n@pytest.mark.parametrize(\"data\", [specmap, specmap_da, hsspecmap, hsspecmap_da])\ndef test_match_series_apply_spectra(data, match_series_dummy):\n defspec = match_series_dummy.apply_deformations_to_spectra(data)\n defspec2 = match_series_dummy.apply_deformations_to_spectra(data, sum_frames=False)\n assert type(data) == type(defspec)\n assert type(data) == 
type(defspec2)\n","repo_name":"din14970/pyMatchSeries","sub_path":"pymatchseries/tests/test_matchseries.py","file_name":"test_matchseries.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"31"}
+{"seq_id":"18132583199","text":"\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\nimport os\nimport hashlib\n\nimport sqlalchemy\nfrom sqlalchemy import or_, and_\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, aliased, relationship\n\nimport alfredo\n\n\nBase = declarative_base()\n\n\nclass FileMapping(Base):\n    __tablename__ = 'file_mapping'\n    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)\n    local_path = sqlalchemy.Column(sqlalchemy.Text, unique=True, index=True)\n    md5 = sqlalchemy.Column(sqlalchemy.String(32))\n    galaxy_id = sqlalchemy.Column(sqlalchemy.String(30))\n    file_type = sqlalchemy.Column(sqlalchemy.String(32))\n\n    def __init__(self, file_type, path, md5, gid):\n        self.file_type = file_type\n        self.local_path = path\n        self.md5 = md5\n        self.galaxy_id = gid\n\nclass WorkflowRun(Base):\n    __tablename__ = 'workflow_run'\n    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)\n    history_galaxy_id = sqlalchemy.Column(sqlalchemy.String(30))\n    workflow_id = sqlalchemy.Column(sqlalchemy.Integer)\n\n    def __init__(self, workflow_id, history_galaxy_id):\n        self.workflow_id = workflow_id\n        self.history_galaxy_id = history_galaxy_id\n\n\n\nclass SQLiteMapping(alfredo.FileGalaxyMap):\n\n    def __init__(self, sess):\n        alfredo.FileGalaxyMap.__init__(self, sess)\n        url = \"sqlite:///\" + os.path.join( os.path.abspath(self.sess.basedir), \"local.mapping\")\n        self.engine = sqlalchemy.create_engine(url, echo=True)\n        Base.metadata.create_all(self.engine)\n        self.sql_sessionmaker = sessionmaker(bind=self.engine)\n        self.sql_sess = self.sql_sessionmaker()\n\n    def _find_local_file(self, file_type, path):\n        relpath = os.path.relpath(path, self.sess.basedir)\n        hmd5 = self._calc_md5(path)\n        ref = self.sql_sess.query( FileMapping ).filter( FileMapping.file_type == file_type, FileMapping.md5 == hmd5, FileMapping.local_path == relpath ).first()\n        if ref is not None:\n            return ref.galaxy_id\n        return None\n\n    def _add_local_file(self, file_type, path, gid):\n        relpath = os.path.relpath(path, self.sess.basedir)\n        hmd5 = self._calc_md5(relpath)\n        wm = FileMapping(file_type, relpath, hmd5, gid)\n        self.sql_sess.add(wm)\n        self.sql_sess.commit()\n\n    def find_workflow_run(self, workflow_gid, id_mapping):\n        q = self.sql_sess.query( WorkflowRun )\n        for i in id_mapping:\n            a = aliased( WorkflowFileLink )\n            f = aliased( FileMapping )\n            q = q.filter(\n                and_(\n                    a.run_id == WorkflowRun.id,\n                    a.input_number == i,\n                    a.file_id == f.id,\n                    f.galaxy_id == id_mapping[i]['id']\n                )\n            )\n        res = q.first()\n        if res is not None:\n            return res.history_galaxy_id\n        return None\n\n    def add_workflow_run(self, workflow_gid, id_mapping, history_id):\n        w_res = self.sql_sess.query(FileMapping).filter( FileMapping.galaxy_id == workflow_gid ).first()\n        if w_res 
is None:\n raise alfredo.ElementNotFound(workflow_gid)\n\n wfr = WorkflowRun(w_res.id, history_id)\n self.sql_sess.add(wfr)\n self.sql_sess.commit()\n\n for i in id_mapping:\n\n d_res = self.sql_sess.query( FileMapping ).filter( FileMapping.galaxy_id == id_mapping[i]['id'] ).first()\n if d_res is None:\n raise alfredo.ElementNotFound( id_mapping[i]['id'] )\n wfl = WorkflowFileLink( wfr.id, i, d_res.id )\n self.sql_sess.add(wfl)\n self.sql_sess.commit()\n\n def find_local_workflow(self, path):\n return self._find_local_file(\"workflow\", path)\n\n def add_local_workflow(self, path, gid):\n self._add_local_file(\"workflow\", path, gid)\n\n def find_local_dataset(self, path):\n return self._find_local_file(\"dataset\", path)\n\n def add_local_dataset(self, path, gid):\n self._add_local_file(\"dataset\", path, gid)\n\n def _calc_md5(self,path):\n md5 = hashlib.md5()\n f = open(path,'rb')\n smeta = \"\"\n block_size = 80000\n while True:\n data = f.read(block_size)\n if not data:\n break\n md5.update(data)\n smeta += data\n return md5.hexdigest()\n","repo_name":"kellrott/nebula","sub_path":"nebula/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"35116097259","text":"from flask import Flask, render_template\nfrom flask_api import FlaskAPI\nfrom pdb import *\nimport json\n\nprogressAPI = FlaskAPI(__name__)\n\n\n@progressAPI.route('/')\ndef hello_world():\n author = \"butt\"\n name = \"a butt\"\n return \"a butt\"\n\n\n@progressAPI.route('/api/playtime/')\n@progressAPI.route('/api/playtime/
<p>
/')\ndef Playtime(p=None):\n butt = ButtDbInterface()\n if p:\n playtime = butt.playtime_single(p)\n a = []\n a.append({\"player\": p, \"playtime\": playtime[0], \"sessions\": playtime[1]})\n return flask.jsonify({\"playtime\": a})\n\n else:\n return flask.jsonify({\"playtime\": butt.playtime_global()})\n\n\nif __name__ == '__main__':\n progressAPI.run()\n","repo_name":"Fart-Butt/progress.fartcannon.com","sub_path":"progress/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19141239140","text":"import json\n\nfrom django.test import RequestFactory\nfrom django.test import TestCase\nfrom rest_framework import status\n\nfrom bothub.api.v2.organization.views import OrganizationViewSet\nfrom bothub.api.v2.tests.utils import create_user_and_token\nfrom bothub.common.models import Organization, OrganizationAuthorization\n\n\nclass NewOrganizationAPITestCase(TestCase):\n def setUp(self):\n self.factory = RequestFactory()\n self.owner, self.owner_token = create_user_and_token(\"owner\")\n\n def request(self, data, token=None):\n authorization_header = (\n {\"HTTP_AUTHORIZATION\": \"Token {}\".format(token.key)} if token else {}\n )\n\n request = self.factory.post(\n \"/v2/org/organization/\", data, **authorization_header\n )\n\n response = OrganizationViewSet.as_view({\"post\": \"create\"})(request)\n response.render()\n content_data = json.loads(response.content)\n return (response, content_data)\n\n def test_okay(self):\n response, content_data = self.request(\n {\n \"name\": \"Organization 1\",\n \"nickname\": \"organization1\",\n \"locale\": \"Brazil\",\n \"description\": \"This organization is very good\",\n },\n self.owner_token,\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n organization = Organization.objects.get(pk=content_data.get(\"id\"))\n\n self.assertEqual(organization.name, \"Organization 1\")\n self.assertEqual(organization.nickname, \"organization1\")\n self.assertEqual(organization.locale, \"Brazil\")\n self.assertEqual(organization.description, \"This organization is very good\")\n\n organization_authorization = OrganizationAuthorization.objects.filter(\n organization=organization, user=self.owner\n )\n\n self.assertEqual(organization_authorization.count(), 1)\n self.assertEqual(\n organization_authorization.first().role,\n OrganizationAuthorization.ROLE_ADMIN,\n )\n","repo_name":"weni-ai/bothub-engine","sub_path":"bothub/api/v2/tests/test_organization.py","file_name":"test_organization.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"38749477569","text":"# This is a function to ask a matrix to the user and shaping it.\n\nimport numpy as np\n\ndef ask_matrix():\n matrix = []\n el = float(input('Entrez le premier élément de la matrice : '))\n matrix.append(el)\n while el:\n el = input(\"Entrez l'élément suivant, appuyez sur enter si termniné: \")\n if el:\n matrix.append(float(el))\n return matrix\n\ndef create_matrix(M):\n matrix = M\n nb_columns = int(input('Entrez le nombre de colonnes : '))\n nb_rows = int(input('Entrez le nombre de lignes : '))\n return np.reshape(matrix, (nb_rows, nb_columns))\n\n\n# Press the green button in the gutter to run the 1script.\nif __name__ == '__main__':\n 
print(create_matrix(ask_matrix()))\n\n","repo_name":"QuentinStouffs/MathPy","sub_path":"enter_the_matrix.py","file_name":"enter_the_matrix.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73229013209","text":"# Escribir un programa que pida ingresar coordenadas (x,y) que representan puntos en el plano.\n# Informar cuántos puntos se han ingresado en el primer, segundo, tercer y cuarto cuadrante. Al comenzar el programa se pide que se ingrese\n# la cantidad de puntos a procesar.\n\ncantidad=int(input(\"Cuantas coordenadas quiere introducir? \"))\nx=0\ny=0\ncuadrante1=0\ncuadrante2=0\ncuadrante3=0\ncuadrante4=0\n\nfor f in range(cantidad):\n x=float(input(\"Ingrese el valor x de la coordenada \"))\n y=float(input(\"Ingrese el valor y de la coordenada \"))\n\n if x>0 and y>0:\n cuadrante2=cuadrante2+1\n elif x<0 and y>0:\n cuadrante1=cuadrante1+1\n elif x<0 and y<0:\n cuadrante3=cuadrante3+1\n else:\n cuadrante4=cuadrante4+1\n\nprint(\"El número de coordenadas en el 1º cuadrante es de: \", cuadrante1)\nprint(\"El número de coordenadas en el 2º cuadrante es de: \", cuadrante2)\nprint(\"El número de coordenadas en el 3º cuadrante es de: \", cuadrante3)\nprint(\"El número de coordenadas en el 4º cuadrante es de: \", cuadrante4)","repo_name":"AlvaroGT17/Lenguaje-Python","sub_path":"Programas/Programa52.py","file_name":"Programa52.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32353244079","text":"class Solution:\n def multiply(self, num1: str, num2: str) -> str:\n if num1 == '0' or num2 == '0':\n return '0'\n \n int_num1, int_num2 = 0, 0 \n \n for i in num1:\n int_num1 = int_num1 * 10 + (ord(i) - ord('0'))\n \n for i in num2:\n int_num2 = int_num2 * 10 + (ord(i) - ord('0'))\n \n print(int_num1, int_num2)\n int_res = int_num1 * int_num2\n \n str_res = \"\"\n while int_res:\n str_res += chr(ord('0') + int_res % 10) \n int_res //= 10\n \n #print(str_res)\n return str_res[::-1]\n\n'''\nMedium\nSimul\n\n간단하면서도 복잡한 문제\nint() 사용금지\nord 테크닉 ord(i) - ord('0') 이면 원래 숫자가 가진 값이 나온다.\n다시 변경할때 chr(ord('0')- int_res%10) 하게되면 마지막자리의 문자열로 변경되어 나옴\n\n\n'''","repo_name":"suleesulee/TIL","sub_path":"Algorithm/Leetcode/[M]43. Multiply Strings.py","file_name":"[M]43. 
Multiply Strings.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27660409961","text":"#!/Users/maxcummins/miniconda2/envs/py36ipy/bin/python\n\n#used to edit headers of customDB_Feb_2018.fasta to make them compatible with ARIBAlord\n\nimport re\nwith open('../E_coli_customDB/EC_customDB.fasta', 'r+' ) as f:\n f = f.read()\nf = re.sub('(>[^:]+):(.*)', r'\\1_\\2', f)\nf = re.sub('insertion_', '', f)\n\nprint(f)\n","repo_name":"maxlcummins/E_coli_customDB","sub_path":"Change_headers.py","file_name":"Change_headers.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19543765362","text":"def solution(numbers, target):\n answer = 0\n \n def dfs(num, level):\n nonlocal answer\n \n if level == len(numbers):\n if num == target:\n answer += 1\n return\n \n dfs(num + numbers[level], level + 1)\n dfs(num - numbers[level], level + 1)\n \n dfs(numbers[0], 1)\n dfs(-numbers[0], 1)\n \n return answer\n \nprint(f'test1 = {solution([1,1,1,1,1],3)}')\nprint(f'test2 = {solution([4,1,2,1],4)}')","repo_name":"Ji-Hwan-Jung/coding-test","sub_path":"level2/타겟 넘버.py","file_name":"타겟 넘버.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"44696752757","text":"#Vì đề bài cho hint là xor với 1 single byte, chắc là nó sẽ nằm trong khoảng từ 1 -> 256 ý nên ta làm thôi^^\n\nkey1 = \"73626960647f6b206821204f21254f7d694f7624662065622127234f726927756d\"\n\n#decode hex\ns = [o for o in bytes.fromhex(key1)]\nprint(s)\n\n#lặp tới 256\nfor order in range(256):\n\tpossible_flag_ord = [order ^ o for o in s]\n\tfake_flag = \"\".join(chr(o) for o in possible_flag_ord)\n\n\tif(fake_flag.startswith(\"crypto\")):\n\t\treal_flag = fake_flag\n\t\tbreak\n\nprint(real_flag)","repo_name":"saamnguyen/cryptohack","sub_path":"beginner/xorkey0.py","file_name":"xorkey0.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9901435980","text":"### this document uses 'anal' as short for 'analyze'\n\nimport rid\nimport json\nimport csv\nfrom collections import defaultdict as dd\nfrom pdb import pm\n\n#initalize RID --- copied (almost) directly from rid.py\nrider = rid.RegressiveImageryDictionary()\nrider.load_dictionary_from_string(rid.DEFAULT_RID_DICTIONARY)\nrider.load_exclusion_list_from_string(rid.DEFAULT_RID_EXCLUSION_LIST)\nrid=rider\ndel rider\n\ndb = json.load(open(\"all-posts.json\"))\n# [(date, post), ...]\n\n\ncategory_names = set()\n\ndef anal_post(post):\n date, post = post\n results = rid.analyze(post)\n out = {catergory.full_name(): count for catergory,count in results.category_count.items()}\n category_names.update(out.keys()) \n out[\"date\"] = date\n out[\"length\"] = results.word_count\n return out \n\nresults = [anal_post(row) for row in db]\ncols = [\"date\", \"length\"] + sorted(list(category_names))\n\nwriter = csv.DictWriter(open(\"anal.csv\", 'w'), fieldnames=cols, restval=0)\nwriter.writeheader()\nwriter.writerows(results)\n\n\n\n\n\n","repo_name":"technillogue/exomnemosyne","sub_path":"RID-analyze-tumblr.py","file_name":"RID-analyze-tumblr.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7326159186","text":"from 
pprint import pprint\nimport sys\n\nclass Policy:\n def __init__(self, policy, loc_data):\n self.key = list(policy)[0]\n self.name = loc_data.get('policy_' + self.key, self.key)\n self._loc_data = loc_data\n\n policy_data = policy[self.key]\n self.options = self._options(policy_data, loc_data)\n\n def _options(self, policy_data, loc_data):\n return [Option(entry['option'], loc_data)\n for entry in policy_data\n if list(entry)[0] == 'option']\n\n\nclass Option:\n def __init__(self, option_data, loc_data):\n self.name = self._name(option_data, loc_data)\n self.prerequisites = self._prerequisites(option_data)\n\n\n def _name(self, option_data, loc_data):\n unlocalized = next(iter(\n subkey for subkey in option_data if list(subkey)[0] == 'name'\n ))['name']\n\n return loc_data[unlocalized]\n\n def _prerequisites(self, option_data):\n try:\n prerequisites = next(iter(\n subkey for subkey in option_data\n if list(subkey)[0] == 'prerequisites'\n ))['prerequisites']\n except (StopIteration):\n prerequisites = []\n\n return prerequisites\n","repo_name":"iHamsterball/stellaris_tech_tree","sub_path":"stellaris_tech_tree/game_objects/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"31"} +{"seq_id":"74796863128","text":"from __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom typing import List\n\nfrom loguru import logger\n\nfrom dubdub import (\n Binary,\n Expr,\n Grouping,\n Literal,\n Node,\n Token,\n TokenType,\n Unary,\n Visitor,\n)\n\n\nclass PrintVisitor(Visitor):\n # def on_visit(self, node: Node):\n # logger.warning(f\"Printing {node.vname}\")\n # return super().on_visit(node)\n\n def visit_token(self, node: \"Node\"):\n logger.debug(\"Visiting a token\")\n\n def visit_binary(self, node: \"Binary\"):\n return self.parenthesize(node.token.lexeme, node.right, node.left)\n\n def visit_unary(self, node: \"Node\"):\n return self.parenthesize(node.token.lexeme, node.right)\n\n \"\"\"\n Normal expressions \n \"\"\"\n\n def visit_grouping(self, expr: \"Grouping\") -> str:\n return self.parenthesize(\"group\", expr.expression)\n\n def visit_literal(self, node: \"Literal\") -> str:\n if node.value is None:\n return \"nil\"\n return str(node.value)\n\n def parenthesize(self, name: str, *exprs: \"Expr\"):\n str_resp: str = f\"({name}\"\n for expr in exprs:\n expr_resp = self.visit(expr)\n str_resp += f\" {expr_resp}\"\n str_resp += f\")\"\n return str_resp\n\n\ndef step():\n test_binary = Binary(\n right=Unary(token=Token(TokenType.MINUS, \"-\", None, 1), right=Literal(123)),\n token=Token(TokenType.STAR, \"*\", None, 1),\n left=Grouping(Literal(45.67)),\n )\n printer = PrintVisitor()\n\n logger.info(printer.visit(test_binary))\n\n\nif __name__ == \"__main__\":\n step()\n","repo_name":"rltools/dubdub","sub_path":"dubdub/visitors.py","file_name":"visitors.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3882125196","text":"from django.shortcuts import render, redirect\r\nfrom django.http import HttpResponse, HttpResponseRedirect\r\nfrom django.urls import reverse\r\nfrom django.contrib.auth import authenticate, login, logout\r\nfrom django.contrib import messages\r\nfrom phonenumber_field.phonenumber import PhoneNumber\r\nimport json\r\nfrom app.models import User, OTPCode, Application\r\nfrom .models import StaffProfile, Job, Schedule\r\nfrom 
django.contrib.sites.shortcuts import get_current_site\r\nfrom django.utils.encoding import force_bytes\r\nfrom django.utils.http import urlsafe_base64_encode\r\nfrom django.template.loader import render_to_string\r\nfrom app.tokens import account_activation_token\r\nfrom django.utils.encoding import force_str\r\nfrom django.utils.http import urlsafe_base64_decode\r\nfrom django.core.mail import send_mail\r\nfrom django.conf import settings\r\nimport random\r\nimport datetime\r\nfrom django.db.utils import IntegrityError\r\nfrom django.core.paginator import Paginator\r\n\r\n# Create your views here.\r\n\r\ndef index(request):\r\n if not request.user.is_authenticated:\r\n messages.error(request, 'Please login first')\r\n return HttpResponseRedirect(reverse('login_dashboard'))\r\n return render(request, 'dashboard/index.html')\r\n\r\n #Login of Staff/admin dashboard \r\ndef login_dashboard(request):\r\n if request.method == 'POST':\r\n try:\r\n email = request.POST.get(\"email\")\r\n password = request.POST.get(\"password\")\r\n print(request.POST)\r\n user = authenticate(username=email, password=password)\r\n print(user)\r\n if user is not None:\r\n if user.user_type == '3':\r\n messages.error(request, 'Not authorized to login')\r\n return HttpResponseRedirect(reverse('login_dashboard'))\r\n if user.user_type == '2':\r\n login(request, user)\r\n request.session['is_verified'] = False\r\n\r\n number_list = [x for x in range(10)]\r\n codes = []\r\n\r\n for i in range(5):\r\n digit = random.choice(number_list)\r\n codes.append(digit)\r\n \r\n code = \"\".join(str(item) for item in codes)\r\n \r\n\r\n otp_code, created = OTPCode.objects.update_or_create(\r\n user=user,\r\n defaults={\"code\": code},\r\n )\r\n print(otp_code)\r\n print(otp_code.code)\r\n\r\n send_verification_otp(request, otp_code.code, user)\r\n return HttpResponseRedirect(reverse('staff_otp'))\r\n\r\n login(request, user)\r\n return HttpResponseRedirect(reverse('index'))\r\n \r\n else:\r\n messages.add_message(request, messages.ERROR, 'Email or password incorrect')\r\n return HttpResponseRedirect(reverse('login_dashboard'))\r\n except Exception as e:\r\n print(str(e))\r\n return HttpResponseRedirect(reverse('index'))\r\n return render(request, 'dashboard/login.html')\r\n\r\n #Logout Staff/admin dashboard \r\ndef logout_dashboard(request):\r\n if request.user.is_authenticated:\r\n logout(request)\r\n \r\n return HttpResponseRedirect(reverse('login_dashboard'))\r\n\r\n#Send reset email for staff\r\ndef send_reset_link(request,email):\r\n from django.http import HttpRequest\r\n from django.contrib.auth.forms import PasswordResetForm\r\n try: \r\n form = PasswordResetForm({'email': email})\r\n if form.is_valid():\r\n print(\"Sending email for to this email:\", email)\r\n form.save(request=request, from_email=settings.EMAIL_HOST_USER, \r\n email_template_name='dashboard/password_reset/password_reset_email.html')\r\n\r\n except Exception as e:\r\n print(str(e))\r\n return 'success'\r\n\r\n#Send OTP for staff\r\ndef send_verification_otp(request, otp_code, user):\r\n \r\n subject = 'One time Password - CorpU'\r\n message = render_to_string('app/emails/otp_verification_email.html', {\r\n 'user': user,\r\n 'code': otp_code\r\n\r\n })\r\n send_mail(subject, message, settings.EMAIL_HOST_USER, [user.email], fail_silently=False)\r\n messages.success(request, f\"Please check your email for OTP code\")\r\n\r\n#Send account confirmation for staff\r\ndef send_confirmation_email(request, user, email):\r\n current_site = 
get_current_site(request)\r\n    print(\"current site\", current_site)\r\n    subject = 'Confirm your CorpU account'\r\n    message = render_to_string('dashboard/emails/confirmation_email.html', {\r\n        'user': user,\r\n        'domain': current_site.domain,\r\n        'uid': urlsafe_base64_encode(force_bytes(user.pk)),\r\n        'token': account_activation_token.make_token(user),\r\n        'protocol': 'https' if request.is_secure() else 'http'\r\n\r\n    })\r\n    send_mail(subject, message, settings.EMAIL_HOST_USER, [user.email], fail_silently=False)\r\n    messages.success(request, \"Please check your email to confirm your registration\")\r\n\r\n#OTP implementation\r\ndef otp_view(request):\r\n    if not request.user.is_authenticated:\r\n        return redirect('login_dashboard')\r\n    if request.method == 'POST':\r\n        code = request.POST['otp']\r\n        print(code)\r\n        otp_codes = OTPCode.objects.filter(user=request.user)\r\n        print(otp_codes)\r\n        if otp_codes:\r\n            if otp_codes[0].code == code:\r\n                request.session['is_verified'] = True\r\n                messages.success(request, 'Logged in!')\r\n                return HttpResponseRedirect(reverse('index'))\r\n        logout_dashboard(request)\r\n        messages.error(request, 'OTP verification failed!')\r\n        return HttpResponseRedirect(reverse('login_dashboard'))\r\n    return render(request, 'dashboard/staff_otp.html')\r\n\r\n#Account activation\r\ndef activate_account(request, uidb64, token):\r\n    try:\r\n        uid = force_str(urlsafe_base64_decode(uidb64))\r\n        user = User.objects.get(pk=uid)\r\n    except (TypeError, ValueError, OverflowError, User.DoesNotExist):\r\n        user = None\r\n\r\n    if user is not None and account_activation_token.check_token(user, token):\r\n        user.is_active = True\r\n        user.save()\r\n        # login(request, user)\r\n        messages.success(request, ('Your account has been verified. You can log in now'))\r\n        return redirect('login_dashboard')\r\n    else:\r\n        messages.warning(request, ('The confirmation link was invalid, possibly because it has already been used.'))\r\n        return redirect('index')\r\n\r\n#Add Staff by Admin\r\ndef add_staff(request):\r\n    if not request.user.is_authenticated or not request.user.is_superuser:\r\n        messages.error(request, 'Only admins are allowed here')\r\n        return HttpResponseRedirect(reverse('index'))\r\n\r\n    if request.method == 'POST':\r\n        first_name = request.POST['firstname']\r\n        last_name = request.POST['lastname']\r\n        email = request.POST['email']\r\n        phone_no = request.POST['phone_no']\r\n        faculty = request.POST['faculty']\r\n        password = request.POST['password']\r\n\r\n        number = PhoneNumber.from_string(phone_no, region=\"AU\")\r\n        print(number.as_national)\r\n        print(number.as_e164)\r\n\r\n#Edit Staff details\r\ndef edit_staff(request, staff_id):\r\n    staff_profile = StaffProfile.objects.filter(id=staff_id).first()\r\n    if request.method == 'POST':\r\n        first_name = request.POST['firstname']\r\n        last_name = request.POST['lastname']\r\n        email = request.POST['email']\r\n        phone_no = request.POST['phone_no']\r\n        faculty = request.POST['faculty']\r\n        try:\r\n            user = User.objects.filter(id=staff_profile.user.id).first()\r\n            user.first_name = first_name\r\n            user.last_name = last_name\r\n            user.email = email\r\n            user.phone_no = phone_no\r\n            user.save()\r\n            staff_profile.faculty = faculty\r\n            staff_profile.save()\r\n            messages.success(request, 'Staff updated')\r\n            return HttpResponseRedirect(reverse('manage_staff'))\r\n        except Exception as e:\r\n            print(str(e))\r\n            messages.error(request, 'Staff could not be updated')\r\n            return HttpResponseRedirect(reverse('manage_staff'))\r\n
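    # On a GET request (or after a failed update), re-render the form pre-filled with the current profile\r\n    return render(request, 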
'dashboard/edit_staff.html', {\"profile\": staff_profile})\r\n\r\n#Manage Staff details\r\ndef manage_staff(request):\r\n    if not request.user.is_authenticated or not request.user.is_superuser:\r\n        messages.error(request, 'Only admins are allowed here')\r\n        return HttpResponseRedirect(reverse('index'))\r\n\r\n    profiles = StaffProfile.objects.all().order_by(\"id\")\r\n    paginator = Paginator(profiles, 10)\r\n    page_number = request.GET.get(\"page\")\r\n    page_obj = paginator.get_page(page_number)\r\n\r\n    print(page_obj)\r\n    return render(request, 'dashboard/manage_staff.html', {\"page_obj\": page_obj})\r\n\r\n#Delete Staff\r\ndef delete_staff(request, staff_id):\r\n    staff_profile = StaffProfile.objects.filter(id=staff_id).first()\r\n    if staff_profile:\r\n        user = User.objects.filter(id=staff_profile.user.id).first()\r\n        user.delete()\r\n        staff_profile.delete()\r\n        messages.success(request, 'Staff deleted')\r\n        return HttpResponseRedirect(reverse('manage_staff'))\r\n    messages.error(request, 'Staff profile not found')\r\n    return HttpResponseRedirect(reverse('manage_staff'))\r\n\r\n#Add Jobs by Staff\r\ndef add_job(request):\r\n    if not request.user.is_authenticated:\r\n        messages.error(request, 'Log in first')\r\n        return HttpResponseRedirect(reverse('login_dashboard'))\r\n    if request.method == 'POST':\r\n        code = request.POST['code']\r\n        name = request.POST['name']\r\n        description = request.POST['description']\r\n        start_date = request.POST['start_date']\r\n        end_date = request.POST['end_date']\r\n        comments = request.POST['comments']\r\n        faculty = request.POST['faculty']\r\n\r\n        job = Job(code=code, name=name, description=description, start_date=start_date, end_date=end_date, comments=comments, user=request.user, status=\"0\", faculty=faculty)\r\n        job.save()\r\n\r\n        messages.success(request, 'Job added!')\r\n        return HttpResponseRedirect(reverse('add_job'))\r\n\r\n    return render(request, 'dashboard/add_job.html', {\"current_date\": datetime.datetime.today()})\r\n\r\n#Edit Job\r\ndef edit_job(request, job_id):\r\n    job = Job.objects.filter(id=job_id).first()\r\n\r\n    if request.method == 'POST':\r\n        code = request.POST['code']\r\n        name = request.POST['name']\r\n        description = request.POST['description']\r\n        start_date = request.POST['start_date']\r\n        end_date = request.POST['end_date']\r\n        comments = request.POST['comments']\r\n        faculty = request.POST['faculty']\r\n\r\n        job.code = code\r\n        job.name = name\r\n        job.description = description\r\n        job.start_date = start_date\r\n        job.end_date = end_date\r\n        job.comments = comments\r\n        job.faculty = faculty\r\n\r\n        job.save()\r\n        messages.success(request, \"Unit updated!\")\r\n        return HttpResponseRedirect(reverse('unit_list'))\r\n    if job:\r\n        return render(request, 'dashboard/edit_job.html', {\"job\": job})\r\n    else:\r\n        messages.error(request, \"No unit found\")\r\n        return HttpResponseRedirect(reverse('unit_list'))\r\n\r\n#List of jobs\r\ndef unit_list(request):\r\n    jobs = Job.objects.filter(user=request.user).order_by(\"id\")\r\n    paginator = Paginator(jobs, 10)\r\n    page_number = request.GET.get(\"page\")\r\n    page_obj = paginator.get_page(page_number)\r\n\r\n    print(page_obj)\r\n    return render(request, 'dashboard/unit_list.html', {\"page_obj\": page_obj})\r\n\r\n#Delete jobs\r\ndef delete_unit(request, job_id):\r\n    if not request.user.is_authenticated:\r\n        messages.error(request, 'Invalid Operation')\r\n        return HttpResponseRedirect(reverse('index'))\r\n    job = Job.objects.filter(id=job_id).first()\r\n    print(job)\r\n    if job:\r\n        print(job.status)\r\n
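        # 'Deleting' a unit is a soft delete: an active job ('1') is only flipped to inactive ('2')\r\n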
        if job.status == '1':\r\n            job.status = '2'\r\n            job.save()\r\n            messages.success(request, \"Job inactive\")\r\n            print(job.status)\r\n        if request.user.is_superuser:\r\n            return HttpResponseRedirect(reverse('review_units'))\r\n        else:\r\n            return HttpResponseRedirect(reverse('unit_list'))\r\n\r\n    if request.user.is_superuser:\r\n        return HttpResponseRedirect(reverse('review_units'))\r\n    else:\r\n        return HttpResponseRedirect(reverse('unit_list'))\r\n\r\n#Review units\r\ndef review_units(request):\r\n    if not request.user.is_authenticated or not request.user.is_superuser:\r\n        messages.error(request, 'Only admins are allowed here')\r\n        return HttpResponseRedirect(reverse('index'))\r\n\r\n    jobs = Job.objects.all().order_by(\"id\")\r\n    paginator = Paginator(jobs, 10)\r\n    page_number = request.GET.get(\"page\")\r\n    page_obj = paginator.get_page(page_number)\r\n\r\n    print(page_obj)\r\n    return render(request, 'dashboard/review_units.html', {\"page_obj\": page_obj, \"current_date\": datetime.date.today()})\r\n\r\n#Approve Units\r\ndef approve_unit(request, job_id):\r\n    if not request.user.is_authenticated or not request.user.is_superuser:\r\n        messages.error(request, 'Invalid Operation')\r\n        return HttpResponseRedirect(reverse('index'))\r\n\r\n    job = Job.objects.filter(id=job_id).first()\r\n    if job.status == '1':\r\n        messages.success(request, \"Unit is already active!\")\r\n        return HttpResponseRedirect(reverse('review_units'))\r\n    job.status = '1'\r\n    job.save()\r\n    messages.success(request, \"Unit approved!\")\r\n    return HttpResponseRedirect(reverse('review_units'))\r\n\r\n#Shortlist applicants\r\ndef approve_application(request, app_id):\r\n    application = Application.objects.filter(id=app_id).first()\r\n    if application:\r\n        application.is_shortlisted = True\r\n        application.save()\r\n        messages.success(request, \"Application approved\")\r\n    else:\r\n        messages.error(request, \"Application not found\")\r\n\r\n    return HttpResponseRedirect(reverse('manage_candidates'))\r\n\r\n#Reject applicants\r\ndef reject_application(request, app_id):\r\n    application = Application.objects.filter(id=app_id).first()\r\n    if application:\r\n        application.is_shortlisted = False\r\n        application.save()\r\n        messages.success(request, \"Application rejected\")\r\n    else:\r\n        messages.error(request, \"Application not found\")\r\n\r\n    return HttpResponseRedirect(reverse('manage_candidates'))\r\n\r\n# Shortlisted applicant list\r\ndef schedule(request):\r\n    if not request.user.is_authenticated:\r\n        return HttpResponseRedirect(reverse('login_dashboard'))\r\n    applications = Application.objects.filter(is_shortlisted=True, is_scheduled=None)\r\n\r\n    paginator = Paginator(applications, 10)\r\n    page_number = request.GET.get(\"page\")\r\n    page_obj = paginator.get_page(page_number)\r\n\r\n    return render(request, 'dashboard/schedule.html', {\"applications\": page_obj})\r\n\r\n#Scheduling applicant\r\ndef schedule_application(request, app_id):\r\n    application = Application.objects.filter(id=app_id).first()\r\n\r\n    if request.method == 'POST':\r\n        availability = request.POST['availability']\r\n\r\n        try:\r\n            schedule_count = Schedule.objects.filter(application=application).count()\r\n
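            # An applicant can hold at most 20 scheduled interview slots; beyond that the request is refused\r\n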
            if schedule_count < 20:\r\n                schedule = Schedule(application=application, time_slot=availability)\r\n                schedule.save()\r\n                application.is_scheduled = True\r\n                application.save()\r\n                send_schedule_email(request, application.user, application.job, availability)\r\n                messages.success(request, \"Applicant scheduled\")\r\n                return HttpResponseRedirect(reverse('schedule'))\r\n            else:\r\n                messages.error(request, \"Applicant already scheduled 20 times\")\r\n                return HttpResponseRedirect(reverse('schedule'))\r\n        except Exception as e:\r\n            print(str(e))\r\n            messages.error(request, \"Error scheduling applicant\")\r\n            return HttpResponseRedirect(reverse('schedule'))\r\n\r\n    availability = []\r\n    for day_slots, day_name in [(application.monday, \"Monday\"),\r\n                                (application.tuesday, \"Tuesday\"),\r\n                                (application.wednesday, \"Wednesday\"),\r\n                                (application.thursday, \"Thursday\"),\r\n                                (application.friday, \"Friday\"),\r\n                                (application.saturday, \"Saturday\")]:\r\n        availability.extend(get_timeslots(day_slots, day_name))\r\n    return render(request, 'dashboard/schedule_application.html', {'application': application, 'availability': availability})\r\n\r\n#Notification for Scheduled applicant\r\ndef send_schedule_email(request, user, job, time_slot):\r\n    subject = \"You have been selected as Sessional Staff at CorpU\"\r\n    message = render_to_string('dashboard/emails/schedule_email.html', {\r\n        'firstname': user.first_name,\r\n        'lastname': user.last_name,\r\n        'unit_code': job.code,\r\n        'unit_name': job.name,\r\n        'start_date': job.start_date,\r\n        'end_date': job.end_date,\r\n        'scheduled_slot': time_slot\r\n\r\n    })\r\n    send_mail(subject, message, settings.EMAIL_HOST_USER, [user.email], fail_silently=False)\r\n\r\n#Schedule reject\r\ndef reject_schedule(request, app_id):\r\n    app = Application.objects.filter(id=app_id).first()\r\n    if app:\r\n        app.is_scheduled = False\r\n        app.save()\r\n        messages.success(request, \"Application rejected\")\r\n        return HttpResponseRedirect(reverse('schedule'))\r\n    else:\r\n        messages.error(request, \"Application not found\")\r\n        return HttpResponseRedirect(reverse('schedule'))\r\n\r\n#Schedule list\r\ndef detail_schedule(request):\r\n    schedules = Schedule.objects.all()\r\n    paginator = Paginator(schedules, 10)\r\n    page_number = request.GET.get(\"page\")\r\n    page_obj = paginator.get_page(page_number)\r\n\r\n    return render(request, 'dashboard/schedule-detail.html', {'page_obj': page_obj})\r\n\r\ndef get_timeslots(arr, day):\r\n    TIME_SLOTS = {\r\n        # \"0\": \"Unavailable All Day\",\r\n        \"1\": f\"{day} - 08.00am-10.00am\",\r\n        \"2\": f\"{day} - 10.00am-12.00pm\",\r\n        \"3\": f\"{day} - 01.00pm-03.00pm\",\r\n        \"4\": f\"{day} - 03.00pm-05.00pm\",\r\n        \"5\": f\"{day} - 05.00pm-07.00pm\"\r\n    }\r\n    slots = []\r\n    print(arr)\r\n    items = arr.replace('[', '').replace(']', '').replace(\"'\", '').split(', ')\r\n    print(items)\r\n    print(type(arr))\r\n    for item in items:\r\n        print(item)\r\n        if item != '0':\r\n            slots.append(TIME_SLOTS[item])\r\n    return slots\r\n","repo_name":"Chankami/corpu-team-1","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15243770766","text":"from django.urls import re_path\n\nfrom api import views\n\nurlpatterns = [\n    re_path(r'^login/?$', views.auth.login),\n    re_path(r'^logout/?$', views.auth.logout),\n\n    re_path(r'^management/recreate-index/?$', views.management.recreate_index),\n
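    # NOTE: the /?$ suffix lets every endpoint accept an optional trailing slash\n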
    re_path(r'^management/learn-categories/?$', views.management.learn_categories),\n\n    re_path(r'^statistics/stor(?:ag)?e/?$', views.statistics.storage),\n\n    re_path(r'^documents/count/?$', views.documents.count),\n    re_path(r'^documents/upload/?$', views.documents.upload),\n    re_path(r'^documents/search/?$', views.documents.search),\n    re_path(r'^documents/scan/?$', views.documents.scan),\n    re_path(r'^documents/recent(/(?P<mode>[a-z]+)(/(?P<page>\d+)/(?P<page_size>\d+))?)?/?$', views.documents.recent),\n\n    re_path(r'^documents/(?P<document_id>[a-z0-9-]+)/?$', views.documents.index),\n    re_path(r'^documents/(?P<document_id>[a-z0-9-]+)/preview/?$', views.documents.download),\n    re_path(r'^documents/(?P<document_id>[a-z0-9-]+)/download/?$', views.documents.download),\n    re_path(r'^documents/(?P<document_id>[a-z0-9-]+)/confirm/?$', views.documents.confirm),\n\n    re_path(r'^categories/?$', views.categories.categories),\n    re_path(r'^categories/count/?$', views.categories.count),\n    re_path(r'^categories/_random/?$', views.categories.random),\n    re_path(r'^categories/(?P<name>\w{1,20})/?$', views.categories.category),\n    re_path(r'^categories/(?P<name>\w{1,20})/assign/?$', views.categories.assign),\n    re_path(r'^categories/(?P<name>\w{1,20})/documents(/(?P<page>\d+)/(?P<page_size>\d+))?/?$', views.categories.documents),\n]\n","repo_name":"leupibr/undeep","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"12077632613","text":"import simple_draw as sd\nsd.resolution = (1200, 600)\n\n\ndef primitivs(point_zero_fig, start_angle_figura, length_line, number_of_sides):\n    start_fig = sd.get_point(*point_zero_fig)\n    step_angle = 360 / number_of_sides\n    angle_figura = [start_angle_figura]\n    for i in range(1, number_of_sides+1):\n        angle_figura.append(angle_figura[i - 1] + step_angle)\n    v = sd.get_vector(start_point=start_fig, angle=angle_figura[0], length=length_line, width=3)\n    v.draw()\n    for i in range(1, number_of_sides-1):\n        v = sd.get_vector(start_point=v.end_point, angle=angle_figura[i], length=length_line, width=3)\n        v.draw()\n    sd.line(start_point=v.end_point, end_point=sd.get_point(*point_zero_fig), width=3)\n\n\nif __name__ == \"__main__\":\n    point_zero = (600, 300)\n    length = 100\n    angle = 45\n    primitivs(point_zero_fig=point_zero, start_angle_figura=angle, length_line=length, number_of_sides=5)\n    sd.pause()\n","repo_name":"psolster/py","sub_path":"lesson_005/graph_pack/prim_graph.py","file_name":"prim_graph.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29618893876","text":"import pafy\n\nclass youTube_metadata(object):\n    \"\"\" Wrapper class for pafy \"\"\"\n\n    def __init__(self, url, stream_type = \"audio\"):\n        video = pafy.new(url)\n\n        self.author = video.author\n        self.category = video.category\n        self.length = video.length\n        self.videoid = video.videoid\n        self.published = video.published\n        self.title = video.title\n\n        self.streams = []\n\n        streams = video.audiostreams if stream_type == \"audio\" else video.streams\n\n        for stream in streams:\n            self.streams.append(youTube_stream(stream))\n\nclass youTube_stream(object):\n    \"\"\" Wrapper class for holding data of YouTube streams \"\"\"\n\n    def __init__(self, stream):\n        self.extension = stream.extension\n        self.mediatype = stream.mediatype\n        self.quality = stream.quality\n        self.resolution = stream.resolution\n        self.size = stream.get_filesize()\n        self.url = 
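stream.url\n\n# Usage sketch (VIDEO_ID is a placeholder; pafy fetches the metadata over the network):\n#   meta = youTube_metadata(\"https://www.youtube.com/watch?v=VIDEO_ID\")\n#   print(meta.title, meta.author, meta.length)\n#   for s in meta.streams:\n#       print(s.mediatype, s.extension, s.quality, s.size)\n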
stream.url","repo_name":"laszlocsontos/ringtube","sub_path":"rto-service/src/main/python/youTube.py","file_name":"youTube.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"44732083386","text":"from django.urls import path\n\nfrom products import views\n\nurlpatterns = [\n path('', views.index, name='products'),\n path('/', views.product, name='product'),\n path('create/', views.product_form, name='create_product'),\n path('update//', views.product_form, name='update_product'),\n path('delete//', views.delete, name='delete_product')\n]\n","repo_name":"izubairsh/django-ecommerce","sub_path":"products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41495668984","text":"'''\n=============================================================\nHEADER\n=============================================================\nINSTITUTION: BU-ISCIII\nAUTHOR: Guillermo J. Gorines Cordero\nMAIL: guillermo.gorines@urjc.es\nVERSION: 0\nCREATED: 15-3-2022\nREVISED: \nDESCRIPTION: \n Ad-hoc script to generate a heatmap on each category\nINPUT:\n -Number of organisms to include (does not matter if\n number set is bigger than current number of organisms)\nOUTPUT:\n Balance of reads obtained in the whole service \n'''\nimport os\nimport sys\n\nimport pandas\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nrelevant_features = sys.argv[1]\n\nos.mkdir(\"Heatmaps\")\n\ndef remove_zero_columns(df):\n \"\"\"Remove rows that are all 0\"\"\"\n \n df_removed = df.loc[:, (df != 0).any(axis=0)]\n \n return df_removed\n\ndef get_most_relevant(df, number):\n \n df.loc[\"Desv\"] = df.std()\n df.loc[\"Mean Desv\"] = df.mad()\n df = df[(df.loc[\"Mean Desv\"].nlargest(n=number)).index]\n \n df2 = df.drop([\"Desv\",\"Mean Desv\"], axis=0)\n \n return df2\n\n# dictionary to associate level and group\nlevel_dict = {\"lvl7\" : \"species\",\n \"lvl6\" : \"genera\",\n \"lvl5\" : \"family\",\n \"lvl4\" : \"order\",\n \"lvl3\" : \"class\",\n \"lvl2\" : \"phyla\",\n \"lvl1\" : \"domain\"}\n\n# list of suitable directories generated by previous script\ndir_list = [\n os.path.realpath(item[0]) for item in os.walk(\".\") \n if item[0] != \".\" \n and \"logs\" not in item[0]\n and \"lvl_6\" not in item[0]\n and \"lvl_7\" not in item[0]] \n\n# list of files in previous directories\nfile_list = []\nfor folder in dir_list:\n for file in os.listdir(folder):\n file_list.append(f\"{folder}/{file}\")\n\n# for each file, generate the df\n# remove empty columns\n# get the N most relevant features\n\nfor file in file_list:\n df = pandas.read_csv(file, sep=\"\\t\", header=0, index_col=0)\n df = remove_zero_columns(df)\n df = get_most_relevant(df,10)\n \n category, level = os.path.basename(file).replace(\".tsv\",\"\").split(\"_\")\n level = level_dict[level]\n png_name = f\"Heatmaps/{level}_{category}.png\"\n \n # dict to rename index\n dict_rename_index = {item:item.split(\"_\")[-1] for item in df.index}\n df.rename(index=dict_rename_index, inplace=True)\n\n # dict to rename columns\n dict_rename_taxa = {item:item.split(\"_\")[-1] for item in df.columns}\n df.rename(columns=dict_rename_taxa, inplace=True)\n\n fig,ax = plt.subplots(figsize=(15,15))\n sns.set(rc={'axes.facecolor':'white', 'figure.facecolor':'white'})\n ax.set_title(f\"Abundance (% of samples) of organism {level} by {category}\", fontdict={'fontsize' : 
20})\n ax.tick_params(labelsize=13)\n sns.heatmap(df, \n annot=True, \n cbar=True, \n cmap=\"Greens\", \n vmax=100, \n vmin=0, \n fmt=\"g\", \n square=True, \n ax=ax, \n cbar_kws={\n \"orientation\" : \"horizontal\",\n }\n )\n plt.yticks(rotation=0)\n plt.xticks(rotation=45)\n plt.savefig(png_name)\n plt.close()","repo_name":"GuilleGorines/16S-Qiime-protocol","sub_path":"archived/heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69997226968","text":"import pandas as pd\nfrom utils.probabilityEstimation import ConditionalProbability\nfrom ocik.network import BayesianNetwork\nfrom ocik import CausalLeaner\nfrom utils.drawing import draw\n\nimport os\nos.environ[\"PATH\"] += os.pathsep + 'C:\\\\Users\\\\pakyr\\\\.conda\\\\envs\\\\bayesianEnv\\\\Library\\\\bin\\\\graphviz'\n\n\n# Class for an Agent of the environment\nclass Agent:\n def __init__(self, nodes, non_doable, edges, obs_data):\n self.nodes = nodes\n self.edges = edges\n self.non_doable = non_doable\n self.obs_data = obs_data\n self.conditional_prob = ConditionalProbability(self.obs_data, self.edges)\n self.bn = self.build_network()\n self.undirected_edges = []\n self.incomplete = []\n\n # replace list of edges\n def reset_edges(self, learned_edges):\n self.edges.clear()\n self.edges = learned_edges\n\n # build the Bayesian Network with probabilities\n def build_network(self):\n # Define network structure\n bn = BayesianNetwork(self.edges)\n\n # Fill with conditional probabilities\n for node in bn.nodes():\n parents = []\n for edge in bn.edges():\n if node == edge[1]:\n parents.append(edge[0])\n\n arr = self.conditional_prob.get_node_prob(node)\n # Invert the array for construction reasons\n arr = [arr[1], arr[0]]\n bn.set_cpd(node, arr, parents)\n return bn\n\n # Get non-duplicate nodes list from edges\n def nodes_from_edges(self, edges):\n nodes = []\n for edge in edges:\n nodes.append(edge[0])\n nodes.append(edge[1])\n return list(set(nodes))\n\n def add_node(self, node):\n self.nodes.append(node)\n\n def remove_node(self, node):\n self.nodes.remove(node)\n\n def add_non_doable(self, node):\n self.non_doable.append(node)\n\n def remove_non_doable(self, node):\n self.non_doable.remove(node)\n\n def add_edge(self, edge):\n self.edges.append(edge)\n\n def remove_edge(self, edge):\n self.edges.remove(edge)\n\n def add_undirected_edges(self, undirected_edges):\n for edge in undirected_edges:\n self.undirected_edges.append(edge)\n\n def concatenate_data(self, data_to_concatenate, override=True):\n # Concatenate original data with received data\n # Pay attention on:\n # - identifier for each sample\n # - dimensions\n # - no duplicate data (as columns name)\n\n # When a column is already present, decide if keep it or override it\n drops = [] # list of same column names for agent data and received data\n for col in data_to_concatenate.columns:\n if col in self.obs_data.columns:\n drops.append(col)\n\n if override:\n # override old data\n self.obs_data.drop(columns=drops, inplace=True)\n else:\n # do not override old data\n data_to_concatenate.drop(columns=drops, inplace=True)\n\n # Merge of data based on the id\n # self.obs_data = pd.merge(self.obs_data, data_to_concatenate, how='outer', on='index')\n self.obs_data = pd.concat([self.obs_data, data_to_concatenate], axis=1)\n\n # Decide how to manage NaN values if present\n\n def learning(self, nodes, parameters, non_doable, mod, bn=None, obs_data=None, 
edges=None):\n\n estimator = CausalLeaner(nodes=nodes, non_dobale=non_doable, edges=edges, env=bn, obs_data=obs_data)\n model, undirected_edges = estimator.learn(mod=mod, max_cond_vars=parameters['max_cond_vars'], do_size=parameters['do_size'], do_conf=parameters['do_conf'], ci_conf=parameters['ci_conf'])\n\n return model, undirected_edges\n\n # Check for incomplete nodes: for now this step is simulated, we add variables manually\n # def check_incomplete(self):\n # # Example\n # incomplete = ['T']\n #\n # for node in incomplete:\n # if node in self.nodes:\n # self.incomplete.append(node)\n\n def print_structure(self):\n dot = draw(self.edges)\n dot.view(directory='tmp/tmp/')\n\n def build_request_msg(self, nodes_to_investigate: list, undirected_edges: list):\n # The message contains:\n # - nodes with outliers values (nodes_to_investigate)\n # - nodes in undirected connections\n # In case of duplicates, eliminate them\n\n nodes_to_send = []\n if len(nodes_to_investigate) != 0:\n nodes_to_send.extend(nodes_to_investigate)\n if len(undirected_edges) != 0:\n nodes_to_send.extend(self.nodes_from_edges(undirected_edges))\n\n nodes_to_send = list(set(nodes_to_send))\n\n if len(nodes_to_send) != 0:\n non_doable = []\n for node in nodes_to_send:\n if node in self.non_doable:\n non_doable.append(node)\n\n # Data are necessary for the chi-square\n # Example: Pow->W (non-doable->doable)\n # In this case we need data both for Pow and for W, because the chi-square compares the distributions\n obs_data = self.obs_data\n data_to_send = obs_data.drop(columns=[x for x in obs_data.columns if x not in nodes_to_send])\n\n # Build message\n msg = dict()\n msg['nodes'] = nodes_to_send\n msg['non_doable'] = non_doable\n msg['data'] = data_to_send\n else:\n return None\n\n return msg\n\n def build_response_msg(self, discovered_edges: list):\n msg = dict()\n\n non_doable = []\n nodes = self.nodes_from_edges(discovered_edges)\n for node in nodes:\n if node in self.non_doable:\n non_doable.append(node)\n\n if len(discovered_edges) != 0:\n msg['edges'] = discovered_edges\n msg['non_doable'] = non_doable\n return msg\n else:\n return None\n\n def read_request(self, request_msg):\n\n if request_msg:\n msg = request_msg\n else:\n return False # Not going to learn\n\n # Check if all received nodes were already known: in this case it is useless to repeat the learning\n if all(item in self.nodes for item in msg['nodes']):\n print('Nodes already known, checking the previous learning results...')\n return False # Not going to learn\n else:\n # Code for adding new nodes to existing structure before to make incremental learning\n # for node in msg['nodes']:\n # if node not in self.nodes:\n # self.add_node(node)\n #\n # for node in msg['non_doable']:\n # if node not in self.non_doable:\n # self.add_non_doable(node)\n #\n # # Concatenation of observational data\n # if msg['data'] is not None:\n # self.concatenate_data(msg['data'])\n\n return True # Going to learn\n\n def read_response(self, response):\n # We consider trusted the communication between agents, so we directly integrate the response\n # without repeat the learning\n\n if len(response) != 0:\n # Read nodes and add to structure\n new_nodes = self.nodes_from_edges(response['edges'])\n for node in new_nodes:\n if node not in self.nodes:\n self.add_node(node)\n\n for node in response['non_doable']:\n if node not in self.non_doable:\n self.add_non_doable(node)\n\n # Read edges and add to structure\n for t in response['edges']:\n if t not in self.edges:\n self.add_edge(t)\n 
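# an empty response from the peer means there is nothing new to merge\n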
        else:\n            print('Empty response, nothing added')\n","repo_name":"pakyr/multiagent_algorithm","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":7815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17787536991","text":"#!/usr/bin/python3\n\"\"\" Defines the perimeter of the island described in grid \"\"\"\n\n\ndef island_perimeter(grid):\n    \"\"\" Return perimeter of grid where \"1\" is found \"\"\"\n    perimeter = 0\n    for c in range(len(grid)):\n        for d in range(len(grid[0])):\n            if grid[c][d] == 1:\n                if (c - 1) == -1 or grid[c - 1][d] == 0:\n                    perimeter += 1\n                if (c + 1) == len(grid) or grid[c + 1][d] == 0:\n                    perimeter += 1\n                if (d - 1) == -1 or grid[c][d - 1] == 0:\n                    perimeter += 1\n                if (d + 1) == len(grid[0]) or grid[c][d + 1] == 0:\n                    perimeter += 1\n    return perimeter\n
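\nif __name__ == \"__main__\":\n    # quick sanity check: the plus-shaped island below has perimeter 12\n    grid = [[0, 1, 0],\n            [1, 1, 1],\n            [0, 1, 0]]\n    print(island_perimeter(grid))\n","repo_name":"yellowduke/alx-low_level_programming","sub_path":"0x1C-makefiles/5-island_perimeter.py","file_name":"5-island_perimeter.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10373592932","text":"from typing import List\n\nfrom azure.quantum.qiskit import AzureQuantumProvider\nfrom openqaoa.backends.devices_core import DeviceBase\n\n\nclass DeviceAzure(DeviceBase):\n    \"\"\"\n    Contains the required information and methods needed to access remote\n    Azure QPUs and Simulators.\n\n    Parameters\n    ----------\n    available_qpus: `list`\n        When connection to a provider is established, this attribute contains a list\n        of backend names which can be used to access the selected backend by reinitialising\n        the Access Object with the name of the available backend as input to the\n        device_name parameter.\n    \"\"\"\n\n    def __init__(self, device_name: str, resource_id: str, az_location: str):\n        \"\"\"\n        Input parameters required for this can be found in the user's Azure\n        Quantum Workspace.\n\n        Parameters\n        ----------\n        device_name: `str`\n            The name of the Azure remote QPU/Simulator to be used\n        resource_id: `str`\n            The resource_id of the Workplace\n        az_location: `str`\n            The location of the Azure Workplace. e.g. 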
\"westus\"\n \"\"\"\n\n self.resource_id = resource_id\n self.location = az_location\n self.device_name = device_name\n self.device_location = \"azure\"\n\n self.provider_connected = None\n self.qpu_connected = None\n\n def check_connection(self):\n \"\"\" \"\"\"\n\n self.provider_connected = self._check_provider_connection()\n\n if self.provider_connected == False:\n return self.provider_connected\n\n self.available_qpus = [backend.name() for backend in self.provider.backends()]\n\n if self.device_name == \"\":\n return self.provider_connected\n\n self.qpu_connected = self._check_backend_connection()\n\n if self.provider_connected and self.qpu_connected:\n return True\n else:\n return False\n\n def _check_backend_connection(self) -> bool:\n \"\"\"Private method for checking connection with backend(s).\"\"\"\n\n if self.device_name in self.available_qpus:\n self.backend_device = self.provider.get_backend(self.device_name)\n self.n_qubits = self.backend_device.configuration().n_qubits\n return True\n else:\n print(f\"Please choose from {self.available_qpus} for this provider\")\n return False\n\n def _check_provider_connection(self) -> bool:\n \"\"\"\n Private method for checking connection with provider.\n \"\"\"\n\n try:\n self.provider = AzureQuantumProvider(\n resource_id=self.resource_id, location=self.location\n )\n\n return True\n\n except ValueError as e:\n print(\n \"Either the resource id or location specified was invalid: {}\".format(e)\n )\n return False\n\n except Exception as e:\n print(\n \"An Exception has occured when trying to connect with the \\\n provider: {}\".format(\n e\n )\n )\n return False\n\n def connectivity(self) -> List[List[int]]:\n return self.backend_device.configuration().coupling_map\n","repo_name":"entropicalabs/openqaoa","sub_path":"src/openqaoa-azure/openqaoa_azure/backends/devices.py","file_name":"devices.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"31"} +{"seq_id":"1995012189","text":"import pandas\nfrom flask import Blueprint\nfrom flask import Flask, Response, jsonify, request, render_template\n\nfrom api.utils import database as db_util\nfrom api.model.settings import APP_DATABASE_NAME\n\nroute_blueprint = Blueprint('route_blueprint', __name__)\n\ndef get_table_data(query):\n engine = db_util.get_engine(APP_DATABASE_NAME)\n df = pandas.read_sql_query(query, con=engine)\n #count = int(df.iloc[0]['count'])\n return df\n\n\n@route_blueprint.route('/')\ndef home():\n return render_template('home.html')\n\n\n@route_blueprint.route(\"/custom\", methods=[\"POST\"])\ndef custom():\n payload = request.get_json()\n\n if payload.get(\"say_hello\") is True:\n output = jsonify({\"message\": \"Hello!\"})\n else:\n output = jsonify({\"message\": \"...\"})\n\n return output\n\n\n@route_blueprint.route(\"/health\")\ndef health():\n return Response(\"OK\", status=200)\n\n\n\n@route_blueprint.route('/roster/')\ndef roster():\n query = 'select * from vw_Roster where PPG > 2'\n df = get_table_data(query)\n return render_template('table_embed.html',\n column_names=df.columns.values,\n row_data=list(df.values.tolist()), zip=zip)\n\n\n@route_blueprint.route('/roadmap/')\ndef roadmap():\n return 
render_template('roadmap.html')\n","repo_name":"benvneal88/ncaa-bb-fantasy-manager","sub_path":"services/app/src/api/route_blueprint.py","file_name":"route_blueprint.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25811841785","text":"# -*- coding: utf-8 -*-\n\nimport geom\n\ndef net2DfromXYlists(preprocessor,abscissae,ordinates):\n points= preprocessor.getMultiBlockTopology.getPoints\n nets= preprocessor.getMultiBlockTopology.get2DNets\n retval= nets.new2DNet()\n retval.dim(len(ordinates),len(abscissae))\n\n i= 1\n for x in abscissae:\n j= 1\n for y in ordinates:\n pnt= points.newPoint(geom.Pos3d(x,y,0.0))\n retval.setPnt(j,i,pnt.tag)\n j+= 1\n i+= 1\n return retval\n\n\ndef createSurfacesNet2D(preprocessor,net2D,iSize,jSize):\n surfaces= preprocessor.getMultiBlockTopology.getSurfaces\n m= net2D.nRow\n n= net2D.nCol\n for i in range(1,m):\n for j in range(1,n):\n p1= net2D.getPnt(i,j)\n p2= net2D.getPnt(i+1,j)\n p3= net2D.getPnt(i+1,j+1)\n p4= net2D.getPnt(i,j+1)\n s= surfaces.newQuadSurfacePts(p1.tag,p2.tag,p3.tag,p4.tag)\n s.setElemSizeIJ(iSize,jSize)\n\nclass Net2DHelper(object):\n net= None #\n def __init__(self, net2D):\n self.net= net2D\n\n def assignZ(self,zFunc):\n m= self.net.nRow\n n= self.net.nCol\n for i in range(1,m+1):\n for j in range(1,n+1):\n p= self.net.getPnt(i,j)\n pos= p.getPos\n pos.z= zFunc(pos.x,pos.y)\n\n def createSurfaces(self,preprocessor,iSize,jSize):\n return createSurfacesNet2D(preprocessor,self.net,iSize,jSize)\n","repo_name":"xcfem/xc","sub_path":"python_modules/model/geometry/net_utils.py","file_name":"net_utils.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"31"} +{"seq_id":"23365878672","text":"import mxnet as mx\nfrom mxnet import gluon\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport json\nimport os\nfrom itertools import islice\nfrom pathlib import Path\n\nfrom gluonts.dataset.repository.datasets import get_dataset, dataset_recipes\nfrom gluonts.dataset.util import to_pandas\n\nfrom gluonts.dataset.common import ListDataset\nfrom gluonts.dataset.field_names import FieldName\n\ndataset = get_dataset(\"m4_hourly\", regenerate=True)\n\n# get the \"first\" time series in the training set (which means we get more in the dataset)\ntrain_entry = next(iter(dataset.train))\ntrain_entry.keys()\n\n# get the \"first\" time series in the test set\ntest_entry = next(iter(dataset.test))\ntest_entry.keys()\n\ntest_series = to_pandas(test_entry)\ntrain_series = to_pandas(train_entry)\n\nfig, ax = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(10, 7))\n\ntrain_series.plot(ax=ax[0])\nax[0].grid(which=\"both\")\nax[0].legend([\"train series\"], loc=\"upper left\")\n\ntest_series.plot(ax=ax[1])\nax[1].axvline(train_series.index[-1], color='r') # end of train dataset\nax[1].grid(which=\"both\")\nax[1].legend([\"test series\", \"end of train series\"], loc=\"upper left\")\n\nplt.show()\n\nif __name__ == \"__main__\":\n pass\n\n# 1.3 Use your time series and features\n\ndef create_dataset(num_series, num_steps, period=24, mu=1, sigma=0.3):\n # create target: noise + pattern\n # noise\n noise = np.random.normal(mu, sigma, size=(num_series, num_steps))\n\n # pattern - sinusoid with different phase\n sin_minumPi_Pi = np.sin(np.tile(np.linspace(-np.pi, np.pi, period), int(num_steps / period)))\n sin_Zero_2Pi = np.sin(np.tile(np.linspace(0, 2 
* np.pi, 24), int(num_steps / period)))\n\n pattern = np.concatenate((np.tile(sin_minumPi_Pi.reshape(1, -1),\n (int(np.ceil(num_series / 2)),1)),\n np.tile(sin_Zero_2Pi.reshape(1, -1),\n (int(np.floor(num_series / 2)), 1))\n ),\n axis=0\n )\n\n target = noise + pattern\n\n # create time features: use target one period earlier, append with zeros\n feat_dynamic_real = np.concatenate((np.zeros((num_series, period)),\n target[:, :-period]\n ),\n axis=1\n )\n\n # create categorical static feats: use the sinusoid type as a categorical feature\n feat_static_cat = np.concatenate((np.zeros(int(np.ceil(num_series / 2))),\n np.ones(int(np.floor(num_series / 2)))\n ),\n axis=0\n )\n\n return target, feat_dynamic_real, feat_static_cat\n\n# define the parameters of the dataset\n\ncustom_ds_metadata = {'num_series': 100,\n 'num_steps': 24 * 7,\n 'prediction_length': 24,\n 'freq': '1H',\n 'start': [pd.Timestamp(\"01-01-2019\", freq='1H')\n for _ in range(100)]\n }\n\ndata_out = create_dataset(custom_ds_metadata['num_series'],\n custom_ds_metadata['num_steps'],\n custom_ds_metadata['prediction_length']\n )\n\ntarget, feat_dynamic_real, feat_static_cat = data_out\n\ntrain_ds = ListDataset([{FieldName.TARGET: target,\n FieldName.START: start,\n FieldName.FEAT_DYNAMIC_REAL: [fdr],\n FieldName.FEAT_STATIC_CAT: [fsc]}\n for (target, start, fdr, fsc) in zip(target[:, :-custom_ds_metadata['prediction_length']],\n custom_ds_metadata['start'],\n feat_dynamic_real[:, :-custom_ds_metadata['prediction_length']],\n feat_static_cat)],\n freq=custom_ds_metadata['freq'])\n\n# 5.2 Probabilistic forecasting\n\n\nfrom gluonts.distribution.distribution_output import DistributionOutput\nfrom gluonts.distribution.gaussian import GaussianOutput","repo_name":"syyunn/gluonts","sub_path":"data/S&P_GSCI/S&P_GSCI_Soybeans /extended-tutorial.py","file_name":"extended-tutorial.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73032615127","text":"import mock\n\n\nclass Registry(object):\n \"\"\"\n A registry for mocks.\n \"\"\"\n def __init__(self, specification):\n self._mocks = {}\n self._patches = []\n for name, kwargs in specification.iteritems():\n patch = mock.patch(name=name, **kwargs)\n self._patches.append(patch)\n\n\n @classmethod\n def forTestCase(cls, testCase, specification=None):\n \"\"\"\n Creates a registry for this test case.\n\n Immediately starts this registry and schedules it to be cleaned up.\n \"\"\"\n if specification is None:\n specification = testCase.mockSpecification\n registry = cls(specification)\n registry.start()\n testCase.addCleanup(registry.stop)\n return registry\n\n\n def start(self):\n \"\"\"\n Starts the registry, putting mocks into effect.\n \"\"\"\n for p in self._patches:\n m = p.start()\n self._mocks[m._mock_name] = m\n\n\n def stop(self):\n \"\"\"\n Stops the registry, cleaning up existing mocks.\n \"\"\"\n self._mocks = {}\n for p in self._patches:\n p.stop()\n\n\n def __getitem__(self, key):\n return self._mocks[key]\n","repo_name":"lvh/decmoc","sub_path":"decmoc/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17434748701","text":"from http import HTTPStatus\n\nimport pytest\nfrom django.test.client import Client\nfrom django.urls import reverse\n\nfrom favorit.funding.enums import FundingState\nfrom tests.favorit.funding.test_mixins import 
TestFundingMixins\n\nclient = Client()\n\n\nclass TestCloseFundingAcceptance(TestFundingMixins):\n def _call_api(self, funding, jwt_access_token):\n response = client.post(\n path=reverse(\"favorit:close_funding\", kwargs={\"funding_id\": funding.id}),\n content_type=\"application/json\",\n **{\"HTTP_Authorization\": f\"Bearer {jwt_access_token}\"},\n )\n return response\n\n @pytest.mark.django_db\n def test_close_funding_on_success(self, jwt_access_token):\n _, _, funding = self.create_funding()\n\n response = self._call_api(funding, jwt_access_token)\n\n assert response.status_code == HTTPStatus.OK\n funding.refresh_from_db()\n assert funding.state == FundingState.CLOSED\n\n @pytest.mark.parametrize(\"not_enable_closed\", [FundingState.CLOSED, FundingState.COMPLETED])\n @pytest.mark.django_db\n def test_close_funding_on_fail(self, jwt_access_token, not_enable_closed):\n _, _, funding = self.create_funding()\n\n funding.state = not_enable_closed\n funding.save()\n\n response = self._call_api(funding, jwt_access_token)\n\n assert response.status_code == HTTPStatus.BAD_REQUEST\n","repo_name":"ThinLineIT/FavorIt_Backend","sub_path":"src/tests/favorit/funding/test_close_funding.py","file_name":"test_close_funding.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"22669900437","text":"import zmq\nimport time\nimport numpy\nfrom scipy import signal as sg\n\nfrom bokeh.driving import count\nfrom bokeh.plotting import figure, curdoc\nfrom bokeh.models import ColumnDataSource, RangeTool\nfrom bokeh.layouts import gridplot, column\n\nport_temp = 5558\nsensor_count = 2\n\ncontext = zmq.Context()\nsocket_temp = context.socket(zmq.PULL)\nsocket_temp.connect(\"tcp://localhost:\"+str(port_temp))\npoller_temp = zmq.Poller()\npoller_temp.register(socket_temp, zmq.POLLIN)\n\n\ndata = dict(\n time=[]\n )\n\np0 = figure(title=\"Temperature\", y_range=(0, 40), plot_height=500, plot_width=1000, tools=\"xpan,xwheel_zoom,xbox_zoom,reset\", y_axis_location=\"left\")\n#p0.xaxis.axis_label = 'Time'\np0.yaxis.axis_label = 'Temperature [°C]'\np0.x_range.follow = \"end\"\np0.x_range.follow_interval = 500\np0.x_range.range_padding = 0\n\n\nfor i in range(0, sensor_count):\n data.update({'temp_'+str(i): []})\n\nsource = ColumnDataSource(data)\n\nfor i in range(0, sensor_count):\n p0.line(x='time', y='temp_'+str(i), source=source, line_width=2)\n\n\n\ndef _update_function():\n socks_temp = dict(poller_temp.poll())\n if socks_temp.get(socket_temp) == zmq.POLLIN:\n message_temp = socket_temp.recv()\n T = numpy.frombuffer(message_temp, dtype=numpy.float32())\n T = numpy.reshape(T, (len(T)//sensor_count, sensor_count))\n return T\n\n\n@count()\ndef update(t):\n T = _update_function()\n print(T)\n new_data = dict(\n time=[t]\n )\n #,\n # Temp=[T[0]]\n T = numpy.array(T)\n i, j = numpy.argwhere(numpy.isnan(T)).T\n T[i, j] = 0.0\n for ii in range(0, sensor_count):\n new_data.update({'temp_'+str(ii): [numpy.mean(T[0, ii])]})\n print(new_data['time'])\n source.stream(new_data, 500)\n\n\ncurdoc().add_root(column(p0))\ncurdoc().add_periodic_callback(update, 20)\ncurdoc().title = \"test plot\"\n","repo_name":"Dur0k/gr-spectral_analysis","sub_path":"python_plot/python/testplot.py","file_name":"testplot.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20407830935","text":"from __future__ import annotations\n\nimport 
json\nimport os\nimport struct\nfrom typing import List\n\nfrom .crc32 import Crc32\nfrom .xor import XOR\n\n\nclass GMDSection(object):\n    def __init__(self, section_id: int,\n                 section_name: str, section_text: bytes) -> None:\n        self.id = section_id\n        self.name = section_name\n        self.text = section_text\n\n    def __str__(self) -> str:\n        return (\n            f\"{self.id} - {self.name}:\"\n            f\" {len(self.text)}\"\n        )\n\n    def __repr__(self) -> str:\n        return self.__str__()\n\n\nclass GMD(object):\n    class _Header(object):\n        def __init__(self,\n                     magic: bytes,\n                     version: bytes,\n                     language: int,\n                     padding: int,\n                     label_count: int,\n                     section_count: int,\n                     label_size: int,\n                     section_size: int,\n                     name_size: int) -> None:\n            self.magic = magic\n            self.version = version\n            self.language = language\n            self.padding = padding\n            self.label_count = label_count\n            self.section_count = section_count\n            self.label_size = label_size\n            self.section_size = section_size\n            self.name_size = name_size\n\n        @staticmethod\n        def load(data) -> GMD._Header:\n            return GMD._Header(*struct.unpack_from('<4s4siqiiiii', data))\n\n        def dump(self) -> bytes:\n            return struct.pack(\n                '<4s4siqiiiii',\n                self.magic,\n                self.version,\n                self.language,\n                self.padding,\n                self.label_count,\n                self.section_count,\n                self.label_size,\n                self.section_size,\n                self.name_size\n            )\n\n    class _Label(object):\n        def __init__(self,\n                     section_id: int,\n                     hash1: int,\n                     hash2: int,\n                     label_offset: int,\n                     list_link: int) -> None:\n            self.section_id = section_id\n            self.hash1 = hash1\n            self.hash2 = hash2\n            self.label_offset = label_offset\n            self.list_link = list_link\n\n        @staticmethod\n        def create(section_id: int, section_name: str,\n                   label_offset: int) -> GMD._Label:\n            return GMD._Label(\n                section_id,\n                ~Crc32.create(section_name * 2),\n                ~Crc32.create(section_name * 3),\n                label_offset,\n                0\n            )\n\n        @staticmethod\n        def load(data) -> GMD._Label:\n            return GMD._Label(*struct.unpack_from('<5i', data))\n\n        def dump(self) -> bytes:\n            return struct.pack(\n                '<5i',\n                self.section_id,\n                self.hash1,\n                self.hash2,\n                self.label_offset,\n                self.list_link\n            )\n\n        def __str__(self) -> str:\n            return (\n                f'[Section ID: {self.section_id}'\n                f', Hash 1: {self.hash1}'\n                f', Hash 2: {self.hash2}'\n                f', Label offset: {self.label_offset}'\n                f', List link: {self.list_link}]'\n            )\n\n        def __repr__(self) -> str:\n            return self.__str__()\n\n    def __init__(self) -> None:\n        self.name = None\n        self.header = None\n        self.padding = 0\n        self.labels: List[GMD._Label] = list()\n        self.sections: List[GMDSection] = list()\n        self.buckets = [0 for _ in range(0x100)]\n        self.__label_offset = 0\n\n    @staticmethod\n    def __read_cstr(data: bytes, offset: int):\n        end_offset = offset\n        while data[end_offset] != 0x00:\n            end_offset += 1\n        return data[offset:end_offset]\n\n    @staticmethod\n    def load(f) -> GMD:\n        gmd = GMD()\n        content = f.read()\n        offset = 0\n\n        gmd.header = GMD._Header.load(content)\n        gmd.padding = gmd.header.padding\n        offset += 40\n\n        gmd.name = content[offset:offset+gmd.header.name_size].decode('UTF-8')\n        offset += gmd.header.name_size + 1\n\n        for _ in range(gmd.header.label_count):\n            gmd.labels.append(GMD._Label.load(content[offset:]))\n            offset += 20\n\n        bucket_size = 0x100 if gmd.header.label_count > 0 else 0\n        gmd.buckets = list()\n        for _ in range(bucket_size):\n            gmd.buckets.append(struct.unpack_from('<i', content, offset)[0])\n            offset += 4\n\n        labels_start = offset\n        offset += gmd.header.label_size\n\n        text_blob = XOR.rexor(content[offset:offset + gmd.header.section_size])\n        texts = text_blob.split(b'\\x00')[:-1]\n\n        names = {}\n        for label in gmd.labels:\n            names[label.section_id] = GMD.__read_cstr(\n                content, labels_start + label.label_offset).decode('UTF-8')\n\n        for i, text in enumerate(texts):\n            gmd.sections.append(GMDSection(i, names.get(i, f'no_name_{i}'), text))\n\n        return gmd\n\n    def dump(self, dump_name: str) -> None:\n        os.makedirs(dump_name, exist_ok=True)\n\n        dump_info = {\n            'name': self.name,\n            'padding': self.padding\n        }\n        with open(os.path.join(dump_name, 'info.json'), 'w') as f:\n            json.dump(dump_info, f)\n\n        for s in self.sections:\n            file_name = f\"{s.id}-{s.name}.txt\"\n            with open(os.path.join(dump_name, file_name), 'wb') as f:\n                f.write(s.text)\n\n    def add_section(self, 
section: GMDSection) -> None:\n counter = len(self.sections)\n if counter == 0:\n counter = -1\n self.sections.append(section)\n\n if section.name.startswith('no_name_'):\n return\n\n label = GMD._Label.create(section.id,\n section.name,\n self.__label_offset)\n self.__label_offset += len(section.name) + 1\n self.labels.append(label)\n\n bucket = ~Crc32.create(section.name) & 0xff\n if self.buckets[bucket] > 0:\n self.labels[self.buckets[bucket]].list_link = counter\n else:\n self.buckets[bucket] = counter\n\n def pack(self, pack_path: str, pack_name: str) -> None:\n text_blob = b''.join(\n [s.text + b'\\x00' for s in self.sections]\n )\n text_blob = text_blob.replace(b'\\r\\n', b'\\n')\n text_blob = text_blob.replace(b'\\n', b'\\r\\n')\n text_blob = XOR.rexor(text_blob)\n\n label_blob = b''.join(\n [s.name.encode('UTF-8') + b'\\x00' for s in self.sections]\n )\n if len(self.labels) == 0:\n label_blob = b''\n\n self.header = GMD._Header(\n magic=b'GMD\\x00',\n version=b'\\x02\\x03\\x01\\x00',\n language=0,\n padding=self.padding,\n label_count=len(self.labels),\n section_count=len(self.sections),\n label_size=len(label_blob),\n section_size=len(text_blob),\n name_size=len(self.name)\n )\n\n print(pack_name)\n with open(os.path.join(pack_path, pack_name), 'wb') as f:\n f.write(self.header.dump())\n\n f.write(self.name.encode('UTF-8'))\n f.write(b'\\x00')\n\n for label in self.labels:\n f.write(label.dump())\n\n # print(self.buckets)\n if self.header.label_count > 0:\n f.write(struct.pack('<256i', *self.buckets))\n\n if self.header.label_count > 0:\n f.write(label_blob)\n\n f.write(text_blob)\n","repo_name":"hguandl/dgs2utils","sub_path":"gmd/gmd.py","file_name":"gmd.py","file_ext":"py","file_size_in_byte":8330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"75087218966","text":"import pytest\n\nfrom articlenizer import formatting\n\ndef test_replacement():\n text = ' Statistical analyses were conducted applying Statistical Program for the Social Sciences (SPS௵௵S);version 24 (f)=∇∑ (IBM;Inc. Chicago, IL, USA). '\n annotation = '''T1\tsoftware_usage 47 90\tStatistical Program for the Social Sciences\nT2\tabbreviation 92 98\tSPS௵௵S\nR1\tabbreviation_of Arg1:T2 Arg2:T1\t\nT3\tversion 110 112\t24\nT4\tdeveloper 121 129\tIBM;Inc.\nR2\tversion_of Arg1:T3 Arg2:T1\t\nR3\tdeveloper_of Arg1:T4 Arg2:T1\t\n'''\n\n sentences = formatting.brat_to_bio(text, annotation)\n target_tokens = ['Statistical', 'analyses', 'were', 'conducted', 'applying', 'Statistical', 'Program', 'for', 'the', 'Social', 'Sciences', '(', 'SPSS', ')', ';', 'version', '24', 'formtok', '(', 'IBM', ';', 'Inc', '.', 'Chicago', ',', 'IL', ',', 'USA', ')', '.']\n target_labels = ['O', 'O', 'O', 'O', 'O', 'B-software_usage', 'I-software_usage', 'I-software_usage', 'I-software_usage', 'I-software_usage', 'I-software_usage', 'O', 'B-abbreviation', 'O', 'O', 'O', 'B-version', 'O', 'O', 'B-developer', 'I-developer', 'I-developer', 'I-developer', 'O', 'O', 'O', 'O', 'O', 'O', 'O']\n assert len(sentences) == 1 and sentences[0]['tokens'] == target_tokens and sentences[0]['labels'] == target_labels\n\ndef test_bio_to_brat():\n text = 'Choroidal segmentation and thickness analyses were performed automatically with custom MATLAB ( MATLAB 2017b , The MathWorks , Inc . 
, Natick , MA , USA ) software for choroid segmentation [21] .'\n labels = 'O O O O O O O O O O B-pl_usage O B-pl_usage B-release O B-developer I-developer I-developer I-developer I-developer O O O O O O O O O O O O O'\n entities, _, _ = formatting.bio_to_brat(text, labels, split_sent=True)\n assert len(entities) == 4 and entities[0]['beg'] == 87 and entities[0]['end'] == 93 and entities[1]['beg'] == 96 and entities[1]['end'] == 102\n\ndef test_relations():\n text = ' Statistical analyses were conducted applying Statistical Program for the Social Sciences (SPS௵௵S);version 24 (f)=∇∑ (IBM;Inc. Chicago, IL, USA). '\n annotation = '''T1\tsoftware_usage 47 90\tStatistical Program for the Social Sciences\nT2\tabbreviation 92 98\tSPS௵௵S\nR1\tabbreviation_of Arg1:T2 Arg2:T1\t\nT3\tversion 110 112\t24\nT4\tdeveloper 121 129\tIBM;Inc.\nR2\tversion_of Arg1:T3 Arg2:T1\t\nR3\tdeveloper_of Arg1:T4 Arg2:T1\t\n'''\n\n sentences = formatting.brat_to_bio(text, annotation)\n assert sentences[0]['relations']['R1']['pos2'] == 45 and sentences[0]['relations']['R1']['pos1'] == 91\n\ndef test_sentence_based_info():\n text = 'This is some text with software. That will be split into more than one line. Just to debug it.'\n annotation = '''T1\\tsoftware 0 4\\tThis\nT2\\tdeveloper 8 12\\tsome\nT3\\tsoftware 38 42\\tsplit\nR1\\tdeveloper_of Arg1:T2 Arg2:T1\\t\n'''\n sentences = formatting.sentence_based_info(text, annotation)\n expected_result = [{'string': 'This is some text with software.', 'entities': {'T1': {'label': 'software', 'beg': 0, 'end': 4, 'string': 'This', 'idx': 0}, 'T2': {'label': 'developer', 'beg': 8, 'end': 12, 'string': 'some', 'idx': 1}}, 'relations': {'R1': {'label': 'developer_of', 'arg1_old': 'T2', 'arg2_old': 'T1', 'arg1': 'some', 'arg2': 'This', 'pos1': 8, 'pos2': 0, 'ent1': 1, 'ent2': 0}}}, {'string': 'That will be split into more than one line.', 'entities': {'T3': {'label': 'software', 'beg': 5, 'end': 9, 'string': 'will', 'idx': 0}}, 'relations': {}}, {'string': 'Just to debug it.', 'entities': {}, 'relations': {}}]\n assert sentences == expected_result\n\ndef test_sentence_based_info_real_sample():\n text = 'The maps were drawn from free-access shapefiles obtained from DIVA-GIS (http://www.diva-gis.org/)with QGIS 1.8.0 and ArcView 3.2 software.'\n annotation = 'T1\\treference 72 96\\thttp://www.diva-gis.org/'\n sentences = formatting.sentence_based_info(text, annotation)\n expected_result = [{'string': 'The maps were drawn from free-access shapefiles obtained from DIVA-GIS (http://www.diva-gis.org/) with QGIS 1.8.0 and ArcView 3.2 software.', 'entities': {'T1': {'label': 'reference', 'beg': 72, 'end': 96, 'string': 'http://www.diva-gis.org/', 'idx': 0}}, 'relations': {}}]\n assert sentences == expected_result\n","repo_name":"dave-s477/articlenizer","sub_path":"tests/test_formatting.py","file_name":"test_formatting.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"34020906722","text":"#Write a function to find the longest common prefix string amongst an array of strings.\n#If there is no common prefix, return an empty string \"\".\n#Constraints:\n# 1 <= strs.length <= 200\n# 0 <= strs[i].length <= 200\n# strs[i] consists of only lowercase English letters.\n\ndef longestCommonPrefix(strs):\n minLen = 201\n for i in strs:\n if 0 <= len(i) <= 200 and len(i) < minLen:\n minLen = len(i)\n\n res = \"\"\n if 1 <= len(strs) <= 200 and minLen <= 200:\n for i in range(minLen):\n common = 
strs[0][i]\n            for s in range(1, len(strs)):\n                if strs[s][i] != common:\n                    return res\n            res = res + common\n        return res\n\nresStr1 = longestCommonPrefix([\"flower\",\"flow\",\"flight\"])\nprint(resStr1)\nresStr2 = longestCommonPrefix([\"dog\",\"racecar\",\"car\"])\nprint(resStr2)\nresStr3 = longestCommonPrefix([\"carpet\", \"car\", \"cannon\"])\nprint(resStr3)\nresStr4 = longestCommonPrefix([\"cir\", \"car\"])\nprint(resStr4)","repo_name":"vvkhan/LCpython3","sub_path":"LongestCommonPrefix.py","file_name":"LongestCommonPrefix.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22884974011","text":"import tensorflow as tf\nfrom keras.layers import Input, Dense, Flatten, Conv1D, AveragePooling1D, Concatenate\nfrom keras.models import Model\n\ninputs = Input(shape=(32,1))\nh = Conv1D(16, 5, activation='relu')(inputs)\nh = AveragePooling1D(3)(h)\nh = Flatten()(h)\naux_inputs = Input(shape=(12,))\nh = Concatenate()([h, aux_inputs])\noutputs = Dense(20, activation='sigmoid')(h)\naux_outputs = Dense(1, activation='linear')(h)\n\nmodel = Model(inputs = [inputs, aux_inputs], outputs = [outputs, aux_outputs])\n\n# In this model we have multiple inputs and multiple outputs.\n# In this new model design, the auxiliary input is included in the model as an extra input to the final dense layer.\n# The shapes of the layers being concatenated have to match up.\n# Notice that the auxiliary input is one-dimensional, so it has the right shape to be fed into the dense layer.\n# Just before the final dense layer there is a Flatten layer, which outputs the unrolled tensor h.\n# The next line takes this output tensor h and concatenates it with the auxiliary input to make a single one-dimensional vector.\n\n# model.compile(loss=['binary_crossentropy', 'mse'], \n#               loss_weights=[1, 0.4], \n#               metrics = ['accuracy'])\n\n# history = model.fit([X_train, X_aux], [y_train, y_aux], validation_split=0.2, epochs=20)\n# The same goes for the model.evaluate and model.predict methods.\n\n\n# These losses are listed in the same order as the outputs.\n# If we have two loss functions, though, we need to combine them somehow. \n# We can only train our model using a gradient-based optimizer \n# if there is a single loss value that we're trying to optimize.\n\n# That's what the new loss_weights keyword argument is doing here. \n# These weights tell the model how to combine the loss functions. So here, \n# the final loss is the binary_crossentropy plus 0.4 times the mean squared error.\n\ninputs = Input(shape=(32,1), name='inputs')\nh = Conv1D(16, 5, activation='relu')(inputs)\nh = AveragePooling1D(3)(h)\nh = Flatten()(h)\naux_inputs = Input(shape=(12,), name='aux_inputs')\nh = Concatenate()([h, aux_inputs])\noutputs = Dense(20, activation='sigmoid', name='outputs')(h)\naux_outputs = Dense(1, activation='linear', name='aux_outputs')(h)\n\nmodel = Model(inputs = [inputs, aux_inputs], outputs = [outputs, aux_outputs])\n\n# model.compile(loss={'outputs': 'binary_crossentropy', 'aux_outputs': 'mse'},\n#               loss_weights={'outputs': 1, 'aux_outputs': 0.4},\n#               metrics=['accuracy'])\n\n# history = model.fit({'inputs': X_train, 'aux_inputs': X_aux},\n#                     {'outputs': y_train, 'aux_outputs': y_aux},\n#                     validation_split=0.2, epochs=20)\n
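\n# A quick shape check of the two-input/two-output wiring. The random arrays\n# below are placeholders, not real data; predict() works without compiling.\nimport numpy as np\nX = np.random.randn(8, 32, 1)\nX_aux = np.random.randn(8, 12)\npreds, aux_preds = model.predict({'inputs': X, 'aux_inputs': X_aux})\nprint(preds.shape, aux_preds.shape)  # (8, 20) (8, 1)\n","repo_name":"lalitgarg12/tensorflow-repo","sub_path":"Tensorflow 2 for Deep Learning Specialization/02. 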
Customizing your models with Tensorflow 2/Week1 - Keras Functional API/02MultipleInputsOutputs.py","file_name":"02MultipleInputsOutputs.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73473603608","text":"from __future__ import print_function\nimport os\nos.environ['CUDA_LAUNCH_BLOCKING'] = \"1\"\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import StepLR\nfrom catSNN import spikeLayer, transfer_model, SpikeDataset ,load_model, fuse_module\n\n\nclass AddGaussianNoise(object):\n def __init__(self, mean=0., std=1.):\n self.std = std\n self.mean = mean\n \n def __call__(self, tensor):\n return tensor + torch.randn(tensor.size()) * self.std + self.mean\n \n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, 1,0, bias=True)\n self.Bn1 = nn.BatchNorm2d(32)\n self.conv2 = nn.Conv2d(32, 32, 3, 1,0, bias=True)\n self.Bn2 = nn.BatchNorm2d(32)\n self.dropout1 = nn.Dropout2d(0.4)\n self.conv3 = nn.Conv2d(32, 32, 4, 2,1, bias=True)\n self.Bn3 = nn.BatchNorm2d(32)\n\n self.conv4 = nn.Conv2d(32, 64, 3, 1,0, bias=True)\n self.Bn4 = nn.BatchNorm2d(64)\n self.conv5 = nn.Conv2d(64, 64, 3, 1,0, bias=True)\n self.Bn5 = nn.BatchNorm2d(64)\n self.dropout2 = nn.Dropout2d(0.4)\n self.conv6 = nn.Conv2d(64, 64, 4, 2,1, bias=True)\n self.Bn6 = nn.BatchNorm2d(64)\n\n self.conv7 = nn.Conv2d(64, 128, 3, 1,0, bias=True)\n self.Bn7 = nn.BatchNorm2d(128)\n\n self.fc1 = nn.Linear(128*4, 10, bias=True)\n self.dropout3 = nn.Dropout2d(0.3)\n #self.fc2 = nn.Linear(128, 10, bias=True)\n\n\n\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.Bn1(x)\n x = torch.clamp(x, min=0, max=1)\n #Please add Q function during retraining\n #x = torch.div(torch.ceil(torch.mul(x,10)),10)\n\n x = self.conv2(x)\n x = self.Bn2(x)\n x = torch.clamp(x, min=0, max=1)\n #Please add Q function during retraining\n #x = torch.div(torch.ceil(torch.mul(x,10)),10)\n\n x = self.conv3(x)\n x = self.Bn3(x)\n x = torch.clamp(x, min=0, max=1)\n #Please add Q function during retraining\n #x = torch.div(torch.ceil(torch.mul(x,10)),10)\n #x = self.dropout1(x)\n \n x = self.conv4(x)\n x = self.Bn4(x)\n x = torch.clamp(x, min=0, max=1)\n #Please add Q function during retraining\n #x = torch.div(torch.ceil(torch.mul(x,10)),10)\n\n x = self.conv5(x)\n x = self.Bn5(x)\n x = torch.clamp(x, min=0, max=1)\n #Please add Q function during retraining\n #x = torch.div(torch.ceil(torch.mul(x,10)),10)\n\n #x = self.dropout3(x)\n\n x = self.conv6(x)\n x = self.Bn6(x)\n x = torch.clamp(x, min=0, max=1)\n #Please add Q function during retraining\n #x = torch.div(torch.ceil(torch.mul(x,10)),10)\n\n #x = self.dropout2(x)\n\n x = self.conv7(x)\n x = self.Bn7(x)\n x = torch.clamp(x, min=0, max=1)\n #Please add Q function during retraining\n #x = torch.div(torch.ceil(torch.mul(x,10)),10)\n\n \n x = torch.flatten(x, 1)\n\n x = self.fc1(x)\n return x\n\n\nclass CatNet(nn.Module):\n\n def __init__(self, T):\n super(CatNet, self).__init__()\n self.T = T\n snn = spikeLayer(T)\n self.snn=snn\n\n self.conv1 = snn.conv(1, 32, 3, 1,0,bias=True)\n self.conv2 = snn.conv(32, 32, 3, 1,0,bias=True)\n self.conv3 = snn.conv(32, 32, 4,2,1,bias=True)\n\n self.conv4 = snn.conv(32, 64, 
3, 1,0,bias=True)\n self.conv5 = snn.conv(64, 64, 3, 1,0,bias=True)\n self.conv6 = snn.conv(64, 64, 4, 2,1,bias=True)\n\n self.conv7 = snn.conv(64, 128, 3, 1,0,bias=True)\n \n self.fc1 = snn.dense((2,2,128), 10, bias=True)\n #self.fc2 = snn.dense(128, 10, bias=True)\n\n\n def forward(self, x):\n x = self.snn.spike(self.conv1(x))\n x = self.snn.spike(self.conv2(x))\n x = self.snn.spike(self.conv3(x))\n x = self.snn.spike(self.conv4(x))\n x = self.snn.spike(self.conv5(x))\n x = self.snn.spike(self.conv6(x))\n x = self.snn.spike(self.conv7(x))\n x = self.fc1(x)\n return self.snn.sum_spikes(x)/self.T\n\ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n onehot = torch.nn.functional.one_hot(target, 10)\n optimizer.zero_grad()\n output = model(data)\n loss = F.mse_loss(output, onehot.type(torch.float))\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(epoch, batch_idx * len(data), len(train_loader.dataset),100. * batch_idx / len(train_loader), loss.item()))\n if args.dry_run:\n break\n\n\ndef test(model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n onehot = torch.nn.functional.one_hot(target, 10)\n output = model(data)\n test_loss += F.mse_loss(output, onehot.type(torch.float), reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n print(pred.eq(target.view_as(pred)).sum().item())\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(test_loss, correct, len(test_loader.dataset),100. 
* correct / len(test_loader.dataset)))\n return correct\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=512, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 14)')\n parser.add_argument('--lr', type=float, default=1, metavar='LR',\n help='learning rate (default: 1.0)')\n parser.add_argument('--gamma', type=float, default=0.7, metavar='M',\n help='Learning rate step gamma (default: 0.7)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--dry-run', action='store_true', default=False,\n help='quickly check a single pass')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\n parser.add_argument('--T', type=int, default=450, metavar='N',\n help='SNN time window')\n parser.add_argument('--resume', type=str, default=None, metavar='RESUME',\n help='Resume model from checkpoint')\n \n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'batch_size': args.batch_size}\n if use_cuda:\n kwargs.update({'num_workers': 1,\n 'pin_memory': True,\n 'shuffle': True},\n )\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(std=0.01)\n ])\n\n transform=transforms.Compose([\n transforms.ToTensor()\n ])\n dataset1 = datasets.MNIST('../data', train=True, download=True,\n transform=transform_train)\n \n for i in range(30):\n transform_train_1 = transforms.Compose([\n\n transforms.RandomRotation(10),\n #transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n AddGaussianNoise(std=0.01)\n ])\n dataset1 = dataset1+ datasets.MNIST('../data', train=True, download=True,\n transform=transform_train_1)\n \n dataset2 = datasets.MNIST('../data', train=False,\n transform=transform)\n snn_dataset = SpikeDataset(dataset2, T = args.T)\n train_loader = torch.utils.data.DataLoader(dataset1,**kwargs)\n \n test_loader = torch.utils.data.DataLoader(dataset2, **kwargs)\n snn_loader = torch.utils.data.DataLoader(snn_dataset, **kwargs)\n\n model = Net().to(device)\n snn_model = CatNet(args.T).to(device)\n\n if args.resume != None:\n load_model(torch.load(args.resume), model)\n for param_tensor in model.state_dict():\n print(param_tensor, \"\\t\", model.state_dict()[param_tensor].size())\n optimizer = optim.Adadelta(model.parameters(), lr=args.lr)\n\n scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)\n ACC = 0\n for epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, epoch)\n ACC_ = test(model, device, test_loader)\n if ACC_>ACC or ACC_ == ACC:\n ACC = ACC_\n torch.save(model.state_dict(), \"mnist_pretrained.pt\")\n \n scheduler.step()\n # After retraining with Q function, you can transfer ANN to SNN. 
\n fuse_module(model)\n transfer_model(model, snn_model)\n test(snn_model, device, snn_loader)\n\n #if args.save_model:\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"zhoujuncc1/shenjingcat","sub_path":"examples/fashion-MNIST/fashion.py","file_name":"fashion.py","file_ext":"py","file_size_in_byte":10201,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"31"} +{"seq_id":"18907768661","text":"import os\n\nos.system(\"pip3 install hyperopt\")\nos.system(\"pip3 install lightgbm\")\nos.system(\"pip3 install pandas==0.24.2\")\nos.system(\"pip3 install scikit-learn==0.20.3\")\nos.system(\"pip3 install catboost\")\n\nimport copy\nimport numpy as np\nimport pandas as pd\n\nfrom automl import predict, train, validate\nfrom CONSTANT import MAIN_TABLE_NAME\nfrom merge import merge_table\nfrom preprocess import clean_df, clean_tables, transform_numeric,transform_categorical_hash\nfrom util import Config, log, show_dataframe, timeit\nfrom model_automl import Model_NIPS\nimport time\nfrom sklearn.preprocessing import OneHotEncoder\n\n\n\nclass Model:\n def __init__(self, info):\n self.config = Config(info)\n self.tables = None\n self.diff_info=None\n self.model = None\n self.time={}\n self.training_data = None\n self.start_time = time.time()\n\n @timeit\n def fit(self, Xs, y, time_ramain):\n\n self.tables = copy.deepcopy(Xs)\n \n self.dropcols = []\n \n self.istrain = True\n \n self.numericmap = {} \n self.square_cubic_transform = True\n \n self.skewness = True\n \n clean_tables(Xs)\n enc = OneHotEncoder(handle_unknown='ignore')\n\n self.ohe = enc\n \n start = time.time()\n X = merge_table(Xs, self.config)\n self.time['merging_train']= time.time() -start\n clean_df(X)\n \n start = time.time()\n #feature_engineer(X, self.config, self.dropcols, self.numericmap, self.istrain,self.square_cubic_transform,self.skewness)\n transform_numeric(X, self.dropcols, self.numericmap, self.istrain,self.square_cubic_transform,self.skewness)\n transform_categorical_hash(X, self.dropcols,self.istrain)\n \n self.time['feature_engineer']= time.time() -start\n \n\n numerical_list = list()\n date_time = list()\n categorical=list()\n \n for term,col in enumerate(X.columns):\n if ((X[col].dtype == \"int64\") or (X[col].dtype==\"float64\")):\n numerical_list.append(term)\n if ((X[col].dtype==\"datetime64[ns]\")):\n date_time.append(term)\n if ((X[col].dtype.name==\"category\")):\n categorical.append(term) \n \n \n datainfo={}\n\n\n \n datainfo['loaded_feat_types'] = list()\n datainfo['loaded_feat_types'].append(date_time)\n datainfo['loaded_feat_types'].append(numerical_list)\n datainfo['loaded_feat_types'].append(categorical)\n datainfo['time_budget'] = self.config['time_budget']\n\n self.diff_info = datainfo\n \n self.training_data = X\n self.model = Model_NIPS(datainfo)\n start = time.time()\n self.model.fit(X, y,datainfo)\n\n self.time['fitting']= time.time() -start \n \n\n @timeit\n def predict(self, X_test, time_remain):\n\n Xs = self.tables\n self.istrain = False\n \n \n \n Xs[MAIN_TABLE_NAME] = X_test\n\n clean_tables(Xs)\n start = time.time()\n\n X = merge_table(Xs, self.config)\n\n self.time['merging_test']= time.time() -start\n\n clean_df(X)\n\n \n #feature_engineer(X, self.config, self.dropcols,self.numericmap, self.istrain,self.square_cubic_transform,self.skewness)\n\n transform_numeric(X, self.dropcols, self.numericmap, self.istrain,self.square_cubic_transform,self.skewness)\n transform_categorical_hash(X, self.dropcols,self.istrain)\n \n start = time.time()\n 
result =self.model.predict(X,self.diff_info,self.start_time)\n\n self.time['result_predict']= time.time() -start \n\n return pd.Series(result)\n","repo_name":"flytxtds/KDDAutoML2019","sub_path":"sample_code_submission/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"35984717","text":"\"\"\"\n\nTicTacToe:\nObservations:\n* on an NxN board, NOBODY can win until the 2N - 1st move has been made. Thus we do not need to check for winners until this point\n* we only ever need to check if the current piece has won the game, because previous pieces will have been checked on previous turns\n * corollary: we can short circuit this check as soon as a blocking piece or empty square is found\n\nOptimization:\n* once a piece becomes blocked from winning in a given direction, we can mark it, and all pieces in that direction, as unable to win in that direction. If N is large, this could save many checks (i.e. if N == 1000 and p1 has 999 pieces in a row and then there's 1 piece from p2). Not implemented here.\n\nStrategy:\nboard initialized to 0, we set 1 or 2 to mark whether p1 or p2 has left piece\nmove:\n * place piece on board (we can assume move is proper)\n * if current_move # >= 2N - 1, check for winner from current piece\n * check to left and to right, return false if any numbers do not equal current player #\n * check above and below\n * if square_number is equal to (i,i) or (i,n-i-1): check diagonal up left, down right, up right, down left as well\n * check function can be parametrized using delta row, delta col rather than having different ones for each direction\n * if move results in winner, return current player #\n * if current_move # == N^2, return 0 --> tie, no winner\n\n\"\"\"\n\nclass TicTacToe(object):\n\n def __init__(self, n):\n \"\"\"\n Initialize your data structure here.\n :type n: int\n \"\"\"\n self.__board = [[0] * n for j in range(n)]\n self.__max_moves = n*n\n # players can move out of order\n self.__first_win_move = n\n self.__move_num = 0\n\n def move(self, row, col, player):\n \"\"\"\n Player {player} makes a move at ({row}, {col}).\n @param row The row of the board.\n @param col The column of the board.\n @param player The player, can be either 1 or 2.\n @return The current winning condition, can be either:\n 0: No one wins.\n 1: Player 1 wins.\n 2: Player 2 wins.\n :type row: int\n :type col: int\n :type player: int\n :rtype: int\n \"\"\"\n if self.__move_num < self.__max_moves:\n self.__board[row][col] = player\n self.__move_num += 1\n if self.__move_num >= self.__first_win_move and self.__check_for_winner(player,row,col):\n return player\n return 0\n\n def __check_for_winner(self,player,row,col):\n won = False\n if self.__has_won(player,row,col,1,0) \\\n or self.__has_won(player,row,col,0,1) \\\n or (row == col and self.__has_won(player,row,col,1,1)) \\\n or (row == len(self.__board) - col - 1 and self.__has_won(player,row,col,-1,1)):\n won = True\n return won\n \n def __has_won(self,player,row,col,row_delta,col_delta):\n return self.__verify_win(player,row,col,row_delta,col_delta) and self.__verify_win(player,row,col,row_delta * -1, col_delta * -1)\n \n def __verify_win(self,player,row,col,row_delta,col_delta):\n current_cell = [row + row_delta,col + col_delta]\n while self.__valid_cell(current_cell):\n if self.__board[current_cell[0]][current_cell[1]] != player:\n return False\n current_cell[0] += row_delta\n current_cell[1] += 
col_delta\n return True\n \n def __valid_cell(self,cell):\n return cell[0] >= 0 and cell[1] >= 0 and cell[0] < len(self.__board) and cell[1] < len(self.__board)\n\n# Your TicTacToe object will be instantiated and called as such:\n# obj = TicTacToe(n)\n# param_1 = obj.move(row,col,player)\n","repo_name":"TedYav/CodingChallenges","sub_path":"leetcode/Microsoft/tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12904046342","text":"#!/usr/bin/env python3\nfrom datetime import datetime\nfrom argparse import ArgumentParser, FileType\n\ndef main(args) -> int:\n fin = open(args.infile.name, 'r')\n time_lst = []\n process_list = []\n\n full_process_time = []\n\n for line in fin:\n if args.cue[0] in line:\n process_name, date, the_time = setup_str_lst(line)\n\n if process_name not in [\"pre_process\", \"mid_process\",\"final_process\",\"full_process\",\"post_process\"]:\n process_list.append(cue_start_startword + ' ' + process_name)\n time_lst.append(date + ' ' + the_time)\n elif process_name == \"full_process\":\n full_process_time.append(date + ' ' + the_time)\n\n elif args.cue[1] in line:\n process_name, date, the_time = setup_str_lst(line)\n\n if process_name not in [\"pre_process\", \"mid_process\", \"final_process\", \"full_process\", \"post_process\"]:\n\n process_list.append(cue_end_startword + ' ' + process_name)\n time_lst.append(date + ' ' + the_time)\n elif process_name == \"full_process\":\n full_process_time.append(date + \" \" + the_time)\n\n fin.close()\n return full_print(process_list, time_lst, full_process_time)\n\n\ndef setup_str_lst(line) -> tuple:\n strlst = line.split()\n\n # get name of process\n process_name = strlst[2]\n\n # get date and time from string\n date = num_from_string(strlst[3])\n the_time = num_from_string(strlst[4])\n return process_name, date, the_time\n\n\ndef num_from_string(date_str: str) -> str:\n before_nums = True\n begin_nums = 0\n last_nums = 0\n for i in range(len(date_str)):\n if before_nums and date_str[i].isdigit():\n before_nums = False\n begin_nums = i\n last_nums = i\n elif date_str[i].isdigit():\n last_nums = i\n return date_str[begin_nums:last_nums + 1]\n\n\ndef full_print(process_lst: str, time_lst: str, full_process_time: str) -> int:\n start_table = {}\n final_table = {}\n out_string = \"\"\n total_time = 0\n\n for i in range(len(process_lst)):\n individual_process = process_lst[i].split()\n\n if cue_end_startword in individual_process:\n # get name of process\n process_name = individual_process[1]\n start_time = start_table[process_name]\n end_time = get_time_formatted(time_lst[i])\n del start_table[process_name]\n actual_time = end_time - start_time\n total_time += actual_time\n\n if process_name in final_table:\n final_table[process_name] += actual_time\n else:\n final_table[process_name] = actual_time\n\n else:\n process_name = individual_process[1]\n end_time = get_time_formatted(time_lst[i])\n\n start_table[process_name] = end_time\n # final_table[\"total\"] = total_time\n\n for key in final_table:\n final_table[key] = scale_time(final_table[key])\n out_string += (str(key) + ':').ljust(27) + str(final_table[key]) + '\\n'\n\n full_process_actual = get_time_formatted(full_process_time[1]) - get_time_formatted(full_process_time[0])\n\n\n dif_time = full_process_actual - total_time\n\n out_string += \"\\n\" + \"Time accounted for:\".ljust(27) + scale_time(total_time)\n out_string += \"\\n\" + 
\"Full process actual time:\".ljust(27) + scale_time(full_process_actual)\n out_string += \"\\n\" + \"Time unaccounted for:\".ljust(27) + scale_time(dif_time) + '\\n'\n return cust_out(out_string)\n\n\ndef scale_time(time_seconds: int) -> str:\n updated_time = 0\n if args.time_format == 'f':\n updated_time = full_format(time_seconds)\n elif args.time_format == 's':\n updated_time = seconds_format(time_seconds)\n elif args.time_format == 'm':\n updated_time = minutes_format(time_seconds)\n elif args.time_format == 'h':\n updated_time = hours_format(time_seconds)\n return updated_time\n\n\ndef hours_format(num: int) -> str:\n updated_time = num / 3600\n updated_time = handle_truncate(updated_time)\n return str(updated_time) + \" hours\"\n\n\ndef minutes_format(num: int) -> str:\n updated_time = num / 60\n updated_time = handle_truncate(updated_time)\n return str(updated_time) + \" minutes\"\n\n\ndef seconds_format(num: int) -> str:\n return str(int(num)) + \" seconds\"\n\n\ndef full_format(num: int) -> str:\n updated_time = 0\n if num >= 3600:\n updated_time = hours_format(num)\n elif num >= 60:\n updated_time = minutes_format(num)\n else:\n updated_time = seconds_format(num)\n return updated_time\n\n\ndef handle_truncate(num: int) -> int:\n if args.truncate_level == 0:\n return int(num)\n return truncate(num, args.truncate_level)\n\n\ndef truncate(f: int, n: int) -> int:\n s = '%.12f' % f\n i, p, d = s.partition('.')\n return '.'.join([i, (d+'0'*n)[:n]])\n\n\ndef get_time_formatted(time_lst: list) -> int:\n dt_obj = datetime.strptime(time_lst, args.date_format)\n return dt_obj.timestamp()\n\n\ndef cust_out(out_string: str) -> int:\n if args.outfile:\n fout = open(args.outfile.name, 'w')\n fout.write(out_string)\n fout.close()\n else:\n print(out_string)\n return 0\n\n\n# handle arguments:\nparser = ArgumentParser(\n description=\"Returns the times processes run from log text files.\")\nparser.add_argument(dest=\"infile\", \n nargs='?',\n type=FileType('r'),\n help=\" log's full path\")\n\nparser.add_argument('-c', dest=\"cue\",\n nargs='+',\n default=[\"Start app_process: \", \"End app_process: \"],\n help=\"Input the text expected at the beginning of the line holding the date, ex:(-c 'Start app_process:' 'End app_process')(default)\")\n\nparser.add_argument('-d', dest=\"date_format\",\n nargs='?',\n default=\"%Y-%m-%d %H:%M:%S\",\n type=str,\n help=\"Select date format to be used in document, refer to python module 'datetime'\")\n \n\nparser.add_argument('-L', dest=\"truncate_level\",\n nargs='?',\n default=3,\n type=int,\n help=\"Select decimal rounding level(default=3)\")\n\nparser.add_argument('-o', dest=\"outfile\",\n nargs='?',\n type=FileType('w'),\n help=\"Save to file \")\n\nparser.add_argument('-t', dest=\"time_format\",\n nargs='?',\n default=\"f\",\n type=str,\n choices=['f', 's', 'm', 'h'],\n help=\"select time format full(default), seconds, minutes, or hours only\")\n\nparser.add_argument('-v', \"--version\", \n action=\"version\", \n version=\"BCS Timer Version 1.1\",\n help=\"Show program version\")\n\n# parser.add_argument('-r', '--run', help=\"Time during run\") # may or may not implement, seems unnecissary\n\nargs = parser.parse_args()\n\ncue_start = args.cue[0].split()\ncue_end = args.cue[1].split()\n\ncue_start_startword = cue_start[0]\ncue_end_startword = cue_end[0]\n\nif __name__ == \"__main__\":\n if not main(args):\n print(args.infile.name+ \" processed 
successfully.\")\n","repo_name":"Ktoks/RRD","sub_path":"bcs_timer/bcs_timer.py","file_name":"bcs_timer.py","file_ext":"py","file_size_in_byte":7323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32898036160","text":"\"\"\" The module implements common options like wait until page is loaded \"\"\"\nimport os\nfrom time import time, sleep\nfrom datetime import datetime\n\nfrom common.constants import log_message\n\nMAX_WAIT_TIME = 180\nFAILED_ASSERTIONS = 0\nTOTAL_ASSERTIONS = 0\n\ndef add_cookie(driver, cookie_name, cookie_value, log_level='WARN'):\n \"\"\"\n Add Cookie\n :param driver:\n :param cookie_name:\n :param cookie_value:\n :param log_level:\n :return:\n \"\"\"\n log_message(\"Adding cookie '{}' with value '{}'\".format(cookie_name, cookie_value), log_level)\n driver.add_cookie({'name': cookie_name, 'value': cookie_value})\n\ndef delete_cookie(driver, cookie_name, log_level='WARN'):\n \"\"\"\n Add Cookie\n :param driver:\n :param cookie_name:\n :param log_level:\n :return:\n \"\"\"\n log_message(\"Deleting cookie '{}'\".format(cookie_name), log_level)\n driver.delete_cookie(cookie_name)\n\ndef set_attribute(driver, element, attribute_name, attribute_value):\n \"\"\"\n set attribute of an element\n :param driver:\n :param element:\n :param attribute_name:\n :param attribute_value:\n :return:\n \"\"\"\n if is_visible(element):\n scroll_to(driver, element)\n driver.execute_script(\n \"arguments[0].setAttribute('{}', '{}');\".format(\n attribute_name, attribute_value), element)\n\ndef highlight(driver, element, width=3, color='red'):\n \"\"\"\n Highlight an element\n :param driver:\n :param element:\n :param width: width of border\n :param color: color of border\n :return:\n \"\"\"\n set_attribute(driver, element, 'style', 'border: {}px solid {};'.format(\n width, color))\n\ndef unhighlight(driver, element):\n \"\"\"\n Unhighlight an element\n :param driver:\n :param element:\n :return:\n \"\"\"\n set_attribute(driver, element, 'style', 'border: 0px solid green;')\n\ndef animate(driver, element=None, name='highlight'):\n \"\"\"\n Red-border the element for a moment\n :param driver:\n :param element:\n :param log_level:\n :return: None\n \"\"\"\n if not get_test_property(\"animate\"):\n return\n try:\n if is_visible(element):\n animation_time = get_test_property(\"animation_time\")\n highlight(driver, element=element)\n save_screenshot(driver, name='{}'.format(name))\n sleep(animation_time)\n unhighlight(driver, element=element)\n except Exception as exception:\n log_message(\"Error in animate {}\".format(exception), 'WARN')\n\n\ndef check_failed_assertions(log_level=\"WARN\"):\n \"\"\"\n :param log_level:\n :return:\n This function should be called at the end of a test to check whether there were failures\n \"\"\"\n if FAILED_ASSERTIONS:\n message = \"{} assertion(s) failed during the run\".format(FAILED_ASSERTIONS)\n log_message(message, \"ERROR\")\n raise Exception(message)\n if TOTAL_ASSERTIONS:\n log_message(\"All {} assertions passed\".format(TOTAL_ASSERTIONS), log_level)\n else:\n log_message(\"None assertions were verified\")\n\n\ndef assert_and_log(assertion, message=\"\", continue_on_error=True):\n \"\"\"\n validate assertion and log the result\n if it failed either increase number of failed assertions or raise exception\n\n :param assertion:\n :param message:\n :param continue_on_error:\n :return:\n \"\"\"\n global FAILED_ASSERTIONS, \\\n TOTAL_ASSERTIONS\n TOTAL_ASSERTIONS += 1\n message = \"{}. 
asserting:{}\".format(TOTAL_ASSERTIONS, message)\n try:\n if assertion:\n log_message(message)\n else:\n FAILED_ASSERTIONS += 1\n message += \" NOT TRUE\"\n log_message(message, \"ERROR\")\n if not continue_on_error:\n raise Exception(message)\n except Exception as exception:\n return handle_error(exception, continue_on_error=continue_on_error)\n\ndef save_screenshot(driver, name='', log_level='INFO'):\n \"\"\"\n Save screenshot in a file\n If file name is not set, use timestamp with extension png\n :param driver:\n :param name:\n :return:\n \"\"\"\n try:\n if not driver:\n return\n except:\n return\n folder = get_test_property('screenshots_folder')\n if not folder:\n folder = 'screenshots'\n extension = '.png'\n full_name = os.path.join(folder, \"{}_{}{}\".format(\n datetime.now().strftime(\"%Y-%M-%d_%H-%M-%S.%f\")[:-3], name, extension))\n driver.get_screenshot_as_file(full_name)\n log_message('saved screenshot {}'.format(full_name), log_level)\n\ndef handle_error(exception=None, message=\"\", continue_on_error=True):\n \"\"\"\n If continue_on_error:\n - None: return None\n - True: construe as a failed soft assertion and continue execution\n - False: re-raise exception\n\n :param exception:\n :param message:\n :param continue_on_error:\n :return:\n \"\"\"\n if continue_on_error is None:\n return None\n global FAILED_ASSERTIONS\n log_message(\"{} execution continues\\n{}\".format(message, str(exception)), \"ERROR\")\n FAILED_ASSERTIONS += 1\n if continue_on_error:\n return None\n if exception:\n raise exception\n raise exception(message)\n\ndef is_visible(element, log_level=\"DEBUG\"):\n \"\"\"\n returns whether the element is visible\n it checks that:\n element is not Null\n if is_displayed method is available, it's value is True\n if size is available, and only one of (width, length) is 0\n \"\"\"\n if not element:\n log_message(\"element is None -> not visible\", log_level)\n return False\n if hasattr(element, 'is_displayed') and not element.is_displayed():\n log_message(\"element.is_displayed() = False -> not visible\", log_level)\n return False\n _class = element.get_attribute('class')\n if _class and _class.find('_hide') > -1:\n log_message(\"element's class {} contains _hide -> not visible\".format(_class))\n try:\n if element.size['height'] + element.size['width'] and \\\n not element.size['height'] * element.size['width']:\n log_message(\"elements height or width is 0 -> not visible\", log_level)\n return False\n except:\n log_message(\"element does not have width or height\")\n log_message(\"element is visible\", log_level)\n return True\n\ndef is_visible_and_enabled(element, log_level=\"DEBUG\"):\n \"\"\"\n returns whether the element is visible and enabled\n \"\"\"\n if not is_visible(element, log_level):\n log_message(\"element is not visible\", log_level)\n return False\n if hasattr(element, 'is_enabled') and not element.is_enabled():\n log_message(\"element.is_displayed() = False -> not visible and enabled\", log_level)\n return False\n log_message(\"element is visible and enabled\", log_level)\n return True\n\ndef back(driver, log_level=\"INFO\"):\n \"\"\"\n go back and wait until page is loaded\n :param driver:\n :param log_level:\n :return:\n \"\"\"\n log_message(\"Go back\", log_level)\n driver.back()\n wait_until_page_is_loaded(driver, wait_time=MAX_WAIT_TIME)\n\ndef navigate_to(driver, url, log_level=\"INFO\"):\n \"\"\"\n navigate to url and wait until it's loaded\n\n :param driver:\n :param url:\n :param wait_time:\n :return:\n \"\"\"\n log_message(\"Navigate to \" + 
url, log_level)\n driver.get(url)\n wait_until_page_is_loaded(driver, wait_time=MAX_WAIT_TIME)\n\ndef refresh(driver, wait_time=MAX_WAIT_TIME):\n \"\"\"\n refresh and wait until pageis reloaded\n :param driver:\n :param wait_time:\n :return:\n \"\"\"\n log_message(\"Refreshing page ...\")\n driver.refresh()\n wait_until_page_is_loaded(driver, wait_time)\n\ndef refresh_if_asked(driver, wait_time=MAX_WAIT_TIME):\n \"\"\"\n If message asking to refresh appears, refresh until message disappears or wait_time expires\n :param driver:\n :param wait_time:\n :return:\n \"\"\"\n to_refresh_xpath = \"//button[@class='link' and @onclick='location.reload()']\"\n to_refresh = driver.find_elements('xpath', to_refresh_xpath)\n start_time = time()\n while to_refresh and (time() < start_time + wait_time):\n if to_refresh:\n log_message(\"Refreshing page requested\", \"WARN\")\n refresh(driver, wait_time)\n to_refresh = driver.find_elements('xpath', to_refresh_xpath)\n if to_refresh:\n log_message(\"Request to refresh the page didn't dissapear\", \"WARN\")\n\ndef wait_until_page_is_loaded(driver, wait_time=MAX_WAIT_TIME):\n \"\"\"\n wait until page is loaded\n :param driver:\n :param wait_time:\n :return:\n \"\"\"\n start_time = time()\n source = driver.page_source\n first = True\n loaded = False\n while first or (\n source != driver.page_source and not loaded and time() < start_time + wait_time):\n sleep(.2)\n first = False\n loaded = (driver.execute_script(\"return document.readyState\") == \"complete\")\n source = driver.page_source\n refresh_if_asked(driver, wait_time=wait_time)\n if get_test_property('save_screenshots'):\n save_screenshot(driver, name=\"page_loaded\")\n if source != driver.page_source:\n log_message(\"Page is still updating ...\", \"WARN\")\n\ndef scroll_to(driver, element=None, log_level=\"DEBUG\", continue_on_error=True):\n \"\"\"\n Scroll to the element\n :param driver:\n :param element:\n :param log_level:\n :return:\n \"\"\"\n try:\n log_message(\"scroll_to {}\".format(str(element)), log_level)\n driver.execute_script(\"arguments[0].scrollIntoView(true);\", element)\n except:\n handle_error(message=\"error while scrolling to the item\",\n continue_on_error=continue_on_error)\n\ndef get_test_property(property_name, log_level=\"DEBUG\"):\n \"\"\"\n return test property from environment variable PARAMS\n if such property is not set, return None\n :param property_name:\n :return:\n \"\"\"\n content = os.environ.get(\"PARAMS\", \"{}\")\n try:\n result = eval(content).get(property_name, None)\n log_message(\"Value of property {} is {}\".format(property_name, result), log_level)\n return result\n except:\n log_message(\"Can't process environment variable PARAMS '{}'\".format(content), \"WARN\")\n return None\n\ndef set_test_property(property_name, property_value, log_level=\"DEBUG\"):\n \"\"\"\n set test property to environment variable PARAMS\n if such property is not set, return None\n :param property_name:\n :return:\n \"\"\"\n try:\n content = eval(os.environ.get(\"PARAMS\", \"{}\"))\n log_message(\"variable PARAMS '{}'\".format(content), log_level)\n except:\n log_message(\"wrong value of environment variable PARAMS '{}'\".format(content), \"WARN\")\n content = {}\n content[property_name] = property_value\n os.environ['PARAMS'] = str(content)\n\ndef validate(basic_list, to_add=list(), to_exclude=list(), log_level=\"INFO\", wait_time=1):\n \"\"\"\n validate that items in the list are visible.\n Basic list is optionally adjusted with to_add and to_exclude list\n to comply with 
requirements with different pages and tests\n :param to_add: items to be added to basic validation list\n :param to_exclude: items to be excluded from basic validation list\n :param log_level:\n :param wait_time:\n :return:\n \"\"\"\n for item in basic_list:\n if item not in to_exclude:\n assert_and_log(\n item.find_visible_element(log_level=\"DEBUG\", wait_time=wait_time),\n \"{} is visible\".format(str(item)), True)\n first = True\n for item in to_add:\n if first:\n first = False\n log_message(\"{stars} validating additional fields {stars}\".format(\n stars=20 * '*'), log_level)\n if item in to_add and item not in to_exclude:\n assert_and_log(\n item.find_visible_element(log_level=\"DEBUG\", wait_time=wait_time),\n \"{} is visible\".format(str(item)), True)\n\ndef validate_negative(item):\n \"\"\"\n validate that item is not visible.\n :param item\n :return:\n \"\"\"\n assert_and_log(\n not item.find_visible_elements(0, log_level=\"DEBUG\", wait_time=0),\n \"{} is not visible\".format(str(item)), True)","repo_name":"nsluzky/returnly_affirm","sub_path":"common/common_actions.py","file_name":"common_actions.py","file_ext":"py","file_size_in_byte":12199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74638268569","text":"import cv2\nimport numpy as np\n\nfrom tools import parameters\n\n\ndef processForEdgeDetection(original_img):\n processed_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)\n processed_img = cv2.Canny(\n processed_img, \n threshold1=parameters.CANNY_THRESHOLD1, \n threshold2=parameters.CANNY_THRESHOLD2\n )\n return processed_img\n\ndef processForLogging(original_img):\n img = processForEdgeDetection(original_img)\n img = cv2.resize(img, \n (\n parameters.RESIZED_WIDTH, \n parameters.RESIZED_HEIGHT\n )\n )\n img = {\"Frame\": img.reshape(-1)}\n return img\n\ndef processForPrediction(original_img):\n img = processForEdgeDetection(original_img)\n img = cv2.resize(img, \n (\n parameters.RESIZED_WIDTH, \n parameters.RESIZED_HEIGHT\n )\n )\n return img\n\ndef bgrToGray(original_img):\n processed_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)\n return processed_img\n\ndef binaryThreshold(original_img, threshold):\n ret,processed_img = cv2.threshold(original_img,threshold,255,cv2.THRESH_BINARY)\n return processed_img","repo_name":"igsmo/CSGO-Bot","sub_path":"tools/imageProcessor.py","file_name":"imageProcessor.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"1662584762","text":"import cv2\nimport numpy as np\nfrom sahi.models import yolov5\n\nh, w = None, None\nnet = cv2.dnn.readNet('dnn/yolov4.weights','dnn/yolov4.cfg')\nclasses = []\n\nwith open('coco.names','r') as f:\n classes = f.read().splitlines()\n\n# Getting only output layer names that we need from YOLO\n# ln = net.getLayerNames()\n# ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n# Initialize colours for representing every detected object\ncolours = np.random.randint(0, 255, size=(len(classes), 3), dtype='uint8')\n# print(colours)\n\n# print(classes)\n\n\ncap = cv2.VideoCapture('tellofintuna.mp4')\n# height, width, _ = cap.read()\n\n\nwhile True:\n\n _,frame = cap.read()\n if not _:\n break\n # Getting dimensions of the frame for once as everytime dimensions will be same]\n img = cv2.resize(frame, (650, 550))\n blob = cv2.dnn.blobFromImage(img, 1.0, (416, 416), (0, 0, 0), swapRB=True, crop=False)\n # print(blob)\n 
net.setInput(blob)\n # Perform forward pass\n detections = net.forward()\n\n for detection in detections:\n confidence = detection[2]\n print(detection)\n print(confidence)\n\n\n height, width, _ = img.shape\n\n\n\n\n\n # boxes, masks = net.forward([\"detection_out_final\", \"detection_masks\"])\n\n\n # Detection objetc on frame\n # od.detect(frame)\n\n\n cv2.imshow('Frame',frame)\n key = cv2.waitKey(0)\n if key == 27:\n break\n\ncap.release()\ncv2.destroyAllWindows()\n\n","repo_name":"suleyman735/object_tracking","sub_path":"youtubevideos.py","file_name":"youtubevideos.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35378124697","text":"wrds = [\"end\", 'work', \"play\", \"start\", \"walk\", \"look\", \"open\", \"rain\", \"learn\", \"clean\"]\n\n# pasa los verbos a pasado\n\npastensear = lambda w : w + \"ed\"\npast_wrds = []\nfor word in wrds:\n past_wrds.append(pastensear(word))\n\n\ninventory = [\"shoes, 12, 29.99\", \"shirts, 20, 9.99\", \"sweatpants, 25, 15.00\", \"scarves, 13, 7.75\"]\n\n\nparse_item = lambda item : item.split(\", \")\nget_item = lambda items, idx : items[idx]\n\nfor item in inventory:\n print('The store has {} {}, each for {} USD.'.format(get_item(parse_item(item), 1), get_item(parse_item(item), 0), get_item(parse_item(item), 2)))\n\n\nscores = \"67 80 90 78 93 20 79 89 96 97 92 88 79 68 58 90 98 100 79 74 83 88 80 86 85 70 90 100\"\n\nparse_array_scores = lambda scores : scores.split(\" \")\nparse_int_array = lambda scores : map(int, scores)\na_scores = 0\nfor score in parse_int_array(parse_array_scores(scores)):\n if(score >= 90):\n a_scores += 1\n\n\n\n\nstopwords = ['to', 'a', 'for', 'by', 'an', 'am', 'the', 'so', 'it', 'and', \"The\"]\norg = \"The organization for health, safety, and education\"\n\norg_lower = org.lower()\n\norg_words = org_lower.split()\nacro = \"\"\nfor stop_word in stopwords:\n for stop_word_time in range(org_lower.count(stop_word)):\n try:\n org_words.remove(stop_word)\n except:\n pass\n\nfor word in org_words:\n word = word.upper()\n acro += word[0]","repo_name":"santiagoclv/python-3","sub_path":"basics_2/week_1/lambdas.py","file_name":"lambdas.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"7342008576","text":"from sequence_lib import read_fasta\nfrom rand_aln_prob import rand_Aln\nfrom math import log\n\nclass merger:\n\tdef __init__(self,ref_aln_file):\n\t\ttaxa_names, ref_aln = read_fasta(ref_aln_file)\n\t\tself.ref_aln = ref_aln\t\n\t\tself.tax2seqidx = {}\n\n\t\tfor i in range(len(taxa_names)):\n\t\t\tself.tax2seqidx[taxa_names[i]] = i\n\n\tdef show_taxa(self):\n\t\treturn self.tax2seqidx.keys()\n\n\tdef seqidx(self,tax):\n\t\treturn self.tax2seqidx[tax]\n\n\tdef ref_matching(self,aln1,taxa1,aln2,taxa2):\n\t\tm = len(self.ref_aln[0])\n\t\tn = len(aln1) + len(aln2)\n\t\tmatching = [[-1 for x in range(m)] for y in range(n)]\n\t\tmatch1 = [[-1 for x in range(len(aln1[0])+1)] for y in range(len(aln1))] # the last column stores the length of each sequence\n\t\tmatch2 = [[-1 for x in range(len(aln2[0])+1)] for y in range(len(aln2))] # the last column stores the length of each sequence\n\n\n\t\t#taxa1, aln1 = read_fasta(aln1_file)\n\t\t#taxa2, aln2 = read_fasta(aln2_file)\n\n\t\t# match aln1 to ref_aln\n\t\tfor j1 in range(len(aln1)):\n\t\t\ttax = taxa1[j1]\n\t\t\tj = self.seqidx(tax)\n\t\t\ti1 = 0\n\t\t\tk = 0\n\t\t\tgap1 = 
0\n\t\t\tfor i in range(m):\n\t\t\t\tif self.ref_aln[j][i] == '-':\n\t\t\t\t\tgap1 += 1\n\t\t\t\t\tcontinue\n\t\t\t\twhile aln1[j1][i1] == '-':\n\t\t\t\t\ti1 += 1\n\t\t\t\tmatch1[j1][i1] = k\n\t\t\t\tk += 1\n\t\t\t\tif aln1[j1][i1] == self.ref_aln[j][i]:\n\t\t\t\t\tmatching[j1][i] = i1\n\t\t\t\t\ti1 += 1\n\t\t\t\t\t#print(matching)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"reference alignment and alignment 1 are not matched at taxon \" + tax)\n\t\t\tmatch1[j1][len(aln1[0])] = k\n\t\tgap_rate1 = float(gap1)/len(self.ref_aln[0])/len(aln1)\n\t\t# match aln2 to ref_aln\n\t\tfor j2 in range(len(aln2)):\n\t\t\ttax = taxa2[j2]\n\t\t\tj = self.seqidx(tax)\n\t\t\ti2 = 0\n\t\t\tk = 0\n\t\t\tgap2 = 0\n\t\t\tfor i in range(m):\n\t\t\t\tif self.ref_aln[j][i] == '-':\n\t\t\t\t\tgap2 += 1\n\t\t\t\t\tcontinue\n\t\t\t\twhile aln2[j2][i2] == '-':\n\t\t\t\t\ti2 += 1\n\t\t\t\tmatch2[j2][i2] = k\n\t\t\t\tk += 1\n\t\t\t\tif aln2[j2][i2] == self.ref_aln[j][i]:\n\t\t\t\t\tmatching[j2+len(aln1)][i] = i2\n\t\t\t\t\ti2 += 1\n\t\t\t\telse:\n\t\t\t\t\tprint(\"reference alignment and alignment 2 are not matched at taxon \" + tax)\n\t\t\tmatch2[j2][len(aln2[0])] = k\n\t\tgap_rate2 = float(gap2)/len(self.ref_aln[0])/len(aln2)\n\t\treturn matching,match1,match2,gap_rate1,gap_rate2\n\n\tdef residue_count(self,aln):\n\t\tR = []\n\t\tfor j in range(len(aln[0])):\n\t\t\tR += [0]\n\t\t\tfor i in range(len(aln)):\n\t\t\t\tif aln[i][j] != '-':\n\t\t\t\t\tR[j] += 1\n\t\treturn R\n\n\tdef heuristic_score(self,aln1,taxa1,aln2,taxa2):\n\t\tmatching,m1,m2,gap_rate1,gap_rate2 = self.ref_matching(aln1,taxa1,aln2,taxa2)\n\t\tm = len(matching[0])\n\t\tn = len(matching)\n\t\tTP_score = {}\n\t\t\t\n\t\tfor i in range(m):\n\t\t\tL1 = []\n\t\t\td1 = {}\n\t\t\tL2 = []\n\t\t\td2 = {}\n\t\t\tfor j in range(n):\n\t\t\t# below L and d are used as \"references\" (just as pointers in C++): they are not deep copy, but a shallow copy of L1/L2 and d1/d2\n\t\t\t\tif j < len(aln1):\n\t\t\t\t\tL = L1\n\t\t\t\t\td = d1\n\t\t\t\telse:\n\t\t\t\t\tL = L2\n\t\t\t\t\td = d2\n\t\t\t\t#if j == len(aln1) and L1 == []:\n\t\t\t\t\t#L1 = L1 + [-1]\t\t\n\t\t\t\t\t#d1[-1] = len(aln1)\n\t\t\t\tif matching[j][i] >= 0:\n\t\t\t\t\tif matching[j][i] in d:\n\t\t\t\t\t\td[matching[j][i]] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tL += [matching[j][i]]\n\t\t\t\t\t\td[matching[j][i]] = 1\n\t\t\tfor x in L1:\n\t\t\t\tfor y in L2:\n\t\t\t\t\tif (x,y) not in TP_score:\n\t\t\t\t\t\tTP_score[(x,y)] = 0\n\t\t\t\t\tTP_score[(x,y)] += float(d1[x])*d2[y]#/len(aln1)/len(aln2)\n\t\treturn TP_score,m1,m2,gap_rate1,gap_rate2\n\t\n\tdef logodd_score(self,aln1,taxa1,aln2,taxa2,rand_P):\n\t\tTP_score,match1,match2,gap_rate1,gap_rate2 = self.heuristic_score(aln1,taxa1,aln2,taxa2)\n\t\t#print(gap_rate1)\n\t\t#print(gap_rate2)\n\t\tfor key in TP_score:\n\t\t\t'''\n\t\t\tprint(key)\n\t\t\tp = 0\n\t\t\tfor s1 in match1:\n\t\t\t\tfor s2 in match2:\n\t\t\t\t\tif s1[key[0]] >= 0 and s2[key[1]] >= 0 :\n\t\t\t\t\t\tp += rand_P.prob(s1[-1],s2[-1],s1[key[0]]+1,s2[key[1]]+1)\n\t\t\t\t\t\t#print(p)\n\t\t\t#TP_score[key] = log(TP_score[key]/rand_P.prob(len(aln1[0]),len(aln2[0]),key[0]+1,key[1]+1))\n\t\t\tp = p/len(aln1)/len(aln2)\n\t\t\t#print(p)\n\t\t\tTP_score[key] = log(TP_score[key]/p)\n\t\t\t#print(TP_score[key])\n\t\t#print(TP_score)\n\t\t\t'''\n\t\t\tTP_score[key] = log(TP_score[key]/rand_P.prob(len(aln1[0]),len(aln2[0]),key[0]+1,key[1]+1))\n\t\t\t#p = p/len(aln1)/len(aln2)\n\t\t\t#print(p)\n\t\t\t#TP_score[key] = TP_score[key]/p\n\t\t\t#print(TP_score[key])\n\t\t#print(TP_score)\n\t\tdel_score = 
log(gap_rate2/rand_P.del_rate(len(aln1[0]),len(aln2[0]),1))\n\t\tins_score = log(gap_rate1/rand_P.ins_rate(len(aln1[0]),len(aln2[0]),1))\n\t\t#print(del_score)\n\t\t#print(ins_score)\n\t\treturn TP_score,del_score,ins_score\n\t\n\t\n\tdef merge(self,aln1,aln2,TP_score,default=0,ins_score=0,del_score=0,w=1):\n\t\tn = len(aln1[0])\n\t\tm = len(aln2[0])\n\n\t\tR1 = self.residue_count(aln1)\n\t\tR2 = self.residue_count(aln2)\t\t\n\n\t\taln_score = [[0 for i in range(m+1)] for j in range(n+1)]\n\t\tbacktrack = [['-' for i in range(m+1)] for j in range(n+1)]\n\t\tfor i in range(1,m+1):\n\t\t\tbacktrack[0][i] = 'L'\n\t\t\taln_score[0][i] = aln_score[0][i-1] + ins_score\n\t\tfor j in range(1,n+1):\n\t\t\tbacktrack[j][0] = 'U'\n\t\t\taln_score[j][0] = aln_score[j-1][0] + del_score\n\n\t\tfor j in range(1,n+1):\n\t\t\tfor i in range(1,m+1):\n\t\t\t\tTP = TP_score[(j-1,i-1)] if (j-1,i-1) in TP_score else default\n\t\t\t\tms = aln_score[j-1][i-1] + (2*w-1)*TP - (1-w)*R1[j-1]*R2[i-1]\n\t\t\t\tg1 = aln_score[j][i-1] + ins_score\n\t\t\t\tg2 = aln_score[j-1][i] + del_score\n\n\t\t\t\tif ms >= g1 and ms >= g2:\n\t\t\t\t\taln_score[j][i] = ms\n\t\t\t\t\tbacktrack[j][i] = 'D'\n\t\t\t\telif g1 >= ms and g1 >= g2:\n\t\t\t\t\taln_score[j][i] = g1\n\t\t\t\t\tbacktrack[j][i] = 'L'\n\t\t\t\telse:\n\t\t\t\t\taln_score[j][i] = g2\n\t\t\t\t\tbacktrack[j][i] = 'U'\n\n\t\ti = m\n\t\tj = n\n\t\tM1 = \"\"\n\t\tM2 = \"\"\n\t\twhile (i > 0 or j > 0):\n\t\t\t#print(aln_score[j][i])\n\t\t\tif backtrack[j][i] == 'D':\n\t\t\t\t#print('D')\n\t\t\t\t#M1 = str(j-1) + M1\n\t\t\t\t#M2 = str(i-1) + M2\n\t\t\t\tM1 = \".\" + M1\n\t\t\t\tM2 = \".\" + M2\n\t\t\t\ti -= 1\n\t\t\t\tj -= 1\t\n\t\t\telif backtrack[j][i] == 'L':\n\t\t\t\t#print('L')\n\t\t\t\t#M2 = str(i-1) + M2\n\t\t\t\tM2 = \".\" + M2\n\t\t\t\tM1 = \"-\" + M1\n\t\t\t\ti -= 1\n\t\t\telse:\n\t\t\t\t#print('U')\n\t\t\t\tM2 = \"-\" + M2\n\t\t\t\t#M1 = str(j-1) + M1\n\t\t\t\tM1 = \".\" + M1\n\t\t\t\tj -= 1\n\t\treturn aln_score[n][m], M1, M2\t\t\n\n\tdef heuristic_merge(self,aln1,taxa1,aln2,taxa2):\n\t\tTP_score,m1,m2,g1,g2 = self.heuristic_score(aln1,taxa1,aln2,taxa2)\n\t\t#m = len(aln2[0])\n\t\t#n = len(aln1[0])\n\t\treturn self.merge(aln1,aln2,TP_score,w=0.75)\n\n\tdef logodd_merge(self,aln1,taxa1,aln2,taxa2,rand_P):\n\t\tTP_score,del_score,ins_score = self.logodd_score(aln1,taxa1,aln2,taxa2,rand_P)\n\t\t#print(del_score)\n\t\t#print(ins_score)\n\t\t#m = len(aln2[0])\n\t\t#n = len(aln1[0])\n\t\treturn self.merge(aln1,aln2,TP_score,default=-100,ins_score=0,del_score=0)\n\t\t#return self.merge(n,m,TP_score)\n","repo_name":"uym2/PASTA_with_structure","sub_path":"merger_backup.py","file_name":"merger_backup.py","file_ext":"py","file_size_in_byte":6322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37634418775","text":"#!/usr/bin/python3\nfrom boxscores import boxscores\nfrom standings import standings\nfrom leaders import leaders\n\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\naddr_to = [YOUR_EMAIL_ADDRESS]\naddr_from = 'Python Baseball Times <[YOUR_FROM_EMAIL_ADDRESS]>'\nsmtp_server = [YOUR_SMTP_SERVER]\nsmtp_user = [YOUR_SMTP_USERNAME]\nsmtp_pass = [YOUR_SMTP_PASSWORD]\n\nmsg = MIMEMultipart('alternative')\nmsg['To'] = addr_to\nmsg['From'] = addr_from\nmsg['Subject'] = 'Daily MLB Report'\n\ntext = 'This email is not available in plain text.'\n\nhtml = \"\"\"\n\n \n \n \n \n\"\"\"\nhtml += boxscores()\nhtml += '
'\nhtml += standings()\nhtml += '
'\nhtml += leaders()\nhtml += \"\"\"\n \n\n\"\"\"\npart1 = MIMEText(text, 'plain')\npart2 = MIMEText(html, 'html', _charset='iso-8859-1')\n\nmsg.attach(part1)\nmsg.attach(part2)\n\ns = smtplib.SMTP_SSL(smtp_server, 465)\ns.login(smtp_user, smtp_pass)\ns.sendmail(addr_from, addr_to, msg.as_string())\ns.quit()\n","repo_name":"tomboone/boxscores","sub_path":"bbemail.py","file_name":"bbemail.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"22131112184","text":"#\n# Standard Diplomacy Variant\n#\n# Map\n#\n# The Map Dictionary is of the form \n#\n#\tkey : Province Name value : { Aliases, Borders }\n#\n#\n# Aliases = list of alternative Names for the Province (the first is the default name)\n# Borders = adjacent Map spaces listed in CONSECUTIVE CLOCKWISE ORDER (including OTB)\n#\n\nOTB = 'OTB' # Off The Board (an \"edge\" of the Map)\n\n\nMap = {\t'Adr'\t: {\t'Aliases' : [ 'Adriatic Sea' ],\n\t\t\t\t'Borders' : [ 'Ven', 'Tri', 'Alb', 'Ion', 'Apu' ] },\n\t\t'Aeg'\t: {\t'Aliases' : [ 'Aegean Sea' ],\n\t\t\t\t'Borders' : [ 'Ion', 'Gre', 'Bul', 'Con', 'Smy', 'Eas' ] },\n\t\t'Alb'\t: {\t'Aliases' : [ 'Albania' ],\n\t\t\t\t'Borders' : [ 'Tri', 'Ser', 'Gre', 'Ion', 'Adr' ] },\n\t\t'Ank'\t: {\t'Aliases' : [ 'Ankara' ],\n\t\t\t\t'Borders' : [ 'Con', 'Bla', 'Arm', 'Smy' ] },\n\t\t'Apu'\t: {\t'Aliases' : [ 'Apulia' ],\n\t\t\t\t'Borders' : [ 'Nap', 'Rom', 'Ven', 'Adr', 'Ion' ] },\n\t\t'Arm'\t: {\t'Aliases' : [ 'Armenia' ],\n\t\t\t\t'Borders' : [ 'Syr', 'Smy', 'Ank', 'Bla', 'Sev', OTB ] },\n\t\t'Bal'\t: {\t'Aliases' : [ 'Baltic Sea' ],\n\t\t\t\t'Borders' : [ 'Den', 'Swe', 'GoB', 'Lvn', 'Pru', 'Ber', 'Kie' ] },\n\t\t'Bar'\t: {\t'Aliases' : [ 'Barents Sea' ],\n\t\t\t\t'Borders' : [ 'Nwg', OTB, 'StP', 'Nwy' ] },\n\t\t'Bel'\t: {\t'Aliases' : [ 'Belgium' ],\n\t\t\t\t'Borders' : [ 'Hol', 'Ruh', 'Bur', 'Pic', 'Eng', 'Nth' ] },\n\t\t'Ber'\t: {\t'Aliases' : [ 'Berlin' ],\n\t\t\t\t'Borders' : [ 'Pru', 'Sil', 'Mun', 'Kie', 'Bal' ] },\n\t\t'Bla'\t: {\t'Aliases' : [ 'Black Sea' ],\n\t\t\t\t'Borders' : [ 'Rum', 'Sev', 'Arm', 'Ank', 'Con', 'Bul' ] },\n\t\t'Boh'\t: {\t'Aliases' : [ 'Bohemia' ],\n\t\t\t\t'Borders' : [ 'Gal', 'Vie', 'Tyr', 'Mun', 'Sil' ] },\n\t\t'Bre'\t: {\t'Aliases' : [ 'Brest' ],\n\t\t\t\t'Borders' : [ 'Gas', 'Mid' , 'Eng', 'Pic', 'Par' ] },\n\t\t'Bud'\t: {\t'Aliases' : [ 'Budapest' ],\n\t\t\t\t'Borders' : [ 'Tri', 'Vie', 'Gal', 'Rum', 'Ser' ] },\n\t\t'Bul'\t: {\t'Aliases' : [ 'Bulgaria' ],\n\t\t\t\t'Borders' : [ 'Con', 'Aeg' , 'Gre', 'Ser', 'Rum', 'Bla'] },\n\t\t'Bur'\t: {\t'Aliases' : [ 'Burgundy' ],\n\t\t\t\t'Borders' : [ 'Swi', 'Mar', 'Gas', 'Par', 'Pic', 'Bel', 'Ruh', 'Mun' ] },\n\t\t'Cly'\t: {\t'Aliases' : [ 'Clyde' ],\n\t\t\t\t'Borders' : [ 'Edi', 'Lvp', 'NAt', 'Nwg' ] },\n\t\t'Con'\t: {\t'Aliases' : [ 'Constantinople' ],\n\t\t\t\t'Borders' : [ 'Ank', 'Smy', 'Aeg', 'Bul', 'Bla' ] },\n\t\t'Den'\t: {\t'Aliases' : [ 'Denmark' ],\n\t\t\t\t'Borders' : [ 'Kie', 'Hel', 'Nth', 'Ska', 'Swe', 'Bal' ] },\n\t\t'Eas'\t: {\t'Aliases' : [ 'Eastern Mediterranean' ],\n\t\t\t\t'Borders' : [ 'Ion', 'Aeg', 'Smy', 'Syr', OTB ] },\n\t\t'Edi'\t: {\t'Aliases' : [ 'Edinburgh' ],\n\t\t\t\t'Borders' : [ 'Lvp', 'Cly', 'Nwg', 'Nth', 'Yor'] },\n\t\t'Eng'\t: {\t'Aliases' : [ 'English Channel' ],\n\t\t\t\t'Borders' : [ 'Bel', 'Pic', 'Bre', 'Mid', 'Iri', 'Wal', 'Lon', 'Nth' ] },\n\t\t'Fin'\t: {\t'Aliases' : [ 'Finland' ],\n\t\t\t\t'Borders' : [ 'Nwy', 'StP', 'GoB', 'Swe' ] },\n\t\t'Gal'\t: {\t'Aliases' : [ 'Galicia' 
],\n\t\t\t\t'Borders' : [ 'Bud', 'Vie', 'Boh', 'Sil', 'War', 'Ukr', 'Rum' ] },\n\t\t'Gas'\t: {\t'Aliases' : [ 'Gascony' ],\n\t\t\t\t'Borders' : [ 'Bre', 'Par', 'Bur', 'Mar', 'Spa', 'Mid' ] },\n\t\t'Gre'\t: {\t'Aliases' : [ 'Greece' ],\n\t\t\t\t'Borders' : [ 'Alb', 'Ser', 'Bul', 'Aeg', 'Ion' ] },\n\t\t'GoB'\t: {\t'Aliases' : [ 'Gulf of Bothnia', 'Bot' ],\n\t\t\t\t'Borders' : [ 'Bal', 'Swe' , 'Fin', 'StP', 'Lvn'] },\n\t\t'GoL'\t: {\t'Aliases' : [ 'Gulf of Lyon', 'Lyo' ],\n\t\t\t\t'Borders' : [ 'Mar', 'Pie', 'Tus', 'TyS', 'Wes' , 'Spa'] },\n\t\t'Hel'\t: {\t'Aliases' : [ 'Helgoland Bight' ],\n\t\t\t\t'Borders' : [ 'Den', 'Kie', 'Hol', 'Nth' ] },\n\t\t'Hol'\t: {\t'Aliases' : [ 'Holland' ],\n\t\t\t\t'Borders' : [ 'Bel', 'Nth', 'Hel', 'Kie', 'Ruh'] },\n\t\t'Ion'\t: {\t'Aliases' : [ 'Ionian Sea' ],\n\t\t\t\t'Borders' : [ 'Adr', 'Alb', 'Gre', 'Aeg', 'Eas', OTB, 'Tun', 'TyS', 'Nap', 'Apu' ] },\n\t\t'Iri'\t: {\t'Aliases' : [ 'Irish Sea' ],\n\t\t\t\t'Borders' : [ 'Eng', 'Mid', 'NAt', 'Lvp', 'Wal' ] },\n\t\t'Kie'\t: {\t'Aliases' : [ 'Kiel' ],\n\t\t\t\t'Borders' : [ 'Ber', 'Mun', 'Ruh', 'Hol', 'Hel', 'Den', 'Bal' ] },\n\t\t'Lon'\t: {\t'Aliases' : [ 'London' ],\n\t\t\t\t'Borders' : [ 'Eng', 'Wal', 'Yor', 'Nth' ] },\n\t\t'Lvn'\t: {\t'Aliases' : [ 'Livonia' ],\n\t\t\t\t'Borders' : [ 'StP', 'Mos', 'War', 'Pru', 'Bal', 'GoB'] },\n\t\t'Lvp'\t: {\t'Aliases' : [ 'Liverpool' ],\n\t\t\t\t'Borders' : [ 'Cly', 'Edi', 'Yor', 'Wal', 'Iri', 'NAt' ] },\n\t\t'Mar'\t: {\t'Aliases' : [ 'Marseilles' ],\n\t\t\t\t'Borders' : [ 'Swi', 'Pie', 'GoL', 'Spa', 'Gas', 'Bur' ] },\n\t\t'Mid'\t: {\t'Aliases' : [ 'Mid-Atlantic Ocean', 'MAO' ],\n\t\t\t\t'Borders' : [ 'Eng', 'Bre', 'Gas', 'Spa', 'Por', 'Spa', 'Wes', 'NAf', OTB, 'NAt', 'Iri' ] },\n\t\t'Mos'\t: {\t'Aliases' : [ 'Moscow' ],\n\t\t\t\t'Borders' : [ 'Lvn', 'StP', OTB, 'Sev', 'Ukr', 'War' ] },\n\t\t'Mun'\t: {\t'Aliases' : [ 'Munich' ],\n\t\t\t\t'Borders' : [ 'Swi', 'Bur', 'Ruh', 'Kie', 'Ber', 'Sil', 'Boh', 'Tyr' ] },\n\t\t'NAf'\t: {\t'Aliases' : [ 'North Africa' ],\n\t\t\t\t'Borders' : [ 'Mid', 'Wes', 'Tun', OTB ] },\n\t\t'Nap'\t: {\t'Aliases' : [ 'Naples' ],\n\t\t\t\t'Borders' : [ 'Rom', 'Apu', 'Ion', 'TyS' ] },\n\t\t'NAt'\t: {\t'Aliases' : [ 'North Atlantic Ocean', 'NAO' ],\n\t\t\t\t'Borders' : [ 'Nwg', 'Cly', 'Lvp', 'Iri', 'Mid', OTB ] },\n\t\t'Nwg'\t: {\t'Aliases' : [ 'Norwegian Sea', 'Nrg' ],\n\t\t\t\t'Borders' : [ 'Bar', 'Nwy', 'Nth', 'Edi', 'Cly', 'NAt', OTB ] },\n\t\t'Nth'\t: {\t'Aliases' : [ 'North Sea' ],\n\t\t\t\t'Borders' : [ 'Edi', 'Nwg', 'Nwy', 'Ska', 'Den', 'Hel', 'Hol', 'Bel', 'Eng', 'Lon', 'Yor' ] },\n\t\t'Nwy'\t: {\t'Aliases' : [ 'Norway' ],\n\t\t\t\t'Borders' : [ 'Nth', 'Nwg', 'Bar', 'StP', 'Fin', 'Swe', 'Ska' ] },\n\t\t'Par'\t: {\t'Aliases' : [ 'Paris' ],\n\t\t\t\t'Borders' : [ 'Bur', 'Gas', 'Bre', 'Pic' ] },\n\t\t'Pic'\t: {\t'Aliases' : [ 'Picardy' ],\n\t\t\t\t'Borders' : [ 'Bel', 'Bur', 'Par', 'Bre', 'Eng' ] },\n\t\t'Pie'\t: {\t'Aliases' : [ 'Piedmont' ],\n\t\t\t\t'Borders' : [ 'Mar', 'Swi', 'Tyr', 'Ven', 'Tus', 'GoL' ] },\n\t\t'Por'\t: {\t'Aliases' : [ 'Portugal' ],\n\t\t\t\t'Borders' : [ 'Spa', 'Mid' ] },\n\t\t'Pru'\t: {\t'Aliases' : [ 'Prussia' ],\n\t\t\t\t'Borders' : [ 'Lvn', 'War', 'Sil', 'Ber', 'Bal' ] },\n\t\t'Rom'\t: {\t'Aliases' : [ 'Rome' ],\n\t\t\t\t'Borders' : [ 'Tus', 'Ven', 'Apu', 'Nap', 'TyS' ] },\n\t\t'Ruh'\t: {\t'Aliases' : [ 'Ruhr' ],\n\t\t\t\t'Borders' : [ 'Bur', 'Bel', 'Hol', 'Kie', 'Mun' ] },\n\t\t'Rum'\t: {\t'Aliases' : [ 'Rumania' ],\n\t\t\t\t'Borders' : [ 'Bul', 'Ser', 'Bud', 'Gal', 'Ukr', 'Sev', 'Bla' ] },\n\t\t'Ser'\t: 
{\t'Aliases' : [ 'Serbia' ],\n\t\t\t\t'Borders' : [ 'Bud', 'Rum', 'Bul', 'Gre', 'Alb', 'Tri' ] },\n\t\t'Sev'\t: {\t'Aliases' : [ 'Sevastopol' ],\n\t\t\t\t'Borders' : [ 'Arm', 'Bla', 'Rum', 'Ukr', 'Mos', OTB ] },\n\t\t'Sil'\t: {\t'Aliases' : [ 'Silesia' ],\n\t\t\t\t'Borders' : [ 'Gal', 'Boh', 'Mun', 'Ber', 'Pru', 'War' ] },\n\t\t'Ska'\t: {\t'Aliases' : [ 'Skagerrak' ],\n\t\t\t\t'Borders' : [ 'Den', 'Nth', 'Nwy', 'Swe' ] },\n\t\t'Smy'\t: {\t'Aliases' : [ 'Smyrna' ],\n\t\t\t\t'Borders' : [ 'Aeg', 'Con', 'Ank', 'Arm', 'Syr', 'Eas' ] },\n\t\t'Spa'\t: {\t'Aliases' : [ 'Spain' ],\n\t\t\t\t'Borders' : [ 'Mid', 'Por', 'Mid', 'Gas', 'Mar', 'GoL', 'Wes' ] },\n\t\t'StP'\t: {\t'Aliases' : [ 'St. Petersburg' ],\n\t\t\t\t'Borders' : [ 'Mos', 'Lvn', 'GoB', 'Fin', 'Nwy', 'Bar', OTB ] },\n\t\t'Swe'\t: {\t'Aliases' : [ 'Sweden' ],\n\t\t\t\t'Borders' : [ 'Nwy', 'Fin', 'GoB', 'Bal', 'Den', 'Ska' ] },\n\t\t'Swi'\t: {\t'Aliases' : [ 'Switzerland' ],\n\t\t\t\t'Borders' : [ 'Pie', 'Mar', 'Bur', 'Mun', 'Tyr' ] },\n\t\t'Syr'\t: {\t'Aliases' : [ 'Syria' ],\n\t\t\t\t'Borders' : [ 'Eas', 'Smy', 'Arm', OTB ] },\n\t\t'Tri'\t: {\t'Aliases' : [ 'Trieste' ],\n\t\t\t\t'Borders' : [ 'Ven', 'Tyr', 'Vie', 'Bud', 'Ser', 'Alb', 'Adr' ] },\n\t\t'Tun'\t: {\t'Aliases' : [ 'Tunis' ],\n\t\t\t\t'Borders' : [ 'NAf', 'Wes', 'TyS', 'Ion', OTB ] },\n\t\t'Tus'\t: {\t'Aliases' : [ 'Tuscany' ],\n\t\t\t\t'Borders' : [ 'GoL', 'Pie', 'Ven', 'Rom', 'TyS' ] },\n\t\t'Tyr'\t: {\t'Aliases' : [ 'Tyrolia' ],\n\t\t\t\t'Borders' : [ 'Tri', 'Ven', 'Pie', 'Swi', 'Mun', 'Boh', 'Vie' ] },\n\t\t'TyS'\t: {\t'Aliases' : [ 'Tyrrhenian Sea' ],\n\t\t\t\t'Borders' : [ 'GoL', 'Tus', 'Rom', 'Nap', 'Ion', 'Tun', 'Wes' ] },\n\t\t'Ukr'\t: {\t'Aliases' : [ 'Ukraine' ],\n\t\t\t\t'Borders' : [ 'Mos', 'Sev', 'Rum', 'Gal', 'War' ] },\n\t\t'Ven'\t: {\t'Aliases' : [ 'Venice' ],\n\t\t\t\t'Borders' : [ 'Apu', 'Rom', 'Tus', 'Pie', 'Tyr', 'Tri', 'Adr' ] },\n\t\t'Vie'\t: {\t'Aliases' : [ 'Vienna' ],\n\t\t\t\t'Borders' : [ 'Boh', 'Gal', 'Bud', 'Tri', 'Tyr' ] },\n\t\t'Wal'\t: {\t'Aliases' : [ 'Wales' ],\n\t\t\t\t'Borders' : [ 'Lvp', 'Yor', 'Lon', 'Eng', 'Iri' ] },\n\t\t'War'\t: {\t'Aliases' : [ 'Warsaw' ],\n\t\t\t\t'Borders' : [ 'Gal', 'Sil', 'Pru', 'Lvn', 'Mos', 'Ukr' ] },\n\t\t'Wes'\t: {\t'Aliases' : [ 'Western Mediterranean', 'WMS' ],\n\t\t\t\t'Borders' : [ 'Tun', 'NAf', 'Mid', 'Spa', 'GoL', 'TyS' ] },\n\t\t'Yor'\t: {\t'Aliases' : [ 'Yorkshire' ],\n\t\t\t\t'Borders' : [ 'Lon', 'Wal', 'Lvp', 'Edi', 'Nth' ] }\n\t }\n","repo_name":"woelpad/diplomatic-pouch","sub_path":"web/Email/Ratings/Tarzan/Python/variants/stnd_map.py","file_name":"stnd_map.py","file_ext":"py","file_size_in_byte":8213,"program_lang":"python","lang":"ky","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71467614808","text":"def parse(filename):\n inputlines = []\n \n with open(filename, 'r') as f:\n for i in f.readlines():\n inputlines.append(i)\n \n return inputlines\n\ndef solution(input):\n maximum = 0\n current = 0\n\n for line in input:\n if (line == \"\\n\"):\n maximum = max(maximum, current)\n current = 0 \n else:\n current += int(line)\n\n return maximum\n\n\nprint(solution(parse(\"input1.txt\")))\n","repo_name":"AntiChange/AdventOfCode","sub_path":"2022/Day_1/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21346597897","text":"import numpy as np\n\nfrom openmdao.api import ExplicitComponent\n\nclass ZeroUpcrossingHullMoment(ExplicitComponent):\n\n\tdef 
initialize(self):\n\t\tself.options.declare('freqs', types=dict)\n\n\tdef setup(self):\n\t\tfreqs = self.options['freqs']\n\t\tself.omega = freqs['omega']\n\t\tN_omega = len(self.omega)\n\n\t\tself.add_input('resp_hull_moment', val=np.zeros((N_omega,10)), units='(N*m)**2*s/rad')\n\n\t\tself.add_output('v_z_hull_moment', val=np.zeros(10), units='1/s')\n\n\t\tself.declare_partials('*', '*')\n\n\tdef compute(self, inputs, outputs):\n\t\tomega = self.omega\n\n\t\tS_moment = inputs['resp_hull_moment']\n\n\t\tfor i in range(10):\n\t\t\tm0 = np.trapz(S_moment[:,i],omega)\n\t\t\tm2 = np.trapz(omega**2. * S_moment[:,i],omega)\n\t\t\t\n\t\t\toutputs['v_z_hull_moment'][i] = 1. / (2. * np.pi) * np.sqrt(m2 / m0)\n\n\tdef compute_partials(self, inputs, partials): \n\t\tomega = self.omega\n\t\tN_omega = len(omega)\n\t\tdomega = omega[1] - omega[0]\n\n\t\tS_moment = inputs['resp_hull_moment']\n\n\t\tpartials['v_z_hull_moment', 'resp_hull_moment'] = np.zeros((10,10*N_omega))\n\n\t\tfor i in range(10):\n\t\t\tm0 = np.trapz(S_moment[:,i],omega)\n\t\t\tm2 = np.trapz(omega**2. * S_moment[:,i],omega)\n\n\t\t\tdm0_dresp = np.ones(N_omega) * domega\n\t\t\tdm2_dresp = omega**2. * domega\n\n\t\t\tdm0_dresp[0] += -domega / 2.\n\t\t\tdm2_dresp[0] += -omega[0]**2. * domega / 2.\n\t\t\tdm0_dresp[-1] += -domega / 2.\n\t\t\tdm2_dresp[-1] += -omega[-1]**2. * domega / 2.\n\t\t\n\t\t\tpartials['v_z_hull_moment', 'resp_hull_moment'][i,i:10*N_omega:10] += 1. / (2. * np.pi) * 0.5 / np.sqrt(m2 / m0) * (dm2_dresp / m0 - m2 / m0**2. * dm0_dresp)\n","repo_name":"johnjasa/SparOpt","sub_path":"zero_upcrossing_hull_moment.py","file_name":"zero_upcrossing_hull_moment.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41250115110","text":"import socket\naddress=(\"0.0.0.0\",5000)\nsoc = socket.socket()\nsoc.bind(address)\nprint(\"Server is running...\")\nsoc.listen(1)\nconn, add = soc.accept()\n\n\n\n\nwhile True:\n    data = conn.recv(1024).decode('utf-8')\n    with open(data,\"r\") as f:\n        data = f.read()\n    conn.send(data.encode('utf-8'))\n    \nconn.close()\n","repo_name":"SourovRoy1998/Advanced-Python-Programming","sub_path":"Network Programming with Socket/ShareTextFile/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31984584876","text":"import pandas as pd\r\nfrom sklearn.metrics import confusion_matrix, classification_report\r\nimport statsmodels.api as sm\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom scipy import stats\r\n\r\n\r\ndf = pd.read_csv(r'C:\\\Projects\\\477\\\Logistic Regression\\\Log.csv')\r\n\r\noutcome = df['Choice (0/1)']\r\npredictors = df[['Gender', 'Amount purchased', 'Frequency', 'Last purchase', 'First purchase', 'P_History', 'P_Science', 'P_Business', 'P_Educ', 'P_Health']]\r\n\r\npredictors = sm.add_constant(predictors)\r\nmodel = sm.Logit(outcome,predictors).fit()\r\nprint(model.summary())\r\n\r\n#New model without First Purchase\r\noutcome = df['Choice (0/1)']\r\npredictors = df[['Gender', 'Amount purchased', 'Frequency', 'Last purchase', 'P_History', 'P_Science', 'P_Business', 'P_Educ', 'P_Health']]\r\n\r\npredictors = sm.add_constant(predictors)\r\nmodel = sm.Logit(outcome,predictors).fit()\r\nprint(model.summary())\r\n\r\n\r\n#Splitting the data to check accuracy\r\n\r\nx_train, x_test, y_train,y_test 
= train_test_split(predictors, outcome, test_size=0.3, random_state=42)\r\n\r\nx_train = sm.add_constant(x_train)\r\nmodel = sm.Logit(y_train, x_train).fit()\r\nprint(model.summary())\r\n\r\ny_pred = model.predict(x_test)\r\ny_pred = (y_pred > 0.5).astype(int)\r\n\r\nprint(confusion_matrix(y_test, y_pred))\r\nprint(classification_report(y_test, y_pred))\r\n\r\n#Using untested dataset\r\ndf = pd.read_csv(r'C:\\\Projects\\\477\\\Logistic Regression\\\Log.csv')\r\nuntested = pd.read_csv(r'C:\\\Projects\\\477\\\Logistic Regression\\\Pred.csv')\r\n\r\n#drop target\r\nX_train = df.drop('Choice (0/1)', axis=1)\r\ny_train = df['Choice (0/1)']\r\n\r\n#split data\r\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)\r\n\r\n#fit log model\r\nsk_model = LogisticRegression()\r\nsk_model.fit(X_train, y_train)\r\ny_pred = sk_model.predict(X_val)\r\n\r\n#reports\r\nprint(\"Scikit-Learn Model:\")\r\nprint(confusion_matrix(y_val, y_pred))\r\nprint(classification_report(y_val, y_pred))\r\n\r\n# statsmodel for p-values\r\nX_train_sm = sm.add_constant(X_train)\r\nsm_model = sm.Logit(y_train, X_train_sm).fit()\r\nprint(\"Statsmodels Model:\")\r\nprint(sm_model.summary())","repo_name":"simplybaws/simplybaws.github.io","sub_path":"Logmodel.py","file_name":"Logmodel.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34565841620","text":"\"\"\"empty message\n\nRevision ID: 4772352bbff\nRevises: 483e7fd9289\nCreate Date: 2014-07-26 16:07:05.965587\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4772352bbff'\ndown_revision = '483e7fd9289'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('trade', sa.Column('creator_flair_css', sa.String(length=64), nullable=True))\n    op.add_column('trade', sa.Column('target_flair_css', sa.String(length=64), nullable=True))\n    ### end Alembic commands ###\n\n\ndef downgrade():\n    ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('trade', 'target_flair_css')\n op.drop_column('trade', 'creator_flair_css')\n ### end Alembic commands ###\n","repo_name":"edk0/mcflairbot","sub_path":"migrations/versions/4772352bbff_.py","file_name":"4772352bbff_.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70332998489","text":"#################\n#### imports ####\n#################\nfrom __future__ import with_statement\nfrom fabric.api import run, env, local, settings, abort, cd, prefix, sudo\nfrom fabric.contrib.console import confirm\nfrom contextlib import contextmanager as _contextmanager\nfrom fabric.context_managers import shell_env\nimport os\nimport config\n\n# Double tunnel\n# http://stackoverflow.com/questions/6161548/fabric-how-to-double-tunnel\n\n# Port is needed for gateway connections\n# https://github.com/fabric/fabric/issues/884\n# https://github.com/fabric/fabric/commit/d41e39b801320fde3ae2ae994ff78d57feb10959\nPORT = 22\n\nenv.forward_agent = True\nenv.gateway = \"%s@%s\" % (config.GATEWAY_USERNAME, config.GATEWAY_SERVER)\nenv.hosts = [\"%s@%s\" % (config.USERNAME, config.SERVER)]\nenv.passwords = {\n \"%s@%s:%s\" % (config.GATEWAY_USERNAME, config.GATEWAY_SERVER, PORT): config.GATEWAY_PASSWORD,\n \"%s@%s:%s\" % (config.USERNAME, config.SERVER, PORT): config.PASSWORD\n}\n\n\nHERE = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"project\"))\n\nPROJECT_DIR = \"/var/www/apps/project\"\nVIRTUAL_ENV = \"%s/venv\" % PROJECT_DIR\nWSGI_SCRIPT = '%s/project.wsgi' % PROJECT_DIR\n\nenv.directory = PROJECT_DIR\nenv.activate = \"source %s/bin/activate\" % VIRTUAL_ENV\n\n\nfor host in env.hosts:\n print(\"Executing on %s\" % (host))\n#print(\"Executing on %s as %s\" % (env.host, env.user))\n#print(\"Executing on %(host)s as %(user)s\" % env)\n\n\n# $ fab hello\ndef hello():\n print(\"Hello world!\")\n\n\n# $ fab goodbye:name=Jeff\n# $ fab goodbye:Jeff\ndef goodbye(name=\"world\"):\n print(\"Bye %s!\" % name)\n\n\n# $ fab -H dazcona@gateway.computing.dcu.ie host_type\n# $ fab -H dazcona@gateway.computing.dcu.ie --password XXXXXX host_type\ndef host_type():\n run('uname -s')\n\n\ndef whoami():\n run('whoami')\n\n\ndef sudo_whoami():\n with settings(sudo_user='root'):\n sudo(\"whoami\") # prints 'root', password prompt bypassed\n with settings(sudo_user='david'):\n sudo(\"whoami\") # prints 'david'\n\n\ndef hostname():\n run('hostname')\n\n\ndef echo():\n local(\"echo Hello World!\")\n\n\ndef uptime():\n local(\"uptime\")\n\n\ndef test():\n with settings(warn_only=True):\n cmd = \"python manage.py test\"\n print(\"Running: %s\" % (cmd))\n result = local(cmd, capture=True)\n if result.failed and not confirm(\"Tests failed. Continue anyway?\"):\n abort(\"Aborting at user request.\")\n\n\ndef add_untracked_files():\n local(\"git add -A\")\n\n\ndef commit():\n local(\"git add . 
&& git commit\")\n\n\ndef push():\n local(\"git push\")\n\n\ndef prepare_deploy():\n print(\"Uploading code\")\n print(\"Dir: %s\" % (HERE))\n # You have to be under the project directory when you try to run this, otherwise it would fail with the message:\n # fatal: Not a git repository (or any of the parent directories): .git\n with cd(HERE):\n # test()\n commit()\n push()\n\n\n@_contextmanager\ndef virtualenv():\n with cd(env.directory):\n with prefix(env.activate):\n yield\n\n\n# Add tasks to deploy the code on the web server\ndef deploy():\n print(\"Deploying code\")\n print(\"Code directory: %s\" % env.directory)\n with settings(warn_only=True, sudo_user=\"root\"):\n if run(\"test -d %s\" % env.directory).failed:\n # Clone\n cmd = \"git clone git@gitlab.computing.dcu.ie:dazcona/predictCS.git %s\" % env.directory\n print(\"Running: %s\" % cmd)\n sudo(cmd)\n # Virtual Environment\n sudo(\"rm -rf %s\" % VIRTUAL_ENV)\n sudo(\"virtualenv %s\" % VIRTUAL_ENV)\n with virtualenv():\n #run(\"pip freeze\")\n sudo(\"pip install bcrypt Flask Flask-Bcrypt Flask-Bootstrap Flask-Login Flask-Mail Flask-Script \"\n \"Flask-SQLAlchemy Flask-WTF Jinja2 numpy scikit-learn scipy sklearn SQLAlchemy Werkzeug WTForms\") # coverage\n #run(\"pip freeze\")\n run(\"deactivate\")\n with cd(env.directory):\n # Pull\n sudo(\"git pull\")\n # Touch the .wsgi file so that mod_wsgi triggers a reload of the application\n with shell_env(APP_SETTINGS=\"project.config.ProductionConfig\"):\n sudo(\"touch %s\" % WSGI_SCRIPT)\n #sudo(\"/etc/init.d/apache2 reload\")\n\n\ndef testing():\n with virtualenv():\n run(\"whoami\")\n sudo(\"whoami\")\n with settings(sudo_user='root'):\n sudo(\"whoami\") # prints 'root', password prompt bypassed\n run(\"cat ~/.ssh/id_rsa.pub\")\n sudo(\"cat ~/.ssh/id_rsa.pub\")\n with settings(sudo_user='david'):\n sudo(\"whoami\") # prints 'david'\n run(\"cat ~/.ssh/id_rsa.pub\")\n sudo(\"cat ~/.ssh/id_rsa.pub\")\n with shell_env(APP_SETTINGS='config.DevelopmentConfig'):\n run(\"echo APP_SETTINGS is $APP_SETTINGS\") # Only visible here","repo_name":"dazcona/fab","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4729857349","text":"from marshmallow import Schema, fields, validate\n\n\nclass BoardSchema(Schema):\n board = fields.List(\n fields.Integer(validate=validate.Range(min=0, max=9)),\n required=True,\n validate=validate.Length(1, 81)\n )\n\n\nclass SolveAllSchema(BoardSchema):\n maxSolutions = fields.Integer(\n validate=validate.Range(min=1),\n description=\"Maximum number of solutions to find\",\n required=True\n )\n\n\nclass BoardPositionSchema(Schema):\n boardPosition = fields.List(\n fields.Integer(validate=validate.Range(min=0, max=8)),\n validate=validate.Length(2, 2)\n )\n\n\nclass BoardStatusSchema(Schema):\n isValid = fields.Bool(description=\"Any invalid or repeated value?\")\n isComplete = fields.Bool(description=\"All positions filled and valid?\")\n isEmpty = fields.Bool(description=\"No position filled?\")\n invalidPositions = fields.List(\n fields.Nested(BoardPositionSchema),\n description=\"List of coordinates (line, column) of invalid values in \"\n \"the board.\",\n validate=validate.Length(1, 81)\n )\n\n\nclass SolvedBoardSchema(Schema):\n board = fields.List(\n fields.Integer(validate=validate.Range(min=0, max=9)),\n validate=validate.Length(81, 81)\n )\n\n\nclass BoardSolutionsSchema(Schema):\n solve_time = fields.Float(\n 
description=\"Elapsed time, in seconds, for finding all solutions.\",\n )\n solved_boards = fields.List(\n fields.Nested(SolvedBoardSchema),\n )\n status = fields.String(\n description='One of the py-sudoku.SolverStatus values.',\n )\n\n\nclass GeneratedBoardSchema(Schema):\n board = fields.List(\n fields.Integer(validate=validate.Range(min=0, max=9)),\n validate=validate.Length(81, 81)\n )\n gen_time = fields.Float(\n description=\"Elapsed time, in seconds, for generating the board.\",\n )\n status = fields.String(\n description='One of the py-sudoku.GeneratorStatus values.',\n )\n","repo_name":"raulcostajunior/api_sudoku","sub_path":"api/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21712526579","text":"from app import app, db, User, Post\nfrom flask_script import Manager\nfrom flask_alchemydumps import AlchemyDumps, AlchemyDumpsCommand\nfrom faker import Faker\n\nfake = Faker('zh_CN')\n\nalchemydumps = AlchemyDumps()\nalchemydumps.init_app(app, db)\nmanager = Manager(app)\nmanager.add_command('alchemydumps', AlchemyDumpsCommand)\n\n\n@manager.command\ndef initdb():\n \"\"\"Initialize database.\"\"\"\n db.create_all()\n\n\n@manager.command\n@manager.option('-u', '--user', help='User count')\n@manager.option('-p', '--post', help='Post count')\ndef initdata(user_count=3, post_count=10):\n \"\"\"Initialize data.\"\"\"\n\n db.drop_all()\n db.create_all()\n\n users = []\n for i in range(user_count):\n user = User(name=fake.name())\n users.append(user)\n db.session.add_all(users)\n db.session.commit()\n\n posts = []\n for i in range(post_count):\n post = Post(title=fake.sentence())\n posts.append(post)\n db.session.add_all(posts)\n db.session.commit()\n\n\n@manager.command\ndef add_user():\n user = User(name=fake.name())\n db.session.add(user)\n db.session.commit()\n\n\n@manager.command\ndef list_user():\n users = db.session.query(User).all()\n print(users)\n\n\nif __name__ == \"__main__\":\n manager.run()\n","repo_name":"AngelLiang/Flask-Demos","sub_path":"SQ02-flask-alchemydumps-demo/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"37495942866","text":"class DLinkNode:\n def __init__(self, key=0, value=0):\n self.key = key\n self.value = value\n self.prev = None\n self.next = None\n\n\nclass LRUCache:\n\n def __init__(self, capacity: int):\n self.head = DLinkNode()\n self.tail = DLinkNode()\n self.cache = dict()\n self.size = 0\n self.capacity = capacity\n self.head.next = self.tail\n self.tail.prev = self.head\n\n def addToHead(self, node: DLinkNode):\n head = self.head.next\n self.head.next = node\n node.prev = self.head\n node.next = head\n head.prev = node\n\n def removeNode(self, node: DLinkNode):\n node.prev.next = node.next\n node.next.prev = node.prev\n\n def moveToHead(self, node: DLinkNode):\n self.removeNode(node)\n self.addToHead(node)\n\n def removeTail(self) -> DLinkNode:\n node = self.tail.prev\n self.removeNode(node)\n return node\n\n def get(self, key: int) -> int:\n if key not in self.cache:\n return -1\n node = self.cache[key]\n self.moveToHead(node)\n return node.value\n\n def put(self, key: int, value: int) -> None:\n if key not in self.cache:\n node = DLinkNode(key=key, value=value)\n self.cache[key] = node\n self.addToHead(node)\n self.size += 1\n if self.size > self.capacity:\n tail = 
self.removeTail()\n del self.cache[tail.key]\n self.size -= 1\n else:\n node = self.cache[key]\n node.value = value\n self.moveToHead(node)\n\n# Your LRUCache object will be instantiated and called as such:\n# obj = LRUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)\n","repo_name":"qbnmmm/leetcode","sub_path":"剑指offer/剑指 Offer II 031. 最近最少使用缓存.py","file_name":"剑指 Offer II 031. 最近最少使用缓存.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73456768408","text":"from django.shortcuts import render,get_object_or_404, redirect\nfrom .models import Product\nfrom .forms import RawProductForm\nfrom .forms import ProductForm\nfrom django.http import Http404\n# Create your views here.\n\n# def animals_createview(request):\n# # print(f'.....post.....{request.POST}')\n# # print(f'.....get.....{request.GET}')\n# if request.method=='POST':\n# my_new_title=request.POST.get('title')\n# # Product.objects.create(title=my_new_title,price=22.12)\n# print(my_new_title)\n# context={}\n# return render(request,'allanimals/animal_create.html',context)\n# def animals_createview(request):\n# my_form = RawProductForm(request.POST)\n# if request.method==\"POST\":\n# my_form=RawProductForm(request.POST)\n# if my_form.is_valid():\n# print(my_form.cleaned_data)\n# Product.objects.create(**my_form.cleaned_data)\n# else:\n# print(my_form.errors)\n# context={\n# 'form':my_form\n# }\n# return render(request,'allanimals/animal_create.html',context)\n\ndef animals_view(request):\n obj=Product.objects.get(id=5)\n context={\n 'object':obj\n }\n return render(request,'allanimals/rex.html',context)\ndef animals_createview(request):\n form=ProductForm(request.POST or None)\n if form.is_valid():\n form.save()\n form=ProductForm()\n\n context={\n 'form':form\n }\n return render(request,'allanimals/animal_create.html',context)\n\ndef render_initial_data2(request):\n initial={\n 'title':\"default title \"\n }\n obj1=Product.objects.get(id=18)\n form=ProductForm(request.POST or None ,instance=obj1)\n # if form.is_valid():\n # form.save()\n context={\n 'form':form\n }\n return render(request,\"allanimals/animal_create.html\",context)\ndef dynamic_lookup_view(request,id):\n # obj=Product.objects.get(id=my_id)\n obj=get_object_or_404(Product,id=id)\n if request.method==\"POST\":\n obj.delete()\n return redirect('../../')\n\n # try :\n # obj = Product.objects.get(id=my_id)\n # except Product.DoesNotExist:\n # raise Http404\n\n context={\n \"object\":obj\n }\n return render(request,\"allanimals/animals_delete.html\",context)\n\ndef list_all_products(request):\n obj=Product.objects.all()\n context={\n 'object':obj\n }\n return render(request,'allanimals/listallanimals.html',context)\n\n\n\n\n\n\n\n","repo_name":"harika-c/PythonDjango","sub_path":"Scripts/products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8621267757","text":"import pandas as pd\n\n# reading data as a csv file using pandas\n# bank_df = pd.read_csv(\"./\") # automatically converts it into dataframe class\n\n# we can write a dataframe class as a csv file\nportfolio_df = pd.DataFrame({\n 'stock ticker symbol': ['AAPL', 'AMZN', 'T'],\n 'number of stocks': [3, 4, 9],\n 'price per share [$]': [3500, 200, 40]\n})\n\n# index is the side index for the rows\nportfolio_df.to_csv('sample_output.csv', index=False)\n\n# Reading tabular 
data from HTML\nhouse_prices_df = pd.read_html(\n    'https://www.livingin-canada.com/house-prices-canada.html')\n\n# reading json\ncomments_df = pd.read_json(\"https://jsonplaceholder.typicode.com/comments\")\nprint(comments_df)\n","repo_name":"AybarsAcar/dataAnalyticsFinance","sub_path":"library_review/basics_pandas/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"70887155607","text":"import os\nimport json\nfrom random import random\n\n\nclass AnnotationManager:\n\n\tdef __init__(self):\n\t\tself.annotationDict = {}\n\t\t\"\"\"\n\t\tHere's what this dict looks like:\n\n\t\t{\n\t\t\t'<category 1>' : {\t\n\t\t\t\t\t\t\t\t<id 1> : {\n\t\t\t\t\t\t\t\t\t\t\t<frame 1>: <annotation>, \n\t\t\t\t\t\t\t\t\t\t\t<frame 2>: <annotation>,\n\t\t\t\t\t\t\t\t\t\t\t ...\n\t\t\t\t\t\t\t\t\t\t }\n\t\t\t\t\t\t\t\t<id 2> : {\n\t\t\t\t\t\t\t\t\t\t\t<frame 1>: <annotation>, \n\t\t\t\t\t\t\t\t\t\t\t<frame 2>: <annotation>, \n\t\t\t\t\t\t\t\t\t\t\t...\n\t\t\t\t\t\t\t\t\t\t }\n\t\t\t\t\t\t\t\t...\n\t\t\t\t\t\t\t},\n\n\t\t\t'<category 2>' : {\t\n\t\t\t\t\t\t\t\t<id 1> : {\n\t\t\t\t\t\t\t\t\t\t\t<frame 1>: <annotation>, \n\t\t\t\t\t\t\t\t\t\t\t<frame 2>: <annotation>, \n\t\t\t\t\t\t\t\t\t\t\t...\n\t\t\t\t\t\t\t\t\t\t }\n\t\t\t\t\t\t\t\t<id 2> : {\n\t\t\t\t\t\t\t\t\t\t\t<frame 1>: <annotation>, \n\t\t\t\t\t\t\t\t\t\t\t<frame 2>: <annotation>, \n\t\t\t\t\t\t\t\t\t\t\t...\n\t\t\t\t\t\t\t\t\t\t }\n\t\t\t\t\t\t\t\t...\n\t\t\t\t\t\t\t},\n\t\t\t\t...\n\t\t}\n\t\t\"\"\"\n\n\tdef getAnnotationDict(self):\n\t\treturn self.annotationDict\n\n\t# Function to add a new instance of any category. This will not\n\t# create a new keyframe annotation\n\tdef addAnnotationAtFrame(self, frame, point1, point2, classLabel):\n\t\t\n\t\t# Get second level dict of {id : [Annotation(frame), ...]}\n\t\tidFrameDict = self.annotationDict.get(classLabel)\n\t\tannotation = None\n\t\tif not idFrameDict:\n\t\t\tannotation = Annotation(point1.x, point1.y, point2.x, point2.y, classLabel, 1)\n\t\t\tidFrameDict = {1 : {frame : annotation}}\n\t\t\tself.annotationDict[classLabel] = idFrameDict\n\t\telse:\n\t\t\t# Compute the smallest available id\n\t\t\tidsForCategory = sorted(idFrameDict.keys())\n\t\t\tnewId = idsForCategory[0]\n\t\t\twhile newId in idsForCategory:\n\t\t\t\tnewId += 1\n\t\t\tannotation = Annotation(point1.x, point1.y, point2.x, point2.y, classLabel, newId)\n\t\t\tframeDict = {frame : annotation}\n\t\t\tself.annotationDict[classLabel][newId] = frameDict\n\n\t\treturn annotation\n\n\tdef updateAnnotationAtFrame(self, annotation, frame, point1, point2):\n\n\t\t# Find the annotation framedict (there should be one)\n\t\tidFrameDict = self.annotationDict[annotation.category]\n\t\tframeDict = idFrameDict[annotation.id]\n\n\t\t# If there's an existing annotation for this frame already,\n\t\t# just update its parameters\n\t\tif frameDict.get(frame):\n\t\t\texistingAnnotation = frameDict[frame]\n\t\t\texistingAnnotation.x1 = point1.x\n\t\t\texistingAnnotation.y1 = point1.y\n\t\t\texistingAnnotation.x2 = point2.x\n\t\t\texistingAnnotation.y2 = point2.y\n\t\t\n\t\t# If not, create a new annotation for this frame\n\t\telse: \n\t\t\tcolor = frameDict[list(frameDict.keys())[0]].color\n\t\t\tframeDict[frame] = Annotation(point1.x, point1.y, point2.x, point2.y, annotation.category, annotation.id, color=color)\n\t\t\tidFrameDict[annotation.id] = frameDict\n\t\t\tself.annotationDict[annotation.category] = idFrameDict\n\t\t\n\t\treturn\n\n\tdef deleteAnnotation(self, category, idx):\n\t\tidFrameDict = self.annotationDict[category]\n\t\tidFrameDict.pop(idx)\n\t\tif not idFrameDict:\n\t\t\tself.annotationDict.pop(category)\n\t\treturn\n\n\tdef 
deleteAnnotationAtFrame(self, category, idx, frame):\n\t\tidFrameDict = self.annotationDict[category]\n\t\tframeDict = idFrameDict[idx]\n\t\tframeDict.pop(frame)\n\t\tif not frameDict:\n\t\t\tidFrameDict.pop(idx)\n\t\t\tif not idFrameDict:\n\t\t\t\tself.annotationDict.pop(category)\n\t\t\telse:\n\t\t\t\tself.annotationDict[category] = idFrameDict\n\t\treturn\n\n\tdef getWriteableAnnotations(self):\n\t\twriteableDict = {}\n\t\tfor category in self.annotationDict:\n\t\t\tidFrameDict = {}\n\t\t\tfor idx in self.annotationDict[category]:\n\t\t\t\tframeDict = {}\n\t\t\t\tfor frame in self.annotationDict[category][idx]:\n\t\t\t\t\tannotation = self.annotationDict[category][idx][frame]\n\t\t\t\t\tannotationWriteable = [annotation.x1, annotation.y1, \n\t\t\t\t\t\tannotation.x2, annotation.y2, annotation.category, \n\t\t\t\t\t\t\tannotation.id, annotation.color]\n\t\t\t\t\tframeDict[frame] = annotationWriteable\n\t\t\t\tidFrameDict[idx] = frameDict\n\t\t\twriteableDict[category] = idFrameDict\n\t\treturn writeableDict\n\n\tdef getProcessedAnnotations(self, rawData):\n\t\tannotationDict = {}\n\t\tfor category in rawData:\n\t\t\tidFrameDict = {}\n\t\t\tfor idx in rawData[category]:\n\t\t\t\tframeDict = {}\n\t\t\t\tfor frame in rawData[category][idx]:\n\t\t\t\t\trawAnnotation = rawData[category][idx][frame]\n\t\t\t\t\tannotation = Annotation(rawAnnotation[0], rawAnnotation[1], \n\t\t\t\t\t\trawAnnotation[2], rawAnnotation[3], rawAnnotation[4], \n\t\t\t\t\t\t\trawAnnotation[5], color=(rawAnnotation[6][0], \n\t\t\t\t\t\t\t\trawAnnotation[6][1], rawAnnotation[6][2]))\n\t\t\t\t\tframeDict[int(frame)] = annotation\n\t\t\t\tidFrameDict[int(idx)] = frameDict\n\t\t\tannotationDict[category] = idFrameDict\n\t\tself.annotationDict = annotationDict\n\n\tdef saveAnnotations(self, fileName):\n\t\tfileName = ''.join(fileName.split('.')[:-1]) + '.json'\n\t\twriteableDict = self.getWriteableAnnotations()\n\t\twith open(fileName, 'w') as fs:\n\t\t\tjson.dump(writeableDict, fs)\n\t\treturn\n\n\tdef loadAnnotations(self, fileName):\n\t\tfileName = ''.join(fileName.split('.')[:-1]) + '.json'\n\t\twith open(fileName, 'r') as fs:\n\t\t\trawData = json.load(fs)\n\n\t\tconditionedData = self.getProcessedAnnotations(rawData)\n\n\nclass Annotation:\n\tdef __init__(self, x1, y1, x2, y2, category, idx, color=None):\n\t\tself.x1 = x1\n\t\tself.y1 = y1\n\t\tself.x2 = x2\n\t\tself.y2 = y2\n\t\tself.category = category\n\t\tself.id = idx\n\n\t\tif color:\n\t\t\tself.color = color\n\n\t\t# Assign a random color if none specified\n\t\telse:\n\t\t\tself.color = (random(), random(), random())","repo_name":"gokul-raghuraman/videotate","sub_path":"annotationmanager.py","file_name":"annotationmanager.py","file_ext":"py","file_size_in_byte":5084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3588990517","text":"\nimport numpy as np\nimport sys\n\nsys.path.append('../')\nsys.path.append('E:\\study\\study\\Masters\\Project\\gym-pybullet-drones-0.5.2\\gym-pybullet-drones-0.5.2')\n\nfrom gym_pybullet_drones.envs.BaseAviary import BaseAviary\n\n\nclass MultiFlipController():\n \"\"\"Controller class for multiflips\"\"\"\n\n ################################################################################\n\n def __init__(self, env: BaseAviary):\n \"\"\" Initialization of class HW1Control.\n\n Parameters\n ----------\n env : BaseAviary\n The PyBullet-based simulation environment.\n\n \"\"\"\n self.g = env.G\n \"\"\"float: Gravity acceleration, in meters per second squared.\"\"\"\n 
self.mass = env.M\n \"\"\"float: The mass of quad from environment.\"\"\"\n self.timestep = env.TIMESTEP\n \"\"\"float: Simulation and control timestep.\"\"\"\n #self.kf_coeff = env.KF\n self.kf_coeff = 1.7518e-8\n\n \"\"\"float: RPMs to force coefficient.\"\"\"\n #self.km_coeff = env.KM\n self.km_coeff = 7.1834e-11\n \"\"\"float: RPMs to torque coefficient.\"\"\"\n\n\n self.p_coeff_position = 1.5\n \"\"\"float: Proportional coefficient for position control.\"\"\"\n self.d_coeff_position = 0.0\n \"\"\"float: Derivative coefficient for position control.\"\"\"\n\n self.cumulative_attitude_error = 0.0\n \"\"\" cumulative attitude error required for PI controller\"\"\"\n\n self.cumulative_altitude_error = 0.0\n \"\"\"cumulative altitude error required for PID controller\"\"\"\n\n self.prev_altitude_error = 0.0\n \"\"\" previous altitude error for PID controller\"\"\"\n\n #self.prop_dia = env.PROP_RADIUS*2\n self.prop_dia = 42.68e-3\n \"\"\" drone propeller diameter\"\"\"\n\n\n self.reset()\n\n ################################################################################\n\n def reset(self):\n \"\"\" Resets the controller counter.\"\"\"\n self.control_counter = 0\n\n ################################################################################\n def attitude_pi_controller(self, measured_attitude, desired_attitude):\n \"\"\"attitude PI controller as per paper,\n takes in 3 attitude angles from IMU and the respective desired angles\"\"\"\n # implement anti-windup mechanism\n kp1 = 1.5e-3\n kp2 = 1.5e-3\n kp3 = 1.5e-3\n ki1 = 0.000\n ki2 = 0.000\n ki3 = 0.000\n attitude_error = desired_attitude-measured_attitude\n self.cumulative_attitude_error += attitude_error\n Kp = np.asarray([[kp1, 0, 0], [0, kp2, 0], [0, 0, kp3]])\n Ki = np.asarray([[ki1, 0, 0], [0, ki2, 0], [0, 0, ki3]])\n result = Kp@attitude_error+Ki@self.cumulative_attitude_error\n print('result in pi=', result)\n return result\n ################################################################################\n def attitude_rate_p_controller(self, measure_rate, desired_rate):\n \"\"\"Proportional controller receives input from the PI controller and Gyro\"\"\"\n kp1 = 1.5e-4\n kp2 = 1.5e-4\n kp3 = 1.5e-3\n Kp = np.asarray([[kp1, 0, 0], [0, kp2, 0], [0, 0, kp3]])\n rate_error = desired_rate-measure_rate\n tau = Kp@rate_error\n print('result in p controller, tau', tau)\n return tau\n #################################################################################\n def altitude_pid_controller(self, measured_altitude, desired_altitude):\n \"\"\" PID to control drone altitude, receives desired altitude and barometer reading\"\"\"\n # implement anti-windup mechanism\n kp = 1.5e-4\n ki = 0.000\n kd = 0.00\n altitude_error = desired_altitude-measured_altitude\n self.cumulative_altitude_error += altitude_error\n error_rate = (altitude_error-self.prev_altitude_error)/self.timestep\n self.prev_altitude_error = altitude_error\n force = kp*altitude_error+ki*self.cumulative_altitude_error+kd*error_rate + self.g*self.mass #is gravity constant in the paper mass*g?\n print('result of pid, force', force)\n return force\n ####################################################################################################################\n def est_rotor_velocity(self, force_vector):\n \"\"\" map from force and torque to rotor angular velocity (squared), see paper for more details\"\"\"\n transf_mat = np.asarray([[self.kf_coeff, self.kf_coeff, self.kf_coeff, self.kf_coeff],\n [0, -self.kf_coeff*self.prop_dia, 0, self.kf_coeff*self.prop_dia],\n 
[-self.kf_coeff*self.prop_dia, 0, self.kf_coeff*self.prop_dia, 0],\n [-self.km_coeff, self.km_coeff, -self.km_coeff, self.km_coeff]])\n vel_squared = np.linalg.inv(transf_mat)@force_vector\n return vel_squared\n\n def compute_control_flip(self,\n current_attitude,\n current_attitude_rate,\n current_altitude,\n target_attitude,\n target_altitude\n ):\n \"\"\"Compute the propellers' RPMs for the target state, given the current state.\n\n Parameters\n ----------\n current_position : ndarray\n (3,)-shaped array of floats containing global x, y, z, in meters.\n current_velocity : ndarray\n (3,)-shaped array of floats containing global vx, vy, vz, in m/s.\n target_position : ndarray\n (3,)-shaped array of float containing global x, y, z, in meters.\n target_velocity : ndarray, optional\n (3,)-shaped array of floats containing global, in m/s.\n target_acceleration : ndarray, optional\n (3,)-shaped array of floats containing global, in m/s^2.\n\n Returns\n -------\n ndarray\n (4,)-shaped array of ints containing the desired RPMs of each propeller.\n \"\"\"\n self.control_counter += 1\n\n\n\n\n pi_result = self.attitude_pi_controller(current_attitude, target_attitude)\n tau_vec = self.attitude_rate_p_controller(current_attitude_rate, pi_result)\n force = np.asarray(self.altitude_pid_controller(current_altitude, target_altitude))\n #print(\"tau vec=\", tau_vec, \"force\", force)\n tau_vec = np.reshape(tau_vec, (3, 1))\n force_vec = np.vstack((force, tau_vec))\n prop_rpm = np.sqrt(self.est_rotor_velocity(force_vec))\n print(\"before rpm\", self.est_rotor_velocity(force_vec))\n print(\"Prop_rpm\", prop_rpm)\n prop_rpm = np.reshape(prop_rpm, (4,))\n return prop_rpm\n\n\n\n\n\n def compute_control(self,\n current_position,\n current_velocity,\n target_position,\n target_velocity=np.zeros(3),\n target_acceleration=np.zeros(3),\n ):\n \"\"\"Compute the propellers' RPMs for the target state, given the current state.\n\n Parameters\n ----------\n current_position : ndarray\n (3,)-shaped array of floats containing global x, y, z, in meters.\n current_velocity : ndarray\n (3,)-shaped array of floats containing global vx, vy, vz, in m/s.\n target_position : ndarray\n (3,)-shaped array of float containing global x, y, z, in meters.\n target_velocity : ndarray, optional\n (3,)-shaped array of floats containing global, in m/s.\n target_acceleration : ndarray, optional\n (3,)-shaped array of floats containing global, in m/s^2.\n\n Returns\n -------\n ndarray\n (4,)-shaped array of ints containing the desired RPMs of each propeller.\n \"\"\"\n self.control_counter += 1\n\n\n\n ##### Calculate position and velocity errors ###############\n current_pos_error = target_position[2] - current_position[2]\n current_vel_error = target_velocity[2] - current_velocity[2]\n\n #### Calculate input with a PD controller ##################\n # u = desired_acceleration + Kv * velocity_error + Kp * position_error\n u = target_acceleration[2] \\\n + self.d_coeff_position * current_vel_error \\\n + self.p_coeff_position * current_pos_error\n\n ##### Calculate propeller turn rates given the PD input ####\n # turn_rate = sqrt( (m*u + m*g) / (4*Kf) )\n propellers_rpm = np.sqrt((u * self.mass + self.g * self.mass) / (4 * self.kf_coeff))\n\n # For up-down motion, assign the same turn rates to all motors\n propellers_0_and_3_rpm, propellers_1_and_2_rpm = propellers_rpm, propellers_rpm\n\n\n #### Print relevant output #################################\n if self.control_counter % (1 / self.timestep) == 0:\n print(\"current_position\", 
current_position)\n print(\"current_velocity\", current_velocity)\n print(\"target_position\", target_position)\n print(\"target_velocity\", target_velocity)\n print(\"target_acceleration\", target_acceleration)\n\n return np.array([propellers_0_and_3_rpm, propellers_1_and_2_rpm,\n propellers_1_and_2_rpm, propellers_0_and_3_rpm])\n","repo_name":"shravankumargulvadi/Drone_Acrobatics","sub_path":"gym-pybullet-drones-0.5.2/Multiflip/multiflip_controller.py","file_name":"multiflip_controller.py","file_ext":"py","file_size_in_byte":9267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"40055790076","text":"# import libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport statsmodels.api as sm\r\n#############################################################################\r\n# load data\r\nda = pd.read_csv(\"50_Startups.csv\")\r\nx = da.iloc[:, :3].values\r\ny = da.iloc[:, -1].values\r\n#############################################################################\r\n# investigation on feature relations\r\n\r\n# Polting variables\r\nplt.scatter(x[:, 0], y)\r\nplt.scatter(x[:, 1], y)\r\nplt.scatter(x[:, 2], y)\r\n#############################################################################\r\n# data preprocessing\r\n\r\n# feature scaling\r\nfrom sklearn.preprocessing import StandardScaler\r\nx_scaler = StandardScaler()\r\nx_sc = x_scaler.fit_transform(x)\r\ny_scaler = StandardScaler()\r\ny_sc = y_scaler.fit_transform(y.reshape(len(y), 1))\r\n#############################################################################\r\n# train-test separation\r\nfrom sklearn.model_selection import train_test_split\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.25)\r\n#############################################################################\r\n# Fitting multiple Regression models\r\n\r\n# Multilinear regression\r\nfrom sklearn.linear_model import LinearRegression\r\nlinreg = LinearRegression()\r\nlinreg.fit(x_train, y_train)\r\nr2_lin = linreg.score(x_test, y_test)\r\n\r\n# Polynomial regression\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npolyreg = PolynomialFeatures(degree = 2)\r\nx_poly = polyreg.fit_transform(x_train)\r\npolyreg_1 = LinearRegression()\r\npolyreg_1.fit(x_poly, y_train)\r\n\r\n# Decision tree regression\r\nfrom sklearn.tree import DecisionTreeRegressor\r\ntreereg = DecisionTreeRegressor(criterion = 'friedman_mse', splitter = 'best', random_state = 10)\r\ntreereg.fit(x_train, y_train)\r\nr2_tree = treereg.score(x_test, y_test)\r\n\r\n# Random forest regression\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nrandreg = RandomForestRegressor(criterion = 'mae', n_estimators = 200, n_jobs = -1, random_state = 10)\r\nrandreg.fit(x_train, y_train)\r\nr2_rand = randreg.score(x_test, y_test)\r\n\r\n# XGBoost\r\nfrom xgboost import XGBRegressor\r\nboostreg = XGBRegressor(booster = 'gbtree', learning_rate = 0.5, n_estimators = 90, random_state = 10)\r\nboostreg.fit(x_train, y_train)\r\nr2_boost = boostreg.score(x_test, y_test)\r\n#############################################################################\r\n# model predictions\r\ny_pred_lin = linreg.predict(x_test)\r\ny_pred_poly = polyreg_1.predict(polyreg.fit_transform(x_test))\r\ny_pred_tree = treereg.predict(x_test)\r\ny_pred_rand = randreg.predict(x_test)\r\ny_pred_boost = boostreg.predict(x_test)\r\n#############################################################################\r\n# model 
evaluation and validation\r\n\r\n# R2 metric\r\nfrom sklearn.metrics import r2_score\r\nprint(r2_score(y_test, y_pred_lin),\r\n r2_score(y_test, y_pred_poly),\r\n r2_score(y_test, y_pred_tree),\r\n r2_score(y_test, y_pred_rand),\r\n r2_score(y_test, y_pred_boost))\r\n\r\n# k-fold cross validation\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\nlinscore = cross_val_score(estimator = linreg, X = x_train, y = y_train, cv = 10)\r\naccuracy_lin = linscore.mean()\r\n\r\npolyscore = cross_val_score(estimator = polyreg_1, X = polyreg.fit_transform(x_train), y = y_train, cv = 10)\r\naccuracy_poly = polyscore.mean()\r\n\r\ntreescore = cross_val_score(estimator = treereg, X = x_train, y = y_train, cv = 10)\r\naccuracy_tree = treescore.mean()\r\n\r\nrandscore = cross_val_score(estimator = randreg, X = x_train, y = y_train, cv = 10)\r\naccuracy_rand = randscore.mean()\r\n\r\nboostscore = cross_val_score(estimator = boostreg, X = x_train, y = y_train, cv = 10)\r\naccuracy_boost = boostscore.mean()\r\n#############################################################################\r\n# model optimization\r\n\r\n# grid search\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\nparameters_tree = [{\"criterion\": (\"mse\", \"mae\", \"friedman_mse\"), \"splitter\": (\"best\", \"random\")}]\r\ngrid_search_tree = GridSearchCV(estimator = treereg, param_grid = parameters_tree, scoring = \"r2\", cv = 10)\r\ngrid_search_tree.fit(x_train, y_train)\r\n\r\ngrid_search_tree.best_score_\r\ngrid_search_tree.best_params_\r\ngrid_search_tree.best_estimator_\r\n\r\nparameters_rand = [{\"n_estimators\": [150, 200, 250], \"criterion\": (\"mse\", \"mae\")}]\r\ngrid_search_rand = GridSearchCV(estimator = randreg, param_grid = parameters_rand, scoring = \"r2\", cv = 10, n_jobs = -1)\r\ngrid_search_rand.fit(x_train, y_train)\r\n\r\ngrid_search_rand.best_score_\r\ngrid_search_rand.best_params_\r\ngrid_search_rand.best_estimator_\r\n\r\nparameters_boost = [{\"learning_rate\": [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], \"n_estimators\": [90, 100, 120], \"booster\": (\"gbtree\", \"gblinear\")}]\r\ngrid_search_boost = GridSearchCV(estimator = boostreg, param_grid = parameters_boost, scoring = \"r2\", cv = 10, n_jobs = -1)\r\ngrid_search_boost.fit(x_train, y_train)\r\n\r\ngrid_search_boost.best_score_\r\ngrid_search_boost.best_params_\r\ngrid_search_boost.best_estimator_","repo_name":"aghelinejad/Data-Science","sub_path":"Regression/Profit-Prediction/XGBoost.py","file_name":"XGBoost.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74456597207","text":"class DLNode:\r\n def __init__(self, val) -> None:\r\n self.data = val\r\n self.right = None\r\n self.left = None\r\n\r\n def insertRight(self, val):\r\n prev = self\r\n middle = DLNode(val)\r\n next = self.right\r\n middle.right = next\r\n middle.left = prev\r\n prev.right = middle\r\n if next is not None:\r\n next.left = middle\r\n \r\n def insertLeft(self, val):\r\n # r q p\r\n p = self\r\n q = DLNode(val)\r\n r = p.left\r\n q.right = p\r\n q.left = r\r\n p.left = q\r\n if r is not None:\r\n r.right = q\r\n\r\n def delete(self):\r\n prev = self.left\r\n next = self.right\r\n if prev is not None:\r\n prev.right = next\r\n if next is not None:\r\n next.left = prev\r\n if prev is None:\r\n return next\r\n return prev\r\n \r\n def __len__(self):\r\n count = 0\r\n curr = self\r\n while curr is not None:\r\n count += 1\r\n curr = curr.right\r\n curr = self.left\r\n 
while curr is not None:\r\n            count += 1\r\n            curr = curr.left\r\n        return count\r\n    \r\n    def traverse(self):\r\n        current = self\r\n        # go all the way to the left\r\n        while current.left is not None:\r\n            current = current.left\r\n        # Now traverse the list by going right\r\n        print(\"X <- \", end=\"\")\r\n        while current.right is not None:\r\n            print(current.data, end=\" <-> \")\r\n            current = current.right\r\n        print(current.data, \"-> X\")\r\n        print()\r\n\r\n    def search(self, target):\r\n        b = self\r\n        while b is not None and b.data != target:\r\n            b = b.right\r\n        if b is not None:\r\n            return b\r\n        b = self.left\r\n        while b is not None and b.data != target:\r\n            b = b.left\r\n        return b\r\n    \r\ndef buildDlRight(values):\r\n    assert len(values) > 0, \"No values present\"\r\n    c = DLNode(values[0])\r\n    for i in range(1, len(values)):\r\n        c.insertRight(values[i])\r\n        c = c.right\r\n    return c\r\n\r\ndef buildDlLeft(val):\r\n    assert len(val) > 0, \"No values\"\r\n    c = DLNode(val[0])\r\n    for i in range(1, len(val)):\r\n        c.insertLeft(val[i])\r\n        c = c.left\r\n    return c\r\n\r\n# l = buildDlLeft([1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n# l.traverse()","repo_name":"USMAN-FAIZYAB-KHAN/Python-DSA","sub_path":"doublylinkedlist.py","file_name":"doublylinkedlist.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28127595542","text":"import argparse\nimport pickle\n\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom imblearn.over_sampling import RandomOverSampler, SMOTE\n\nimport numpy as np\n\nimport pandas as pd\n\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report, confusion_matrix\n\nimport xgboost\n\n\nFEATURES = ['is_Jan',\n            'is_Feb',\n            'is_Mar',\n            'is_Apr',\n            'is_May',\n            'is_Jun',\n            'is_Jul',\n            'is_Aug',\n            'is_Sep',\n            'is_Oct',\n            'is_Nov',\n            'is_Dec',\n            'is_g1',\n            'is_g2',\n            'is_g3',\n            'is_turf',\n            'is_dirt',\n            'is_obstacle',\n            'is_right',\n            'is_left',\n            'is_straight',\n            'distance',\n            'is_sunny',\n            'is_cloudy',\n            'is_rainy',\n            'is_turf_good',\n            'is_turf_slightly_heavy',\n            'is_turf_heavy',\n            'is_turf_bad',\n            'is_dirt_good',\n            'is_dirt_slightly_heavy',\n            'is_dirt_heavy',\n            'is_dirt_bad',\n            'is_male',\n            'is_female',\n            'is_castrated',\n            'age',\n            'weight',\n            'horse_weight',\n            'horse_weight_difference',\n            'enter_times',\n            'win_rate',\n            'mean_prise',\n            'jocky_enter_times',\n            'jocky_win_rate',\n            'jocky_mean_prise',\n            'trainer_times',\n            'trainer_win_rate',\n            'trainer_mean_prise',\n            'owner_times',\n            'owner_win_rate',\n            'owner_mean_prise']\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-i',\n                    '--infile',\n                    help='Input CSV file',\n                    type=str,\n                    required=True)\nparser.add_argument('-o',\n                    '--outfile',\n                    help='Output pickle file',\n                    type=str,\n                    required=True)\nargs = parser.parse_args()\ndf = pd.read_csv(args.infile)\n\nX = df[FEATURES].values\ny = (df['order'] == 1.0).values\ny = np.array([1 if i else 0 for i in y], dtype=np.int32)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\ncount = int(sum(y_train))\nrus = RandomUnderSampler(ratio={0: count, 1: count}, random_state=71)\nX_train, y_train = rus.fit_sample(X_train, y_train)\n\nxgb = xgboost.XGBClassifier()\n\nxgb = GridSearchCV(\n    xgboost.XGBClassifier(),\n    {'learning_rate': [0.01, 0.05, 0.1, 0.2],\n     'subsample': [0.5, 0.75, 1.0],\n     'max_depth': [2, 4, 6],\n     'n_estimators': [25, 50, 100, 200]},\n    cv=4,\n    scoring='f1',\n    
verbose=2,\n    n_jobs=-1)\nxgb.fit(X_train, y_train)\n\nprint('XGB')\nprint(\"%s\" % xgb.best_params_)\n\ny_true, y_pred = y_test, xgb.predict(X_test)\nprint(classification_report(y_true, y_pred))\nprint(\"Accuracy: {}\".format(accuracy_score(y_test, y_pred)))\nprint(\"confusion_matrix:\")\nprint(confusion_matrix(y_test, y_pred))\n\nwith open(args.outfile, \"wb\") as f:\n    pickle.dump(xgb, f)\n","repo_name":"Kurorororo/keiba_predictor","sub_path":"train_base.py","file_name":"train_base.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32881371694","text":"#!/usr/bin/python\n\nimport sys\nimport os\n\nmaf1 = open(sys.argv[1])\nmaf2 = open(sys.argv[2])\n\nhead_dict = {}\nh2_dict = {}\ndefault = next(maf1)\nsys.stdout.write(default)\nhead = next(maf1)\nheader = head.rstrip('\\n').split('\\t')\n# track length on first file header to create a spacer for unused fields\nm1_len = len(header)\n# will track positions of header in first maf, tack on other values at end for second maf\ni = 0\nfor entry in header:\n    head_dict[entry] = i\n    i += 1\nnext(maf2)\nhead2 = next(maf2)\nheader2 = head2.rstrip('\\n').split('\\t')\nj = 0\nfor entry in header2:\n    if entry not in head_dict:\n        header.append(entry)\n    h2_dict[entry] = j\n    j += 1\nsys.stdout.write('\\t'.join(header) + '\\n')\n\nspacer = ''\nfor i in xrange(0, len(header)-m1_len, 1):\n    spacer = spacer + '\\t'\nfor line in maf1:\n    line = line.rstrip('\\n')\n    sys.stdout.write(line + spacer + '\\n')\nmaf1.close()\n\nfor line in maf2:\n    info = line.rstrip('\\n').split('\\t')\n    diff = []\n    sys.stdout.write(info[0])\n    for i in xrange(1, len(header), 1):\n        if header[i] in h2_dict:\n            sys.stdout.write('\\t' + info[h2_dict[header[i]]])\n        else:\n            sys.stdout.write('\\t')\n    #for i in xrange(1, len(header2), 1):\n    #    if header2[i] not in head_dict:\n    #        sys.stdout.write('\\t' + info[i])\n    print\nmaf2.close()","repo_name":"migbro/project-specific","sub_path":"cpci/cbioportal/combine_somatic_germ_maf.py","file_name":"combine_somatic_germ_maf.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21941720044","text":"# For questions: Max.Schellenberg@ruhr-uni-bochum.de\r\n\r\nimport random\r\nimport sys\r\nimport traceback\r\n\r\nimport preprocessing_item_generation as TG\r\n\r\n_PYTHON_VERSION = sys.version_info\r\n\r\ntry:\r\n    from tqdm import tqdm\r\n\r\n    _TQDM_AVAILABLE = True\r\nexcept ImportError:\r\n    _TQDM_AVAILABLE = False\r\n# random.seed(1337) # Set Seed for Program\r\nimport pandas as pd\r\n\r\n\r\ndef generate_item_tuple(\r\n    pre_context, pre_target_context, target, post_target_context, post_context\r\n):\r\n    col_name = \"Corr_token\"\r\n\r\n    try:\r\n        if not pre_context.empty:\r\n            pre_context_processed = TG.correct_punct(pre_context[col_name].values)\r\n\r\n        elif pre_context.empty:\r\n            pre_context_processed = \"\"\r\n    except AttributeError:\r\n        pre_context_processed = pre_context\r\n\r\n    try:\r\n        if not post_context.empty:\r\n            post_context_processed = TG.correct_punct(post_context[col_name].values)\r\n\r\n        elif post_context.empty:\r\n            post_context_processed = \"\"\r\n    except AttributeError:\r\n        post_context_processed = post_context\r\n\r\n    pre_target_context_processed = TG.correct_punct(pre_target_context[col_name].values)\r\n    target_processed = TG.correct_punct(target[col_name].values)\r\n    post_target_context_processed = TG.correct_punct(\r\n        
post_target_context[col_name].values\r\n )\r\n #\r\n # try:\r\n # pre_context_processed\r\n # except NameError:\r\n # pre_context_processed = pre_context\r\n #\r\n # try:\r\n # post_context_processed\r\n # except NameError:\r\n # post_context_processed = post_context\r\n\r\n return (\r\n pre_context_processed,\r\n pre_target_context_processed,\r\n target_processed,\r\n post_target_context_processed,\r\n post_context_processed,\r\n )\r\n\r\n\r\ndef generate_items_pandas(\r\n items,\r\n epoch_dict,\r\n window_size,\r\n num_items_e2=15,\r\n num_items_e4=15,\r\n num_items_e2_e4=30,\r\n max_len_sent_target=30,\r\n max_sampling_steps=10000,\r\n):\r\n pos_tags_to_skip = [\"$.\", \"$,\", \"$(\"]\r\n generated_items = {item: [] for item in items}\r\n # save already loaded dataframes here\r\n csv_loaded = dict()\r\n assert window_size % 2 == 0, \"Please define an even window_size.\"\r\n index_before_after = int(window_size / 2)\r\n\r\n if _TQDM_AVAILABLE:\r\n items = tqdm(items)\r\n for item in items:\r\n generated_items_tmp = {key: [] for key in epoch_dict.keys()}\r\n print(f\"Processing item '{item}'...\")\r\n\r\n counter = 0\r\n while (\r\n len(generated_items_tmp[\"E2\"]) != num_items_e2\r\n and len(generated_items_tmp[\"E4\"]) != num_items_e4\r\n and len(generated_items_tmp[\"E2_E4\"]) != num_items_e2_e4\r\n ) or counter <= max_sampling_steps:\r\n counter += 1\r\n epoch, csv_file_paths = random.choice(list(epoch_dict.items()))\r\n path_csv1, path_csv2 = random.choice(csv_file_paths)\r\n\r\n if path_csv1 not in csv_loaded:\r\n df1 = pd.read_csv(path_csv1, delimiter=\"\\t\", encoding=\"utf-8\")\r\n csv_loaded[path_csv1] = df1\r\n else:\r\n df1 = csv_loaded[path_csv1]\r\n if path_csv2 not in csv_loaded:\r\n df2 = pd.read_csv(path_csv2, delimiter=\"\\t\", encoding=\"utf-8\")\r\n csv_loaded[path_csv2] = df2\r\n else:\r\n df2 = csv_loaded[path_csv2]\r\n\r\n df1_only_rows_with_item = df1[df1[\"Corr_token\"] == item]\r\n df2_only_rows_with_item = df2[df2[\"Corr_token\"] == item]\r\n # TODO: add count for \"skipping\" punctuation for token count\r\n # generate sentence pairs + contexts\r\n if (\r\n len(df1_only_rows_with_item) > 0\r\n and len(df2_only_rows_with_item) > 0\r\n ):\r\n df1_only_rows_with_item = df1[df1[\"Corr_token\"] == item].sample(1)\r\n df2_only_rows_with_item = df2[df2[\"Corr_token\"] == item].sample(1)\r\n\r\n sent_id1 = df1_only_rows_with_item[\"Sent_ID\"].values[0]\r\n sent_id2 = df2_only_rows_with_item[\"Sent_ID\"].values[0]\r\n\r\n sent_len1 = len(df1[df1[\"Sent_ID\"] == sent_id1])\r\n sent_len2 = len(df2[df2[\"Sent_ID\"] == sent_id2])\r\n\r\n if sent_len1 <= max_len_sent_target and sent_len2 <= max_len_sent_target:\r\n\r\n i1 = df1_only_rows_with_item.index.values[0]\r\n i2 = df2_only_rows_with_item.index.values[0]\r\n try:\r\n target1 = df1.iloc[i1 : i1 + 1]\r\n sent_id1 = target1[\"Sent_ID\"].values[0]\r\n target_sent1 = df1[df1[\"Sent_ID\"] == sent_id1]\r\n start_idx_target_sent1 = target_sent1.index[0]\r\n end_idx_target_sent1 = target_sent1.index[-1]\r\n target_before_sent1 = df1.iloc[start_idx_target_sent1:i1]\r\n target_after_sent1 = df1.iloc[\r\n i1 + 1 : end_idx_target_sent1 + 1\r\n ]\r\n\r\n num_tokens_before_rest1 = index_before_after - (\r\n i1 - start_idx_target_sent1\r\n )\r\n num_tokens_after_rest1 = index_before_after - (\r\n end_idx_target_sent1 - i1\r\n )\r\n\r\n context_after1 = \"\"\r\n context_before1 = \"\"\r\n\r\n # not enough pre-context available\r\n # reason 1: target at beginning of document --> ad num_tokens_before_rest to post-context instead\r\n if 
(\r\n num_tokens_before_rest1 > 0\r\n and (start_idx_target_sent1 - num_tokens_before_rest1)\r\n <= 0\r\n ):\r\n context_before1 = \"\" # df1[i+1:i+index_before_after+1]\r\n context_after1 = df1[\r\n end_idx_target_sent1\r\n + 1 : end_idx_target_sent1\r\n + index_before_after\r\n + num_tokens_after_rest1\r\n + num_tokens_before_rest1\r\n + 1\r\n ]\r\n else:\r\n context_before1 = df1[\r\n start_idx_target_sent1\r\n - num_tokens_before_rest1 : start_idx_target_sent1\r\n # + 1\r\n ]\r\n\r\n try:\r\n if context_after1.empty:\r\n context_after1 = df1[\r\n end_idx_target_sent1\r\n + 1 : end_idx_target_sent1\r\n + num_tokens_after_rest1\r\n + 1\r\n ]\r\n except AttributeError:\r\n context_after1 = df1[\r\n end_idx_target_sent1\r\n + 1 : end_idx_target_sent1\r\n + num_tokens_after_rest1\r\n + 1\r\n ]\r\n\r\n # not enough post-context available\r\n # reason 1: target at end of document --> ad num_tokens_after_rest to pre-context instead\r\n if (\r\n num_tokens_after_rest1 > 0\r\n and (end_idx_target_sent1 + num_tokens_after_rest1)\r\n >= len(df1)\r\n and context_after1.empty\r\n ):\r\n context_after1 = \"\" # df1[i+1:i+index_before_after+1]\r\n context_before1 = df1[\r\n start_idx_target_sent1\r\n - index_before_after\r\n - num_tokens_after_rest1\r\n - num_tokens_before_rest1 : start_idx_target_sent1 # +1\r\n ]\r\n # reason 2: post context longer than right side of context window\r\n elif num_tokens_after_rest1 < 0 and context_after1.empty:\r\n context_after1 = \"\"\r\n\r\n target2 = df2.iloc[i2: i2 + 1]\r\n sent_id2 = target2[\"Sent_ID\"].values[0]\r\n target_sent2 = df2[df2[\"Sent_ID\"] == sent_id2]\r\n start_idx_target_sent2 = target_sent2.index[0]\r\n end_idx_target_sent2 = target_sent2.index[-1]\r\n target_before_sent2 = df2.iloc[start_idx_target_sent2:i2]\r\n target_after_sent2 = df2.iloc[\r\n i2 + 1: end_idx_target_sent2 + 1\r\n ]\r\n\r\n num_tokens_before_rest2 = index_before_after - (\r\n i2 - start_idx_target_sent2\r\n )\r\n num_tokens_after_rest2 = index_before_after - (\r\n end_idx_target_sent2 - i2\r\n )\r\n\r\n context_after2 = \"\"\r\n context_before2 = \"\"\r\n\r\n # not enough pre-context available\r\n # reason 1: target at beginning of document --> ad num_tokens_before_rest to post-context instead\r\n if (\r\n num_tokens_before_rest2 > 0\r\n and (start_idx_target_sent2 - num_tokens_before_rest2)\r\n <= 0\r\n ):\r\n context_before2 = \"\" # df1[i+1:i+index_before_after+1]\r\n context_after2 = df2[\r\n end_idx_target_sent2\r\n + 1: end_idx_target_sent2\r\n + index_before_after\r\n + num_tokens_after_rest2\r\n + num_tokens_before_rest2\r\n + 1\r\n ]\r\n else:\r\n context_before2 = df2[\r\n start_idx_target_sent2\r\n - num_tokens_before_rest2: start_idx_target_sent2\r\n # + 1\r\n ]\r\n\r\n try:\r\n if context_after2.empty:\r\n context_after2 = df2[\r\n end_idx_target_sent2\r\n + 1: end_idx_target_sent2\r\n + num_tokens_after_rest2\r\n + 1\r\n ]\r\n except AttributeError:\r\n context_after2 = df2[\r\n end_idx_target_sent2\r\n + 1: end_idx_target_sent2\r\n + num_tokens_after_rest2\r\n + 1\r\n ]\r\n\r\n # not enough post-context available\r\n # reason 1: target at end of document --> ad num_tokens_after_rest to pre-context instead\r\n if (\r\n num_tokens_after_rest2 > 0\r\n and (end_idx_target_sent2 + num_tokens_after_rest2)\r\n >= len(df2)\r\n and context_after2.empty\r\n ):\r\n context_after2 = \"\" # df1[i+1:i+index_before_after+1]\r\n context_before2 = df2[\r\n start_idx_target_sent2\r\n - index_before_after\r\n - num_tokens_after_rest2\r\n - num_tokens_before_rest2: 
start_idx_target_sent2 # +1\r\n ]\r\n # reason 2: post context longer than right side of context window\r\n\r\n elif num_tokens_after_rest2 < 0 and context_after2.empty:\r\n context_after2 = \"\"\r\n\r\n item1 = (\r\n context_before1,\r\n target_before_sent1,\r\n target1,\r\n target_after_sent1,\r\n context_after1,\r\n )\r\n item1 = generate_item_tuple(*item1)\r\n\r\n # add meta info\r\n item1_final = (item1, [path_csv1, item, sent_id1, epoch])\r\n item2 = (\r\n context_before2,\r\n target_before_sent2,\r\n target2,\r\n target_after_sent2,\r\n context_after2,\r\n )\r\n item2 = generate_item_tuple(*item2)\r\n\r\n # add meta info\r\n item2_final = (item2, [path_csv2, item, sent_id2, epoch])\r\n\r\n if (item1_final, item2_final) not in generated_items_tmp[epoch]:\r\n\r\n if epoch == \"E2\":\r\n if len(generated_items_tmp[epoch]) < num_items_e2:\r\n generated_items_tmp[epoch].append((item1_final, item2_final))\r\n print(\r\n f\"Unique item found in {path_csv2}, adding to list...\"\r\n )\r\n\r\n elif epoch == \"E4\":\r\n if len(generated_items_tmp[epoch]) < num_items_e4:\r\n generated_items_tmp[epoch].append((item1_final, item2_final))\r\n print(\r\n f\"Unique item found in {path_csv2}, adding to list...\"\r\n )\r\n\r\n elif epoch == \"E2_E4\":\r\n if len(generated_items_tmp[epoch]) < num_items_e2_e4:\r\n generated_items_tmp[epoch].append((item1_final, item2_final))\r\n print(\r\n f\"Unique item found in {path_csv2}, adding to list...\"\r\n )\r\n\r\n else:\r\n pass\r\n # print(\"Item already in list, skipping...\")\r\n\r\n except Exception as e:\r\n print(\r\n f\"An error occurred in row with indices {i1, i2}, skipping...\"\r\n )\r\n if _PYTHON_VERSION[1] <= 8:\r\n print(e)\r\n else:\r\n traceback.print_exception(e)\r\n\r\n if len(generated_items_tmp[\"E2\"]) == num_items_e2 and len(generated_items_tmp[\"E4\"]) == num_items_e4 and len(generated_items_tmp[\"E2_E4\"]) == num_items_e2_e4:\r\n counter += max_sampling_steps\r\n\r\n e2_items_tmp_values = generated_items_tmp[\"E2\"]\r\n e4_items_tmp_values = generated_items_tmp[\"E4\"]\r\n e2_e4_items_tmp_values = generated_items_tmp[\"E2_E4\"]\r\n\r\n if (\r\n len(e2_items_tmp_values) >= num_items_e2\r\n and len(e4_items_tmp_values) >= num_items_e4\r\n and len(e2_e4_items_tmp_values) >= num_items_e2_e4\r\n ):\r\n generated_items[item] += e2_items_tmp_values\r\n generated_items[item] += e4_items_tmp_values\r\n generated_items[item] += e2_e4_items_tmp_values\r\n random.shuffle(generated_items[item])\r\n\r\n else:\r\n print(\"Not enough items found in at least one epoch, skipping item...\")\r\n\r\n # process final item dict in parallel and write n item pairs sequentially to list, such that result list has format:\r\n # [item_1_1, item_2_1,... item_n_1, ... item_1_2, item2_2, .. 
item_n_2, ...]\r\n zip_list_values = list(zip(*generated_items.values()))\r\n\r\n results_flattened = []\r\n for item in zip_list_values:\r\n for item_pair in item:\r\n results_flattened.append(item_pair)\r\n\r\n return results_flattened\r\n\r\n\r\ndef all_unique(x):\r\n seen = list()\r\n return not any(i in seen or seen.append(i) for i in x)\r\n\r\n\r\ndef get_targets(filename, target, window_size):\r\n # essentially the runscript() of the sub-program\r\n\r\n csv_list, results = TG.get_data(filename, target)\r\n line_number = TG.get_list_number(csv_list, results)\r\n\r\n (\r\n target_word_list,\r\n window_before,\r\n window_after,\r\n target_sid_list,\r\n window_before_sid,\r\n window_after_sid,\r\n ) = TG.get_window(line_number, csv_list, window_size)\r\n targets, targets_sid_list = TG.get_sent(\r\n target_word_list,\r\n window_before,\r\n window_after,\r\n target_sid_list,\r\n window_before_sid,\r\n window_after_sid,\r\n )\r\n targets_final, targets_final_sid = TG.filter_target(targets, targets_sid_list)\r\n\r\n ## Create the random variable\r\n ## If there is no occurrence of the target word, return False\r\n if len(targets_final) == 0:\r\n return False\r\n ## If there is only one occurrence, use random variable 0 to index position 0\r\n ## and run the function.\r\n elif len(targets_final) == 1:\r\n rand_numb = 0\r\n (\r\n target_context_before,\r\n target_sent_before,\r\n target_word,\r\n target_sent_after,\r\n target_context_after,\r\n target_random_sid,\r\n ) = TG.get_random(targets_final, rand_numb, targets_final_sid)\r\n ## In all \"normal\" cases, create a random variable and run the function\r\n else:\r\n rand_numb = random.randint(0, len(targets_final) - 1)\r\n (\r\n target_context_before,\r\n target_sent_before,\r\n target_word,\r\n target_sent_after,\r\n target_context_after,\r\n target_random_sid,\r\n ) = TG.get_random(targets_final, rand_numb, targets_final_sid)\r\n\r\n ## Log info list with the corpus name, the target word, and the SID of the target word.\r\n log_data = []\r\n log_data.append(filename)\r\n log_data.append(target)\r\n log_data.append(target_random_sid)\r\n\r\n output_tuple = (\r\n target_context_before,\r\n target_sent_before,\r\n target_word,\r\n target_sent_after,\r\n target_context_after,\r\n )\r\n\r\n output_tuple = TG.string_process(output_tuple)\r\n\r\n return output_tuple, log_data\r\n\r\n\r\ndef generate_items(\r\n target_list,\r\n epochen_dict,\r\n window_size,\r\n minquant_e2=15,\r\n minquant_e4=15,\r\n minquant_e2_e4=30,\r\n):\r\n \"\"\"\r\n Input:\r\n target_list(list) - list of target words (strings) to search for\r\n epochen_dict(dict) - dictionary of the epochs; keys are the identifiers \"E2\", \"E4\", \"E2E4\", values are lists of tuples, and every entry of a tuple is a file name\r\n window_size(int) - window size of the output (tokens before and after the sentence in which the target word occurs)\r\n\r\n Output:\r\n results_list(list) - list of tuples, always 2 items per tuple. Each item is itself a tuple: index 0 is the sentence, split into 5 strings, index 1 is the log data\r\n\r\n The program iterates over all targets, within each target over the epoch identifiers, and within each identifier over the list per epoch.\r\n\r\n Each epoch has a minquant (minimum quantity) value. For E2 and E4 it is 15 each, for E2E4 it is 30.\r\n Once the program finds enough (15 or 
30) items, it automatically moves on.\r\n So each target ends up with 60 items.\r\n\r\n final_results_list(list) is still in generation order at the end and must be shuffled (with a seed)\r\n \"\"\"\r\n\r\n debugmode = True\r\n epochen_key_list = [\"E2\", \"E4\", \"E2_E4\"]\r\n minquant_dict = {\"E2\": minquant_e2, \"E4\": minquant_e4, \"E2_E4\": minquant_e2_e4}\r\n\r\n final_results_list = (\r\n []\r\n ) # list of items and log data, stored as tuples; one tuple is always one item, which consists of 2 sentences\r\n loopmustbreak = False\r\n\r\n # iterate over all targets\r\n if _TQDM_AVAILABLE:\r\n target_list = tqdm(target_list)\r\n for target in target_list:\r\n results_list = [] # list for caching all items for one target\r\n logdata_checklist = (\r\n []\r\n ) # list for caching log data and checking whether an item has already occurred\r\n\r\n if debugmode == True:\r\n print(\"####### \" + \"processing \" + target + \" #######\\n\")\r\n\r\n # iterates over the keys of the epoch dict.\r\n for epoche in epochen_key_list:\r\n if debugmode == True:\r\n print(\"####### \" + \"processing in \" + epoche + \" #######\\n\")\r\n\r\n minquant_counter = 0\r\n\r\n # iterates over the list of tuples stored for each epoch\r\n for corpus_tuple in epochen_dict[epoche]:\r\n if minquant_counter < minquant_dict[epoche]:\r\n results = ()\r\n\r\n # iterates over the tuple\r\n for text in corpus_tuple:\r\n # emergency break (?)\r\n if loopmustbreak == True:\r\n loopmustbreak = False\r\n break\r\n\r\n if debugmode == True:\r\n print(\"processing \" + target + \" in \" + text)\r\n\r\n # reset the test variable\r\n tuple_unique = False\r\n\r\n # generates one of 2 results, repeated for the second one\r\n while tuple_unique == False:\r\n # the target is generated and cached\r\n result = get_targets(text, target, window_size)\r\n\r\n # if the word is not found in the text, discard\r\n if result == False:\r\n loopmustbreak = True\r\n\r\n if debugmode == True:\r\n print(target + \" not in Text, break \\n\")\r\n\r\n break\r\n\r\n result[1].append(epoche) # add the epoch to the log data\r\n\r\n # if the result has not occurred before, save it\r\n # if it is a duplicate, abort.\r\n\r\n if result[1] not in logdata_checklist:\r\n if debugmode == True:\r\n print(\"saving to results tuple\")\r\n\r\n # store the log data for the duplication check\r\n logdata_checklist.append(result[1])\r\n\r\n results += (result,)\r\n tuple_unique = True\r\n\r\n else:\r\n if debugmode == True:\r\n print(\"item doubled, break \\n\")\r\n results = ()\r\n break\r\n\r\n if results:\r\n if debugmode == True:\r\n print(\"saving to results_list\")\r\n results_list.append(results)\r\n minquant_counter += 1\r\n if debugmode == True:\r\n print(\r\n \"Counter: \"\r\n + str(minquant_counter)\r\n + \" out of \"\r\n + str(minquant_dict[epoche])\r\n + \" for \"\r\n + epoche\r\n + \"\\n\"\r\n )\r\n\r\n # check whether all found items are unique, i.e. no duplicates\r\n if all_unique(logdata_checklist):\r\n if debugmode == True:\r\n print(\"all Items unique\")\r\n # check whether enough items were found for a target word. 
If not, discard them\r\n if (\r\n len(results_list)\r\n == minquant_dict[\"E2\"] + minquant_dict[\"E4\"] + minquant_dict[\"E2_E4\"]\r\n ):\r\n if debugmode == True:\r\n print(\"enough Items found for \" + target + \", saving to final list\\n\")\r\n for i in results_list:\r\n final_results_list.append(i)\r\n else:\r\n if debugmode == True:\r\n print(\r\n \"not enough Items for \" + target + \" found, moving to next target\\n\"\r\n )\r\n\r\n # shuffle the list\r\n random.shuffle(final_results_list)\r\n\r\n return final_results_list\r\n\r\n\r\ndef main():\r\n ex_filename = \"data/test/annotations/weigel_gnothi_1615.csv\"\r\n ex_filename2 = \"data/test/annotations/dannhauer_catechismus04_1653.csv\"\r\n\r\n ex_epochen_dict = {\r\n \"E2\": [\r\n (ex_filename, ex_filename),\r\n (ex_filename, ex_filename2),\r\n (ex_filename2, ex_filename2),\r\n (ex_filename, ex_filename),\r\n (ex_filename, ex_filename2),\r\n (ex_filename2, ex_filename2),\r\n (ex_filename, ex_filename),\r\n (ex_filename, ex_filename2),\r\n (ex_filename2, ex_filename2),\r\n (ex_filename, ex_filename),\r\n (ex_filename, ex_filename2),\r\n (ex_filename2, ex_filename2),\r\n ],\r\n \"E4\": [\r\n (ex_filename, ex_filename),\r\n (ex_filename, ex_filename2),\r\n (ex_filename2, ex_filename2),\r\n (ex_filename, ex_filename),\r\n (ex_filename, ex_filename2),\r\n (ex_filename2, ex_filename2),\r\n (ex_filename, ex_filename),\r\n (ex_filename, ex_filename2),\r\n (ex_filename2, ex_filename2),\r\n (ex_filename, ex_filename),\r\n (ex_filename, ex_filename2),\r\n (ex_filename2, ex_filename2),\r\n ],\r\n \"E2_E4\": [\r\n (ex_filename, ex_filename),\r\n (ex_filename, ex_filename2),\r\n (ex_filename2, ex_filename2),\r\n (ex_filename, ex_filename),\r\n (ex_filename, ex_filename2),\r\n (ex_filename2, ex_filename2),\r\n (ex_filename, ex_filename),\r\n (ex_filename, ex_filename2),\r\n (ex_filename2, ex_filename2),\r\n (ex_filename, ex_filename),\r\n (ex_filename, ex_filename2),\r\n (ex_filename2, ex_filename2),\r\n ],\r\n }\r\n\r\n ex_target_list = [\"Gott\", \"Sonne\"]\r\n\r\n window_size = 50\r\n\r\n final_targets = generate_items(\r\n ex_target_list, ex_epochen_dict, window_size, 2, 2, 2\r\n )\r\n\r\n for i in final_targets:\r\n print(i)\r\n print(len(final_targets))\r\n return final_targets\r\n\r\n\r\ndebugmode = False\r\n\r\nif __name__ == \"__main__\":\r\n # debugmode = True\r\n # final_targets = main()\r\n\r\n import pickle\r\n\r\n items = [\"und\", \"aber\", \"er\", \"sie\", \"es\"]\r\n path_docs_pairwise = \"data/test/docs_pairwise/docs_pairwise.pkl\"\r\n with open(path_docs_pairwise, \"rb\") as f:\r\n doc_pairwise = pickle.load(f)\r\n import os\r\n\r\n for k, v in doc_pairwise.items():\r\n for i, l in enumerate(v):\r\n doc_pairwise[k][i][0] = os.path.join(\r\n \"data/test/annotations/\", doc_pairwise[k][i][0] + \".csv\"\r\n )\r\n doc_pairwise[k][i][1] = os.path.join(\r\n \"data/test/annotations/\", doc_pairwise[k][i][1] + \".csv\"\r\n )\r\n\r\n # doc_pairwise[\"E2\"] = [doc_pairwise[\"E2\"][0]]\r\n # doc_pairwise[\"E4\"] = [doc_pairwise[\"E4\"][0]]\r\n # doc_pairwise[\"E2_E4\"] = [doc_pairwise[\"E2_E4\"][0]]\r\n\r\n out = generate_items_pandas(items, doc_pairwise, 80, 5, 5, 10, 50, 100)\r\n","repo_name":"theThing92/dta-preprocessing","sub_path":"item_generation.py","file_name":"item_generation.py","file_ext":"py","file_size_in_byte":28824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3511513307","text":"import logging\nimport time\nimport sys\n\ncurrent_time = 
time.strftime('%Y-%m-%d')\n\nlogger = logging.getLogger(__name__)\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO, format=('%(asctime)s [%(levelname)s] > %(message)s'))\nformatter = logging.Formatter('%(asctime)s [%(levelname)s] > %(message)s')\n\nhandler = logging.FileHandler('logs/maxq-' + current_time.replace(':', '') + '.log', 'w', 'utf-8')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n","repo_name":"jclishman/maxq-irc-bot","sub_path":"bot_logging.py","file_name":"bot_logging.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"27238722780","text":"#!/usr/bin/python\n#\n# mdetect.py\n# \n# Measure distance using an ultrasonic module\n# Use std deviation calc to detect good readings and \n# to throw out bad readings \n#\n# Created by : Dennis Kornbluh\n# Date : 08/11/2017\n# -----------------------\n\n# Import required Python libraries\nfrom __future__ import print_function\nimport time\nimport RPi.GPIO as GPIO\nimport math\n\n# Use BCM GPIO references\n# instead of physical pin numbers\nGPIO.setmode(GPIO.BCM)\n\n# Define GPIO to use on Pi\nGPIO_TRIGGER = 23\nGPIO_ECHO = 24\n\n# Speed of sound in cm/s at temperature\ntemperature = 20\nspeedSound = 33100 + (0.6*temperature)\n\ndef computeStdDev(list):\n\tmn = mean(list) \n\tnewList = []\n\tfor val in list:\n\t\ttmp = val - mn\n\t\ttmp *= tmp\n\t\tnewList.append(tmp)\n\tmn = mean(newList)\n\tstddev = math.sqrt(mn) \n\treturn stddev\n\t\ndef mean(list):\n\tmn = 0.0\n\tfor val in list:\n\t\tmn += val\n\tmn = mn / len(list)\n\treturn mn\n\nprint(\"Ultrasonic Measurement\")\nprint(\"Speed of sound is\",speedSound/100,\"m/s at \",temperature,\"deg\")\n\n# Set pins as output and input\nGPIO.setup(GPIO_TRIGGER,GPIO.OUT) # Trigger\nGPIO.setup(GPIO_ECHO,GPIO.IN) # Echo\n\nCM2FT = 0.0328084\nLODEV = 1.0\nHIDEV = 100.0\nMAX = 20\nreadCount = 0\nlist = []\n\ntry:\n\twhile 1:\n\t\t# Set trigger to False (Low)\n\t\tGPIO.output(GPIO_TRIGGER, False)\n\n\t\t# Allow module to settle\n\t\ttime.sleep(0.1)\n\n\t\t# Send 10us pulse to trigger\n\t\tGPIO.output(GPIO_TRIGGER, True)\n\t\t# Wait 10us\n\t\ttime.sleep(0.00001)\n\t\tGPIO.output(GPIO_TRIGGER, False)\n\t\tstart = time.time()\n\n\t\twhile GPIO.input(GPIO_ECHO)==0:\n \t\t\tstart = time.time()\n\n\t\twhile GPIO.input(GPIO_ECHO)==1:\n \t\t\tstop = time.time()\n\n\t\t# Calculate pulse length\n\t\telapsed = stop-start\n\n\t\t# Distance pulse travelled in that time is time\n\t\t# multiplied by the speed of sound (cm/s)\n\t\tdistance = elapsed * speedSound\n\n\t\t# That was the distance there and back so halve the value\n\t\tdistance = distance / 2\n\n\t\t# Store MAX readings, compute std deviation\n\t\tlist.append(distance)\n\t\treadCount += 1\n\n\t\tif readCount >= MAX:\n\t\t\treadCount = 0\n\t\t\tstddev = computeStdDev(list)\n\t\t\tlist = []\n\t\t\tif stddev > LODEV and stddev <= HIDEV:\n\t\t\t\tprint(\"Something is moving. Distance={0:5.1f} cm, {1:5.2f} ft. 
StdDev={2:5.2f}\".format(distance,distance*CM2FT,stddev))\n\t\t\telif stddev > HIDEV:\n\t\t\t\tprint(\"High deviation:{0:5.2f}\".format(stddev))\nexcept KeyboardInterrupt:\n\tprint(\"Quitting\")\n\n# Reset GPIO settings\nGPIO.cleanup()\n","repo_name":"dennishvo/ultrasonic","sub_path":"mdetect.py","file_name":"mdetect.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"73342038807","text":"import os\nfrom io import BytesIO\n\nfrom flask import flash, jsonify, redirect, request, session\nfrom PIL import Image\nfrom werkzeug.exceptions import NotFound\nfrom wtforms import fields as wtforms_fields\nfrom wtforms.validators import DataRequired\n\nfrom indico.core.db import db\nfrom indico.modules.events import EventLogRealm\nfrom indico.modules.events.controllers.base import RegistrationRequired, RHDisplayEventBase\nfrom indico.modules.events.layout import layout_settings, logger, theme_settings\nfrom indico.modules.events.layout.forms import (ConferenceLayoutForm, CSSForm, CSSSelectionForm,\n LectureMeetingLayoutForm, LogoForm)\nfrom indico.modules.events.layout.util import get_css_file_data, get_css_url, get_js_url, get_logo_data\nfrom indico.modules.events.layout.views import WPLayoutEdit\nfrom indico.modules.events.management.controllers import RHManageEventBase\nfrom indico.modules.events.models.events import EventType\nfrom indico.modules.events.views import WPConferenceDisplay\nfrom indico.modules.logs import LogKind\nfrom indico.modules.logs.util import make_diff_log\nfrom indico.util.fs import secure_filename\nfrom indico.util.i18n import _\nfrom indico.util.string import crc32\nfrom indico.web.flask.templating import get_template_module\nfrom indico.web.flask.util import send_file, url_for\nfrom indico.web.forms import fields as indico_fields\nfrom indico.web.forms.base import FormDefaults, IndicoForm\nfrom indico.web.util import _pop_injected_js, jsonify_data\n\n\nclass RHLayoutBase(RHManageEventBase):\n pass\n\n\ndef _make_theme_settings_form(event, theme):\n try:\n settings = theme_settings.themes[theme]['user_settings']\n except KeyError:\n return None\n form_class = type('ThemeSettingsForm', (IndicoForm,), {})\n for name, field_data in settings.items():\n field_type = field_data['type']\n field_class = getattr(indico_fields, field_type, None) or getattr(wtforms_fields, field_type, None)\n if not field_class:\n raise Exception(f'Invalid field type: {field_type}')\n label = field_data['caption']\n description = field_data.get('description')\n validators = [DataRequired()] if field_data.get('required') else []\n field = field_class(label, validators, description=description, **field_data.get('kwargs', {}))\n setattr(form_class, name, field)\n\n defaults = {name: field_data.get('defaults') for name, field_data in settings.items()}\n if theme == event.theme:\n defaults.update(layout_settings.get(event, 'timetable_theme_settings'))\n\n return form_class(csrf_enabled=False, obj=FormDefaults(defaults), prefix='tt-theme-settings-')\n\n\nclass RHLayoutTimetableThemeForm(RHLayoutBase):\n def _process(self):\n form = _make_theme_settings_form(self.event, request.args['theme'])\n if not form:\n return jsonify()\n tpl = get_template_module('forms/_form.html')\n return jsonify(html=tpl.form_rows(form), js=_pop_injected_js())\n\n\nclass RHLayoutEdit(RHLayoutBase):\n def _process_request(self):\n if self.event.type_ == EventType.conference:\n return self._process_conference()\n else:\n return 
self._process_lecture_meeting()\n\n def _process_GET(self):\n return self._process_request()\n\n def _process_POST(self):\n old_values = layout_settings.get_all(self.event)\n ret = self._process_request()\n new_values = layout_settings.get_all(self.event)\n # Skip `timetable_theme_settings` as they are dynamically generated from themes.yaml\n changes = {k: (old_values[k], v) for k, v in new_values.items()\n if old_values[k] != v and k != 'timetable_theme_settings'}\n if changes:\n form_cls = ConferenceLayoutForm if self.event.type_ == EventType.conference else LectureMeetingLayoutForm\n form = form_cls(event=self.event)\n self.event.log(EventLogRealm.management, LogKind.change, 'Layout', summary='Layout was updated',\n user=session.user, data={'Changes': make_diff_log(changes, form.log_fields_metadata)})\n return ret\n\n def _get_form_defaults(self):\n defaults = FormDefaults(**layout_settings.get_all(self.event))\n defaults.timetable_theme = self.event.theme\n return defaults\n\n def _process_lecture_meeting(self):\n form = LectureMeetingLayoutForm(obj=self._get_form_defaults(), event=self.event)\n tt_theme_settings_form = _make_theme_settings_form(self.event, form.timetable_theme.data)\n tt_form_valid = tt_theme_settings_form.validate_on_submit() if tt_theme_settings_form else True\n if form.validate_on_submit() and tt_form_valid:\n if tt_theme_settings_form:\n layout_settings.set(self.event, 'timetable_theme_settings', tt_theme_settings_form.data)\n else:\n layout_settings.delete(self.event, 'timetable_theme_settings')\n layout_settings.set_multi(self.event, form.data)\n flash(_('Settings saved'), 'success')\n return redirect(url_for('.index', self.event))\n return WPLayoutEdit.render_template('layout_meeting_lecture.html', self.event, form=form,\n timetable_theme_settings_form=tt_theme_settings_form)\n\n def _process_conference(self):\n form = ConferenceLayoutForm(obj=self._get_form_defaults(), event=self.event)\n css_form = CSSForm()\n logo_form = LogoForm()\n tt_theme_settings_form = _make_theme_settings_form(self.event, form.timetable_theme.data)\n tt_form_valid = tt_theme_settings_form.validate_on_submit() if tt_theme_settings_form else True\n if form.validate_on_submit() and tt_form_valid:\n if tt_theme_settings_form:\n layout_settings.set(self.event, 'timetable_theme_settings', tt_theme_settings_form.data)\n else:\n layout_settings.delete(self.event, 'timetable_theme_settings')\n data = {str(key): value for key, value in form.data.items() if key in layout_settings.defaults}\n layout_settings.set_multi(self.event, data)\n if form.theme.data == '_custom':\n layout_settings.set(self.event, 'use_custom_css', True)\n flash(_('Settings saved'), 'success')\n return redirect(url_for('.index', self.event))\n else:\n if self.event.logo_metadata:\n logo_form.logo.data = self.event\n if self.event.has_stylesheet:\n css_form.css_file.data = self.event\n return WPLayoutEdit.render_template('layout_conference.html', self.event, form=form,\n logo_form=logo_form, css_form=css_form,\n timetable_theme_settings_form=tt_theme_settings_form)\n\n\nclass RHLayoutLogoUpload(RHLayoutBase):\n def _process(self):\n f = request.files['logo']\n try:\n img = Image.open(f)\n except OSError:\n flash(_('You cannot upload this file as a logo.'), 'error')\n return jsonify_data(content=None)\n if img.format.lower() not in {'jpeg', 'png', 'gif'}:\n flash(_('The file has an invalid format ({format})').format(format=img.format), 'error')\n return jsonify_data(content=None)\n if img.mode == 'CMYK':\n flash(_('The logo 
you uploaded is using the CMYK colorspace and has been converted to RGB. Please check if '\n 'the colors are correct and convert it manually if necessary.'), 'warning')\n img = img.convert('RGB')\n image_bytes = BytesIO()\n img.save(image_bytes, 'PNG')\n image_bytes.seek(0)\n content = image_bytes.read()\n self.event.logo = content\n self.event.logo_metadata = {\n 'hash': crc32(content),\n 'size': len(content),\n 'filename': os.path.splitext(secure_filename(f.filename, 'logo'))[0] + '.png',\n 'content_type': 'image/png'\n }\n flash(_('New logo saved'), 'success')\n logger.info(\"New logo '%s' uploaded by %s (%s)\", f.filename, session.user, self.event)\n return jsonify_data(content=get_logo_data(self.event))\n\n\nclass RHLayoutLogoDelete(RHLayoutBase):\n def _process(self):\n self.event.logo = None\n self.event.logo_metadata = None\n flash(_('Logo deleted'), 'success')\n logger.info('Logo of %s deleted by %s', self.event, session.user)\n return jsonify_data(content=None)\n\n\nclass RHLayoutCSSUpload(RHLayoutBase):\n def _process(self):\n f = request.files['css_file']\n try:\n self.event.stylesheet = f.read().decode().strip()\n except UnicodeDecodeError:\n flash(_('CSS files must be ASCII or UTF-8 encoded.'), 'error')\n return jsonify_data(success=False, content=None)\n self.event.stylesheet_metadata = {\n 'hash': crc32(self.event.stylesheet),\n 'size': len(self.event.stylesheet),\n 'filename': secure_filename(f.filename, 'stylesheet.css')\n }\n db.session.flush()\n flash(_('New CSS file saved. Do not forget to enable it (\"Use custom CSS\") after verifying that it is correct '\n 'using the preview.'), 'success')\n logger.info('CSS file for %s uploaded by %s', self.event, session.user)\n return jsonify_data(content=get_css_file_data(self.event))\n\n\nclass RHLayoutCSSDelete(RHLayoutBase):\n def _process(self):\n self.event.stylesheet = None\n self.event.stylesheet_metadata = None\n layout_settings.set(self.event, 'use_custom_css', False)\n flash(_('CSS file deleted'), 'success')\n logger.info('CSS file for %s deleted by %s', self.event, session.user)\n return jsonify_data(content=None)\n\n\nclass RHLayoutCSSPreview(RHLayoutBase):\n def _process(self):\n form = CSSSelectionForm(event=self.event, formdata=request.args, csrf_enabled=False)\n css_url = None\n js_url = None\n if form.validate():\n css_url = get_css_url(self.event, force_theme=form.theme.data, for_preview=True)\n js_url = get_js_url(self.event, force_theme=form.theme.data, for_preview=True)\n return WPConferenceDisplay(self, self.event, css_override_form=form, css_url_override=css_url,\n js_url_override=js_url).display()\n\n\nclass RHLayoutViewStylesheet(RHLayoutBase):\n def _process(self):\n form = CSSSelectionForm(event=self.event, formdata=request.args, csrf_enabled=False)\n if not form.validate() or not form.theme.data:\n raise NotFound\n return redirect(get_css_url(self.event, force_theme=form.theme.data))\n\n\nclass RHLayoutCSSSaveTheme(RHLayoutBase):\n def _process(self):\n form = CSSSelectionForm(event=self.event)\n if form.validate_on_submit():\n layout_settings.set(self.event, 'use_custom_css', form.theme.data == '_custom')\n if form.theme.data != '_custom':\n layout_settings.set(self.event, 'theme', form.theme.data)\n flash(_('Settings saved'), 'success')\n return redirect(url_for('.index', self.event))\n\n\nclass RHLogoDisplay(RHDisplayEventBase):\n def _check_access(self):\n try:\n RHDisplayEventBase._check_access(self)\n except RegistrationRequired:\n # in case registrants need to register they should be able to see 
the logo\n pass\n\n def _process(self):\n if not self.event.has_logo:\n raise NotFound\n metadata = self.event.logo_metadata\n return send_file(metadata['filename'], BytesIO(self.event.logo), mimetype=metadata['content_type'],\n conditional=True)\n\n\nclass RHLayoutCSSDisplay(RHDisplayEventBase):\n def _check_access(self):\n try:\n RHDisplayEventBase._check_access(self)\n except RegistrationRequired:\n # in case registrants need to register they should be able to see the css\n pass\n\n def _process(self):\n if not self.event.has_stylesheet:\n raise NotFound\n data = BytesIO(self.event.stylesheet.encode())\n return send_file(self.event.stylesheet_metadata['filename'], data, mimetype='text/css', conditional=True)\n","repo_name":"indico/indico","sub_path":"indico/modules/events/layout/controllers/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":12377,"program_lang":"python","lang":"en","doc_type":"code","stars":1560,"dataset":"github-code","pt":"31"} +{"seq_id":"6209225538","text":"import pygame\nfrom pygame import sprite\n\n\ndef is_collide_down(rect, platform):\n return rect.colliderect(platform.rect_down)\n\n\ndef is_collide_top(rect, platform):\n return rect.colliderect(platform.rect_up)\n\n\ndef is_collide_right(rect, platform):\n return rect.colliderect(platform.rect_right)\n\n\ndef is_collide_left(rect, platform):\n return rect.colliderect(platform.rect_left)\n\n\ndef is_rect_stand_on_block(platform_group, under_player_rect):\n for platform in platform_group:\n if under_player_rect.colliderect(platform.rect_up_upper):\n return True\n\n return False\n\n\ndef is_above_water(screen, rect, platform_group):\n full_height_rect = pygame.Rect(rect.x, 0, rect.width, screen.get_height())\n\n for platform in platform_group:\n if full_height_rect.colliderect(platform.rect):\n return False\n return True\n\n\nclass Player(sprite.Sprite):\n def __init__(self):\n sprite.Sprite.__init__(self)\n self.walk_textures = []\n self.run_textures = []\n self.walk_texture_width = 17\n self.run_texture_width = 20\n self.texture_height = 16\n\n walk_full_image = pygame.image.load(\"img/player/cat_0/cat_0_walk.png\").convert_alpha()\n run_full_image = pygame.image.load(\"img/player/cat_0/cat_0_run.png\").convert_alpha()\n\n for i in range(walk_full_image.get_width() // self.walk_texture_width):\n self.walk_textures.append(pygame.transform.scale(\n walk_full_image.subsurface(i * self.walk_texture_width, 0, self.walk_texture_width,\n self.texture_height),\n (self.walk_texture_width * 3, self.texture_height * 3)))\n\n for i in range(walk_full_image.get_width() // self.run_texture_width):\n self.run_textures.append(pygame.transform.scale(\n run_full_image.subsurface(i * self.run_texture_width, 0, self.run_texture_width,\n self.texture_height), (self.run_texture_width * 3, self.texture_height * 3)))\n\n self.image = self.run_textures[0]\n self.rect = self.image.get_rect()\n\n self.death = False\n\n self.speed = 2\n self.posX = 200\n self.posY = 300\n\n self.jumping = False\n self.falling = False\n self.jump_index = 0\n self.walk = 0\n self.left = True\n\n self.run_animation_index = 1\n\n def walk_x(self, x):\n self.walk = x\n\n if x != 0:\n self.left = x < 0\n\n def jump(self):\n if self.jumping:\n return\n\n self.jumping = True\n self.falling = False\n self.jump_index = 0\n\n def is_gravity_active(self, screen, platform, platform_group, new_pos_x):\n if self.jumping: # wenn er Springt, dann wird durch den jump_index, die Schwerkraft geregelt\n return False\n\n if self.rect.y >= platform.rect_up.y: 
# the block is not below the player\r\n return False\r\n\r\n under_player_rect = pygame.Rect(self.rect.x + new_pos_x - 1, self.rect.y + self.rect.height + 2,\r\n self.rect.width - 1, 1)\r\n\r\n if under_player_rect.colliderect(platform.rect_up):\r\n return False\r\n\r\n if is_collide_top(pygame.Rect(self.rect.x + new_pos_x - 1, self.rect.y + self.rect.height,\r\n self.rect.width - 1,\r\n screen.get_height() - self.rect.y + self.rect.height), platform):\r\n return False\r\n\r\n return not is_rect_stand_on_block(platform_group, under_player_rect)\r\n\r\n def reset(self):\r\n self.death = False\r\n\r\n self.speed = 2\r\n self.posX = 200\r\n self.posY = 310\r\n\r\n self.jumping = False\r\n self.falling = False\r\n self.jump_index = 0\r\n self.walk = 0\r\n self.left = True\r\n\r\n self.run_animation_index = 1\r\n\r\n def update_image(self):\r\n if self.walk != 0:\r\n self.run_animation_index += 0.3\r\n\r\n if self.run_animation_index >= 30:\r\n self.run_animation_index = 0\r\n\r\n self.image = self.run_textures[int(self.run_animation_index) // 10]\r\n\r\n else:\r\n self.image = self.walk_textures[0]\r\n\r\n if self.left:\r\n self.image = pygame.transform.flip(self.image, True, False)\r\n\r\n def update_position(self, screen, map):\r\n new_pos_x = self.walk * self.speed\r\n\r\n rect = pygame.Rect(self.rect.x + new_pos_x - 1, self.rect.y, self.rect.width - 1, self.rect.height)\r\n\r\n for platform in map.platform_group:\r\n\r\n if is_collide_top(rect, platform) and \\\r\n not is_collide_left(rect, platform) and \\\r\n not is_collide_right(rect, platform): # block above\r\n self.falling = False\r\n self.jumping = False\r\n self.posY = platform.rect_up.y - platform.rect_up.height - self.rect.height\r\n\r\n elif self.is_gravity_active(screen, platform, map.platform_group,\r\n new_pos_x): # not jumping, but not standing on any block either\r\n self.posY += 1\r\n\r\n if is_collide_down(rect, platform): # block below\r\n self.falling = True\r\n\r\n if is_collide_left(rect, platform) and not is_collide_down(rect, platform): # block to the left\r\n new_pos_x = platform.rect_left.x - (self.rect.x + self.rect.width)\r\n\r\n if is_collide_right(rect, platform) and not is_collide_down(rect, platform): # block to the right\r\n new_pos_x = platform.rect_right.x - self.rect.x\r\n\r\n if is_above_water(screen, rect, map.platform_group):\r\n self.posY += 1\r\n\r\n self.posX += new_pos_x\r\n if self.posX <= 0:\r\n self.posX = 0\r\n\r\n self.rect.x = screen.get_width() / 2\r\n\r\n if self.jumping:\r\n if self.falling and self.jump_index <= 0:\r\n self.falling = False\r\n self.jumping = False\r\n elif self.falling:\r\n self.jump_index -= 0.2\r\n elif self.jump_index >= 20:\r\n self.falling = True\r\n else:\r\n self.jump_index += 0.2\r\n\r\n self.rect.y = self.posY + -self.jump_index * 10\r\n else:\r\n self.rect.y = self.posY\r\n\r\n def test_death(self, screen):\r\n if self.rect.y + self.rect.height >= screen.get_height() - 16:\r\n self.rect.y = screen.get_height()\r\n self.death = True\r\n\r\n def update(self, screen, map):\r\n if not self.death:\r\n self.update_position(screen, map)\r\n self.test_death(screen)\r\n self.update_image()\r\n","repo_name":"Spark61/Jump-and-run","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21628795032","text":"class AltitudeSum:\r\n def __init__(self): #ut\r\n self.reset()\r\n self._enabled = True\r\n\r\n def reset(self): #ut\r\n self.sum = 0\r\n self._last_val = None\r\n self.min = 0\r\n self.max = 0\r\n\r\n def process(self, val, delta, is_riding): #ut\r\n is_enabled = self._enabled and is_riding\r\n if self._last_val == None or not is_enabled:\r\n self._last_val = val\r\n else:\r\n 
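# change relative to the last accepted sample; only climbs of at least delta are added to the running sum below\r\n 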
diff = val - self._last_val\n if diff >= delta:\n self.sum += diff\n if abs(diff) >= delta:\n self._last_val = val\n\n if is_enabled:\n self.min = val if self.min == 0 else min(self.min, val)\n self.max = max(self.max, val)\n \n def enable(self, enabled): #ut\n self._enabled = enabled","repo_name":"snst/bikecomputer","sub_path":"src/altitude_sum.py","file_name":"altitude_sum.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22562722687","text":"import re\nimport logging\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base, declared_attr\nfrom sqlalchemy.orm import sessionmaker\n\nfrom .config import SQL_DB_URI\n\nlogger = logging.getLogger(__name__)\nengine = create_engine(SQL_DB_URI)\n# had to remove that for mysql : connect_args={\"check_same_thread\": False}\n\nSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n\n\n# Dependency\ndef get_db():\n logger.info(\"Creating db\")\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\ndef resolve_table_name(name):\n \"\"\"Resolves table names to their mapped names.\"\"\"\n names = re.split(\"(?=[A-Z])\", name) # noqa\n return \"_\".join([x.lower() for x in names if x])\n\n\nclass CustomBase:\n @declared_attr\n def __tablename__(self):\n return resolve_table_name(self.__name__)\n\n\nBase = declarative_base(cls=CustomBase)\n","repo_name":"RaphOb/booking","sub_path":"api/booking/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20357401505","text":"import json\nimport pickle\nfrom sklearn.preprocessing import StandardScaler\n\nfrom flask import Flask,request,app,jsonify,url_for,render_template\nimport numpy as np\nimport pandas as pd\n\napp=Flask(__name__)\n## Load the model\nregmodel=pickle.load(open('regmodel.pkl','rb'))\nscalar=pickle.load(open('scaling.pkl','rb'))\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n\ndef preprocess_dict(data):\n # Drop the 'Car_Name' key from the dictionary\n data.pop('Car_Name', None)\n\n # Calculate the 'Age' key based on the 'Year' key\n data['Age'] = 2023 - data['Year']\n\n # Drop the 'Year' key from the dictionary\n data.pop('Year', None)\n\n # Add Fuel_Type_Diesel, Fuel_Type_Petrol, and Transmission_Manual keys\n fuel_type = data.get('Fuel_Type', None)\n if fuel_type is not None and isinstance(fuel_type, str):\n if fuel_type.lower() == 'diesel':\n data['Fuel_Type_Diesel'] = 1\n data['Fuel_Type_Petrol'] = 0\n elif fuel_type.lower() == 'petrol':\n data['Fuel_Type_Diesel'] = 0\n data['Fuel_Type_Petrol'] = 1\n else:\n print(f\"Could not determine Fuel_Type_Diesel and Fuel_Type_Petrol for fuel type '{fuel_type}'\")\n\n transmission = data.get('Transmission', None)\n if transmission is not None and isinstance(transmission, str):\n if transmission.lower() == 'manual':\n data['Transmission_Manual'] = 1\n else:\n data['Transmission_Manual'] = 0\n else:\n print(f\"Invalid value '{transmission}' for key 'Transmission'\")\n\n data.pop('Seller_Type', None)\n data.pop('Fuel_Type', None)\n data.pop('Transmission', None)\n\n return data\n\n\n@app.route('/predict_api',methods=['POST'])\ndef predict_api():\n\n data=request.json['data']\n\n data = preprocess_dict(data)\n\n print(np.array(list(data.values())).reshape(1,-1))\n\n 
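# reuse the scaler loaded from scaling.pkl so inference features match the training-time scaling\n    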
new_data=scalar.transform(np.array(list(data.values())).reshape(1,-1))\n output=regmodel.predict(new_data)\n print(output[0])\n\n return jsonify(output[0])\n\n\n@app.route('/predict',methods=['POST'])\ndef predict():\n\n data = request.form.to_dict()\n for key in ['Year', 'Present_Price', 'Kms_Driven', 'Owner']:\n data[key] = float(data[key])\n\n # Preprocess the data\n data = preprocess_dict(data)\n\n new_data=scalar.transform(np.array(list(data.values())).reshape(1,-1))\n output=regmodel.predict(new_data)\n print(output[0])\n\n return render_template(\"home.html\",prediction_text=\"The Car price prediction is {}\".format(output)+\"Lakhs\")\n\n\nif __name__==\"__main__\":\n app.run(debug=True)","repo_name":"Yuvraj-Sharma-2000/carprice","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16507088828","text":"from template import BasicStrategy\n\nclass DoubleMAStrategy(BasicStrategy):\n def __init__(self, initial_data) -> None:\n super(DoubleMAStrategy, self).__init__(initial_data)\n \n def get_signal(self, parameters:list) -> list:\n df2 = self.df2\n fast_window, slow_window = parameters\n\n sma = df2['close'].rolling(fast_window).mean().shift()\n lma = df2['close'].rolling(slow_window).mean().shift()\n ma_diff = list(sma - lma)\n\n signal = [0]*slow_window\n for i in range(slow_window, len(ma_diff)-1):\n if ma_diff[i] > 0: \n signal.append(1)\n elif ma_diff[i] < 0:\n signal.append(-1)\n else:\n signal.append(signal[-1]) # 跟随趋势\n signal.append(-1)\n return signal","repo_name":"algo23-Jinzhao/Assignment2","sub_path":"择时回测系统(向量化框架)/core/strategy/doubleMAStrategy.py","file_name":"doubleMAStrategy.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19896929905","text":"import sqlite3\r\nfrom flask import jsonify, url_for\r\n\r\nDB_PATH = './profiles.db' \r\nNOTSTARTED = 'Not Started'\r\n\r\ndef make_public_profile(row):\r\n res = {}\r\n for field in row.keys():\r\n if field == 'profile_id':\r\n res['uri'] = url_for('get_profile', profile_id = row['profile_id'], _external = True)\r\n else:\r\n res[field] = row[field]\r\n\r\n return res\r\n\r\ndef get_all_profiles():\r\n try:\r\n conn = sqlite3.connect(DB_PATH)\r\n conn.row_factory = sqlite3.Row\r\n c = conn.cursor()\r\n c.execute('select * from profiles')\r\n rows = c.fetchall()\r\n result = jsonify( { 'profiles': list(map(make_public_profile, rows)) } )\r\n return result\r\n except Exception as e:\r\n print('Error: ', e)\r\n return None\r\n\r\ndef get_profile(profile_id):\r\n try:\r\n conn = sqlite3.connect(DB_PATH)\r\n conn.row_factory = sqlite3.Row\r\n c = conn.cursor()\r\n c.execute(\"select * from profiles where profile_id=?;\" , [profile_id])\r\n r = c.fetchone()\r\n return jsonify(make_public_profile(r))\r\n except Exception as e:\r\n print('Error: ', e)\r\n return None\r\n\r\n\r\ndef add_user(nickname, profile_picture, gender, email):\r\n try:\r\n conn = sqlite3.connect(DB_PATH)\r\n c = conn.cursor()\r\n c.execute('insert into profiles(nickname, pp, gender, email) values(?,?, ?, ?)', (nickname, profile_picture, gender, email))\r\n conn.commit()\r\n result = get_profile(c.lastrowid)\r\n return result\r\n except Exception as e:\r\n print('Error: ', e)\r\n return None\r\n\r\ndef add_user_key(profile_id, nickname, profile_picture, gender, email):\r\n try:\r\n conn = sqlite3.connect(DB_PATH)\r\n c = 
conn.cursor()\r\n c.execute('insert into profiles(profile_id, nickname, pp, gender, email) values(?, ?,?, ?, ?)', (profile_id, nickname, profile_picture, gender, email))\r\n conn.commit()\r\n result = profile_id\r\n return result\r\n except Exception as e:\r\n print('Error: ', e)\r\n return None\r\n\r\ndef update_profile(profile_id, nickname, profile_picture, gender, email):\r\n try:\r\n conn = sqlite3.connect(DB_PATH)\r\n c = conn.cursor()\r\n if (nickname):\r\n c.execute('update profiles set nickname=? where profile_id=?', (nickname, profile_id))\r\n if (profile_picture):\r\n c.execute('update profiles set pp=? where profile_id=?', (profile_picture, profile_id))\r\n if (gender):\r\n c.execute('update profiles set gender=? where profile_id=?', (gender, profile_id))\r\n if (email):\r\n c.execute('update profiles set email=? where profile_id=?', (email, profile_id))\r\n conn.commit()\r\n result = get_profile(profile_id)\r\n return result\r\n except Exception as e:\r\n print('Error: ', e)\r\n return None\r\n\r\ndef remove_profile(profile_id):\r\n try:\r\n conn = sqlite3.connect(DB_PATH)\r\n c = conn.cursor()\r\n c.execute('DELETE FROM profiles WHERE profile_id=?', [profile_id])\r\n conn.commit()\r\n return jsonify( { 'result': True } )\r\n except Exception as e:\r\n print('Error: ', e)\r\n return None","repo_name":"Oanikulin/RESTPprofile","sub_path":"server/helper_profile.py","file_name":"helper_profile.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40148433589","text":"#coding:utf8\n'''\nCreated on October 24, 2017\n\n@author: Administrator\n'''\nf=open('simple.txt','a+')\ns='文本文件的读取方法\\n文本文件的写入方法\\n'\nf.write(s)\nf.close()\n","repo_name":"lbq972149981/PythonBaseComponent","sub_path":"PythonBaseComponent/text7/7-1.py","file_name":"7-1.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19777539809","text":"import tensorflow as tf\nimport numpy as np\nfrom math import ceil\n\ntf.set_random_seed(1)\nn = 6\nf = 3\ns = 2\nstartw = 0\nendw = 3\nstarth = 0\nendh = 3\n\narr = np.arange(n * n * 3).reshape((1, n, n, 3))\n# pad_total = ((n - 1) * s + f - n)\n# if pad_total % 2 != 0:\n# pad_total += 1\n# print('pad_total', pad_total)\n# arr = np.pad(arr,\n# ((0, 0), (pad_total // 2, pad_total // 2), (pad_total // 2, pad_total // 2), (0, 0)),\n# 'constant')\nprint('arr shape', arr.shape)\narr1 = arr[:, starth:endh, startw:endw, 0]\narr2 = arr[:, starth:endh, startw:endw, 1]\narr3 = arr[:, starth:endh, startw:endw, 2]\nprint('convolution sum', arr1.sum() + arr2.sum() + arr3.sum())\nprint('arr1', arr1)\n# input = tf.Variable(tf.random_normal([1, 51, 50, s]))\ninp = tf.constant(arr, dtype=tf.float32)\nprint('inp', inp)\n# filter = tf.Variable(tf.random_normal([3, 3, 3, 1]))\nfil = tf.ones([f, f, 3, 1])\nop = tf.nn.conv2d(inp, fil, strides=[1, s, s, 1], padding='SAME')\npool = tf.nn.max_pool(inp, [1, f, f, 1], [1, s, s, 1], padding='SAME')\n\nwith tf.Session() as sess:\n # sess.run(tf.global_variables_initializer())\n inp_run = sess.run(inp)\n fil_run = sess.run(fil)\n op_run = sess.run(op)\n pool_run = sess.run(pool)\n # print('fil_run', fil_run, type(fil_run))\n h = 7\n w = 7\n print('op_run one:', op_run[:, :h, :w, 0], op_run.shape)\n # print('op_run', op_run)\n print('pool_run', pool_run[:, :h, :w, 0], 
pool_run.shape)\n","repo_name":"JasonDu1993/TensorFlowTest","sub_path":"tf_conv_pool.py","file_name":"tf_conv_pool.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36169764798","text":"import time\nimport numpy as np\nfrom riglib.experiment import traits\nimport scipy.io as sio\nfrom riglib.bmi import extractor\n\n\nchannels = np.arange(1, 97)\nn_chan = len(channels)\n\nextractor_cls = extractor.LFPMTMPowerExtractor\n\nclass BlackrockData(object):\n# class BlackrockData(traits.HasTraits):\n '''Stream Blackrock neural data.'''\n\n def init(self):\n from riglib import blackrock, source\n\n if 'spike' in extractor_cls.feature_type: # e.g., 'spike_counts'\n self.neurondata = source.DataSource(blackrock.Spikes, channels=channels)\n elif 'lfp' in extractor_cls.feature_type: # e.g., 'lfp_power'\n self.neurondata = source.MultiChanDataSource(blackrock.LFP, channels=channels)\n else:\n raise Exception(\"Unknown extractor class, unable to create data source object!\")\n\n try:\n super(BlackrockData, self).init()\n except:\n print(\"BlackrockData: running without a task\")\n\n def run(self):\n self.neurondata.start()\n\n\n\nif __name__ == '__main__':\n\n self = BlackrockData()\n self.init()\n self.run()\n\n n_secs = 60*10\n update_rate = 0.1\n N = int(n_secs / update_rate)\n\n samp_freq = 2000\n n_samp = int(n_secs * samp_freq) # approx number of samples we'll collect per channel\n\n data = np.zeros((n_chan, 2*n_samp))\n idxs = np.zeros(n_chan)\n\n for k in range(N):\n t_start = time.time()\n\n new_data = self.neurondata.get_new(channels=channels)\n\n for row in range(n_chan):\n d = new_data[row]\n #print row, d.shape\n idx = idxs[row]\n data[row, idx:idx+len(d)] = d\n idxs[row] += len(d)\n\n t_elapsed = time.time() - t_start\n time.sleep(update_rate - t_elapsed)\n\n self.neurondata.stop()\n \n save_dict = dict()\n save_dict['data'] = data\n\n sio.savemat('cbpy_lfp_data.mat', save_dict)\n","repo_name":"carmenalab/brain-python-interface","sub_path":"tests/ibmi/lfp_data_acq_test.py","file_name":"lfp_data_acq_test.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"3069239566","text":"from django.db import models\nfrom django.contrib.auth.models import User, Group\nfrom localisation.models import *\nfrom etatcivil.models import *\nfrom vieprofessionnelle.models import *\n\nclass Parametre(models.Model):\n monnaie = models.CharField(max_length=10, null=True, blank=True, default=\"\")\n id_chapeau = models.CharField(max_length=10, null=True, blank=True, default=\"\")\n id_secteuragricole = models.IntegerField(default=0)\n id_secteurfemmeactive = models.IntegerField(default=0)\n id_secteurinformel = models.IntegerField(default=0)\n id_accident = models.IntegerField(default=0)\n id_deces = models.IntegerField(default=0)\n id_responsable = models.IntegerField(default=0)\n id_tresoriere = models.IntegerField(default=0)\n id_conseillere1 = models.IntegerField(default=0)\n id_conseillere2 = models.IntegerField(default=0)\n\n# Liste des variables Globales\nLISTE_MODELS = {\n 'Authentification': {'Utilisateur': User, 'Groupe': Group, },\n 'Localisation': {\n 'District': District,\n 'Région': Region,\n 'Département': Departement,\n 'Ville': Ville,\n 'Commune': Commune,\n 'Quartier': Quartier,\n 'Marché': Marche,\n },\n 'Etat_civil': {\n 'Type_de_pièce': TypePiece,\n 'Niveau': Niveau,\n 'Niveau_scolaire': 
NiveauScolaire,\n 'Sexe': Sexe,\n 'Nationalité': Nationalite,\n 'Situation_matrimoniale': SituationMatrimoniale,\n },\n 'Vie_professionnelle': {\n 'Type_de_secteur': TypeSecteur,\n 'Secteur': Secteur,\n 'Membre': Membre,\n 'Secteur_agricole': SecteurAgricole,\n 'Secteur_informel': SecteurInformel,\n 'Secteur_femme_active': SecteurFemmeActive,\n 'Type_de_parent': TypeParent,\n 'Parent': Parent,\n 'Type_état_de_santé': TypeEtatSante,\n 'Etat_de_santé': EtatSante,\n 'Type_document': TypeDocument,\n 'Document': Document,\n 'Chapeau': Chapeau,\n 'Type_de_personne_ressource': TypePersonneRessource,\n 'Personne_ressource': PersonneRessource,\n 'Quantité_du_groupement': QuantiteGroupement,\n 'Type_de_responsabilité': TypeResponsabilite,\n 'Montant_du_financement': MontantFinancement,\n },\n 'Paramètres': {\n 'Paramètre': Parametre,\n },\n}\n\n","repo_name":"fakobekobe/iris","sub_path":"utilisateur/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1764530472","text":"from collections import OrderedDict\r\n\r\nmessage = \"encryptthis\"\r\nkey = \"thisiskey\"\r\nordm = [ord(i)-97 for i in message]\r\nkl = list(OrderedDict.fromkeys(key))\r\nabcd = [chr(i) for i in range(97,123)]\r\nprint(abcd)\r\nfor i in abcd:\r\n if i not in kl:\r\n kl.append(i)\r\nprint(kl)\r\ncode = \"\"\r\nfor j in ordm:\r\n code += kl[j]\r\nprint(message)\r\nprint(code)\r\n\r\nordc = [ord(i)-97 for i in code]\r\ndec = \"\"\r\nfor j in code:\r\n dec += abcd[kl.index(j)]\r\nprint(dec) \r\n \r\n","repo_name":"maurya-ashutosh/cs-bachelors","sub_path":"Ciphers/OtherSubstitutionCipher.py","file_name":"OtherSubstitutionCipher.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3357292920","text":"# def siftdown(heap, root, child):\n# item = heap[child]\n# while child > root:\n# parent = (child-1)>>1\n# parent_item = heap[parent]\n# if item < parent_item:\n# heap[child] = parent_item\n# child = parent\n# continue\n# break\n# heap[child] = item\n\n# def siftup(heap, parent):\n# leaf = len(heap)\n# root = parent\n# item = heap[parent]\n# child = (parent<<1) + 1\n# while child < leaf:\n# right = child + 1\n# if right < leaf and heap[right] < heap[child]:\n# child = right\n# heap[parent] = heap[child]\n# parent = child\n# child = (parent<<1) + 1\n# heap[parent] = item\n# siftdown(heap, root, parent)\n\n# def heapify(iterable):\n# n = len(iterable)\n# for i in reversed(range(n//2)):\n# siftup(iterable, i)\n\n# def heappop(heap):\n# item = heap.pop()\n# if heap:\n# heap[0], item = item, heap[0]\n# siftup(heap, 0)\n# return item\n\n# def heappush(heap, item):\n# n = len(heap)\n# heap.append(item)\n# siftdown(heap, 0, n-1)\n\n# def heappushpop(heap, item):\n# if heap and heap[0] < item:\n# heap[0], item = item, heap[0]\n# siftup(heap, 0)\n# return item\n\n# def heapreplace(heap, item):\n# heap[0], item = item, heap[0]\n# siftup(heap, 0)\n# return item\n\n\n# If available, use C implementation <- 도저히 따라 잡을 수 없었던 이유...\nfrom heapq import * \n\ndef solution(scoville, K):\n heapify(scoville)\n cnt = 0\n while scoville[0] < K:\n if len(scoville) == 1: return -1\n heappush(scoville, heappop(scoville) + (heappop(scoville)<<1))\n cnt += 1\n return cnt\n","repo_name":"SSAFY-algamza/ssafy-algorithm-study","sub_path":"oror-sine/PGS/PGS_더 맵게.py","file_name":"PGS_더 
맵게.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32353435069","text":"import sys\n\n# n: number of members, m: number of friendship relations\n\nn, m = map(int, input().split())\nw = list(map(int, input().split()))\nw.insert(0, 0)\nd = {}\nans = 0\nfor i in range(m):\n p, r = map(int, input().split())\n if p in d:\n d[p].append(r)\n else:\n d[p] = [r]\n if r in d:\n d[r].append(p)\n else:\n d[r] = [p]\n\n\n# print(d.items())\nd_list = list(d.items())\n#print(d_list, len(d_list))\n\n# print(d_list[1][0])\n# print(d_list[1][1])\n# print(d_list[1][1][0])\n\n\nfor i in range(len(d_list)):\n p = d_list[i][0]\n best = True\n for j in range(len(d_list[i][1])):\n temp = d_list[i][1][j]\n if w[p] <= w[temp]:\n best = False\n break\n if best == True:\n ans += 1 \n\nif len(d_list) != n:\n ans += n - len(d_list)\n\n\n\n\nprint(ans)","repo_name":"suleesulee/TIL","sub_path":"Algorithm/Softeer/lv3_우물안개구리.py","file_name":"lv3_우물안개구리.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6354302617","text":"import sys\nimport json\nimport gzip\n\n\ndef read_reddit(path):\n\twith gzip.open(path, 'rb') as f:\n\t\tposts_raw = f.read().decode('utf-8')\n\tposts = json.loads(posts_raw)\n\n\t#filter for only title + self-text and timestamp\n\tfiltered_posts = []\n\tfor post in posts:\n\t\ttokens = post[\"title\"] + post[\"selftext\"]\n\t\tfiltered_posts.append((post[\"created_utc\"], tokens))\n\n\treturn filtered_posts\n","repo_name":"enewe101/tot","sub_path":"tot/reddit_reader.py","file_name":"reddit_reader.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43584448514","text":"\"\"\"Logger.\"\"\"\n\nfrom yoyo import step\n\n__depends__ = {\"20230123_01_IMTPT-auth\"}\n\nsteps = [\n step(\n \"\"\"\n create table status(\n id serial primary key,\n name name unique)\n \"\"\",\n \"drop table status\",\n ),\n step(\n \"\"\"\n create table type(\n id serial primary key,\n name name unique)\n \"\"\",\n \"drop table type\",\n ),\n step(\n \"\"\"\n create table service(\n id serial primary key,\n name name unique)\n \"\"\",\n \"drop table service\",\n ),\n step(\n \"\"\"\n create table logger(\n id bigserial primary key,\n status_id integer not null,\n type_id integer not null,\n service_id integer not null,\n msg varchar(2056) not null,\n date_create timestamp not null default now(),\n constraint logger_status_fk\n foreign key(status_id)\n references status(id),\n constraint logger_type_fk\n foreign key(type_id)\n references type(id),\n constraint logger_service_fk\n foreign key(service_id)\n references service(id)\n )\n \"\"\",\n \"drop table logger\",\n ),\n]\n","repo_name":"GauterHeart/PythonArch","sub_path":"PythonArch/migration/20230129_01_DQchu-logger.py","file_name":"20230129_01_DQchu-logger.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24071002910","text":"import torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom utils import torch_skew_symmetric\n\ndef batch_episym(x1, x2, F):\n batch_size, num_pts = x1.shape[0], x1.shape[1]\n x1 = torch.cat([x1, x1.new_ones(batch_size, num_pts,1)], dim=-1).reshape(batch_size, num_pts,3,1)\n x2 = torch.cat([x2, x2.new_ones(batch_size, num_pts,1)], 
dim=-1).reshape(batch_size, num_pts,3,1)\n F = F.reshape(-1,1,3,3).repeat(1,num_pts,1,1)\n x2Fx1 = torch.matmul(x2.transpose(2,3), torch.matmul(F, x1)).reshape(batch_size,num_pts)\n Fx1 = torch.matmul(F,x1).reshape(batch_size,num_pts,3)\n Ftx2 = torch.matmul(F.transpose(2,3),x2).reshape(batch_size,num_pts,3)\n\n ys = x2Fx1**2 * (\n 1.0 / (Fx1[:, :, 0]**2 + Fx1[:, :, 1]**2 + 1e-15) +\n 1.0 / (Ftx2[:, :, 0]**2 + Ftx2[:, :, 1]**2 + 1e-15))\n return ys\n\nclass MatchLoss(object):\n def __init__(self, config):\n self.loss_essential = config.loss_essential\n self.loss_classif = config.loss_classif\n self.ess_loss_margin = config.ess_loss_margin\n self.obj_geod_th = config.obj_geod_th\n self.loss_essential_init_iter = config.loss_essential_init_iter\n\n def weight_estimation(self, gt_geod_d, is_pos, ones):\n dis = torch.abs(gt_geod_d - self.obj_geod_th) / self.obj_geod_th\n\n weight_p = torch.exp(-dis)\n weight_p = weight_p*is_pos\n\n weight_n = ones\n weight_n = weight_n*(1 - is_pos)\n weight = weight_p + weight_n\n\n return weight\n\n def run(self, global_step, data, logits, ys, e_hat, y_hat):\n R_in, t_in, xs, pts_virt = data['Rs'], data['ts'], data['xs'], data['virtPts']\n pts1_virts, pts2_virts = pts_virt[:, :, :2], pts_virt[:,:,2:]\n loss = 0\n classif_loss = 0\n # Classification loss\n with torch.no_grad():\n ones = torch.ones((xs.shape[0], 1)).to(xs.device)\n for i in range(len(logits)):\n gt_geod_d = ys[i]\n is_pos = (gt_geod_d < self.obj_geod_th).type(gt_geod_d.type())\n is_neg = (gt_geod_d >= self.obj_geod_th).type(gt_geod_d.type())\n with torch.no_grad():\n pos = torch.sum(is_pos, dim=-1, keepdim=True)\n pos_num = F.relu(pos - 1) + 1\n neg = torch.sum(is_neg, dim=-1, keepdim=True)\n neg_num = F.relu(neg - 1) + 1\n pos_w = neg_num / pos_num\n pos_w = torch.max(pos_w, ones)\n weight = self.weight_estimation(gt_geod_d, is_pos, ones)\n classif_loss += F.binary_cross_entropy_with_logits(weight * logits[i], is_pos, pos_weight=pos_w)\n\n geod = batch_episym(pts1_virts, pts2_virts, e_hat[-1])\n e_l = torch.min(geod, self.ess_loss_margin*geod.new_ones(geod.shape))\n essential_loss = e_l.mean()\n\n # Check global_step and add essential loss\n if self.loss_essential > 0 and global_step >= self.loss_essential_init_iter:\n loss += self.loss_essential * essential_loss\n if self.loss_classif > 0:\n loss += self.loss_classif * classif_loss\n\n return loss, essential_loss.item(), classif_loss.item()\n","repo_name":"sailor-z/CLNet","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"31"} +{"seq_id":"207415809","text":"from datetime import datetime, date, timedelta\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.db.models import deletion\nfrom django.db.models.query_utils import Q\nfrom pyotp.otp import OTP\nfrom accounts.views import send_otp\nfrom chat.models import Notification\nfrom lab.models import Medias\nfrom django.views.generic.base import View\n# from requests.models import Response\nfrom hospital.models import HospitalMedias, HospitalStaffDoctorSchedual, HospitalStaffDoctors, ServiceAndCharges\nfrom patient import models\nimport patient\nfrom patient.models import Booking, ForSome, Orders, LabTest, PicturesForMedicine, Temp, Slot, phoneOPTforoders\nfrom django.http.response import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import CreateView, 
UpdateView\nfrom accounts.models import CustomUser, DoctorForHospital, HospitalPhones, Hospitals, Labs, OPDTime, Patients, Pharmacy\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.views.generic.list import ListView\nfrom django.contrib import messages\nfrom django.urls.base import resolve, reverse\nfrom django.core.files.storage import FileSystemStorage\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nfrom patient import PaytmChecksum\nfrom django.utils.encoding import force_bytes,force_text,DjangoUnicodeDecodeError\nfrom django.utils.http import urlsafe_base64_encode,urlsafe_base64_decode\nfrom accounts.utils import generate_token\nimport base64\nimport pyotp\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom datetime import datetime,timedelta\nimport random\nimport http.client\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.template.loader import render_to_string\nfrom django.core.mail import EmailMessage, message\nfrom django.conf import settings\nimport ast\nconn = http.client.HTTPConnection(\"2factor.in\")\nfrom django.utils.decorators import method_decorator\nfrom django.db.models.signals import post_save\nfrom channels.layers import get_channel_layer \nfrom django.db import transaction\n# Create your views here.\nclass generateKey:\n @staticmethod\n def returnValue(bookindId):\n return str(bookindId) + str(datetime.date(datetime.now())) + \"Some Random Secret Key\"\n\ndef verifyOTP(request,orderID):\n # try:\n # order = get_object_or_404(Orders,id=orderID) #booking id find\n # except Exception as e:\n # messages.add_message(request,messages.ERROR,\"Booking id number does not Exits\")\n # return HttpResponseRedirect(reverse(\"hospitalsingup\")) # False Call\n # if request.POST:\n # first=request.POST.get(\"first\")\n # second=request.POST.get(\"second\")\n # third=request.POST.get(\"third\")\n # forth=request.POST.get(\"forth\")\n # fifth=request.POST.get(\"fifth\")\n # sixth=request.POST.get(\"sixth\")\n\n # postotp = first+second+third+forth+fifth+sixth #added in one string\n\n # keygen = generateKey()\n # key = base64.b32encode(keygen.returnValue(orderID).encode()) # Generating Key\n # OTP = pyotp.HOTP(key) # HOTP Model\n # if OTP.verify(postotp, order.counter): # Verifying the OTP\n # order.is_booking_Verified = True\n # order.taken_date_time=True\n # order.save()\n # messages.add_message(request,messages.SUCCESS,\"Mobile Verified Successfuly\")\n # #emila message for email verification\n # current_site=get_current_site(request) #fetch domain \n # email_subject='Active your Account',\n # message=render_to_string('accounts/activate.html',\n # {\n # 'user':user,\n # 'domain':current_site.domain,\n # 'uid':urlsafe_base64_encode(force_bytes(user.pk)),\n # 'token':generate_token.make_token(user)\n # } #convert Link into string/message\n # )\n # print(message)\n # email_message=EmailMessage(\n # email_subject,\n # message,\n # settings.EMAIL_HOST_USER,\n # [user.email]\n # )#compose email\n # print(email_message)\n # email_message.send() #send Email\n # messages.add_message(request,messages.SUCCESS,\"Sucessfully Singup Please Verify Your Account Email\") \n return HttpResponseRedirect(reverse(\"dologin\"))\n # return HttpResponseRedirect(reverse(\"dologin\"))\n\n\"\"\"\nPersonal Details of Patients\n\"\"\"\nclass patientdDashboardViews(SuccessMessageMixin,ListView):\n def get(self, request, *args, **kwargs):\n try: \n patient = 
get_object_or_404(Patients, admin=request.user.id)\n\n if patient.fisrt_name and patient.last_name and patient.address and patient.city and patient.zip_Code and patient.state and patient.country and patient.dob and patient.profile_pic and patient.gender and patient.bloodgroup:\n return render(request,\"patient/index.html\") \n else:\n messages.add_message(request,messages.ERROR,\"Some detail still Missing !\")\n \n return render(request,\"patient/patient_update.html\",{'patient':patient})\n except Exception as e:\n return HttpResponse(e)\n \nclass patientdUpdateViews(SuccessMessageMixin,UpdateView):\n def get(self, request, *args, **kwargs):\n try: \n patient = get_object_or_404(Patients, admin=request.user.id)\n return render(request,\"patient/patient_update.html\",{'patient':patient})\n except Exception as e:\n return HttpResponse(e)\n \n def post(self,request, *agrs, **kwargs):\n profile_pic = request.FILES.get('profile_pic')\n name_title = request.POST.get('name_title')\n alternate_mobile = request.POST.get('alternate_mobile')\n address = request.POST.get('address')\n city = request.POST.get('city')\n zip_Code = request.POST.get('zip_Code')\n print(zip_Code)\n state = request.POST.get('state')\n print(state)\n country = request.POST.get('country')\n print(country)\n gender = request.POST.get('gender')\n dob = request.POST.get('dob')\n bloodgroup = request.POST.get('bloodgroup')\n age1 = (date.today() - datetime.strptime(dob, \"%Y-%m-%d\").date()) // timedelta(days=365.2425)\n # import datetime\n # age = (datetime.date.today() - datetime.datetime.strptime(dob, \"%Y-%m-%d\").date())/365\n print(age1)\n try: \n user= request.user\n user.patients.name_title=name_title\n user.patients.fisrt_name=user.first_name\n user.patients.last_name=user.last_name\n if profile_pic:\n fs=FileSystemStorage()\n filename1=fs.save(profile_pic.name,profile_pic)\n profile_pic_url=fs.url(filename1)\n user.patients.profile_pic=profile_pic_url\n user.profile_pic = profile_pic_url\n user.patients.alternate_mobile=alternate_mobile\n user.patients.address=address\n user.patients.city=city\n user.patients.state=state\n user.patients.zip_Code=zip_Code\n user.patients.country=country\n user.patients.gender=gender\n user.patients.dob=dob\n user.patients.bloodgroup=bloodgroup\n user.patients.age=age1\n user.patients.save() \n user.save()\n messages.add_message(request,messages.SUCCESS,\"User Detail updates Successfully !\")\n return HttpResponseRedirect(reverse(\"patient_home\"))\n except Exception as e:\n return HttpResponse(e)\n\n\"\"\"\" \nHospital list and profile\n\"\"\"\nclass HospitalListViews(ListView):\n # context_object_name = \"hospital\"\n paginate_by = 10\n model = Hospitals\n template_name = \"patient/hospital_list.html\"\n # paginate_by=3\n\n def get_queryset(self):\n filter_val=self.request.GET.get(\"filter\",\"\")\n order_by=self.request.GET.get(\"orderby\",\"id\")\n if filter_val!=\"\":\n hospitals=Hospitals.objects.filter( Q(is_verified=True,is_deactive=False,admin__is_active=True) and (Q(hopital_name__contains=filter_val) | Q(about__contains=filter_val) | Q(city__contains=filter_val) | Q(specialist__contains=filter_val))).order_by(order_by)\n else:\n hospitals=Hospitals.objects.filter(is_verified=True,is_deactive=False,admin__is_active=True).order_by(order_by)\n hospital_media_list = []\n for hospital in hospitals:\n medias = HospitalMedias.objects.filter(is_active=True,hospital=hospital) \n hospital_media_list.append({'hospital':hospital,'medias':medias})\n print(hospital_media_list) \n return 
hospital_media_list\n \n def get_context_data(self,**kwargs):\n context=super(HospitalListViews,self).get_context_data(**kwargs)\n context[\"filter\"]=self.request.GET.get(\"filter\",\"\")\n context[\"orderby\"]=self.request.GET.get(\"orderby\",\"id\")\n context[\"all_table_fields\"]=Hospitals._meta.get_fields()\n return context\n\n # def get(self, request, *args, **kwargs): \n # hospitals = Hospitals.objects.filter(is_verified=True,is_deactive=False,admin__is_active=True)\n # hospital_media_list = []\n # for hospital in hospitals:\n # medias = HospitalMedias.objects.filter(is_active=True,hospital=hospital) \n # hospital_media_list.append({'hospital':hospital,'medias':medias})\n # param = {'hospital_media_list':hospital_media_list} \n # return render(request,\"patient/hospital_list.html\",param)\n \nclass HospitalDetailsViews(DetailView):\n def get(self, request, *args, **kwargs):\n hosital_id=kwargs['id']\n hospital = get_object_or_404(Hospitals,is_verified=True,is_deactive=False,id=hosital_id)\n doctors = HospitalStaffDoctors.objects.filter(is_active=True,hospital=hospital)\n hospitalservice = ServiceAndCharges.objects.filter(user=hospital.admin)\n hospitalstaffdoctor_list = []\n for hospitalstaffdoctor in doctors:\n hospitalstaffdoctorschedual = HospitalStaffDoctorSchedual.objects.filter(hospitalstaffdoctor=hospitalstaffdoctor)\n opd_time = []\n for dcsh in hospitalstaffdoctorschedual:\n if dcsh.work == \"OPD\":\n start_time = dcsh.start_time\n end_time = dcsh.end_time\n opd_time.append({'start_time':start_time,'end_time':end_time})\n hospitalstaffdoctor_list.append({'hospitalstaffdoctor':hospitalstaffdoctor,'hospitalstaffdoctorschedual':hospitalstaffdoctorschedual})\n param = {'hospital':hospital,'hospitalstaffdoctor_list':hospitalstaffdoctor_list,'hospitalservice':hospitalservice} \n return render(request,\"patient/hospital_details.html\",param)\n \nclass DoctorsBookAppoinmentViews(SuccessMessageMixin,View):\n def get(self, request, *args, **kwargs):\n hosital_id=kwargs['id']\n hositaldcotorid_id=kwargs['did']\n hospital = get_object_or_404(Hospitals,is_verified=True,is_deactive=False,id=hosital_id)\n hospitalstaffdoctor = get_object_or_404(HospitalStaffDoctors,is_active=True,id=hositaldcotorid_id)\n hospitalservice = ServiceAndCharges.objects.filter(user=hospital.admin)\n opdtime = OPDTime.objects.get(user=hospital.admin) \n someone = ForSome.objects.filter(patient=request.user.patients)\n hospitalstaffdoctorschedual =HospitalStaffDoctorSchedual.objects.filter(hospitalstaffdoctor=hospitalstaffdoctor)\n opd_time = []\n opd_time.append(opdtime.opening_time)\n \n # opd_time = []\n # for dcsh in hospitalstaffdoctorschedual:\n # if dcsh.work == \"OPD\":\n # shift = dcsh.shift\n # start_time = dcsh.start_time\n # end_time = dcsh.end_time\n # opd_time.append({'shift':shift,'start_time':start_time,'end_time':end_time})\n \n param = {'hospital':hospital,'hospitalservice':hospitalservice,'hospitalstaffdoctor':hospitalstaffdoctor,'hospitalstaffdoctorschedual':hospitalstaffdoctorschedual,'opdtime':opdtime,'someones':someone} \n return render(request,\"patient/bookappoinment.html\",param)\n\n\"\"\"\" \nHistory for Hospital Booking\n\"\"\"\nclass ViewBookedAnAppointmentViews(SuccessMessageMixin,ListView):\n paginate_by = 1\n def get(self,request):\n booked = Booking.objects.filter(patient = request.user)\n labbooks = Slot.objects.filter(patient = request.user)\n booking_labtest_list =[]\n for labbook in labbooks: \n labtests = LabTest.objects.filter(slot=labbook)\n 
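# pair each booked lab slot with its tests so the history template can render them together\n            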
booking_labtest_list.append({'labbook':labbook,'labtests':labtests})\n phamacybooking = PicturesForMedicine.objects.filter(patient = request.user)\n print(booked)\n param = {'booked':booked,\"booking_labtest_list\":booking_labtest_list,'phamacybooking':phamacybooking}\n \n return render(request,\"patient/appointmentlist.html\",param)\n\n\n\ndef bookingConfirmation(request,booking_id):\n try:\n booking = get_object_or_404(Booking,id=booking_id,patient=request.user )\n notifications = Notification.objects.filter(booking=booking,to_user=request.user)\n for notification in notifications:\n notification.user_has_seen =True\n notification.save()\n context = {'booking' : booking}\n return render(request , 'patient/confirmation.html', context)\n except Exception as e:\n messages.add_message(request,messages.ERROR,\"page not found!\")\n return render(request , 'accounts/404.html',)\n\n\n\nclass BookAnAppointmentViews(SuccessMessageMixin,View):\n def post(self,request, *args, **kwargs):\n # try:\n if request.method == \"POST\":\n doctorid = request.POST.get('doctorid')\n hospitalstaffdoctor = get_object_or_404(HospitalStaffDoctors,id=doctorid)\n serviceid = request.POST.get('serviceid')\n someone = request.POST.get('someone')\n \n service = ServiceAndCharges.objects.get(id=serviceid)\n date = request.POST.get('date')\n time = request.POST.get('time')\n now = datetime.now()\n now5 = now + timedelta(minutes=5)\n\n \n print(doctorid,hospitalstaffdoctor,serviceid,service,date,time)\n with transaction.atomic():\n if someone:\n forsome = get_object_or_404(ForSome,id=someone)\n booking = Booking(patient = request.user,for_whom=forsome,hospitalstaffdoctor=hospitalstaffdoctor,service=service,applied_date=date,applied_time=time,is_applied=True,is_active=True,amount=service.service_charge,status=\"booked\")\n else:\n booking = Booking(patient = request.user,hospitalstaffdoctor=hospitalstaffdoctor,service=service,applied_date=date,applied_time=time,is_applied=True,is_active=True,amount=service.service_charge,status=\"booked\",)\n booking.reject_within_5__lt = now\n booking.reject_within_5 = now5\n booking.save()\n\n print(booking.reject_within_5__lt)\n print(booking.reject_within_5)\n\n print(\"booking saved\")\n order = Orders(patient=request.user,service=service,amount=service.service_charge,booking_for=1,bookingandlabtest=booking.id,status=1)\n order.save()\n\n\n print(\"order saved\")\n tc = 0\n try:\n tc = Temp.objects.filter(user=request.user).count()\n except:\n tc = 0\n print(\"tc check below\")\n print(tc)\n if tc > 0:\n temp = Temp.objects.get(user=request.user)\n temp.delete()\n temp = Temp(user=request.user,order_id=order.id)\n temp.save()\n mobile= request.user.phone\n key = send_otp(mobile)\n print(key)\n if key:\n obj = phoneOPTforoders(order_id=order,user=request.user,otp=key)\n obj.save()\n notification = Notification(notification_type=\"1\",from_user= request.user,to_user=booking.hospitalstaffdoctor.hospital.admin,booking=booking)\n notification.save()\n # conn.request(\"GET\", \"https://2factor.in/API/R1/?module=SMS_OTP&apikey=f08f2dc9-aa1a-11eb-80ea-0200cd936042&to=\"+str(mobile)+\"&otpvalue=\"+str(key)+\"&templatename=WomenMark1\")\n # res = conn.getresponse()\n # data = res.read()\n # data=data.decode(\"utf-8\")\n # data=ast.literal_eval(data)\n # print(data) \n return JsonResponse({'message' : 'success','status': True,'Booking_id':booking.id,\"otp\":key})\n else:\n return JsonResponse({'message' : 'Error','status': False})\n\n def new_method(self, now, booking):\n 
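# NOTE: this helper appears unused; post() above sets the field inline, and the '__lt' suffix is a query lookup, so this assignment only creates a transient attribute\n        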
booking.reject_within_5__lt = now\n        \n        \n    # except Exception as e:\n    #     messages.add_message(request,messages.ERROR,\"Network Issue try after some time\")\n    #     return HttpResponse(e)\n\n    \n    # import checksum generation utility\n    # You can get this utility from https://developer.paytm.com/docs/checksum/\n    \n    # paytmParams = dict()\n\n    # paytmParams[\"body\"] = {\n    #     \"requestType\" : \"Payment\",\n    #     \"mid\" : \"Vsrdcl31860853647501\",\n    #     \"websiteName\" : \"WEBSTAGING\",\n    #     \"orderId\" : str(booking.id),\n    #     \"callbackUrl\" : \"http://127.0.0.1:8000/patient/handlerequest\",\n    #     \"txnAmount\" : {\n    #         \"value\" : str(booking.amount),\n    #         \"currency\" : \"INR\",\n    #     },\n    #     \"userInfo\" : {\n    #         \"custId\" : str(request.user.phone),\n    #     },\n    # }\n\n    # # Generate checksum by parameters we have in body\n    # # Find your Merchant Key in your Paytm Dashboard at https://dashboard.paytm.com/next/apikeys \n    # checksum = PaytmChecksum.generateSignature(json.dumps(paytmParams[\"body\"]), \"JDhgGD%hhT&OtVEE\")\n\n    # paytmParams[\"head\"] = {\n    #     \"signature\" : checksum\n    # }\n    \n\n    # post_data = json.dumps(paytmParams)\n\n    # # for Staging\n    # url = \"https://securegw.paytm.in/theia/api/v1/initiateTransaction?mid=Vsrdcl31860853647501&orderId=\"+str(booking.id)\n\n    # # for Production\n    # # url = \"https://securegw.paytm.in/theia/api/v1/initiateTransaction?mid=YOUR_MID_HERE&orderId=ORDERID_98765\"\n    # response = requests.post(url, data = post_data, headers = {\"Content-type\": \"application/json\"}).json()\n    # print(response)\n    # print(response['body']['txnToken'])\n    \n    \n    # paytmParams[\"head\"] = {\n    #     \"tokenType\" : \"TXN_TOKEN\",\n    #     \"token\" : response['body']['txnToken']\n    # }\n    # post_data = json.dumps(paytmParams)\n\n    # # for Staging\n    # url = \"https://securegw-stage.paytm.in/theia/api/v2/fetchPaymentOptions?mid=Vsrdcl31860853647501&orderId=\"+str(booking.id)\n    \n\n    # # for Production\n    # # url = \"https://securegw.paytm.in/theia/api/v2/fetchPaymentOptions?mid=YOUR_MID_HERE&orderId=ORDERID_98765\"\n    # response = requests.post(url, data = post_data, headers = {\"Content-type\": \"application/json\"}).json()\n    # print(response) \n\ndef CancelBookedAnAppointmentViews(request,id):\n    booked = Booking.objects.get(id=id)\n    booked.is_cancelled = True\n    booked.status = \"cancelled\"  # lowercase 'status' is the model field; 'Status' would only set a transient attribute\n    booked.save()\n    messages.add_message(request,messages.SUCCESS,\"Cancelled successfully!\")\n    return HttpResponseRedirect(reverse('viewbookedanappointment'))\n\ndef send_otp(phone):\n    if phone:\n        key = random.randint(1000,9999)  # always a 4-digit OTP\n        print(key)\n        return key\n    else:\n        return False\n\n\n    \n\n\"\"\"\nHistory for Lab Booking\n\"\"\"\n\nclass BookAnAppointmentForLABViews(SuccessMessageMixin,View):\n    def post(self,request, *args, **kwargs): \n        if request.method == \"POST\":\n            serviceid_list = request.POST.getlist('serviceid[]')\n            date = request.POST.get('date')\n            labid = request.POST.get('labid')\n            someone = request.POST.get('someone')\n            lab = get_object_or_404(Labs,id=labid)\n            time = request.POST.get('time') \n            print(serviceid_list,date,labid,lab,time)\n            if someone:\n                forsome = get_object_or_404(ForSome,id=someone)\n                labbooking = Slot(patient = request.user,for_whom=forsome,lab=lab,applied_date=date,applied_time=time,is_applied=True,is_active=True,status=\"booked\") \n            else: \n                labbooking = Slot(patient = request.user,lab=lab,applied_date=date,applied_time=time,is_applied=True,is_active=True,status=\"booked\") \n            labbooking.save()\n            total = 0\n            \n            for serviceid in serviceid_list:\n                service = 
ServiceAndCharges.objects.get(id=serviceid)\n labservices = LabTest(service=service,lab=lab,slot=labbooking,is_active=True)\n labservices.save()\n total =total + service.service_charge \n labbooking.amount=total\n labbooking.save()\n print(\"booking saved\")\n order = Orders(patient=request.user,service=service,booking_for=2,bookingandlabtest=labbooking.id,amount=total,status=1)\n order.save()\n print(\"order\")\n tc = 0\n try:\n tc = Temp.objects.filter(user=request.user).count()\n except:\n tc = 0\n print(\"tc check below\")\n print(tc)\n if tc > 0:\n temp = Temp.objects.get(user=request.user)\n temp.delete()\n temp = Temp(user=request.user,order_id=order.id)\n temp.save() \n print(\"temp\")\n mobile= request.user.phone\n key = send_otp(mobile)\n print(key)\n if key:\n obj = phoneOPTforoders(order_id=order,user=request.user,otp=key)\n obj.save()\n notification = Notification(notification_type=\"1\",from_user= request.user,to_user=lab.admin,slot=labbooking)\n notification.save()\n # conn.request(\"GET\", \"https://2factor.in/API/R1/?module=SMS_OTP&apikey=f08f2dc9-aa1a-11eb-80ea-0200cd936042&to=\"+str(mobile)+\"&otpvalue=\"+str(key)+\"&templatename=WomenMark1\")\n # res = conn.getresponse()\n # data = res.read()\n # data=data.decode(\"utf-8\")\n # data=ast.literal_eval(data)\n # print(data) \n return JsonResponse({'message' : 'success','status': True,'Booking_id':labbooking.id,\"otp\":key})\n else:\n return JsonResponse({'message' : 'error','status': False,})\n \n # except Exception as e:\n # messages.add_message(request,messages.ERROR,\"Network Issue try after some time\")\n # return HttpResponse(e)\n\ndef ReportSendToDoctorViews(request,id):\n slot = get_object_or_404(Slot,id=id)\n slot.send_to_doctor =True\n slot.save()\n messages.add_message(request,messages.SUCCESS,\"Send to doctor Successfully !\")\n return HttpResponseRedirect(reverse(\"viewbookedanappointment\"))\n\n\ndef CancelLabBookedAnAppointmentViews(request,id):\n booked = Slot.objects.get(id=id)\n booked.is_cancelled = True\n booked.status = \"cancelled\"\n booked.save()\n messages.add_message(request,messages.SUCCESS,\"Cancelled Successfully !\")\n return HttpResponseRedirect(reverse('viewbookedanappointment'))\n \n\"\"\"\nLab View and Profile \n\"\"\"\nclass LabListViews(ListView):\n def get(self, request, *args, **kwargs):\n labs = Labs.objects.filter(is_verified=True,is_deactive=False,admin__is_active=True)\n lab_media_list = []\n for lab in labs:\n medias = Medias.objects.filter(is_active=True,user=lab.admin) \n lab_media_list.append({'lab':lab,'medias':medias})\n print(lab_media_list)\n param = {'lab_media_list':lab_media_list} \n return render(request,\"patient/lab_list.html\",param)\n \nclass labDetailsViews(DetailView):\n def get(self, request, *args, **kwargs):\n lab_id=kwargs['id']\n lab = get_object_or_404(Labs,is_verified=True,is_deactive=False,id=lab_id)\n services = ServiceAndCharges.objects.filter(user=lab.admin)\n someones = ForSome.objects.filter(patient=request.user.patients)\n opdtime = OPDTime.objects.get(user=lab.admin) \n param = {'lab':lab,'services':services,'opdtime':opdtime,'someones':someones} \n return render(request,\"patient/lab_details.html\",param)\n\n\ndef slotConfirmation(request,slot_id):\n try:\n slot = get_object_or_404(Slot,id=slot_id,patient=request.user )\n notifications = Notification.objects.filter(slot=slot,to_user=request.user)\n for notification in notifications:\n notification.user_has_seen =True\n notification.save()\n context = {'slot' : slot}\n return render(request , 
'patient/slotconfirmation.html', context)\n except Exception as e:\n messages.add_message(request,messages.ERROR,\"page not found!\")\n return render(request , 'accounts/404.html',)\n\n\"\"\"\nPharmacy view and profile\n\"\"\"\n\nclass PharmacyListViews(ListView):\n def get(self, request, *args, **kwargs):\n pharamcy = Pharmacy.objects.filter(is_verified=True,is_deactive=False,admin__is_active=True)\n param = {'pharamcys':pharamcy} \n print(pharamcy)\n return render(request,\"patient/pharmacy_list.html\",param)\n\nclass PharmacyDetailsViews(DetailView):\n def get(self, request, *args, **kwargs):\n pharmacy_id=kwargs['id']\n pharmacy = get_object_or_404(Pharmacy,id=pharmacy_id)\n param = {'pharmacy':pharmacy} \n return render(request,\"patient/pharmacy_details.html\",param)\n\nclass UploadPresPhotoViews(SuccessMessageMixin,View):\n def post(self,request, *args, **kwargs):\n \n if request.method == \"POST\":\n prescription = request.FILES.get('prescription')\n if prescription:\n fs=FileSystemStorage()\n filename1=fs.save(prescription.name,prescription)\n profile_pic_url=fs.url(filename1)\n print(prescription)\n date = request.POST.get('date')\n pharmacyid = request.POST.get('pharmacyid')\n add_note = request.POST.get('add_note')\n pharmacy = get_object_or_404(Pharmacy,id=pharmacyid)\n time = request.POST.get('time') \n print(time,date,pharmacy,pharmacyid,prescription)\n picturesformedicine = PicturesForMedicine(patient = request.user,pharmacy=pharmacy,prescription=profile_pic_url,applied_date=date,applied_time=time,is_applied=True,is_active=True,add_note=add_note,status=\"booked\") \n picturesformedicine.save()\n service = get_object_or_404(ServiceAndCharges,id=13)\n print(\"booking saved\")\n order = Orders(patient=request.user,service=service,booking_for=3,bookingandlabtest=picturesformedicine.id,status=1,)\n order.save()\n print(\"order\")\n tc = 0\n try:\n tc = Temp.objects.filter(user=request.user).count()\n except:\n tc = 0\n print(\"tc check below\")\n print(tc)\n if tc > 0:\n temp = Temp.objects.get(user=request.user)\n temp.delete()\n temp = Temp(user=request.user,order_id=order.id)\n temp.save() \n print(\"temp\")\n mobile= request.user.phone\n key = send_otp(mobile)\n print(key)\n if key:\n obj = phoneOPTforoders(order_id=order,user=request.user,otp=key)\n obj.save()\n notification = Notification(notification_type=\"1\",from_user= request.user,to_user=pharmacy.admin,picturesmedicine=picturesformedicine)\n notification.save()\n # conn.request(\"GET\", \"https://2factor.in/API/R1/?module=SMS_OTP&apikey=f08f2dc9-aa1a-11eb-80ea-0200cd936042&to=\"+str(mobile)+\"&otpvalue=\"+str(key)+\"&templatename=WomenMark1\")\n # res = conn.getresponse()\n # data = res.read()\n # data=data.decode(\"utf-8\")\n # data=ast.literal_eval(data)\n # print(data)\n return JsonResponse({'message' : 'success','status': True,'Booking_id':picturesformedicine.id,\"otp\":key})\n else:\n return JsonResponse({'message' : 'error','status': False,})\n # return render(request,\"patient/amount_confirmation.html\")\n # return HttpResponseRedirect(reverse(\"pharmacy_details\" , kwargs={'id':pharmacyid}))\n\ndef picturesformedicineConfirmation(request,booking_id): \n # try:\n picturesformedicine = get_object_or_404(PicturesForMedicine,id=booking_id,patient=request.user )\n notifications = Notification.objects.filter(picturesmedicine=picturesformedicine,to_user=request.user)\n for notification in notifications:\n notification.user_has_seen =True\n notification.save()\n print(\"hello i m in view of confirmation\")\n context 
= {'picturesformedicine' : picturesformedicine}\n    return render(request , 'patient/pharmacy_confirmation.html', context)\n    # except Exception as e:\n    #     messages.add_message(request,messages.ERROR,\"page not found!\")\n    #     return render(request , 'accounts/404.html',)\n\ndef CancelPictureForMedicineViews(request,id):\n    booked = PicturesForMedicine.objects.get(id=id)\n    booked.is_cancelled = True\n    booked.status = \"cancelled\"  # lowercase 'status' is the model field; 'Status' would only set a transient attribute\n    booked.save()\n    messages.add_message(request,messages.SUCCESS,\"Cancelled successfully!\")\n    return HttpResponseRedirect(reverse('viewbookedanappointment'))\n\n\"\"\"\nAdd someone as a patient, plus update and delete\n\"\"\"\ndef AddSomeoneAsPatient(request):\n    if request.method == \"POST\":\n        action =request.POST.get(\"action\")\n        fisrt_name = request.POST.get(\"fisrt_name\")\n        last_name = request.POST.get(\"last_name\")\n        name_title = request.POST.get(\"name_title\")\n        age = request.POST.get(\"age\")\n        email = request.POST.get(\"email\")\n        add_notes = request.POST.get(\"add_notes\")\n        phone = request.POST.get(\"phone\")\n        ID_number = request.POST.get(\"ID_number\")\n        status = request.POST.get(\"status\")\n        ID_proof = request.FILES.get(\"ID_proof\")\n        address = request.POST.get(\"address\")\n        city = request.POST.get(\"city\")\n        gender = request.POST.get(\"gender\")\n        bloodgroup = request.POST.get(\"bloodgroup\")\n        id = request.POST.get(\"id\")\n        did = request.POST.get(\"did\")\n        someoneid = request.POST.get(\"someoneid\")\n        state = \"Gujarat\"\n        country = \"India\"\n        zip_Code = request.POST.get(\"zip_Code\")\n        page_name = request.POST.get(\"page_name\")\n        if action == \"add\": \n            # for Hospital staff user creation\n            try:\n                profile_pic_url = \"\"\n                if ID_proof:\n                    fs=FileSystemStorage()\n                    filename=fs.save(ID_proof.name,ID_proof)\n                    media_url=fs.url(filename)\n                    profile_pic_url = media_url\n                    print(\"inside id_proof\") \n                \n                patient=get_object_or_404(Patients,admin=request.user)\n                someone = ForSome(patient=patient,name_title=name_title,fisrt_name=fisrt_name,last_name=last_name,address=address,city=city,state=state,country=country,zip_Code=zip_Code,age=age,phone=phone,ID_proof=profile_pic_url,add_notes=add_notes,gender=gender,is_active=True,email=email,bloodgroup=bloodgroup) \n                someone.save() \n                messages.add_message(request,messages.SUCCESS,\"Successfully Added\")\n                if page_name == \"HOMEVISIT\":\n                    return HttpResponseRedirect(reverse(\"home_visit_doctor\", kwargs={'id':id,\"did\":did}))\n                if page_name == \"OPD\":\n                    return HttpResponseRedirect(reverse(\"bookappoinment\", kwargs={'id':id,\"did\":did}))\n                if page_name == \"LAB\":\n                    return HttpResponseRedirect(reverse(\"laboratory_details\", kwargs={'id':id}))\n                # if page_name == \"ONLINE\":\n                # if page_name == \"SETTING\":\n            except Exception as e:\n                return HttpResponse(e)\n        elif action == \"update\":\n            try:\n                profile_pic_url = \"\"\n                if ID_proof:\n                    fs=FileSystemStorage()\n                    filename=fs.save(ID_proof.name,ID_proof)\n                    media_url=fs.url(filename)\n                    profile_pic_url = media_url\n                    print(\"inside id_proof\") \n                \n                patient=get_object_or_404(Patients,admin=request.user)\n                someone = get_object_or_404(ForSome,id=someoneid)\n                someone.patient=patient\n                someone.name_title=name_title\n                someone.fisrt_name=fisrt_name\n                someone.last_name=last_name\n                someone.address=address\n                someone.city=city\n                someone.state=state\n                someone.country=country\n                someone.zip_Code=zip_Code\n                someone.age=age\n                someone.phone=phone\n                someone.ID_proof=profile_pic_url\n                someone.add_notes=add_notes\n                someone.gender=gender\n                someone.is_active=True\n                someone.email=email\n                
someone.bloodgroup=bloodgroup \n                someone.save() \n                messages.add_message(request,messages.SUCCESS,\"Successfully updated\")\n                return HttpResponseRedirect(reverse(\"bookappoinment\", kwargs={'id':id,\"did\":did}))\n            except Exception as e:\n                return HttpResponse(e)  \n        elif action == \"delete\":\n            # soft-delete the dependant record rather than touching the patient account\n            someone = get_object_or_404(ForSome,id=someoneid)\n            someone.is_active = False\n            someone.save()\n            messages.add_message(request,messages.SUCCESS,\"Successfully Deleted\")\n            return HttpResponseRedirect(reverse(\"patient_home\"))\n        else:\n            return HttpResponse(\"on other side\")\n    \n\"\"\"\nCheckout page\n\"\"\"\ndef CheckoutViews(request):\n    temp= Temp.objects.get(user=request.user)\n    order = get_object_or_404(Orders,id=temp.order_id)\n    order.status=1\n    order.save()\n    book_for=order.booking_for\n    param = {'order':order}  # default so render() below never sees an unbound name\n    if book_for == \"1\":\n        booking = get_object_or_404(Booking,id=order.bookingandlabtest)\n        param ={'order':order,'booking':booking}\n    if book_for == \"2\":\n        booking = get_object_or_404(Slot,id=order.bookingandlabtest)\n        services = LabTest.objects.filter(slot=booking)\n        param ={'order':order,'booking':booking,'services':services}\n    if book_for == \"3\":\n        booking = get_object_or_404(PicturesForMedicine,id=order.bookingandlabtest)\n        booking.amount_paid = True\n        param ={'order':order,'booking':booking}\n    return render(request,\"patient/checkout.html\",param)\n\ndef PayForMedicine(request,id):\n    booking = get_object_or_404(PicturesForMedicine,id=id)\n    booking.amount_paid = True\n    booking.status = \"Amount Paid\"\n    booking.save()\n    notification = Notification(notification_type=\"1\",from_user= request.user,to_user=booking.pharmacy.admin,picturesmedicine=booking)\n    notification.save()\n    order = get_object_or_404(Orders,bookingandlabtest=booking.id,booking_for=\"3\")\n    order.status=1\n    order.save()\n    param ={'order':order,'booking':booking}\n    return render(request,\"patient/checkout.html\",param)\n\ndef PaytmProcessViews(request):\n    return HttpResponse(\"on payment page\")\n\n\"\"\"\nPaytm handler\n\"\"\"\n@csrf_exempt\ndef handlerequest(request):\n    # Paytm will send its POST request here\n    print(\"paytm came\")\n    # paytmParams = dict()\n    # paytmChecksum = \"CHECKSUM_VALUE\"\n    # paytmParams = request.form.to_dict()\n    # paytmChecksum = paytmChecksum\n    # paytmChecksum = paytmParams['CHECKSUMHASH']\n    # paytmParams.pop('CHECKSUMHASH', None)\n\n    # # Verify checksum\n    # # Find your Merchant Key in your Paytm Dashboard at https://dashboard.paytm.com/next/apikeys \n    # isVerifySignature = PaytmChecksum.verifySignature(paytmParams, \"JDhgGD%hhT&OtVEE\", paytmChecksum)\n    # if isVerifySignature:\n    #     print(\"Checksum Matched\")\n    # else:\n    #     print(\"Checksum Mismatched\")\n    pass\n\n\"\"\"\nList of doctors or hospitals for online consultation\n\"\"\"\ndef ListofVirtualDoctor(request):\n    return render(request,\"patient/virtual.html\")\n\n\"\"\"\nHome visit doctor list\n\"\"\"\nclass HomeVisitDoctor(CreateView):\n    def get(self, request, *args, **kwargs):\n        hospital_id= kwargs['id']\n        hospitaldoctor_id= kwargs['did']\n        hospital = get_object_or_404(Hospitals,is_verified=True,is_deactive=False,id=hospital_id)\n        hospitalstaffdoctor = get_object_or_404(HospitalStaffDoctors,is_active=True,id=hospitaldoctor_id)\n        someones = ForSome.objects.filter(patient=request.user.patients)\n        hospitalservice = ServiceAndCharges.objects.filter(user=hospital.admin)\n        param = {'someones':someones,'hospital':hospital,'hospitalstaffdoctor':hospitalstaffdoctor,'hospitalservice':hospitalservice}\n        return render(request,\"patient/home_visit.html\",param)\n\ndef BookanAppointmentForHomeVisit(request):\n    if request.method == \"POST\":\n        doctorid = request.POST.get('doctorid') \n        hospitalstaffdoctor = get_object_or_404(HospitalStaffDoctors,id=doctorid)\n        serviceid = request.POST.get('serviceid')\n        someone = request.POST.get('someone')\n        \n        service = ServiceAndCharges.objects.get(id=serviceid)\n        date = request.POST.get('date')\n        time = request.POST.get('time')\n\n        print(doctorid,hospitalstaffdoctor,serviceid,service,date,time)\n        if someone:\n            forsome = get_object_or_404(ForSome,id=someone)\n            booking = Booking(patient = request.user,for_whom=forsome,hospitalstaffdoctor=hospitalstaffdoctor,service=service,applied_date=date,applied_time=time,is_applied=True,is_active=True,amount=service.service_charge,booking_type=\"HOME\")\n        else:\n            booking = Booking(patient = request.user,hospitalstaffdoctor=hospitalstaffdoctor,service=service,applied_date=date,applied_time=time,is_applied=True,is_active=True,amount=service.service_charge,booking_type=\"HOME\")\n        booking.save()\n        print(\"booking saved\")\n        order = Orders(patient=request.user,service=service,amount=service.service_charge,booking_for=1,bookingandlabtest=booking.id,status=1)\n        order.save()\n        print(\"order saved\")\n        if Temp.objects.filter(user=request.user).exists():  # .get() would raise DoesNotExist when no row is pending\n            temp = Temp.objects.get(user=request.user)\n            temp.delete()\n        temp = Temp(user=request.user,order_id=order.id)\n        temp.save()\n        mobile= request.user.phone\n        key = send_otp(mobile)\n        print(key)\n        if key:\n            obj = phoneOPTforoders(order_id=order,user=request.user,otp=key)\n            obj.save()\n            # conn.request(\"GET\", \"https://2factor.in/API/R1/?module=SMS_OTP&apikey=f08f2dc9-aa1a-11eb-80ea-0200cd936042&to=\"+str(mobile)+\"&otpvalue=\"+str(key)+\"&templatename=WomenMark1\")\n            # res = conn.getresponse()\n            # data = res.read()\n            # data=data.decode(\"utf-8\")\n            # data=ast.literal_eval(data)\n            # print(data) \n            return JsonResponse({'message' : 'success','status': True,'Booking_id':booking.id,\"otp\":key})\n        else:\n            return JsonResponse({'message' : 'error','status': False,})","repo_name":"maksudmansuri/hospital","sub_path":"patient/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":40395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
{"seq_id":"25704290379","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom collections import defaultdict\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom django.utils import timezone\nfrom django.utils.text import slugify\nfrom bober_si.models import *\nfrom bober_simple_competition.models import AttemptConfirmation\nfrom bober_paper_submissions.models import JuniorYear\nimport json\nimport os\nfrom django.db.models import Sum\n\n\n\nclass Command(BaseCommand):\n    # @transaction.atomic\n    help = \"Assign an award for each attempt in a list\"\n\n    def add_arguments(self, parser):\n        parser.add_argument('school_competition_slug', nargs=1)\n        parser.add_argument('national_competition_slug', nargs=1)\n\n    def handle(self, *args, **options):\n        if len(args) < 3:\n            args += (None,) * (3 - len(args))\n        scslug = options.get('school_competition_slug', [args[0]])[0]\n        ncslug = options.get('national_competition_slug', [args[1]])[0]\n        school_competition = SchoolCompetition.objects.get(slug=scslug)\n        national_competition = SchoolCompetition.objects.get(slug=ncslug)\n        organizer = national_competition.administrator_code_generator.codes.filter(\n            code_parts__name='admin_privileges', \n            code_parts__value='view_all_admin_codes'\n        )[0].creator_set.all()[0]\n        attempts_by_teacher = 
defaultdict(\n lambda: defaultdict(\n lambda: defaultdict(list)))\n cqs_list = []\n for cqs in CompetitionQuestionSet.objects.filter(\n competition = national_competition).order_by('name'):\n school_cqs = CompetitionQuestionSet.objects.get(\n competition = school_competition,\n name = cqs.name)\n cqs_list.append(cqs)\n for a in Attempt.objects.filter(\n competitionquestionset = school_cqs,\n attemptaward__award__name = 'napreduje').distinct():\n # print a.id, school_cqs\n if a.attemptaward_set.filter(award__name = 'napreduje', \n revoked_by=None).exclude(attempt__confirmed_by=None).count() < 1:\n # print \"ha-ha!\"\n continue\n teacher = a.confirmed_by.all()[0]\n school = SchoolTeacherCode.objects.filter(\n teacher = teacher,\n competition_questionset = a.competitionquestionset,\n code__value = a.access_code\n )[0].school\n attempts_by_teacher[teacher][school][cqs].append(a)\n for teacher, school_dict in attempts_by_teacher.items():\n for school, cqs_dict in school_dict.items():\n # print school, cqs_dict\n # print teacher.user\n for cqs in cqs_list:\n # print \" \", cqs\n attempts = cqs_dict[cqs]\n for a in attempts:\n code = national_competition.competitor_code_create(\n access_code = None,\n competition_questionset = cqs,\n code_data = {\n 'competitor_privileges':[\n 'attempt',\n 'resume_attempt',\n ]\n })\n code.save()\n short_code_value = code.value[\n code.value.find(code.format.separator)+1:]\n teacher.created_codes.add(code)\n stc = SchoolTeacherCode(\n competition_questionset = cqs,\n school = school,\n teacher = teacher,\n code = code\n )\n stc.save()\n school_name = school.name\n teacher_name = u\"{} {} <{}>\".format(\n teacher.user.first_name,\n teacher.user.last_name,\n teacher.user.email)\n competitor_name = u\"{} {}\".format(\n a.competitor.first_name,\n a.competitor.last_name)\n print (u\"\\t\".join([str(a.id), school.name, teacher_name, competitor_name, short_code_value]))\n","repo_name":"polz113/bober","sub_path":"django/bober/bober_si/management/commands/create_si_national_codes.py","file_name":"create_si_national_codes.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"28754868736","text":"'''Advent of code, day 17: TRICK SHOT'''\r\nfrom math import inf\r\nfrom termbars import TerminalBar, TBPRESET_MODERN\r\n\r\nclass Probe:\r\n '''Probe class'''\r\n def __init__(self, velx, vely):\r\n self.vel = [velx, vely]\r\n self.pos = [0, 0]\r\n self.highest_y = -inf\r\n\r\n def step(self):\r\n '''Perform step'''\r\n self.pos[0] += self.vel[0]\r\n self.pos[1] += self.vel[1]\r\n self.highest_y = self.pos[1] if self.pos[1] > self.highest_y else self.highest_y\r\n self.vel[0] += -1 if self.vel[0] > 0 else (1 if self.vel[0] < 0 else 0)\r\n self.vel[1] -= 1\r\n\r\n def in_target(self, target: list[list[int, int], list[int, int]]):\r\n '''Check if in the target zone'''\r\n return target[0][0] <= self.pos[0] <= target[0][1] \\\r\n and target[1][0] <= self.pos[1] <= target[1][1]\r\n\r\n def past_target(self, target: list[list[int, int], list[int, int]]):\r\n '''Check if past the target'''\r\n return self.pos[0] > target[0][1] \\\r\n or self.pos[1] < target[1][0] \\\r\n or (self.vel[0] == 0 and not target[0][0] <= self.pos[0] <= target[0][1])\r\n\r\ndef simulate_probes(target_area, lowx = 1, lowy = 1, limitx = 500, limity = 500):\r\n '''Simulate probes'''\r\n tbar = TerminalBar((limitx-lowx) * (limity-lowy))\r\n tbar.update_preset(TBPRESET_MODERN)\r\n highest_height = [-inf, 0, 0]\r\n 
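# counts every launch velocity that lands in the target at some step (the puzzle's part 2 answer)\r\n    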
number_of_successes = 0\r\n    for velx in range(lowx, limitx):\r\n        for vely in range(lowy, limity):\r\n            tbar.update(tbar.bar_state[0] + 1)\r\n            probe = Probe(velx, vely)\r\n            while not probe.past_target(target_area):\r\n                probe.step()\r\n                if probe.in_target(target_area):\r\n                    number_of_successes += 1\r\n                    if probe.highest_y > highest_height[0]:\r\n                        highest_height = [probe.highest_y, velx, vely]\r\n                    break\r\n\r\n    return highest_height, number_of_successes\r\n\r\n\r\ndef task_1(simul):\r\n    '''Code for task 1:\r\n    '''\r\n    return simul[0]\r\n\r\ndef task_2(simul):\r\n    '''Code for task 2:\r\n    '''\r\n    return simul[1]\r\n\r\ndef main():\r\n    '''Main function'''\r\n    with open(\"aoc17_input.txt\", \"r\", encoding = \"utf-8\") as file:\r\n        contents = file.readlines()\r\n    target_area = contents[0].split(\"=\")\r\n    target_area = [target_area[1].split(\"..\"), target_area[2].split(\"..\")]\r\n    target_area = [[int(target_area[0][0]), int(target_area[0][1][:-3])],\r\n                   [int(target_area[1][0]), int(target_area[1][1])]]\r\n    simul_probes = simulate_probes(target_area, -500, -500, 500, 500)\r\n    print(task_1(simul_probes))\r\n    print(task_2(simul_probes))\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"AkzidenzGrotesk-py/aoc-2021","sub_path":"aoc17_trick_shot.py","file_name":"aoc17_trick_shot.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"17162001616","text":"# import libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pandas import DataFrame,Series\nfrom sklearn import metrics\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn import model_selection\nfrom sklearn.linear_model import LinearRegression\nfrom pylab import *\nfrom matplotlib import font_manager\nimport scipy.stats as stats\nimport joblib  # the standalone joblib package replaces the deprecated sklearn.externals.joblib\nmpl.rcParams['font.sans-serif'] = ['SimHei']\nmpl.rcParams['axes.unicode_minus'] = False\n\n## load the data\ndata1=pd.read_excel('C:\\\\Users\\\\11384\\\\Desktop\\\\python大作业\\\\2013-2018总表.xlsx',index_col=0,)\ndata1=data1.dropna()\n# daily AQI line chart over the five years; then fit a regression of AQI on the other pollutants\nAQIday=data1.loc[:,'AQI']\nAQIday.plot(title='2013/12-2018/11 daily AQI',linewidth=2,marker='o',grid=True,use_index=True)\nplt.xlabel('Date',fontsize=12)\nplt.ylabel('AQI',fontsize=12)\nplt.show()\nX1=data1.iloc[:,2:8].astype(float)\ny1=data1.iloc[:,0].astype(float)\nX1_train,X1_test,y1_train,y1_test=model_selection.train_test_split(X1,y1,test_size=0.3,random_state=1)\nlinregTr1=LinearRegression()\nlinregTr1.fit(X1_train,y1_train)\nprint(linregTr1.intercept_,linregTr1.coef_)\ny1_train_pred=linregTr1.predict(X1_train)\ny1_test_pred=linregTr1.predict(X1_test)\ntrain_err1=metrics.mean_squared_error(y1_train,y1_train_pred)\ntest_err1=metrics.mean_squared_error(y1_test,y1_test_pred)\nprint('the mean squared error of train and test are:{:.2f},{:.2f}'.format(train_err1,test_err1))\npredict_score1=linregTr1.score(X1_test,y1_test)\nprint('The coefficient of determination is:{:.2f}'.format(predict_score1))\n\n# pie charts\npiedata=data1.loc[:,'质量等级']\ndatasum=piedata.value_counts()\ndatasum.plot(kind='pie',figsize=(6,6),title='Air-quality level distribution over five years',fontsize=14,autopct='%1.1f%%')\nplt.show()\npiedata2014=data1.iloc[32:396,1]\ndatasum2014=piedata2014.value_counts()\ndatasum2014.plot(kind='pie',figsize=(6,6),title='2014 air-quality level distribution',fontsize=14,autopct='%1.1f%%')\nplt.show()\npiedata2015=data1.iloc[396:761,1]\ndatasum2015=piedata2015.value_counts()\ndatasum2015.plot(kind='pie',figsize=(6,6),title='2015 air-quality level distribution',fontsize=14,autopct='%1.1f%%')\nplt.show()\npiedata2016=data1.iloc[762:1127,1]\ndatasum2016=piedata2016.value_counts()\ndatasum2016.plot(kind='pie',figsize=(6,6),title='2016 air-quality level distribution',fontsize=14,autopct='%1.1f%%')\nplt.show()\npiedata2017=data1.iloc[1128:1492,1]\ndatasum2017=piedata2017.value_counts()\ndatasum2017.plot(kind='pie',figsize=(6,6),title='2017 air-quality level distribution',fontsize=14,autopct='%1.1f%%')\nplt.show()\npiedata2018=data1.iloc[1493:1826,1]\ndatasum2018=piedata2018.value_counts()\ndatasum2018.plot(kind='pie',figsize=(6,6),title='2018 air-quality level distribution',fontsize=14,autopct='%1.1f%%')\nplt.show()\n\n# monthly AQI\ndata2=pd.read_excel('C:\\\\Users\\\\11384\\\\Desktop\\\\python大作业\\\\2013-2018上海空气质量指数月统计历史数据.xlsx',index_col=0,skiprows=1)\nAQImonth=data2.loc[:,'AQI']\nAQImonth.plot(title='2013/12-2018/11 monthly AQI',linewidth=2,marker='o',linestyle='dashed',grid=True,use_index=True)\nplt.xlabel('Year',fontsize=12)\nplt.ylabel('AQI',fontsize=12)\nplt.show()\n\n# winter master table\ndata3=pd.read_excel('C:\\\\Users\\\\11384\\\\Desktop\\\\python大作业\\\\2013-2018冬季总表.xlsx',index_col=0)\n# data processing / cleaning\ndata3.describe()\ndata3drop=data3.dropna()\ndata3drop.describe()\n# wind direction (map the categories to numeric codes)\ndata3drop.loc[data3drop['风向']=='北风','风向']=1\ndata3drop.loc[data3drop['风向']=='东北风','风向']=2\ndata3drop.loc[data3drop['风向']=='东风','风向']=3\ndata3drop.loc[data3drop['风向']=='东南风','风向']=4\ndata3drop.loc[data3drop['风向']=='南风','风向']=5\ndata3drop.loc[data3drop['风向']=='西南风','风向']=6\ndata3drop.loc[data3drop['风向']=='西风','风向']=7\ndata3drop.loc[data3drop['风向']=='西北风','风向']=8\n# quality level (map the categories to numeric codes)\ndata3drop.loc[data3drop['质量等级']=='优','质量等级']=1\ndata3drop.loc[data3drop['质量等级']=='良','质量等级']=2\ndata3drop.loc[data3drop['质量等级']=='轻度污染','质量等级']=3\ndata3drop.loc[data3drop['质量等级']=='中度污染','质量等级']=4\ndata3drop.loc[data3drop['质量等级']=='重度污染','质量等级']=5\ndata3drop.loc[data3drop['质量等级']=='严重污染','质量等级']=5 # 'severe pollution' occurs on only one day, too few to classify, so merge it into 'heavy pollution'\n\n# fit using only temperature, wind force and wind direction\nX3=data3drop.iloc[:,7:11].astype(float)\ny3=data3drop.iloc[:,0].astype(float)\nX3_train,X3_test,y3_train,y3_test=model_selection.train_test_split(X3,y3,test_size=0.3,random_state=1)\nlinregTr3=LinearRegression()\nlinregTr3.fit(X3_train,y3_train)\nprint(linregTr3.intercept_,linregTr3.coef_)\ny3_train_pred=linregTr3.predict(X3_train)\ny3_test_pred=linregTr3.predict(X3_test)\ntrain_err3=metrics.mean_squared_error(y3_train,y3_train_pred)\ntest_err3=metrics.mean_squared_error(y3_test,y3_test_pred)\nprint('the mean squared error of train and test are:{:.2f},{:.2f}'.format(train_err3,test_err3))\npredict_score3=linregTr3.score(X3_test,y3_test)\nprint('The coefficient of determination is:{:.2f}'.format(predict_score3))\n\n\n# fit the final AQI function on all 10 features\nX2=data3drop.iloc[:,1:11].astype(float)\ny2=data3drop.iloc[:,0].astype(float)\nX2_train,X2_test,y2_train,y2_test=model_selection.train_test_split(X2,y2,test_size=0.3,random_state=1)\nlinregTr2=LinearRegression()\nlinregTr2.fit(X2_train,y2_train)\nprint(linregTr2.intercept_,linregTr2.coef_)\ny2_train_pred=linregTr2.predict(X2_train)\ny2_test_pred=linregTr2.predict(X2_test)\ntrain_err2=metrics.mean_squared_error(y2_train,y2_train_pred)\ntest_err2=metrics.mean_squared_error(y2_test,y2_test_pred)\nprint('the mean squared error of train and test are:{:.2f},{:.2f}'.format(train_err2,test_err2))\npredict_score2=linregTr2.score(X2_test,y2_test)\nprint('The coefficient of determination is:{:.2f}'.format(predict_score2))\n\n\n# neural network for classifying the quality level\nX4=data3drop.iloc[:,1:11].astype(float)\ny4=data3drop.iloc[:,13].astype(float)\nX4_train,X4_test,y4_train,y4_test=model_selection.train_test_split(X4,y4,test_size=0.3,random_state=1)\nmlp = MLPClassifier(solver='lbfgs',alpha=10,hidden_layer_sizes=(11,11,11,11,11,11,11),random_state=1)\nmlp.fit(X4_train,y4_train)\nprint(mlp.score(X4_train,y4_train))\ny4_predicted4 = mlp.predict(X4_test)\nprint(\"Classification report for %s\" % mlp)\nprint(metrics.classification_report(y4_test, y4_predicted4))\nprint(\"Confusion matrix:\\n\", metrics.confusion_matrix(y4_test, y4_predicted4))\n\n# decision tree for classifying the quality level\nfrom sklearn import tree\nX5=data3drop.iloc[:,1:11].astype(float)\ny5=data3drop.iloc[:,13].astype(float)\nX5_train,X5_test,y5_train,y5_test=model_selection.train_test_split(X5,y5,test_size=0.3,random_state=1)\nclf=tree.DecisionTreeClassifier()\nclf=clf.fit(X5_train,y5_train)\nprint(clf.score(X5_train,y5_train))\ny5_predicted5=clf.predict(X5_test)\nprint(metrics.classification_report(y5_test,y5_predicted5))\nprint('confusion matrix:')\nprint(metrics.confusion_matrix(y5_test,y5_predicted5))\n\n# fit with max/min temperature plus pollutants (this generalizes)\nX6=data3drop.iloc[:,1:9].astype(float)\ny6=data3drop.iloc[:,0].astype(float)\nX6_train,X6_test,y6_train,y6_test=model_selection.train_test_split(X6,y6,test_size=0.3,random_state=1)\nlinregTr6=LinearRegression()\nlinregTr6.fit(X6_train,y6_train)\nprint(linregTr6.intercept_,linregTr6.coef_)\ny6_train_pred=linregTr6.predict(X6_train)\ny6_test_pred=linregTr6.predict(X6_test)\ntrain_err6=metrics.mean_squared_error(y6_train,y6_train_pred)\ntest_err6=metrics.mean_squared_error(y6_test,y6_test_pred)\nprint('the mean squared error of train and test are:{:.2f},{:.2f}'.format(train_err6,test_err6))\npredict_score6=linregTr6.score(X6_test,y6_test)\nprint('The coefficient of determination is:{:.2f}'.format(predict_score6))\n\n# additional check: randomly chosen Shanghai months to verify accuracy\ndata4=pd.read_excel('C:\\\\Users\\\\11384\\\\Desktop\\\\python大作业\\\\2015年678月假设检验.xlsx',index_col=0)\nX7=data4.iloc[:,2:10]\ny7=data4.iloc[:,0]\npredict_y7=linregTr6.predict(X7)\nprint(metrics.mean_squared_error(predict_y7,y7))\npredict_new_Y_value=y7.astype(float)\nprint(stats.pearsonr(predict_new_Y_value, predict_y7)) # correlation\n\n# plot the curves; the peaks and troughs show clear errors\nx7 = range(1,93)\nplt.figure(figsize=(20,8),dpi=80)\nplt.plot(x7,predict_new_Y_value,label=\"actual\",color=\"#F08080\")\nplt.plot(x7,predict_y7,label=\"predicted\",color=\"#DB7093\",linestyle=\"--\")\n_xtick_labels = [format(i) for i in x7]\nplt.xticks(x7,_xtick_labels)\nplt.grid(alpha=0.4,linestyle=':')\nplt.legend(loc=\"upper left\")\nplt.show()\n# looking for the cause: pollutant levels depend strongly on the month and are somewhat periodic, so the pollutant coefficients differ clearly between winter and summer\nO3month=data2.loc[:,'O3']\nO3month.plot(title='2013/12-2018/11 monthly O3',linewidth=2,marker='o',linestyle='dashed',grid=True,use_index=True)\nplt.xlabel('Year',fontsize=12)\nplt.ylabel('O3',fontsize=12)\nplt.show()\n\nPM25month=data2.loc[:,'PM2.5']\nPM25month.plot(title='2013/12-2018/11 monthly PM2.5',linewidth=2,marker='o',linestyle='dashed',grid=True,use_index=True)\nplt.xlabel('Year',fontsize=12)\nplt.ylabel('PM2.5',fontsize=12)\nplt.show()\n\nPM10month=data2.loc[:,'PM10']\nPM10month.plot(title='2013/12-2018/11 monthly PM10',linewidth=2,marker='o',linestyle='dashed',grid=True,use_index=True)\nplt.xlabel('Year',fontsize=12)\nplt.ylabel('PM10',fontsize=12)\nplt.show()\n\nSO2month=data2.loc[:,'SO2']\nSO2month.plot(title='2013/12-2018/11 monthly SO2',linewidth=2,marker='o',linestyle='dashed',grid=True,use_index=True)\nplt.xlabel('Year',fontsize=12)\nplt.ylabel('SO2',fontsize=12)\nplt.show()\n\nCOmonth=data2.loc[:,'CO']\nCOmonth.plot(title='2013/12-2018/11 monthly CO',linewidth=2,marker='o',linestyle='dashed',grid=True,use_index=True)\nplt.xlabel('Year',fontsize=12)\nplt.ylabel('CO',fontsize=12)\nplt.show()\n\nNO2month=data2.loc[:,'NO2']\nNO2month.plot(title='2013/12-2018/11 monthly NO2',linewidth=2,marker='o',linestyle='dashed',grid=True,use_index=True)\nplt.xlabel('Year',fontsize=12)\nplt.ylabel('NO2',fontsize=12)\nplt.show()\n\n# so refit using June/July/August plus December/January/February data (the final model)\n\ndata5=pd.read_excel('C:\\\\Users\\\\11384\\\\Desktop\\\\python大作业\\\\2013-2018夏冬两季总表.xlsx',index_col=0)\nX8=data5.iloc[:,1:9].astype(float)\ny8=data5.iloc[:,0].astype(float)\nX8_train,X8_test,y8_train,y8_test=model_selection.train_test_split(X8,y8,test_size=0.3,random_state=1)\nlinregTr8=LinearRegression()\nlinregTr8.fit(X8_train,y8_train)\nprint(linregTr8.intercept_,linregTr8.coef_)\ny8_train_pred=linregTr8.predict(X8_train)\ny8_test_pred=linregTr8.predict(X8_test)\ntrain_err8=metrics.mean_squared_error(y8_train,y8_train_pred)\ntest_err8=metrics.mean_squared_error(y8_test,y8_test_pred)\nprint('the mean squared error of train and test are:{:.2f},{:.2f}'.format(train_err8,test_err8))\npredict_score8=linregTr8.score(X8_test,y8_test)\nprint('The coefficient of determination is:{:.2f}'.format(predict_score8))\njoblib.dump(linregTr8,'回归线性模型.pkl')\n\n# additional hold-out check\ndata6=pd.read_excel('C:\\\\Users\\\\11384\\\\Desktop\\\\python大作业\\\\2014年10月2016年3月2018年5月假设检验.xlsx',index_col=0)\nX9=data6.iloc[:,1:9]\ny9=data6.iloc[:,0]\npredict_y9=linregTr8.predict(X9)\nprint(metrics.mean_squared_error(predict_y9,y9))\npredict_new_Y_value2=y9.astype(float)\nprint(stats.pearsonr(predict_new_Y_value2, predict_y9)) # correlation\n\nx9 = range(1,94)\nplt.figure(figsize=(20,8),dpi=80)\nplt.plot(x9,predict_new_Y_value2,label=\"actual\",color=\"#F08080\")\nplt.plot(x9,predict_y9,label=\"predicted\",color=\"#DB7093\",linestyle=\"--\")\n_xtick_labels = [format(i) for i in x9]\nplt.xticks(x9,_xtick_labels)\nplt.grid(alpha=0.4,linestyle=':')\nplt.legend(loc=\"upper left\")\nplt.show()\n\n# Shanghai analysis complete; next, extend nationwide\n\n# extend to Suzhou: validate the final Shanghai model on 2018 Suzhou data\ndata7=pd.read_excel('C:\\\\Users\\\\11384\\\\Desktop\\\\python大作业\\\\2018年苏州空气质量指数日历史数据.xlsx',index_col=0)\ndata7drop=data7.dropna()\nX10=data7drop.iloc[:,1:9]\ny10=data7drop.iloc[:,0]\npredict_y10=linregTr8.predict(X10)\nprint(metrics.mean_squared_error(predict_y10,y10))\npredict_new_Y_value3=y10.astype(float)\nprint(stats.pearsonr(predict_new_Y_value3, predict_y10))\n\nx10 = range(1,298)\nplt.figure(figsize=(20,8),dpi=80)\nplt.plot(x10,predict_new_Y_value3,label=\"actual\",color=\"#F08080\")\nplt.plot(x10,predict_y10,label=\"predicted\",color=\"#DB7093\",linestyle=\"--\")\n_xtick_labels = [format(i) for i in x10]\nplt.xticks(x10,_xtick_labels)\nplt.grid(alpha=0.4,linestyle=':')\nplt.legend(loc=\"upper left\")\nplt.show()\n\n# extend to Beijing\ndata8=pd.read_excel('C:\\\\Users\\\\11384\\\\Desktop\\\\python大作业\\\\2018年北京空气质量指数日历史数据.xlsx',index_col=0)\ndata8drop=data8.dropna()\nX11=data8drop.iloc[:,1:9]\ny11=data8drop.iloc[:,0]\npredict_y11=linregTr8.predict(X11)\nprint(metrics.mean_squared_error(predict_y11,y11))\npredict_new_Y_value4=y11.astype(float)\nprint(stats.pearsonr(predict_new_Y_value4, predict_y11))\n\nx11 = range(1,271)\nplt.figure(figsize=(20,8),dpi=80)\nplt.plot(x11,predict_new_Y_value4,label=\"actual\",color=\"#F08080\")\nplt.plot(x11,predict_y11,label=\"predicted\",color=\"#DB7093\",linestyle=\"--\")\n_xtick_labels = [format(i) for i in x11]\nplt.xticks(x11,_xtick_labels)\nplt.grid(alpha=0.4,linestyle=':')\nplt.legend(loc=\"upper left\")\nplt.show()\n\n# this shows the linear model generalizes to some extent\n\n# now run a time-series analysis for each pollutant and for temperature\n# O3\ndata10=pd.read_excel('C:\\\\Users\\\\11384\\\\Desktop\\\\python大作业\\\\预测\\\\上海\\\\shanghai.xlsx',index_col=0)\n\nfrom statsmodels.graphics.tsaplots import plot_acf\nplot_acf(data10.loc[:,'O3_8h'])\nfrom statsmodels.stats.diagnostic import acorr_ljungbox\nprint('Ljung-Box white-noise test:', acorr_ljungbox(data10.loc[:,'O3_8h'], lags=1))\nfrom statsmodels.tsa.stattools import adfuller as ADF\nprint('ADF test:', ADF(data10.loc[:,'O3_8h'])) \nO3last2month=data10.iloc[-60:,7]\n\nD_O3 = O3last2month.diff().dropna() # first-order difference of the original data, dropping invalid values\nprint('ADF test on the differenced series:', ADF(D_O3)) # stationarity check\n\nfrom statsmodels.tsa.arima_model import ARIMA\nO3last2month= O3last2month.astype(float)\npmax = int(len(D_O3)/10) # the order generally does not exceed length/10\nqmax = int(len(D_O3)/10) # the order generally does not exceed length/10\ne_matrix = [] # evaluation (AIC) matrix\nfor p in range(pmax+1):\n    tmp = []\n    for q in range(qmax+1):\n        try: # some (p, q) orders fail to fit, so use try to skip the errors\n            tmp.append(ARIMA(O3last2month, (p,1,q)).fit().aic)\n        except:\n            tmp.append(None)\n    e_matrix.append(tmp)\ne_matrix = pd.DataFrame(e_matrix) # the minimum can be located in this table\np,q = e_matrix.stack().idxmin() # flatten with stack, then find the position of the minimum\nprint('p and q with the smallest AIC: %s, %s' %(p,q))\nmodel = ARIMA(O3last2month, (p,1,q)).fit() \nmodel.summary2() # model report\nprint(model.forecast(5)) # 5-day forecast: returns predictions, standard errors and confidence intervals\npreO3=model.forecast(1)[0]\n# PM2.5\nfrom statsmodels.graphics.tsaplots import plot_acf\nplot_acf(data10.loc[:,'PM2.5'])\nfrom statsmodels.stats.diagnostic import acorr_ljungbox\nprint('Ljung-Box white-noise test:', acorr_ljungbox(data1.loc[:,'PM2.5'], lags=1))\nfrom statsmodels.tsa.stattools import adfuller as ADF\nprint('ADF test:', ADF(data10.loc[:,'PM2.5'])) \n\nPM25last2month=data10.iloc[-60:,2]\nfrom statsmodels.tsa.arima_model import ARIMA\nPM25last2month= PM25last2month.astype(float)\npmax = int(len(PM25last2month)/10) # the order generally does not exceed length/10\nqmax = int(len(PM25last2month)/10) # the order generally does not exceed length/10\ne_matrix = [] # evaluation (AIC) matrix\nfor p in range(pmax+1):\n    tmp = []\n    for q in range(qmax+1):\n        try: # some (p, q) orders fail to fit, so use try to skip the errors\n            tmp.append(ARIMA(PM25last2month, (p,0,q)).fit().aic)\n        except:\n            tmp.append(None)\n    e_matrix.append(tmp)\ne_matrix = pd.DataFrame(e_matrix) # the minimum can be located in this table\np,q = e_matrix.stack().idxmin() # flatten with stack, then find the position of the minimum\nprint('p and q with the smallest AIC: %s, %s' %(p,q))\nmodel = ARIMA(PM25last2month, (p,0,q)).fit() \nmodel.summary2() # model report\nprint(model.forecast(5)) # 5-day forecast: returns predictions, standard errors and confidence intervals\nprePM25=model.forecast(1)[0]\n# CO\nfrom statsmodels.graphics.tsaplots import plot_acf\nplot_acf(data10.loc[:,'CO'])\nfrom statsmodels.stats.diagnostic import acorr_ljungbox\nprint('Ljung-Box white-noise test:', acorr_ljungbox(data1.loc[:,'CO'], lags=1))\nfrom statsmodels.tsa.stattools import adfuller as ADF\nprint('ADF test:', ADF(data10.loc[:,'CO'])) \n\nCOlast2month=data10.iloc[-60:,5]\nfrom statsmodels.tsa.arima_model import ARIMA\nCOlast2month= COlast2month.astype(float)\npmax = int(len(COlast2month)/10) # the order generally does not exceed length/10\nqmax = int(len(COlast2month)/10) # the order generally does not exceed length/10\ne_matrix = [] # evaluation (AIC) matrix\nfor p in range(pmax+1):\n    tmp = []\n    for q in range(qmax+1):\n        try: # some (p, q) orders fail to fit, so use try to skip the errors\n            tmp.append(ARIMA(COlast2month, (p,0,q)).fit().aic)\n        except:\n            tmp.append(None)\n    e_matrix.append(tmp)\ne_matrix = pd.DataFrame(e_matrix) # the minimum can be located in this table\np,q = e_matrix.stack().idxmin() # flatten with stack, then find the position of the minimum\nprint('p and q with the smallest AIC: %s, %s' %(p,q))\nmodel = ARIMA(COlast2month, (p,0,q)).fit() \nmodel.summary2() # model report\nprint(model.forecast(5)) # 5-day forecast: returns predictions, standard errors and confidence intervals\npreCO=model.forecast(1)[0]\n\n# PM10\nfrom statsmodels.graphics.tsaplots import plot_acf\nplot_acf(data10.loc[:,'PM10'])\nfrom statsmodels.stats.diagnostic import acorr_ljungbox\nprint('Ljung-Box white-noise test:', acorr_ljungbox(data1.loc[:,'PM10'], lags=1))\nfrom statsmodels.tsa.stattools import adfuller as ADF\nprint('ADF test:', ADF(data10.loc[:,'PM10'])) \n\nPM10last2month=data10.iloc[-60:,3]\nfrom statsmodels.tsa.arima_model import ARIMA\nPM10last2month= PM10last2month.astype(float)\npmax = int(len(PM10last2month)/10) # the order generally does not exceed length/10\nqmax = int(len(PM10last2month)/10) # the order generally does not exceed length/10\ne_matrix = [] # evaluation (AIC) matrix\nfor p in range(pmax+1):\n    tmp = []\n    for q in range(qmax+1):\n        try: # some (p, q) orders fail to fit, so use try to skip the errors\n            tmp.append(ARIMA(PM10last2month, (p,0,q)).fit().aic)\n        except:\n            tmp.append(None)\n    e_matrix.append(tmp)\ne_matrix = pd.DataFrame(e_matrix) # the minimum can be located in this table\np,q = e_matrix.stack().idxmin() # flatten with stack, then find the position of the minimum\nprint('p and q with the smallest AIC: %s, %s' %(p,q))\nmodel = ARIMA(PM10last2month, (p,0,q)).fit() \nmodel.summary2() # model report\nprint(model.forecast(5)) # 5-day forecast: returns predictions, standard errors and confidence intervals\nprePM10=model.forecast(1)[0]\n\n# NO2\nfrom statsmodels.graphics.tsaplots import plot_acf\nplot_acf(data10.loc[:,'NO2'])\nfrom statsmodels.stats.diagnostic import acorr_ljungbox\nprint('Ljung-Box white-noise test:', acorr_ljungbox(data10.loc[:,'NO2'], lags=1))\nfrom statsmodels.tsa.stattools import adfuller as ADF\nprint('ADF test:', ADF(data1.loc[:,'NO2'])) \n\nNO2last2month=data10.iloc[-60:,6]\nfrom statsmodels.tsa.arima_model import ARIMA\nNO2last2month= NO2last2month.astype(float)\npmax = int(len(NO2last2month)/10) # the order generally does not exceed length/10\nqmax = int(len(NO2last2month)/10) # the order generally does not exceed length/10\ne_matrix = [] # evaluation (AIC) matrix\nfor p in range(pmax+1):\n    tmp = []\n    for q in range(qmax+1):\n        try: # some (p, q) orders fail to fit, so use try to skip the errors\n            tmp.append(ARIMA(NO2last2month, (p,0,q)).fit().aic)\n        except:\n            tmp.append(None)\n    e_matrix.append(tmp)\ne_matrix = pd.DataFrame(e_matrix) # the minimum can be located in this table\np,q = e_matrix.stack().idxmin() # flatten with stack, then find the position of the minimum\nprint('p and q with the smallest AIC: %s, %s' %(p,q))\nmodel = ARIMA(NO2last2month, (p,0,q)).fit()\nmodel.summary2() # model report\nprint(model.forecast(5)) # 5-day forecast: returns predictions, standard errors and confidence intervals\npreNO2=model.forecast(1)[0]\n\n# SO2\nfrom statsmodels.graphics.tsaplots import plot_acf\nplot_acf(data10.loc[:,'SO2'])\nfrom statsmodels.stats.diagnostic import acorr_ljungbox\nprint('Ljung-Box white-noise test:', acorr_ljungbox(data10.loc[:,'SO2'], lags=1))\nfrom statsmodels.tsa.stattools import adfuller as ADF\nprint('ADF test:', ADF(data10.loc[:,'SO2'])) \n\nSO2last2month=data10.iloc[-60:,4]\nfrom statsmodels.tsa.arima_model import ARIMA\nSO2last2month= SO2last2month.astype(float)\npmax = int(len(SO2last2month)/10) # the order generally does not exceed length/10\nqmax = int(len(SO2last2month)/10) # the order generally does not exceed length/10\ne_matrix = [] # evaluation (AIC) matrix\nfor p in range(pmax+1):\n    tmp = []\n    for q in range(qmax+1):\n        try: # some (p, q) orders fail to fit, so use try to skip the errors\n            tmp.append(ARIMA(SO2last2month, (p,0,q)).fit().aic)\n        except:\n            tmp.append(None)\n    e_matrix.append(tmp)\ne_matrix = pd.DataFrame(e_matrix) # the minimum can be located in this table\np,q = e_matrix.stack().idxmin() # flatten with stack, then find the position of the minimum\nprint('p and q with the smallest AIC: %s, %s' %(p,q))\nmodel = ARIMA(SO2last2month, (p,0,q)).fit() \nmodel.summary2() # model report\nprint(model.forecast(5)) # 5-day forecast: returns predictions, standard errors and confidence intervals\npreSO2=model.forecast(1)[0]\n\n# minimum temperature\nfrom statsmodels.graphics.tsaplots import plot_acf\nplot_acf(data10.loc[:,u'最低气温'])\nfrom statsmodels.stats.diagnostic import acorr_ljungbox\nprint('Ljung-Box white-noise test:', acorr_ljungbox(data10.loc[:,u'最低气温'], lags=1))\nfrom statsmodels.tsa.stattools import adfuller as ADF\nprint('ADF test:', ADF(data10.loc[:,u'最低气温'])) \n\nTMINlast2month=data10.iloc[-60:,8]\nD_TMIN = TMINlast2month.diff().dropna() # first-order difference of the minimum-temperature series, dropping invalid values\nprint('ADF test on the differenced series:', ADF(D_TMIN)) # stationarity check\n\nfrom statsmodels.tsa.arima_model import ARIMA\nTMINlast2month= TMINlast2month.astype(float)\npmax = int(len(D_TMIN)/10) # the order generally does not exceed length/10\nqmax = int(len(D_TMIN)/10) # the order generally does not exceed length/10\ne_matrix = [] # evaluation (AIC) matrix\nfor p in range(pmax+1):\n    tmp = []\n    for q in range(qmax+1):\n        try: # some (p, q) orders fail to fit, so use try to skip the errors\n            tmp.append(ARIMA(TMINlast2month, (p,1,q)).fit().aic)\n        except:\n            tmp.append(None)\n    e_matrix.append(tmp)\ne_matrix = pd.DataFrame(e_matrix) # the minimum can be located in this table\np,q = e_matrix.stack().idxmin() # flatten with stack, then find the position of the minimum\nprint('p and q with the smallest AIC: %s, %s' %(p,q))\nmodel = ARIMA(TMINlast2month, (p,1,q)).fit() \nmodel.summary2() # model report\nprint(model.forecast(5)) # 5-day forecast: returns predictions, standard errors and confidence intervals\npreTMIN=model.forecast(1)[0]\n\n# maximum temperature\nfrom statsmodels.graphics.tsaplots import plot_acf\nplot_acf(data10.loc[:,u'最高气温'])\nfrom statsmodels.stats.diagnostic import acorr_ljungbox\nprint('Ljung-Box white-noise test:', acorr_ljungbox(data10.loc[:,u'最高气温'], lags=1))\nfrom statsmodels.tsa.stattools import adfuller as ADF\nprint('ADF test:', ADF(data10.loc[:,u'最高气温'])) \n\nTMAXlast2month=data10.iloc[-60:,9]\nD_TMAX = TMAXlast2month.diff().dropna() # first-order difference of the maximum-temperature series, dropping invalid values\nprint('ADF test on the differenced series:', ADF(D_TMAX)) # stationarity check\n\nfrom statsmodels.tsa.arima_model import ARIMA\nTMAXlast2month= TMAXlast2month.astype(float)\npmax = int(len(D_TMAX)/10) # the order generally does not exceed length/10\nqmax = int(len(D_TMAX)/10) # the order generally does not exceed length/10\ne_matrix = [] # evaluation (AIC) matrix\nfor p in range(pmax+1):\n    tmp = []\n    for q in range(qmax+1):\n        try: # some (p, q) orders fail to fit, so use try to skip the errors\n            tmp.append(ARIMA(TMAXlast2month, (p,2,q)).fit().aic)\n        except:\n            tmp.append(None)\n    e_matrix.append(tmp)\ne_matrix = pd.DataFrame(e_matrix) # the minimum can be located in this table\np,q = e_matrix.stack().idxmin() # flatten with stack, then find the position of the minimum\nprint('p and q with the smallest AIC: %s, %s' %(p,q))\nmodel = ARIMA(TMAXlast2month, (p,2,q)).fit() # d=1 yields no result for this series, so d=2 is used\nmodel.summary2() # model report\nprint(model.forecast(5)) # 5-day forecast: returns predictions, standard errors and confidence intervals\npreTMAX=model.forecast(1)[0]\n \n\n\npredictX=np.array([prePM25,prePM10,preSO2,preCO,preNO2,preO3,preTMIN,preTMAX]).T\nprint(predictX)\npreAQI=linregTr8.predict(predictX)\nprint(preAQI)\n","repo_name":"xiaocnj/-","sub_path":"总代码.py","file_name":"总代码.py","file_ext":"py","file_size_in_byte":24595,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"43729388577","text":"import os\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy\n\n\ndef load_images(folder_path):\n    images = []\n    for filename in os.listdir(folder_path):\n        img = cv2.imread(os.path.join(folder_path,filename))\n        if img is not None:\n            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n            # detector = cv2.CascadeClassifier(path)\n            # faces = detector.detectMultiScale(gray,1.3,5)\n            images.append(gray)\n    return 
\n\n\npath = 'C:\\\\Users\\\\Katlic\\\\Documents\\\\LCO_Bootcamp\\\\LCO_class_assign_prac\\\\PracticeFolder'\nlist_of_images = load_images(path)\nprint(\"num of human pics in the folder : \", len(list_of_images))\n","repo_name":"Dikhit/LCO_training","sub_path":"python_projects/openCV/find_human_pic_in_folder/using_cv/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"35753164501","text":"#! /usr/bin/python\nimport os\nimport subprocess\n\n\ndef get_time():\n    res = os.popen(\"time/t\")\n    for line in res:\n        print(line)\n\n\ndef write_to_shell():\n    # writes shell command\n    # os.system(\"cd\")\n    path = \"E:\\\\Coding Projects\\PythonProjects\\Multitasking\"\n    output_file = path + \"\\\\dir_output.txt\"\n    temp = open(output_file, \"w\")\n    temp.close()\n\n    exit_code = os.system(\"E:\\\\Coding Projects\\PythonProjects\\Multitasking\\dir > {}\".format(output_file))\n\n    if exit_code == 0:\n        f = open(output_file, \"r\")\n        print(f.read())\n\n\ndef std_out_example():\n    try:\n        proc = os.popen(\"dir 2>&1\")  # merge stderr into stdout\n        for line in proc:\n            print(line, \"1111\")\n    except Exception as e:\n        print(\"Failed running popen\")\n    #proc = subprocess.run(\"<>\")\n\nstd_out_example()\n#write_to_shell()\n\nget_time()\n","repo_name":"shachash1984/Python_Multitasking","sub_path":"Multitasking.py","file_name":"Multitasking.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"30046162802","text":"\"\"\"\nThis demo shows how to use the `experiment` package to log both to `Visdom` and `mlflow`.\n\"\"\"\nfrom experiment import MLflowExperiment\nfrom experiment import VisdomExperiment\nfrom experiment.visdom import create_parameters_windows, Line, Window\nimport logging\nimport mlflow\nfrom traitlets import Enum, Float, Int, Unicode\nimport time\n\ntry:\n    from tqdm import trange\nexcept ImportError:\n    trange = range\n\n\nclass Main(MLflowExperiment, VisdomExperiment):\n    #\n    # Description of the experiment. Used in the help message.\n    #\n    description = Unicode(\"Demonstration of using Visdom and MLflow logging.\")\n\n    #\n    # Parameters of experiment\n    #\n    epochs = Int(100, config=True, help=\"Number of epochs\")\n    lr = Float(0.5, config=True, help=\"Learning rate of training\").tag(parameter=True)\n    loss_type = Enum((\"mse\", \"l1\"), config=True, default_value=\"mse\", help=\"Loss type.\")\n\n    def run(self):\n        \"\"\"Running the experiment\"\"\"\n\n        logging.info(\"Starting experiment\")\n        logging.info(\"Using {} loss\".format(self.loss_type))\n\n        #\n        # Create the Visdom window and loss plot. 
The same window can be used for multiple plots.\n #\n win = Window(env=self.visdom_env, xlabel=\"epoch\", ylabel=\"Loss\", title=\"Loss\")\n loss_plot = Line(\"util\", win)\n\n loss = 100\n for i in trange(self.epochs):\n loss_plot.append(x=i, y=loss)\n mlflow.log_metric(\"loss\", loss)\n\n loss = loss * self.lr\n\n #\n # Update the properties view window.\n #\n self.visdom_params_win.update(x=i)\n\n time.sleep(.5)\n\n logging.info(\"Experiment finished\")\n\n\nif __name__ == \"__main__\":\n main = Main()\n main.initialize()\n main.start()\n","repo_name":"amitibo/experiment","sub_path":"examples/demo_visdom_experiment.py","file_name":"demo_visdom_experiment.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"36475287096","text":"# -*- coding: utf-8 -*-\n\nfrom .types import table_name, Required, item_schema, return_values, expected_schema\n\npost = {\n u'TableName': table_name,\n u'Item': item_schema,\n Required(u'Expected', {}): expected_schema, # It is optional but with a def value\n Required(u'ReturnValues', u'NONE'): return_values, # It is optional but with a def value\n}\n","repo_name":"dekked/dynamodb-mock","sub_path":"ddbmock/validators/put_item.py","file_name":"put_item.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"27399778464","text":"from django.urls import path\nfrom django.views.decorators.csrf import csrf_exempt\nfrom . import views\n\nurlpatterns = [\n path('allproject', views.allProject, name=\"allproject\"),\n path('collegelist', views.college_List, name=\"collegelist\"),\n path('upload', views.Upload, name=\"upload\"),\n path('treanding', views.treandingProject, name=\"treanding\"),\n path('aboutus', views.aboutUs, name=\"aboutus\"),\n \n]\n","repo_name":"Rushimalani13/ProjectHub","sub_path":"ProjectCommunity/projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72666326488","text":"from django.db import connection\n\nimport math\n\nclass HistoryFunction(object):\n def __init__(self, function, name, length=5, small_threshold=.01, medium_threshold=.05, large_threshold=.1):\n self.function = function\n self.length = length + 1\n self.small_threshold = small_threshold\n self.medium_threshold = medium_threshold\n self.large_threshold = large_threshold\n self.name = name\n def __call__(self, company):\n with connection.cursor() as c:\n c.execute(\"select orders.price from trading_order as orders where orders.company_id=%s and orders.open=false order by orders.created_at DESC limit %s\",[company.id, self.length])\n data = c.fetchall()\n while len(data) < self.length:\n data.append((100,))\n diffs = []\n for i in range(self.length-1): #Map diffs to [-4,4], based on direction and strength\n diff = data[i][0]-data[i+1][0]\n diff_value = 1\n diff_value += diff/data[i][0] > self.small_threshold\n diff_value += diff/data[i][0] > self.medium_threshold\n diff_value += diff/data[i][0] > self.large_threshold\n diff_value *= cmp(diff, 0)\n diffs.append(diff_value)\n return self.function(diffs, data)\n\ndef rising(diffs, data):\n return sum(diffs)/40 +.5\ndef slow_rising(diffs, data):\n res = 0\n for diff in diffs:\n res += diff-cmp(diff,0) #reduces magnitude of diffs by one\n return res/30+.5\ndef falling(diffs, data):\n return -(sum(diffs)/40) 
+.5\ndef slow_falling(diffs, data):\n    res = 0\n    for diff in diffs:\n        res += diff-cmp(diff,0) #reduces magnitude of diffs by one\n    return -(res/30)+.5\ndef now_rising(diffs, data):\n    return -diffs[0] - diffs[1] + diffs[3] + diffs[4]\ndef now_falling(diffs, data):\n    return diffs[0] + diffs[1] - diffs[3] - diffs[4]\ndef accelerating(diffs, data):\n    return (diffs[4]-diffs[0])/8+.5\ndef decelerating(diffs, data):\n    return (diffs[0]-diffs[4])/8+.5\ndef steady_growth(diffs, data):\n    res = 0\n    for diff in diffs:\n        res += abs(diff-1)\n    return 1-(res/25)\n\n\ndef cmp(a, b):\n    # shim for the Python 2 built-in cmp(), which no longer exists in Python 3;\n    # the functions above resolve it at call time, so a module-level definition suffices\n    return (a > b) - (a < b)\n# With the default thresholds, a roughly +6% day-over-day move maps to +3 and a -0.7%\n# move maps to -1; every diff lands in [-4, 4], hitting 0 exactly when the price is flat.\n\n\n_func = [\n    (rising, 4, \"rising\"),\n    (slow_rising, 4, \"slow_rising\"),\n    (falling, 1, \"falling\"),\n    (slow_falling, 1, \"slow_falling\"),\n    (now_rising, 5, \"now_rising\"),\n    (now_falling, 1, \"now_falling\"),\n    (accelerating, 3, \"accelerating\"),\n    (decelerating, 1, \"decelerating\"),\n    (steady_growth, 1, \"steady_growth\")\n]\n\nfunctions = []\nfor function in _func:\n    functions.append((HistoryFunction(function[0], function[2]+\"_normal\"),function[1]+3))\n    functions.append((HistoryFunction(function[0], function[2]+\"_large\", small_threshold = .02, medium_threshold = .1, large_threshold = .2),function[1]+1))\n    functions.append((HistoryFunction(function[0], function[2]+\"_small\", small_threshold = .005, medium_threshold = .025, large_threshold = .05),function[1]+1))\n","repo_name":"keyserbrian1/stocks","sub_path":"apps/trading/management/commands/history_functions.py","file_name":"history_functions.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"16578061880","text":"from PIL import Image, ImageDraw, ImageFont\nimport numpy as np\nimport os\nfrom matplotlib import pyplot as plt\n\nSAMPLE_RATE = 0.6\n\nclass Converter:\n    def __init__(self, font_style=None,font_size=12):\n        # set the font style and size\n        if font_style is None:\n            self.font = ImageFont.load_default()\n        else:\n            self.font = ImageFont.truetype(font_style, font_size)\n        # characters available for conversion, in ascending order of expected brightness\n        self.symbols = np.array(list(\" .,-=vM#@\"))\n        \n        self.raw_img=None\n        self.ascii_img=None\n        self.ascii_mat=None\n        \n        self.fig=None\n\n    def init_pyplot(self):\n        plt.ion()\n        fig=plt.figure(\"output\")\n        self.fig = fig\n\n    \n    def convert(self,raw_img,is_colorful=True,sample_rate=SAMPLE_RATE):\n        self.raw_image=raw_img\n        img=self.raw_image\n        \n        # aspect ratio of a letter in the current font\n        aspect_ratio = self.font.getsize(\"x\")[0] / self.font.getsize(\"x\")[1]\n        \n        # scale the image to compensate for the font aspect ratio\n        new_img_size = np.array(\n            [img.size[0] * sample_rate, img.size[1] * sample_rate * aspect_ratio]\n        ).astype(int)\n        img = self.raw_image.resize(new_img_size)\n        \n        # keep a copy for colored ASCII-art output\n        self.raw_img = np.array(img)\n\n        # convert the image to grayscale\n        img = img.convert(\"L\")\n        img=np.array(img)\n        \n        # normalize gray levels to integers within the range of the symbol list\n        img = ((img - img.min()) / (img.max() - \\\n            img.min()) * (self.symbols.size - 1)).astype(int)\n        \n        # map every pixel to a character, keeping its position\n        self.ascii_mat=self.symbols[img.astype(int)]\n        \n        letter_size = self.font.getsize(\"x\")\n        img_out_size = new_img_size * letter_size\n        bg_color = \"black\"\n\n        ascii_img=Image.new(\"RGB\", tuple(img_out_size), bg_color)\n        draw = ImageDraw.Draw(ascii_img)\n        \n        # draw all characters onto the new image\n        y = 0\n        for i, line in enumerate(self.ascii_mat):\n            for j, ch in enumerate(line):\n                if(is_colorful):\n                    color = tuple(self.raw_img[i, j]) # sample the color from the image copy\n                    draw.text((letter_size[0] * j, y), ch[0], fill=color, font=self.font)\n                else:\n                    draw.text((letter_size[0] * j, y), ch[0], font=self.font)\n            y += letter_size[1] \n        \n        self.ascii_img=ascii_img
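\n    # Note (added): ImageFont.getsize() used above was deprecated in Pillow 9.2 and\n    # removed in Pillow 10; on newer Pillow the letter size can be derived from\n    # getbbox() instead (a sketch, not tested against this class):\n    #   left, top, right, bottom = self.font.getbbox(\"x\")\n    #   letter_size = (right - left, bottom - top)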
\n    def show_console(self):\n        \"\"\"\n        Display the ASCII art in the console\n        \"\"\"\n        os.system(\"cls\")\n        if(self.ascii_mat is None):\n            print(\"No conversion result yet, call convert() first...\")\n            return\n        for line in self.ascii_mat:\n            for i in line:\n                print(i,end=\"\")\n            print()\n\n    def show_pyplot(self,interval=0.1):\n        \"\"\"\n        Display the ASCII art with pyplot\n        \"\"\"\n        if(not self.ascii_img):\n            print(\"No conversion result yet, call convert() first...\")\n            return\n        \n        if(not self.fig):\n            self.init_pyplot()\n        \n        self.fig.clf()\n        ax = self.fig.add_subplot(1, 1, 1)\n        ax.axis('off')\n        \n        ax.imshow(self.ascii_img, cmap='gray')\n        ax.plot()\n\n        plt.pause(interval)\n\n    def save(self,save_path):\n        self.ascii_img.save(save_path)\n\n\n\n\nif __name__==\"__main__\":\n    \n    converter=Converter()\n    \n    # convert() expects a PIL image, not a path\n    converter.convert(Image.open(\"./materials/images/1.jpg\"),is_colorful=True,sample_rate=0.8)\n    converter.show_pyplot()\n\n    input()","repo_name":"xiaoyu2018/AsciiPicMaker","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"}
{"seq_id":"34715249689","text":"import os.path\nimport pkg_resources\nimport bg_helper as bh\nimport fs_helper as fh\nimport settings_helper as sh\n\n\nSETTINGS = sh.get_all_settings(__name__).get(sh.APP_ENV, {})\n\n_package_repos_base_path = SETTINGS.get('package_repos_base_path')\n_kenjyco_libs_repo_names = SETTINGS.get('kenjyco_libs_repo_names')\n_dependency_repos_base_path = SETTINGS.get('dependency_repos_base_path')\nif not _package_repos_base_path or not _kenjyco_libs_repo_names or not _dependency_repos_base_path:\n    # Sync settings.ini with vimdiff\n    sh.sync_settings_file(__name__)\n    SETTINGS = sh.get_all_settings(__name__).get(sh.APP_ENV, {})\n    _package_repos_base_path = SETTINGS.get('package_repos_base_path')\n    _kenjyco_libs_repo_names = SETTINGS.get('kenjyco_libs_repo_names')\n    _dependency_repos_base_path = SETTINGS.get('dependency_repos_base_path')\n\nassert _package_repos_base_path and _kenjyco_libs_repo_names and _dependency_repos_base_path, (\n    'PACKAGE_REPOS_BASE_PATH, KENJYCO_LIBS_REPO_NAMES, and DEPENDENCY_REPOS_BASE_PATH are not set'\n)\n\n_dependency_repos_dict = {\n    # 'beautifulsoup4': 'https://code.launchpad.net/beautifulsoup', # Not a git repo\n    'boto3': 'https://github.com/boto/boto3',\n    'click': 'https://github.com/pallets/click',\n    'cryptography': 'https://github.com/pyca/cryptography',\n    'hiredis': 'https://github.com/redis/hiredis-py',\n    'ipython': 'https://github.com/ipython/ipython',\n    'jinja2': 'https://github.com/pallets/jinja',\n    'lxml': 'https://github.com/lxml/lxml',\n    'pipdeptree': 'https://github.com/naiquevin/pipdeptree',\n    'psycopg2-binary': 'https://github.com/psycopg/psycopg2',\n    'pymongo': 'https://github.com/mongodb/mongo-python-driver',\n    'pymysql': 'https://github.com/PyMySQL/PyMySQL',\n    'pytest': 'https://github.com/pytest-dev/pytest',\n    'pytest-cov': 'https://github.com/pytest-dev/pytest-cov',\n    'pytz': 'https://git.launchpad.net/pytz',\n    'redis': 'https://github.com/redis/redis-py',\n    'requests': 'https://github.com/psf/requests',\n    'sqlalchemy': 'https://github.com/sqlalchemy/sqlalchemy',\n    'ujson': 'https://github.com/ultrajson/ultrajson',\n    'urllib3': 'https://github.com/urllib3/urllib3',\n    'xmljson': 'https://github.com/sanand0/xmljson',\n}\n\n# _skip_editable_install_for_these = (\n#     'cryptography', # Requires rust compiler\n#     'hiredis', # Exact version needed for redis-helper\n#     'lxml', # RuntimeError when trying to build without Cython\n#     'redis', # Exact version needed for redis-helper\n#     'requests', # 
Not compatible with latest urllib3\n#     'pymongo', # Exact version needed for mongo-helper\n#     'pytz', # Has setup.py in src directory, not root of repo\n#     'sqlalchemy', # Latest (2.0 beta) not compatible with sql-helper\n#     'ujson', # Exact version needed for redis-helper\n#     'urllib3', # Not compatible with latest requests\n# )\n\n_package_repos_base_path = fh.abspath(_package_repos_base_path)\n_dependency_repos_base_path = fh.abspath(_dependency_repos_base_path)\n\n\ndef _get_clone_status_for_packages():\n    cloned = {}\n    uncloned = {}\n    for repo in _kenjyco_libs_repo_names:\n        repo_path = os.path.join(_package_repos_base_path, repo)\n        if os.path.isdir(repo_path):\n            cloned[repo] = repo_path\n        else:\n            uncloned[repo] = repo_path\n\n    return {\n        'cloned': cloned,\n        'uncloned': uncloned\n    }\n\n\ndef _get_clone_status_for_dependencies():\n    cloned = {}\n    uncloned = {}\n    for repo in _dependency_repos_dict:\n        repo_path = os.path.join(_dependency_repos_base_path, repo)\n        if os.path.isdir(repo_path):\n            cloned[repo] = repo_path\n        else:\n            uncloned[repo] = repo_path\n\n    return {\n        'cloned': cloned,\n        'uncloned': uncloned\n    }\n\n\ndef _clone_packages(show=True):\n    \"\"\"Clone package repos locally\n\n    - show: if True, show the `git` command before executing\n    \"\"\"\n    clone_status = _get_clone_status_for_packages()\n    for name in clone_status['uncloned']:\n        url = 'https://github.com/kenjyco/{}'.format(name)\n        bh.tools.git_clone(\n            url,\n            path=_package_repos_base_path,\n            name=name,\n            show=show\n        )\n\n\ndef _clone_dependencies(show=True):\n    \"\"\"Clone dependency repos locally\n\n    - show: if True, show the `git` command before executing\n    \"\"\"\n    clone_status = _get_clone_status_for_dependencies()\n    for name in clone_status['uncloned']:\n        url = _dependency_repos_dict[name]\n        bh.tools.git_clone(\n            url,\n            path=_dependency_repos_base_path,\n            name=name,\n            show=show\n        )\n\n\ndef clone_all_missing(show=True):\n    \"\"\"Clone package and dependency repos locally\n\n    - show: if True, show the `git` command before executing\n    \"\"\"\n    _clone_packages(show=show)\n    _clone_dependencies(show=show)\n\n\ndef install_packages_in_editable_mode(show=True):\n    \"\"\"Install all kenjyco packages that are cloned locally in editable mode\n\n    - show: if True, show the `pip` command before executing\n    \"\"\"\n    cloned_locally = _get_clone_status_for_packages()['cloned']\n    # cloned_locally.update(_get_clone_status_for_dependencies()['cloned'])\n    installed_packages = [p.key for p in pkg_resources.working_set]\n    # editable_install_ok = (set(cloned_locally.keys()) & set(installed_packages)) - set(_skip_editable_install_for_these)\n    editable_install_ok = set(cloned_locally.keys()) & set(installed_packages)\n    paths = [cloned_locally[pkg] for pkg in editable_install_ok]\n    return bh.tools.pip_install_editable(paths, show=show)\n","repo_name":"kenjyco/libs","sub_path":"kenjyco_libs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"7375496472","text":"import sys\nimport math\nfrom functools import lru_cache\n\n# -------------------------function-------------------------\n\n@lru_cache()\ndef get_actionables(player_id, black_board, white_board):\n    \"\"\"\n    Return a bitboard of the legal moves for player_id (1 = black, 0 = white).\n\n    Procedure: for each of the 8 directions (left, right, up, down and the four\n    diagonals), extend runs of opponent stones adjacent to the player's stones,\n    then mark the empty square just beyond each run as playable.\n    \"\"\"
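    # Board layout note (added): bit 63 (mask 0x8000000000000000) is square a1 and\n    # bit 0 is h8, matching the mask-to-\"a1\" conversion in the main loop below.\n    # The masks keep shifted bits from wrapping across the board edges:\n    #   0x7e7e7e7e7e7e7e7e clears files a and h, 0x00ffffffffffff00 clears ranks 1 and 8,\n    #   0x007e7e7e7e7e7e00 clears the whole border (used for the diagonal shifts).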
    mask_lr = 0x7e7e7e7e7e7e7e7e\n    mask_ud = 0x00ffffffffffff00\n    mask_lu_ru_ld_rd = 0x007e7e7e7e7e7e00\n\n    # empty squares\n    blank_board = ~(black_board | white_board)\n\n    # the mover's stones are \"own\", the other side's are \"opp\"\n    own, opp = (black_board, white_board) if player_id == 1 else (white_board, black_board)\n\n    opp_lr = opp & mask_lr\n    opp_ud = opp & mask_ud\n    opp_diag = opp & mask_lu_ru_ld_rd\n\n    # seed: opponent stones directly adjacent to one of the player's stones, per direction\n    l = (own << 1) & opp_lr\n    r = (own >> 1) & opp_lr\n    u = (own << 8) & opp_ud\n    d = (own >> 8) & opp_ud\n    lu = (own << 9) & opp_diag\n    ru = (own << 7) & opp_diag\n    ld = (own >> 7) & opp_diag\n    rd = (own >> 9) & opp_diag\n\n    # extend every run across up to 5 further opponent stones (the board is 8 squares wide)\n    for _ in range(5):\n        l |= (l << 1) & opp_lr\n        r |= (r >> 1) & opp_lr\n        u |= (u << 8) & opp_ud\n        d |= (d >> 8) & opp_ud\n        lu |= (lu << 9) & opp_diag\n        ru |= (ru << 7) & opp_diag\n        ld |= (ld >> 7) & opp_diag\n        rd |= (rd >> 9) & opp_diag\n\n    # a move is legal where the empty square one step beyond a run exists; combine all 8 directions\n    legal = ((l << 1) | (r >> 1) | (u << 8) | (d >> 8) |\n             (lu << 9) | (ru << 7) | (ld >> 7) | (rd >> 9)) & blank_board\n\n    return legal
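\n# Quick sanity check (added), assuming the standard opening position in this layout\n# (black on e4/d5, white on d4/e5): black to move has exactly four legal squares.\n#   assert bin(get_actionables(1, 0x0000000810000000, 0x0000001008000000)).count(\"1\") == 4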
\n@lru_cache()\ndef set_board(action, player_id, black_board, white_board):\n    \"\"\"\n    Apply a move: place the stone given by `action` for player_id and flip the\n    captured runs; returns the updated (black_board, white_board).\n    \"\"\"\n    mask_lr = 0x7e7e7e7e7e7e7e7e\n    mask_ud = 0x00ffffffffffff00\n    mask_lu_ru_ld_rd = 0x007e7e7e7e7e7e00\n\n    reverse = 0x0000000000000000\n\n    # the mover's stones are \"own\", the other side's are \"opp\"\n    own, opp = (black_board, white_board) if player_id == 1 else (white_board, black_board)\n\n    mask_left = mask_lr & opp\n    mask_right = mask_lr & opp\n    mask_up = mask_ud & opp\n    mask_down = mask_ud & opp\n    mask_left_up = mask_lu_ru_ld_rd & opp\n    mask_right_up = mask_lu_ru_ld_rd & opp\n    mask_left_down = mask_lu_ru_ld_rd & opp\n    mask_right_down = mask_lu_ru_ld_rd & opp\n\n    # seed: opponent stones adjacent to the placed stone, one bitboard per direction\n    l_rev = (action << 1) & mask_left\n    r_rev = (action >> 1) & mask_right\n    u_rev = (action << 8) & mask_up\n    d_rev = (action >> 8) & mask_down\n    lu_rev = (action << 7) & mask_left_up\n    ru_rev = (action << 9) & mask_right_up\n    ld_rev = (action >> 9) & mask_left_down\n    rd_rev = (action >> 7) & mask_right_down\n\n    for i in range(5):\n        l_rev |= (l_rev << 1) & mask_left\n        r_rev |= (r_rev >> 1) & mask_right\n        u_rev |= (u_rev << 8) & mask_up\n        d_rev |= (d_rev >> 8) & mask_down\n        lu_rev |= (lu_rev << 7) & mask_left_up\n        ru_rev |= (ru_rev << 9) & mask_right_up\n        ld_rev |= (ld_rev >> 9) & mask_left_down\n        rd_rev |= (rd_rev >> 7) & mask_right_down\n\n    # a run is flipped only when it is capped by one of the player's own stones\n    if (l_rev << 1) & own != 0:\n        reverse |= l_rev\n    if (r_rev >> 1) & own != 0:\n        reverse |= r_rev\n    if (u_rev << 8) & own != 0:\n        reverse |= u_rev\n    if (d_rev >> 8) & own != 0:\n        reverse |= d_rev\n    if (lu_rev << 7) & own != 0:\n        reverse |= lu_rev\n    if (ru_rev << 9) & own != 0:\n        reverse |= ru_rev\n    if (ld_rev >> 9) & own != 0:\n        reverse |= ld_rev\n    if (rd_rev >> 7) & own != 0:\n        reverse |= rd_rev\n\n    own |= (action | reverse)\n    opp ^= reverse\n\n    if player_id == 1:\n        return own, opp\n    return opp, own
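\n# Note (added): set_board does not validate legality; it assumes `action` is a one-hot\n# bitboard over an empty square taken from get_actionables(). An illegal action would\n# simply place a stone and flip nothing.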
\nclass MiniMaxV6V3Player():\n    \"\"\"\n    Uses game class 3.\n    negamax (depth 4)\n    \"\"\"\n    EVALUATE_MASK_NOT25 = 0x42c300000000c342\n    EVALUATE_MASK_1 = 0x0000182424180000\n    EVALUATE_MASK_2 = 0x003c425a5a423c00\n    EVALUATE_MASK_5 = 0x1800248181240018\n    EVALUATE_MASK_10 = 0x2400810000810024\n    EVALUATE_MASK_100 = 0x8100000000000081\n\n    def __init__(self, player_id=\"\"):\n        # \"1\": first player (black), \"0\": second player (white)\n        if player_id != \"\":\n            self.player_id = player_id\n\n    def action(self, black_board, white_board, actionables):\n        \"\"\"\n        Choose a move and apply it.\n        \"\"\"\n\n        # actionables = game.get_actionables(self.player_id)\n        # if actionables == 0:\n        #     raise Exception(\"No legal action is available\")\n\n        action = self._choice(black_board, white_board, actionables)\n\n        # update the board with the chosen move\n        black_board, white_board = set_board(action, self.player_id, black_board, white_board)\n\n        return black_board, white_board\n    \n    @lru_cache()\n    def get_actionables_list(self, actionables):\n        actionables_list = []\n        mask = 0x8000000000000000\n        for i in range(64):\n            if mask & actionables != 0:\n                actionables_list.append(mask)\n            mask = mask >> 1\n        return actionables_list\n    \n    def _choice(self, black_board, white_board, actionables):\n        \"\"\"\n        Select the best move.\n        \"\"\"\n        # pick the move with the highest value\n        alpha = float(\"-inf\")  # negative infinity\n        beta = float(\"inf\")\n\n        max_action = None\n\n        actionables_list = self.get_actionables_list(actionables)\n\n        search_depth = 0  # search depth\n\n        actionables_list.reverse()\n        for action in actionables_list:\n            next_black_board, next_white_board = set_board(action, self.player_id, black_board, white_board)\n\n            next_action_player_id = 1 - self.player_id\n            \n            value = - self.nega_ab(next_action_player_id, next_black_board, next_white_board, search_depth+1, False, -beta, -alpha)\n\n            if value > alpha:\n                alpha = value\n                max_action = action\n        \n        return max_action\n\n    def nega_ab(self, action_player_id, black_board, white_board, depth, is_pass, alpha, beta):\n        \"\"\"\n        Negamax search with alpha-beta pruning.\n        \"\"\"\n        if depth == 4:\n            return self._evaluate(action_player_id, black_board, white_board)\n\n        max_value = float(\"-inf\")  # negative infinity\n        \n        actionables = get_actionables(action_player_id, black_board, white_board)\n        actionables_list = self.get_actionables_list(actionables)\n        for action in actionables_list:\n            next_black_board, next_white_board = set_board(action, action_player_id, black_board, white_board)\n\n            next_action_player_id = 1 - action_player_id\n\n            value = -self.nega_ab(next_action_player_id, next_black_board, next_white_board, depth+1, False, -beta, -alpha)\n\n            # beta cutoff\n            if value >= beta:\n                return value\n            \n            alpha = max(alpha, value)\n            max_value = max(max_value, value)\n        \n        # pass (max_value is still -inf because there was no legal move)\n        if max_value == float(\"-inf\"):\n            # two consecutive passes end the game\n            if (is_pass):\n                return self._evaluate(action_player_id, black_board, white_board)\n            \n            next_action_player_id = 1 - action_player_id\n            return - self.nega_ab(next_action_player_id, black_board, white_board, depth, True, -beta, -alpha)\n        \n        return max_value
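\n    # Note (added): bin(x).count(\"1\") in _evaluate below is a portable popcount;\n    # on Python 3.10+ the built-in int.bit_count() does the same thing faster.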
\n    @lru_cache()\n    def _evaluate(self, action_player_id, black_board, white_board):\n        \"\"\"\n        Evaluate a leaf (terminal) node.\n\n        Returns +10000 or -10000 when the game has ended.\n        Evaluation terms:\n        + own corners * 100\n        + opponent corners * -100\n        + own edge squares * 10\n        + opponent edge squares * -10\n        + own stones * 1\n        + opponent stones * -1\n        + -100 if any of b2, b7, g2 or g7 holds an own stone\n        + -100 if any of b2, b7, g2 or g7 holds an opponent stone\n        \"\"\"\n        black_count = bin(black_board).count(\"1\")\n        white_count = bin(white_board).count(\"1\")\n        black_corner_count = 0\n        white_corner_count = 0\n        black_edge_count = 0\n        white_edge_count = 0\n        result = 0\n\n        # count corner stones\n        mask_corner = 0x8100000000000081\n        black_corner_count = bin(black_board & mask_corner).count(\"1\")\n        white_corner_count = bin(white_board & mask_corner).count(\"1\")\n        \n        # count edge stones\n        mask_edge = 0x7e8181818181817e\n        black_edge_count = bin(black_board & mask_edge).count(\"1\")\n        white_edge_count = bin(white_board & mask_edge).count(\"1\")\n\n        # combine the evaluation terms\n        if action_player_id == 1:\n            result += black_count * 1\n            result += white_count * -1\n            result += black_corner_count * 100\n            result += white_corner_count * -100\n            result += black_edge_count * 10\n            result += white_edge_count * -10\n        elif action_player_id == 0:\n            result += black_count * -1\n            result += white_count * 1\n            result += black_corner_count * -100\n            result += white_corner_count * 100\n            result += black_edge_count * -10\n            result += white_edge_count * 10\n        \n        return result \n    \n# -------------------------/function-------------------------\n\n_id = int(input()) # id of your player.\nboard_size = int(input())\nplayer = MiniMaxV6V3Player(_id)\n\n# game loop\nwhile True:\n    state = \"\"\n    for i in range(board_size):\n        line = input() # rows from top to bottom (viewer perspective).\n        state+=line\n    \n    action_count = int(input()) # number of legal actions for this turn.\n\n    for i in range(action_count):\n        action = input() # the action\n\n    # convert the board string into black/white bitboards\n    black_str = state.replace(\".\", \"0\")\n    white_str = state.replace(\"0\", \"2\")\n    white_str = white_str.replace(\"1\", \"0\").replace(\".\",\"0\").replace(\"2\",\"1\")\n\n    black_board = int(black_str, 2)\n    white_board = int(white_str, 2)\n\n    actionables = get_actionables(_id, black_board, white_board)\n\n    action = player._choice(black_board, white_board, actionables)\n    print(\"action: \", action, file=sys.stderr, flush=True)\n\n    # convert the action from a 64-bit one-hot mask to coordinates such as \"a8\"\n    mask = 0x8000000000000000\n    choice_action = \"\"\n    for i in range(64):\n        if mask & action != 0:\n            choice_action = chr(97 + (i % 8)) + str((i // 8) + 1)\n            break\n        mask = mask >> 1\n\n    print(\"choice_action: \", choice_action, file=sys.stderr, flush=True)\n    # a-h1-8\n    print(choice_action)","repo_name":"etomaro/othello","sub_path":"FORGAME/0822.py","file_name":"0822.py","file_ext":"py","file_size_in_byte":20250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"11604590730","text":"import os\nimport pickle\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set_theme(style = \"darkgrid\")\nplt.rcParams[\"text.usetex\"] = True\n\nparser = argparse.ArgumentParser(description = \"Plot Values using Seaborn\")\nparser.add_argument(\"input\", help = \"Input File to Plot\")\nargs = parser.parse_args()\n\nheader = os.path.basename(args.input).split(\"_\")\nwith open(args.input, \"rb\") as datafile:\n    raw_data = pickle.load(datafile)\n\ninverted_ararys = [\n    [\n        [\n            [\n                raw_data[\"y\"][k][l][0][x][1],\n                raw_data[\"y\"][k][l][0][x][0],\n                raw_data[\"y\"][k][l][1][x][1],\n                raw_data[\"y\"][k][l][1][x][0],\n                1.0\n            ] for x in range(len(raw_data[\"x\"]))\n        ] for l in range(4)\n    ] for k in range(3)\n]\n\nvalues = [\n    [\n        np.array(inverted_ararys[k][l]) for l in range(4) \n    ] for k in range(3)\n]\n\ndata = [\n    [\n        pd.DataFrame(values[k][l], raw_data[\"x\"], columns = [\"AMP - Average\", \"AMP - Worst\", \"NAMP - Average\", \"NAMP - Worst\", \"OPT\"]) for l in range(4)\n    ] for k in range(3)\n]\n\nfig, ax = plt.subplots(3, 4, sharex = True, sharey = True)\n\nfor k in range(3):\n    for l in range(4):\n        plot = sns.lineplot(ax = ax[k, l], data = data[k][l], palette = \"tab10\", linewidth = 2.5)\n        if k != 2 or l != 
3:\n            plot.get_legend().remove()\n        else:\n            plt.setp(plot.get_legend().get_texts(), fontsize = 17)\n\nax[0][0].set_title(r'$\\ell = \\frac{1}{4}k$', fontsize = 17)\nax[0][1].set_title(r'$\\ell = \\frac{2}{4}k$', fontsize = 17)\nax[0][2].set_title(r'$\\ell = \\frac{3}{4}k$', fontsize = 17)\nax[0][3].set_title(r'$\\ell = \\frac{4}{4}k$', fontsize = 17)\n\nax[0][0].set_ylabel(r'$k = \\frac{1}{4}n_A$', fontsize = 17)\nax[1][0].set_ylabel(r'$k = \\frac{2}{4}n_A$', fontsize = 17)\nax[2][0].set_ylabel(r'$k = \\frac{3}{4}n_A$', fontsize = 17)\n\ndef goal_function(fun: str) -> str:\n    if \"MAX\" in fun:\n        return r'$f_{max}$'\n    elif \"SUM\" in fun:\n        return r'$f_{sum}$'\n    else:\n        return r'$f_{cov}$'\n\nif header[2][0] == \"P\":\n    fig.suptitle(goal_function(header[1]) + r' : $n_A = 16 \\cdot z,\\, n_B = 400 \\cdot z,\\, |\\mathcal{V}| = 10$', fontsize = 23, y = 0.95)\nelse:\n    fig.suptitle(goal_function(header[1]) + r' : $n_A = n_B = |\\mathcal{V}| = 16 \\cdot z$', fontsize = 23, y = 0.95)\n\nfig.text(0.5, 0.04, r'$z$', ha = \"center\", fontsize = 20)\nfig.text(0.04, 0.5, r'$val_{ALG} / val_{OPT}$', va = \"center\", rotation = \"vertical\", fontsize = 20)\n\nplt.show()","repo_name":"lukasgeis/BipartiteRegulatorProbing","sub_path":"scripts/parametersPlot.py","file_name":"parametersPlot.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"}
{"seq_id":"39187936813","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.tree import DecisionTreeRegressor\n\nif __name__ == \"__main__\":\n    N = 300\n    x = np.random.rand(N) * 8 - 4  # [-4,4)\n    x.sort()\n    # y1 = np.sin(x) + 3 + np.random.randn(N) * 0.1\n    # y2 = np.cos(0.3 * x) + np.random.randn(N) * 0.01\n    y1 = np.sin(x) + np.random.randn(N) * 0.05\n    y2 = np.cos(x) + np.random.randn(N) * 0.1\n    y = np.vstack((y1, y2)).T\n    x = x.reshape(-1, 1)  # after reshaping there are N samples, each 1-dimensional\n\n    deep = 3\n    reg = DecisionTreeRegressor(criterion='mse', max_depth=deep)\n    dt = reg.fit(x, y)\n\n    x_test = np.linspace(-4, 4, num=1000).reshape(-1, 1)\n    print('x_test : \\n', x_test)\n    y_hat = dt.predict(x_test)\n    print('y_hat : \\n', y_hat)\n\n    plt.scatter(y[:, 0], y[:, 1], c='r', s=40, label='Actual')\n    plt.scatter(y_hat[:, 0], y_hat[:, 1], c='g', marker='s', s=100, label='Depth=%d' % deep, alpha=1)\n    plt.legend(loc='upper left')\n    plt.xlabel('y1')\n    plt.ylabel('y2')\n    plt.grid()\n    plt.show()\n","repo_name":"littleheap/MachineLearning-Algorithms","sub_path":"4.Decision Tree & Random Forest/4.4 MultiOutput_DTR.py","file_name":"4.4 MultiOutput_DTR.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"31"}
{"seq_id":"38756135323","text":"from sqlalchemy import create_engine, MetaData\nfrom sqlalchemy.orm import Session\n\nfrom application.providers.orm.models import Base\n\nclass Singleton(object):\n    _instance = None\n    def __new__(class_, *args, **kwargs):\n        if not isinstance(class_._instance, class_):\n            print(\"Database initialized ...\")\n            class_._instance = object.__new__(class_, *args, **kwargs)\n        return class_._instance\n\n\nclass SqlOrm(Singleton):\n    def __init__(self):\n        self.engine = create_engine(\"sqlite:///test.db\")\n        self.session = Session(self.engine)\n        Base.metadata.create_all(self.engine)\n    \n    \n    def get_session(self) -> Session:\n        return 
self.session","repo_name":"BaturayArslan/TO-DO","sub_path":"application/providers/orm/SqlOrm.py","file_name":"SqlOrm.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22741917035","text":"from django.conf import settings\nfrom django_mako_plus.controller import view_function\nfrom django_mako_plus.controller.router import get_renderer\nfrom homepage import models as hmod\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.http import HttpRequest\nfrom django import forms\nimport requests\nfrom django.core import mail\nfrom django.core.mail import send_mail\n\ntemplater = get_renderer('rental')\n\n@view_function\ndef process_request(request):\n# params = prepare_params(request)\n \n \n params = {}\n try:\n rental_items = hmod.Rentals.objects.all()\n except hmod.Rentals.DoesNotExist:\n print('Database contains no Products')\n\n params['rental_items'] = rental_items\n return templater.render_to_response(request,'index.html',params)\n\n# @view_function\n# def Overdue Rentals(request):\n# import datetime\n# now = datetime.datetime.now()\n# thirty = now - datetime.timedelta(days=30)\n# sixty= ...\n# qry = hmod.Rental.objects.fitler(due_date__range =[sixty, thiryt], return_date=None)\n@view_function\ndef checkout(request):\n# params = prepare_params(request)\n \n params = {}\n \n form = checkoutform()\n\n if request.method == 'POST':\n form = checkoutform(request.POST)\n if form.is_valid():\n API_URL = 'http://dithers.cs.byu.edu/iscore/api/v1/charges'\n API_KEY = 'dcda4e87c67ada2cd111032aaf259bae'\n amount = form.cleaned_data['amount']\n credit_card_type = form.cleaned_data['credit_card_type']\n credit_card_number= form.cleaned_data['credit_card_number']\n cc_exp_month = form.cleaned_data['cc_exp_month']\n cc_exp_year =form.cleaned_data['cc_exp_year']\n cvc = form.cleaned_data['cvc']\n name = form.cleaned_data['name']\n description = form.cleaned_data['description']\n r = requests.post(API_URL, data={\n 'apiKey': API_KEY,\n 'currency': 'usd',\n 'amount': amount,\n 'type': 'visa',\n 'number': '4732817300654',\n 'exp_month': '10',\n 'exp_year' : '15',\n 'cvc': '411',\n 'name': 'Cosmo Limesandal',\n 'description': description,\n })\n\n #just for debugging, print the response text\n print(r.text)\n\n #parse the response to a dictionary\n resp = r.json() \n if 'error' in resp:\n print(\"ERROR: \", resp['error'])\n\n else:\n print(resp.keys())\n print(resp['ID'])\n connection = mail.get_connection()\n connection.open()\n email = mail.EmailMessage('Colonial Heritage Foundation', 'Thank you for your Business!!!', 'joshuamensink@gmail.com',\n ['joshua@mensink.name'], connection=connection)\n email.send()\n\n return HttpResponseRedirect('/shopping/receipt/', params)\n \n params['form'] = form\n return templater.render_to_response(request, 'checkout.html' ,params)\n\nclass checkoutform(forms.Form):\n name = forms.CharField(required=True, min_length=1, max_length=100)\n address = forms.CharField(required=True, min_length=1, max_length=100)\n Street = forms.CharField(required=True, min_length=1, max_length=100)\n city = forms.CharField(required=True, min_length=1, max_length=100)\n state= forms.CharField(required=True, min_length=1, max_length=100)\n zip_code = forms.CharField(required=True, min_length=1, max_length=100)\n amount = forms.DecimalField(max_digits=10, decimal_places=2)\n credit_card_type = forms.CharField(required=True, min_length = 1, max_length = 100)\n 
credit_card_number = forms.IntegerField(required=True)\n cc_exp_month = forms.CharField(required=True, min_length = 1, max_length = 100)\n cc_exp_year = forms.CharField(required=True, min_length = 1, max_length = 100)\n cvc = forms.CharField(required=True, min_length = 1, max_length = 100)\n description=forms.CharField(required=True, min_length = 1, max_length = 100)\n\n\n\n@view_function\ndef payment(request):\n\n\n#send the request with the data\n API_URL = 'http://dithers.cs.byu.edu/iscore/api/v1/charges'\n API_KEY = 'dcda4e87c67ada2cd111032aaf259bae'\n\n r = requests.post(API_URL, data={\n 'apiKey': API_KEY,\n 'currency': 'usd',\n 'amount': '5.99',\n 'type': 'Visa',\n 'number': '4732817300654',\n 'exp_month': '10',\n 'exp_year' : '15',\n 'cvc': '411',\n 'name': 'Cosmo Limesandal',\n 'description': 'Charge for cosmo@is411.byu.edu',\n })\n\n #just for debugging, print the response text\n print(r.text)\n\n #parse the response to a dictionary\n resp = r.json()\n if 'error' in resp:\n print(\"ERROR: \", resp['error'])\n\n else:\n print(resp.keys())\n print(resp['ID'])\n# @view_function\n# def additem(request):\n\n# params = {}\n\n# #add to the shooping cart\n# item = hmod.ProductSpecification.objects.get(id=request.urlparams[0])\n# #make sure that we have a shopping cart in the session \n# if 'shopping_cart' not in request.session:\n# request.session['shopping_cart'] = {}\n# #add the item to the shopping cart\n# if item.id in request.session['shopping_cart']:\n# request.session['shopping_cart'][item.id] += 1\n# else:\n# request.session['shopping_cart'][item.id] = 1\n\n# return templater.render_to_response(request,'index.html',params)\n\n@view_function\ndef search(request):\n params = {}\n\n products = hmod.ProductSpecification.objects.filter(name__icontains=request.urlparams[0])\n \n productCount = hmod.ProductSpecification.objects.filter(name__icontains=request.urlparams[0]).count()\n \n params['products'] = products\n params['productCount'] = productCount\n \n return templater.render_to_response(request, 'index.search.html', params)\n\n@view_function\ndef returnrental(request):\n params = {}\n if 'rental_cart' not in request.session:\n request.session['rental_cart'] = {}\n print('>>>>>>>>>>>>>>>>>>>>>3')\n print('>>>>>>>>>>>>>>>>>>>>>4')\n product_list = []\n quantity_list = []\n for k,v in request.session['rental_cart'].items():\n product_object = hmod.Rentals.objects.get(id=k)\n quantity = int(v)\n quantity_list.append(quantity)\n product_list.append(product_object)\n print('>>>>>>>>>>>>>>>>>>>>>5')\n params['items'] = product_list\n params['qty'] = quantity_list\n \n return templater.render_to_response(request, 'returnrental.html', params)\n@view_function\ndef batchemail(request):\n# params = prepare_params(request)\n\n params = {}\n try:\n print('>>>>>>>>>>>> 1')\n all_rentals = hmod.Rentals.objects.all().order_by('days_late')\n except hmod.Rentals.DoesNotExist:\n print('Database contains no rentals')\n #return HttpResponseRedirect('homepage/database_err')\n print('>>>>>>>>>>>2')\n params['all_rentals'] = all_rentals\n print('>>>>>>>>>>>>>>>>>3')\n\n emailbody = templater.render(request, 'batchemail.html', params)\n\n connection = mail.get_connection()\n connection.open()\n # email = mail.EmailMessage('Colonial Heritage Foundation',emailbody, 'joshuamensink@gmail.com',\n # ['joshua@mensink.name'],html_message=emailbody, connection=connection)\n send_mail('Colonial Heritage Foundation',emailbody,'joshuamensink@gmail.com', ['joshua@mensink.name'], html_message=emailbody)\n\n return 
HttpResponseRedirect('/homepage/index/', params)","repo_name":"mensinkj/PythonPartialProject","sub_path":"rental/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":7598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12721099440","text":"import pygame as py\nimport chess\n\n\n# some of the following code is taken from the chess minimax project\n\nclass chessGame:\n def __init__(self, id):\n self.moveCount = 0\n self.imagePath = \"chessImages/\"+str(id)+\"board.png\"\n self.board = chess.Board()\n self.WIDTH = 600\n self.HEIGHT = 600\n self.imageNames = ['b', 'k', 'n', 'p', 'q', 'r', 'wB', 'wK', 'wN', 'wP', 'wQ', 'wR']\n self.white, self.black = (252, 204, 116), (87, 58, 46)\n self.SQDI = self.WIDTH // 8\n self.screen = py.display.set_mode((self.WIDTH, self.HEIGHT))\n\n # this is very inefficient, however more efficient ways seem to break it\n b = py.transform.scale(py.image.load(\"chessImages/b.png\"), (self.SQDI, self.SQDI))\n k = py.transform.scale(py.image.load(\"chessImages/k.png\"), (self.SQDI, self.SQDI))\n n = py.transform.scale(py.image.load(\"chessImages/n.png\"), (self.SQDI, self.SQDI))\n p = py.transform.scale(py.image.load(\"chessImages/p.png\"), (self.SQDI, self.SQDI))\n q = py.transform.scale(py.image.load(\"chessImages/q.png\"), (self.SQDI, self.SQDI))\n r = py.transform.scale(py.image.load(\"chessImages/r.png\"), (self.SQDI, self.SQDI))\n wB = py.transform.scale(py.image.load(\"chessImages/wB.png\"), (self.SQDI, self.SQDI))\n wK = py.transform.scale(py.image.load(\"chessImages/wK.png\"), (self.SQDI, self.SQDI))\n wN = py.transform.scale(py.image.load(\"chessImages/wN.png\"), (self.SQDI, self.SQDI))\n wP = py.transform.scale(py.image.load(\"chessImages/wP.png\"), (self.SQDI, self.SQDI))\n wQ = py.transform.scale(py.image.load(\"chessImages/wQ.png\"), (self.SQDI, self.SQDI))\n wR = py.transform.scale(py.image.load(\"chessImages/wR.png\"), (self.SQDI, self.SQDI))\n\n self.IMAGES = {\n \"b\": b,\n \"k\": k,\n \"n\": n,\n \"p\": p,\n \"r\": r,\n \"q\": q,\n \"B\": wB,\n \"K\": wK,\n \"N\": wN,\n \"P\": wP,\n \"Q\": wQ,\n \"R\": wR\n }\n\n def updateBoard(self):\n count = 0\n self.screen.fill(self.white)\n for n in range(8):\n for j in range(8):\n if count % 2 == 0:\n py.draw.rect(self.screen, self.white, [self.SQDI * j, self.SQDI * n, self.SQDI, self.SQDI])\n count += 1\n\n else:\n py.draw.rect(self.screen, self.black, [self.SQDI * j, self.SQDI * n, self.SQDI, self.SQDI])\n count += 1\n count += 1\n\n for m in range(8):\n for j in range(1, 9):\n if self.board.piece_at(int(j + m * 8 - 1)) is not None:\n self.screen.blit(self.IMAGES[str(self.board.piece_at((j + m * 8) - 1))], (\n self.SQDI * j - (self.WIDTH / 8), ((self.WIDTH / 8 * 7) - self.SQDI * m)))\n # uses m and j values to work out which piece it has, and where it should go\n\n py.image.save(self.screen, self.imagePath)\n\n def legalMoves(self):\n lmoves = []\n for moves in self.board.legal_moves:\n lmoves.append(str(moves))\n return lmoves\n\n def makeMove(self, playerMove):\n self.moveCount += 1\n self.board.push_uci(playerMove)\n\n def resetBoard(self):\n self.board = chess.Board()\n self.updateBoard()\n","repo_name":"MatthewJC3/SneakySnake-bot","sub_path":"chessGame.py","file_name":"chessGame.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71421305689","text":"import os\nimport sys\nimport numpy as np\n\n# This program subsets 
data lists into training, testing and validation \n# and normalizes the aa class counts for both the testing and validation datasets\n\n# To run this script, you need:\n# 1) a \"pdb_all.list\" file with 4-chracter names of all proteins in \"/boxes\" folder\n# 2) \"/boxes\" folder with generated boxes and centers from prebox_maker.py\n# 3) empty \"/testing\" and \"/validation\" folders\n\nN_TEST_VAL = 1\n\ndef move_boxes(box_path, new_path, pdb_list):\n \"\"\" moves the first \"N_TEST_VAL\" number of boxes from the training folder to a new folder \"\"\"\n\n global N_TEST_VAL\n \n # moves N_TEST number of boxes to a new path\n moved_files = []\n for i, line in enumerate(pdb_list):\n if i >= N_TEST_VAL:\n break\n pdb_id = line[0:4]\n \n os.rename(box_path + \"boxes_\" + pdb_id + \".npy\", new_path + \"boxes_\" + pdb_id + \".npy\")\n os.rename(box_path + \"centers_\" + pdb_id + \".npy\", new_path + \"centers_\" + pdb_id + \".npy\")\n \n moved_files.append(pdb_id)\n\n del pdb_list[0:N_TEST_VAL]\n\n return moved_files, pdb_list\n \ndef normalize_aa_classes(list, path): \n \"\"\" makes sure each aa appears an equal number of times for both the testing and validation sets \"\"\"\n\n # counts the number of aa per class (20 classes)\n aa_count = np.zeros(20) # indices in this list will encode the 20 aa. \n \n for pdb in list:\n centers = np.load(path + \"centers_\" + pdb + \".npy\", allow_pickle = True)\n for aa in centers:\n aa_count[aa] += 1\n\n # finds the least frequent aa to determine the number of aa we take from each class\n min_count = int(min(aa_count)) \n\n # creates two new lists of boxes and their centers with equal numbers of amino acid types\n new_centers = []\n new_boxes = []\n aa_count = np.zeros(20) # temporary aa count (needs to be <= min_count)\n \n for pdb in list:\n centers = np.load(path + \"centers_\" + pdb + \".npy\", allow_pickle = True)\n boxes = np.load(path + \"boxes_\" + pdb + \".npy\", allow_pickle = True)\n for aa, box in zip(centers, boxes): #aa is the number encoding the amino acid\n if aa_count[aa] < min_count:\n aa_count[aa] += 1\n new_centers.append(aa)\n new_boxes.append(box)\n \n np.save(path + \"boxes_normalized.npy\", np.asarray(new_boxes)) # add number to the protein and give matching number to the aa list\n np.save(path + \"centers_normalized.npy\", np.asarray(new_centers))\n\ndef get_training_list(list, path):\n \"\"\" combines all training preboxes into a single file \"\"\"\n\n boxes_list = []\n centers_list = []\n\n for pdb in list:\n centers = np.load(path + \"centers_\" + pdb + \".npy\", allow_pickle = True)\n boxes = np.load(path + \"boxes_\" + pdb + \".npy\", allow_pickle = True)\n for aa, box in zip(centers, boxes): #aa is the number encoding the amino acid\n centers_list.append(aa)\n boxes_list.append(box)\n\n print(\"-------------------------------------------\")\n print(\"Total number of boxes: \", len(boxes_list))\n print(\"-------------------------------------------\")\n\n np.save(path + \"boxes_train.npy\", np.asarray(boxes_list)) # add number to the protein and give matching number to the aa list\n np.save(path + \"centers_train.npy\", np.asarray(centers_list))\n\n# ---------- main ----------\npdb_id_list = open(\"../data/input/pdb_all.list\", \"r\")\npdb_list = []\n\nfor line in pdb_id_list:\n pdb_list.append(line[0:4])\npdb_id_list.close()\n\nbox_size = \"1\"\nvoxel_size = \"9\"\n\nbox_path = \"../data/input/boxes_s\" + box_size + \"_\" + voxel_size + \"A/\"\nval_path = \"../data/input/validation_s\" + box_size + \"_\" + voxel_size + 
\"A/\"\ntest_path = \"../data/input/testing_s\" + box_size + \"_\" + voxel_size + \"A/\"\n\n# subsetting boxes to the testing or validation folders\ntest_list, pdb_list = move_boxes(box_path, test_path, pdb_list)\nval_list, pdb_list = move_boxes(box_path, val_path, pdb_list)\n\n# normalizing the test and validation datasets\nnormalize_aa_classes(test_list, test_path)\nnormalize_aa_classes(val_list, val_path)\n\n# combining the remaining training preboxes into one file\nget_training_list(pdb_list, box_path)\n\nprint(\"Finished subsetting data.\")\n\n\n\n\n","repo_name":"akulikova64/cgm_3d_cnn","sub_path":"extras/subset_data.py","file_name":"subset_data.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33603303046","text":"import re\r\nimport itertools\r\nimport urllib.request\r\nimport os\r\nimport time\r\nimport wget\r\nfrom urllib.error import URLError, HTTPError, ContentTooShortError\r\nfrom urllib.parse import urljoin\r\n\r\ndef download(url, user_agent='wswp', retry=3, charset='utf-8'):\r\n #print(\"download \", url)\r\n request = urllib.request.Request(url)\r\n request.add_header('User-agent', user_agent)\r\n try:\r\n resp = urllib.request.urlopen(request)\r\n cs = resp.headers.get_content_charset()\r\n if not cs:\r\n cs = charset\r\n html = resp.read().decode(cs)\r\n except(URLError,HTTPError,ContentTooShortError)as e:\r\n print('Download error:', e.reason)\r\n html =None\r\n if retry > 0:\r\n if hasattr(e,'code') and 500 <= e.code < 600:\r\n # recursively retry 5xx HTTP errors\r\n return download(url, retry-1)\r\n return html\r\n\r\ndef crawl_episode(sitemap):\r\n links = re.findall(r'
', sitemap)  # the original HTML pattern was lost in extraction\r\n    links = list(map(lambda x: re.findall(r'\"([^\"]*)\"', x), links))\r\n    links = list(itertools.chain(*links))\r\n    links = [urljoin('https://www.bbc.co.uk', x) for x in links]\r\n    return links\r\n\r\n\r\ndef download_pdf_mp3(article, store_path='D:\\\\python\\\\bbc_takeway'):\r\n    print(article)\r\n    html = download(article)\r\n    pdf_link = re.findall(r'href=\"([^\"]+\\\\.pdf)\"', html)  # assumed generic pattern; the original regex was lost in extraction\r\n    for link in pdf_link:\r\n        wget.download(urljoin('https://www.bbc.co.uk', link), out=store_path)  # assumed; wget is imported above but its original use was lost\r\n\r\n\r\nif __name__ == '__main__':  # reconstructed skeleton; only the loop body below survived extraction\r\n    sitemap = download('https://www.bbc.co.uk')  # placeholder URL; the original sitemap address was lost\r\n    links = crawl_episode(sitemap)\r\n    i = 0\r\n    for a in links:\r\n        if i > 100:\r\n            print('Only download 100 articles')\r\n            break\r\n        download_pdf_mp3(a)\r\n        time.sleep(1)\r\n        i += 1\r\n","repo_name":"yeerwu/bbc_takeaway_crawler","sub_path":"bbc_crawler.py","file_name":"bbc_crawler.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"23929102892","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n#Draw bar graphs\ndef plotbar(max):\n    fig = plt.figure(figsize=(12, 5))\n    x = ['2020-1','2020-2','2020-3','2020-4','2020-5','2020-6',\n    '2020-7','2020-8','2020-9','2020-10','2020-11','2020-12',\n    '2021-1','2021-2','2021-3','2021-4','2021-5','2021-6']\n    plt.bar(x, max, 0.4, color=\"red\")\n    plt.xlabel(\"Months\")\n    plt.ylabel(\"Max Close in each months\")\n    plt.title(\"High Price chart\")\n    plt.xticks(rotation=45)\n    plt.savefig(\"barChart.png\")\n    plt.show()\n\n\n#Get the maximum value for the month\ndef monmax(data): #[['Date','Close']]\n    max = []\n    y2020 = data[data['Date'].dt.year.isin(np.arange(2020,2021))]\n\n    for i in range(1, 12):\n        mondata = y2020[y2020['Date'].dt.month.isin(np.arange(i,i+1))]\n        max.append(mondata['Close'].max())\n    open_day = '2020-12-01'\n    close_day = '2020-12-31'\n    con1 = y2020['Date'] >= open_day\n    con2 = y2020['Date'] < close_day\n    mondata = y2020[con1 & con2]\n    max.append(mondata['Close'].max())\n\n    y2021 = data[data['Date'].dt.year.isin(np.arange(2021, 2022))]\n    for i in range(1, 7):\n        mondata = y2021[y2021['Date'].dt.month.isin(np.arange(i, i + 1))]\n        max.append(mondata['Close'].max())\n\n    return max\n\n\ndef buquan(order_data):\n    # Remove duplicate dates\n    order_data.drop_duplicates(inplace=True)\n    order_data = order_data.reset_index(drop=True)  # Reset Index\n    # Fill in missing dates by daily resampling; interpolate the missing values\n    order_data = order_data.set_index(pd.to_datetime(order_data['Date'])).drop('Date', axis=1)\n    order_data = order_data.resample('D').mean().interpolate()\n\n    return order_data\n\n\nif __name__ == \"__main__\":\n    data = pd.read_csv('./indexData.csv')  #Data Reading\n    data['Date'] = pd.to_datetime(data['Date'])  #Convert the time in csv\n\n    data.dropna(axis=0, how='any', inplace=True, subset=None)  #Remove rows with null values\n\n    data.drop_duplicates(subset=['Date'], keep='first', inplace=True)  #Only one date duplicate is retained\n    data = data.reset_index(drop=True)  # Reset Index\n\n    data.sort_values('Date', inplace=True)  #sort rows chronologically\n\n    # Select data for the 2019-2021 time period\n    open_day = '2018-12-30'\n    close_day = '2021-12-31'\n    con1 = data['Date'] >= open_day\n    con2 = data['Date'] < close_day\n    order_data = data[con1 & con2]\n\n\n    # # Histogram data processing\n    # # Select data for the time period 2020.6-2021.6\n    open_day = '2019-06-01'\n    close_day = '2021-06-30'\n    con1 = order_data['Date'] >= open_day\n    con2 = order_data['Date'] < close_day\n    datanew = order_data[con1 & con2]\n    bar_data = datanew[['Date','Close']]\n\n    max = monmax(bar_data) #Sub-function to get the maximum value in a month\n    plotbar(max) #Sub-functions to draw 
histograms\n","repo_name":"icesylh/2415Donghan-Li-MidFianlProject","sub_path":"MidtermProject.py","file_name":"MidtermProject.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21836436220","text":"import random\nimport json\n\n\nfrom dash_back.models import Post, Online #type: ignore\nfrom paho.mqtt import client as mqtt_client #type: ignore\nfrom datetime import datetime, timezone\n\nbroker = '159.89.103.242'\nport = 1883\ntopic1 = \"data/#\"\n\ntopic2 = \"mqtt/mqtt\"\n# generate client ID with pub prefix randomly\nclient_id = f'python-mqtt-{random.randint(0, 100)}'\n# username = 'emqx'\n# password = 'public'\n\ndef connect_mqtt() -> mqtt_client:\n def on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"Connected to MQTT Broker!\")\n else:\n print(\"Failed to connect, return code %d\\n\", rc)\n\n client = mqtt_client.Client(client_id)\n #client.username_pw_set(username, password)\n client.on_connect = on_connect\n client.connect(broker, port)\n return client\n\ndef subscribe(client: mqtt_client):\n def on_message(client, userdata, msg):\n #print(f\"Received `{msg.payload.payload}` from `{msg.topic}` topic\")\n #data = msg.payload.decode()\n topic = msg.topic\n myList = topic.split('/')\n dev_id = myList[1]\n data_out=json.loads(msg.payload.decode())\n timestamp = int(data_out['payload']['timestamp'])\n timestamp = datetime.fromtimestamp(timestamp, tz=timezone.utc).isoformat()\n\n value = float(data_out['payload']['power'])\n print(data_out,dev_id)\n Post.objects.get_or_create(devId=dev_id,value=value, created_date=timestamp)\n Online.objects.get_or_create(dev=dev_id, saved_date=timestamp, pow=value )\n client.subscribe([(topic1, 0), (topic2, 0)])\n client.on_message = on_message\n\ndef run():\n client = connect_mqtt()\n subscribe(client)\n client.loop_start()\n\nif __name__ == '__main__':\n run()\n","repo_name":"gkmanev/django_dash","sub_path":"project/dashboard/mqtt/_mqtt.py","file_name":"_mqtt.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"74325495769","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom typing import Any, Callable, List, Optional, Union\n\n# Dependency imports\n\nimport tensorflow as tf\n\nfrom mediapipe.model_maker.python.core.data import dataset as ds\n\nDEFAULT_QUANTIZATION_STEPS = 500\n\n\ndef _get_representative_dataset_generator(dataset: tf.data.Dataset,\n num_steps: int) -> Callable[[], Any]:\n \"\"\"Gets a representative dataset generator for post-training quantization.\n\n The generator is to provide a small dataset to calibrate or estimate the\n range, i.e, (min, max) of all floating-point arrays in the model for\n quantization. Usually, this is a small subset of a few hundred samples\n randomly chosen, in no particular order, from the training or evaluation\n dataset. 
See tf.lite.RepresentativeDataset for more details.\n\n Args:\n dataset: Input dataset for extracting representative sub dataset.\n num_steps: The number of quantization steps which also reflects the size of\n the representative dataset.\n\n Returns:\n A representative dataset generator.\n \"\"\"\n\n def representative_dataset_gen():\n \"\"\"Generates representative dataset for quantization.\"\"\"\n for data, _ in dataset.take(num_steps):\n yield [data]\n\n return representative_dataset_gen\n\n\nclass QuantizationConfig(object):\n \"\"\"Configuration for post-training quantization.\n\n Refer to\n https://www.tensorflow.org/lite/performance/post_training_quantization\n for different post-training quantization options.\n \"\"\"\n\n def __init__(\n self,\n optimizations: Optional[Union[tf.lite.Optimize,\n List[tf.lite.Optimize]]] = None,\n representative_data: Optional[ds.Dataset] = None,\n quantization_steps: Optional[int] = None,\n inference_input_type: Optional[tf.dtypes.DType] = None,\n inference_output_type: Optional[tf.dtypes.DType] = None,\n supported_ops: Optional[Union[tf.lite.OpsSet,\n List[tf.lite.OpsSet]]] = None,\n supported_types: Optional[Union[tf.dtypes.DType,\n List[tf.dtypes.DType]]] = None,\n experimental_new_quantizer: bool = False,\n ):\n \"\"\"Constructs QuantizationConfig.\n\n Args:\n optimizations: A list of optimizations to apply when converting the model.\n If not set, use `[Optimize.DEFAULT]` by default.\n representative_data: A representative ds.Dataset for post-training\n quantization.\n quantization_steps: Number of post-training quantization calibration steps\n to run (default to DEFAULT_QUANTIZATION_STEPS).\n inference_input_type: Target data type of real-number input arrays. Allows\n for a different type for input arrays. Defaults to None. If set, must be\n be `{tf.float32, tf.uint8, tf.int8}`.\n inference_output_type: Target data type of real-number output arrays.\n Allows for a different type for output arrays. Defaults to None. If set,\n must be `{tf.float32, tf.uint8, tf.int8}`.\n supported_ops: Set of OpsSet options supported by the device. Used to Set\n converter.target_spec.supported_ops.\n supported_types: List of types for constant values on the target device.\n Supported values are types exported by lite.constants. Frequently, an\n optimization choice is driven by the most compact (i.e. 
smallest) type\n in this list (default [constants.FLOAT]).\n experimental_new_quantizer: Whether to enable experimental new quantizer.\n\n Raises:\n ValueError: if inference_input_type or inference_output_type are set but\n not in {tf.float32, tf.uint8, tf.int8}.\n \"\"\"\n if inference_input_type is not None and inference_input_type not in {\n tf.float32, tf.uint8, tf.int8\n }:\n raise ValueError('Unsupported inference_input_type %s' %\n inference_input_type)\n if inference_output_type is not None and inference_output_type not in {\n tf.float32, tf.uint8, tf.int8\n }:\n raise ValueError('Unsupported inference_output_type %s' %\n inference_output_type)\n\n if optimizations is None:\n optimizations = [tf.lite.Optimize.DEFAULT]\n if not isinstance(optimizations, list):\n optimizations = [optimizations]\n self.optimizations = optimizations\n\n self.representative_data = representative_data\n if self.representative_data is not None and quantization_steps is None:\n quantization_steps = DEFAULT_QUANTIZATION_STEPS\n self.quantization_steps = quantization_steps\n\n self.inference_input_type = inference_input_type\n self.inference_output_type = inference_output_type\n\n if supported_ops is not None and not isinstance(supported_ops, list):\n supported_ops = [supported_ops]\n self.supported_ops = supported_ops\n\n if supported_types is not None and not isinstance(supported_types, list):\n supported_types = [supported_types]\n self.supported_types = supported_types\n\n self.experimental_new_quantizer = experimental_new_quantizer\n\n @classmethod\n def for_dynamic(cls) -> 'QuantizationConfig':\n \"\"\"Creates configuration for dynamic range quantization.\"\"\"\n return QuantizationConfig()\n\n @classmethod\n def for_int8(\n cls,\n representative_data: ds.Dataset,\n quantization_steps: int = DEFAULT_QUANTIZATION_STEPS,\n inference_input_type: tf.dtypes.DType = tf.uint8,\n inference_output_type: tf.dtypes.DType = tf.uint8,\n supported_ops: tf.lite.OpsSet = tf.lite.OpsSet.TFLITE_BUILTINS_INT8\n ) -> 'QuantizationConfig':\n \"\"\"Creates configuration for full integer quantization.\n\n Args:\n representative_data: Representative data used for post-training\n quantization.\n quantization_steps: Number of post-training quantization calibration steps\n to run.\n inference_input_type: Target data type of real-number input arrays.\n inference_output_type: Target data type of real-number output arrays.\n supported_ops: Set of `tf.lite.OpsSet` options, where each option\n represents a set of operators supported by the target device.\n\n Returns:\n QuantizationConfig.\n \"\"\"\n return QuantizationConfig(\n representative_data=representative_data,\n quantization_steps=quantization_steps,\n inference_input_type=inference_input_type,\n inference_output_type=inference_output_type,\n supported_ops=supported_ops)\n\n @classmethod\n def for_float16(cls) -> 'QuantizationConfig':\n \"\"\"Creates configuration for float16 quantization.\"\"\"\n return QuantizationConfig(supported_types=[tf.float16])\n\n def set_converter_with_quantization(self, converter: tf.lite.TFLiteConverter,\n **kwargs: Any) -> tf.lite.TFLiteConverter:\n \"\"\"Sets input TFLite converter with quantization configurations.\n\n Args:\n converter: input tf.lite.TFLiteConverter.\n **kwargs: arguments used by ds.Dataset.gen_tf_dataset.\n\n Returns:\n tf.lite.TFLiteConverter with quantization configurations.\n \"\"\"\n converter.optimizations = self.optimizations\n\n if self.representative_data is not None:\n tf_ds = 
self.representative_data.gen_tf_dataset(\n batch_size=1, is_training=False, **kwargs)\n converter.representative_dataset = tf.lite.RepresentativeDataset(\n _get_representative_dataset_generator(tf_ds, self.quantization_steps))\n\n if self.inference_input_type:\n converter.inference_input_type = self.inference_input_type\n if self.inference_output_type:\n converter.inference_output_type = self.inference_output_type\n if self.supported_ops:\n converter.target_spec.supported_ops = self.supported_ops\n if self.supported_types:\n converter.target_spec.supported_types = self.supported_types\n\n if self.experimental_new_quantizer is not None:\n converter.experimental_new_quantizer = self.experimental_new_quantizer\n return converter\n","repo_name":"google/mediapipe","sub_path":"mediapipe/model_maker/python/core/utils/quantization.py","file_name":"quantization.py","file_ext":"py","file_size_in_byte":8008,"program_lang":"python","lang":"en","doc_type":"code","stars":23977,"dataset":"github-code","pt":"31"} +{"seq_id":"38687707015","text":"import tkinter as tk\r\nimport time\r\nimport math \r\n\r\nWIDTH=400\r\nHEIGHT=400\r\n\r\nm=tk.Tk()\r\nm.title(\"Analog Clock\")\r\ncanvas=tk.Canvas(m,width=WIDTH,height=HEIGHT,bg=\"white\")\r\ncanvas.pack()\r\n\r\ndef clock():\r\n canvas.delete(\"all\")\r\n now=time.localtime()\r\n hour=now.tm_hour%12\r\n minute=now.tm_min\r\n seconds=now.tm_sec\r\n\r\n #shape of clock\r\n canvas.create_oval(2,3,WIDTH,HEIGHT,outline=\"black\",width=2)\r\n\r\n #hour numbers\r\n for i in range(12):\r\n angle=i* math.pi/6 - math.pi/2\r\n x=WIDTH/2 + 0.7* WIDTH/2 * math.cos(angle)\r\n y=HEIGHT/2 + 0.7* WIDTH/2 * math.sin(angle)\r\n if i==0:\r\n canvas.create_text(x,y-10,text=str(i+12),font=(\"Roboto\",15))\r\n else:\r\n canvas.create_text(x,y,text=str(i),font=(\"Roboto\",15))\r\n \r\n #minute lines\r\n for i in range(60):\r\n angle=i* math.pi/30 - math.pi/2\r\n x1=WIDTH/2 + 0.8* WIDTH/2 *math.cos(angle)\r\n y1=HEIGHT/2 + 0.8* HEIGHT/2 *math.sin(angle)\r\n x2=WIDTH/2 + 0.9* WIDTH/2 *math.cos(angle)\r\n y2=HEIGHT/2 + 0.9* HEIGHT/2 *math.sin(angle)\r\n if i%5==0:\r\n canvas.create_line(x1,y1,x2,y2,fill=\"black\",width=3)\r\n else:\r\n canvas.create_line(x1,y1,x2,y2,fill=\"black\",width=1)\r\n \r\n #hour hands\r\n hour_angle=(hour+minute/60)* math.pi/6 - math.pi/2\r\n hour_x=WIDTH/2 + 0.5 * WIDTH/2 *math.cos(hour_angle)\r\n hour_y=HEIGHT/2 + 0.5 * HEIGHT/2 *math.sin(hour_angle)\r\n canvas.create_line(WIDTH/2,HEIGHT/2,hour_x,hour_y,fill=\"black\",width=6)\r\n \r\n #minute hands\r\n minute_angle=(minute+seconds/60)* math.pi/30 - math.pi/2\r\n minute_x=WIDTH/2 + 0.7 * WIDTH/2 *math.cos(minute_angle)\r\n minute_y=HEIGHT/2 + 0.7 * HEIGHT/2 *math.sin(minute_angle)\r\n canvas.create_line(WIDTH/2,HEIGHT/2,minute_x,minute_y,fill=\"black\",width=4)\r\n\r\n #second hands\r\n second_angle=seconds* math.pi/30 - math.pi/2\r\n second_x=WIDTH/2 + 0.6 * WIDTH/2 *math.cos(second_angle)\r\n second_y=HEIGHT/2 + 0.6 * HEIGHT/2 *math.sin(second_angle)\r\n canvas.create_line(WIDTH/2,HEIGHT/2,second_x,second_y,fill=\"red\",width=2)\r\n\r\ndef update_clock():\r\n clock()\r\n canvas.after(1000, update_clock) # Schedule the next update after 1000 milliseconds (1 second)\r\n\r\nupdate_clock()\r\n\r\nm.mainloop()\r\n","repo_name":"ritikab1707/python_analog_clock","sub_path":"clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
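A quick numeric check of the hand-angle formulas in the analog-clock record above (a sketch added for illustration; the sample time 3:00:00 is hypothetical). Each hand angle is offset by -pi/2 so that 12 o'clock points straight up on the canvas:

import math

hour, minute, seconds = 3, 0, 0  # hypothetical sample time, 3:00:00
hour_angle = (hour + minute / 60) * math.pi / 6 - math.pi / 2
minute_angle = (minute + seconds / 60) * math.pi / 30 - math.pi / 2
print(round(hour_angle, 4))    # 0.0: cos=1, sin=0, so the hour hand points right, toward 3
print(round(minute_angle, 4))  # -1.5708 (-pi/2): sin=-1, so the minute hand points up, toward 12 (canvas y grows downward)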
+{"seq_id":"22327756","text":"\"\"\"\r\n模拟鼠标键盘操作。\r\n\r\n本以为点击任务栏的图标很方便的,但是没有想到鼠标移动有效,点击就是没有小。\r\n只能曲线实现了,先移动鼠标,显示缩略图后,再点击缩略图实现\r\n\r\n\"\"\"\r\n\r\nimport time\r\nfrom pymouse import PyMouse\r\nfrom pykeyboard import PyKeyboard\r\n\r\nm=PyMouse()\r\nk=PyKeyboard()\r\n\r\ndef click_ico(x,y):\r\n \"\"\"\r\n 针对点击图标专门写一个点击函数。\r\n 需要注意的是,这里的x,y对应np.array.shape的y,x\r\n \"\"\"\r\n # 在win10下一直无法实现,能移动鼠标,但是点击就是没有用\r\n m=PyMouse()\r\n m.click(x,y)\r\n\r\n\r\ndef mouse_click(x,y,button=1,n=1):\r\n \"\"\"模拟鼠标点击\"\"\"\r\n # 注意,这里将 x,y=y,x\r\n # 是为了和 plt.image 的处理保存一致的理解\r\n m.click(x=y,y=x,button=button,n=n)\r\n time.sleep(0.5)\r\n\r\ndef keyboard_input(string=None,function_key=None, keys=[]):\r\n \"\"\"模拟键盘输入\"\"\"\r\n # 如果是普通键盘输入\r\n if string:\r\n k.type_string(string)\r\n time.sleep(0.2)\r\n return\r\n #如果是功能键,比如F1,F2\r\n if function_key:\r\n num=int(function_key.lower().replace('f',''))\r\n k.tap_key(k.function_keys[num]) # Tap F5\r\n time.sleep(0.2)\r\n return\r\n\r\ndef keyboard_enter():\r\n \"\"\"键盘回车键\"\"\"\r\n k.tab_key([k.enter_key])\r\n time.sleep(0.1)\r\n\r\ndef keyboard(string):\r\n \"\"\"键盘输入\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"suzhenyu22/ocr-stock","sub_path":"common/userinput.py","file_name":"userinput.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"zh","doc_type":"code","stars":12,"dataset":"github-code","pt":"31"} +{"seq_id":"10324823316","text":"import pygame\nimport funcs\n\nNO_DRAW = -1\nLASSO = 1\nMOVE_TOOL = 2\n\naqua = (0, 255, 255, 0) # морская волна\nblack = (0, 0, 0, 0) # черный\nblue = (0, 0, 255, 0) # синий\nfuchsia = (255, 0, 255, 0) # фуксия\ngray = (128, 128, 128, 0) # серый\ngreen = (0, 128, 0, 0) # зеленый\nlime = (0, 255, 0, 0) # цвет лайма\norange = (255, 128, 0) # оранжевый\nmaroon = (128, 0, 0, 0) # темно-бордовый\nnavy_blue = (0, 0, 128, 0) # темно-синий\nolive = (128, 128, 0, 0) # оливковый\npurple = (128, 0, 128, 0) # фиолетовый\nred = (255, 0, 0, 0) # красный\nsilver = (192, 192, 192, 0) # серебряный\nteal = (0, 128, 128, 0) # зелено-голубой\nwhite = (255, 255, 255, 0) # белый\nyellow = (255, 255, 0, 0) # желтый\nlight_gray = (204, 204, 204)\n\n# warm = [(238, 0, 0), (238, 238, 0), (238, 0, 238), (238, 18, 137), (238, 44, 44), (238, 48, 167), (238, 58, 140), (238, 59, 59), (238, 64, 0), (238, 92, 66), (238, 99, 99), (238, 106, 80), (238, 118, 0), (238, 118, 33), (238, 121, 66), (238, 130, 98), (238, 154, 0), (238, 154, 73), (238, 173, 14), (238, 180, 34), (238, 201, 0)]\n# cold = [(0, 0, 238), (67, 110, 238), (92, 172, 238), (0, 178, 238), (0, 229, 238), (0, 238, 238), (0, 238, 0), (0, 238, 118), (78, 238, 148), (92, 172, 238), (118, 238, 0), (122, 103, 238), (145, 44, 238), (159, 121, 238), (178, 58, 238), (209, 95, 238)]\n\ncolors = [[0, 0, 0], [24,24,24], [48,48,48], [64,64,64], [128,128,128],[155,155,155],[200,200,200],[255,255,255],\n [27,38,49],[40,55,71],[46,64,83],[52,73,94],[93,109,126],[133,146,158],[174,182,191],[214,219,223],\n [77,86,86],[95,106,106],[113,125,126],[149,165,166],[170,183,184],[191,201,202],[213,219,219],[229,232,232],\n [98,101,103],[121,125,127],[144,148,151],[189,195,199],[202,207,210],[229,231,233],[248,249,249],[255,255,255],\n [100,30,22],[123,36,28],[146,43,33],[192,57,43],[205,97,85],[217,136,128],[230,176,170],[242,215,213],\n [120,40,31],[148,49,38],[176,58,46],[220,76,60],[236,112,99],[241,148,138],[245,183,177],[250,219,216],\n 
[74,35,90],[91,44,111],[108,52,131],[142,68,173],[165,105,189],[187,143,206],[210,180,222],[232,218,239],\n [21,67,96],[26,82,118],[31,97,141],[41,128,185],[84,153,199],[127,179,213],[169,204,227],[212,230,241],\n [20,90,50],[25,111,61],[34,141,84],[34,174,96],[82,190,128],[125,206,160],[169,223,191],[212,239,223],\n [125,102,8],[154,125,10],[183,149,11],[230,196,15],[244,208,63],[247,220,111],[249,231,159],[252,243,207],\n [126,81,9],[156,100,12],[185,119,14],[242,156,18],[245,176,65],[248,196,113],[250,215,160],[253,235,208],\n [110,44,0],[135,54,0],[160,64,0],[211,84,0],[220,118,51],[229,152,102],[237,187,153],[246,221,204]\n ]\n\ncolors2 = {'Skins': ([111, 62, 33], [137, 59, 47], [191, 125, 84],[235, 182, 156],[248, 206, 176], [240, 216, 156]),\n 'Summer': ([2,151,157],[114,227,209],[255,231,209], [247,200,48], [255,184,140],[231,151,150]),\n 'Sunset': ([233, 175, 105], [252,120,150],[193,107,188],[152,89,197],[108,66,196],[30,171,215]),\n 'Forest': ([150,79,27], [218,148,50], [211,222,146], [178, 164, 17], [75, 116, 47], [92,107,40]),\n 'Coffee': ([92,58,42],[121,84,63],[172,138,104],[200,173,139],[223,213,191],[206,159,85]),\n '4 bit': ([255, 0, 0], [255, 128, 0], [255, 255, 0], [0, 128, 0], [0, 0, 255], [128, 0, 128])}\n\nbackground = white\ndraw = pygame.draw\n\nbrush_icon = pygame.image.load('icons/brush.png')\ncircle_icon = pygame.image.load('icons/circle.png')\neraser_icon = pygame.image.load('icons/eraser.png')\nfill_icon = pygame.image.load('icons/fill.png')\npencil_icon = pygame.image.load('icons/pencil.png')\ncolor_pick_icon = pygame.image.load('icons/color_pick.png')\nrect_icon = pygame.image.load('icons/rect.png')\nswap_col_icon = pygame.image.load('icons/swap_col.png')\nrotate_icon = pygame.image.load('icons/rotate.png')\nmove_icon = pygame.image.load('icons/move.png')\nline_icon = pygame.image.load('icons/line.png')\nmirror_icon = pygame.image.load('icons/mirror.png')\nlasso_icon = pygame.image.load('icons/lasso.png')\nicons = [brush_icon, pencil_icon, eraser_icon, fill_icon, color_pick_icon, swap_col_icon, circle_icon, rect_icon,\n line_icon, lasso_icon, rotate_icon, move_icon, mirror_icon]\ndraw_tools = {}\n\n\nclass UIItem:\n def __init__(self, x, y, width, height, image=None, color=None, text=('', white), font=('arial', 0), frame_w=0, frame_col=black):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.image = image\n self.color = color\n self.frame_w = frame_w\n self.frame_col = frame_col\n self.text = text[0]\n self.text_col = text[1]\n self.font_name = font[0]\n self.font_size = font[1]\n self.old_color = color\n\n def mouse_hover(self):\n mouse_x, mouse_y = pygame.mouse.get_pos()\n if mouse_x in range(self.x, self.x + self.width) and mouse_y in range(self.y, self.y + self.height):\n return True\n return False\n\n def get_rectangle(self):\n x = max(0, self.x - 5)\n y = max(0, self.y - 5)\n return (x, y, self.width + 10, self.height + 10)\n\n def mouse_down(self):\n if self.mouse_hover() and pygame.mouse.get_pressed()[0] == 1:\n return True\n return False\n\n def tint(self, enabled, amt=2):\n if enabled:\n if self.color:\n self.color = funcs.decrease_brightness(self.old_color, amt=amt)\n else:\n self.color = gray\n else:\n if self.color:\n self.color = self.old_color\n\n def draw_frame(self, surface, color1, width=2):\n padding = width // 2\n pygame.draw.rect(surface, color1, (self.x + padding, self.y + padding, self.width - 2 * padding, self.height - 2 * padding), width)\n\n def draw(self, surface, color_override=None):\n col = black\n if 
self.color:\n col = self.color\n if color_override:\n col = color_override\n if self.image:\n img = pygame.transform.scale(self.image, (self.width, self.height))\n if self.color:\n funcs.colorize(img, self.color)\n surface.blit(img, (self.x, self.y))\n else:\n rgb_col = funcs.rgba_to_rgb(col, white)\n draw.rect(surface, rgb_col, ((self.x, self.y), (self.width, self.height)))\n if self.text:\n fsize = self.font_size\n if not fsize:\n fsize = min((self.height - 2), (self.width // len(self.text)))\n font = pygame.font.SysFont(self.font_name, fsize, bold=True)\n title = font.render(self.text, 0, self.text_col)\n text_rect = title.get_rect()\n text_len = text_rect.width\n text_height = text_rect.height\n tx = (self.width - text_len) // 2\n ty = (self.height - text_height) // 2\n surface.blit(title, (self.x + tx, self.y + ty))\n if self.frame_w:\n self.draw_frame(surface, self.frame_col, self.frame_w)\n\n def hide(self, on, surface):\n if on:\n self.draw(surface, color_override=background)\n\n\nclass UIButton(UIItem):\n def __init__(self, x, y, width, height, action=None, image=None, color=None, text=('', white), font=('arial', 0), frame_w=0, frame_col=black):\n super().__init__(x, y, width, height, image=image, color=color, text=text, font=font, frame_w=frame_w, frame_col=frame_col)\n self.action = action\n self.state = False\n\n def onclick(self):\n if self.mouse_down():\n self.state = not self.state\n\n\nclass Rectangle(UIItem):\n def __init__(self, x, y, width, height, color=white, image=None, text=('', white), font=('arial', 0)):\n super().__init__(x, y, width, height, color=color, image=image, text=text, font=font)\n self.neighbors = []\n\n\n def draw_border(self, surface, up=False, bottom=False, left=False, right=False, color=red):\n if up:\n pygame.draw.line(surface, color, (self.x, self.y), (self.x + self.width, self.y), 1)\n if right:\n pygame.draw.line(surface, color, (self.x + self.width, self.y), (self.x + self.width, self.y + self.height), 1)\n if bottom:\n pygame.draw.line(surface, color, (self.x, self.y + self.height), (self.x + self.width, self.y + self.height), 1)\n if left:\n pygame.draw.line(surface, color, (self.x, self.y), (self.x, self.y + self.height), 1)\n\n def draw_set_borders(self, surface, in_set):\n if not self.neighbors:\n return\n if self not in in_set:\n return\n up, bottom, left, right = False, False, False, False\n for i, neighbor in enumerate(self.neighbors):\n if i == 0:\n up = neighbor not in in_set\n if i == 1:\n bottom = neighbor not in in_set\n if i == 2:\n left = neighbor not in in_set\n if i == 3:\n right = neighbor not in in_set\n self.draw_border(surface, up=up, bottom=bottom, left=left, right=right)\n\n def get_rc(self, canvas):\n for r, row in enumerate(canvas.table):\n for c, px in enumerate(row):\n if px == self:\n return r, c\n\n\n\nclass Canvas(UIItem):\n def __init__(self, x, y, width, height, rows, cols, bg_col=white):\n super().__init__(x, y, width, height)\n self.rows = rows\n self.cols = cols\n self.x_offset = 0\n self.y_offset = 0\n self.lasso_clear_flag = False\n self.selected_pixel_set = []\n self.old_selected_set = []\n self.row_size = height // rows\n self.col_size = width // cols\n self.paint_history = []\n self.bg_col = bg_col\n cs, rs = self.col_size, self.row_size\n self.table = [[Rectangle((i * cs + x), (j * rs + y), cs, rs, color=bg_col) for i in range(cols)] for j in range(rows)]\n for i in range(rows):\n for j in range(cols):\n if i:\n self.table[i][j].neighbors.append(self.table[i - 1][j])\n if i < rows - 1:\n 
self.table[i][j].neighbors.append(self.table[i + 1][j])\n if j:\n self.table[i][j].neighbors.append(self.table[i][j - 1])\n if j < cols - 1:\n self.table[i][j].neighbors.append(self.table[i][j + 1])\n\n def merge_img(self, col, row, img, color=None, surface=None):\n image = img\n history_obj = []\n x, y = image.get_size()\n if min(x, y) == 0:\n return\n if color:\n if color != image.get_at((x // 2, y // 2)):\n funcs.colorize(image, color)\n lm_y, lm_x = min(y, self.rows - 1), min(x, self.cols - 1)\n for i in range(lm_y):\n for j in range(lm_x):\n pix = image.get_at((i, j))\n p_row = min((row + i - y // 2), self.rows - 1)\n p_col = min((col + j - x // 2), self.cols - 1)\n if self.table[p_row][p_col].color != pix and pix[3] != 0:\n cl = self.table[p_row][p_col].color\n history_obj.append({self.table[p_row][p_col]: self.table[p_row][p_col].color})\n self.table[p_row][p_col].color = funcs.rgba_to_rgb(cl, pix)\n if surface:\n self.table[p_row][p_col].draw(surface)\n self.paint_history.append(history_obj)\n\n def lasso_selection(self):\n to_select = []\n for row in self.table:\n start, end = -1, 0\n for i, px in enumerate(row):\n if px in self.selected_pixel_set:\n if start == -1:\n start = i\n else:\n end = i\n to_select += row[start:end+1]\n self.selected_pixel_set = to_select\n\n def pixel_paint(self, col, row, color=black, brush=None, size=(1, 1), alpha=None, surface=None, overlays=None):\n row = min(abs(row), self.rows - 1)\n col = min(abs(col), self.cols - 1)\n startrow = max((row - size[1] // 2), 0)\n endrow = min((row + size[1] // 2), self.rows - 1)\n startcol = max((col - size[0] // 2), 0)\n endcol = min((col + size[0] // 2), self.cols - 1)\n if brush:\n if isinstance(brush, pygame.Surface):\n if brush.get_size() != size:\n br = pygame.transform.scale(brush, size)\n else:\n br = brush\n self.merge_img(col, row, br, color, surface=surface)\n elif callable(brush):\n brush(self, row, col, color, surface=surface)\n else:\n history_obj = []\n # print(f'srow:{startrow}, endrow:{endrow}, startcol:{startcol}, endcol:{endcol}')\n if alpha:\n color = (*color[:3], alpha)\n for i in range(startrow, endrow + 1):\n for j in range(startcol, endcol + 1):\n if self.table[i][j].color != color:\n # store the previous color as a {pixel: color} entry, matching merge_img\n history_obj.append({self.table[i][j]: self.table[i][j].color})\n self.table[i][j].color = color\n if surface:\n self.table[i][j].draw(surface)\n # print(color)\n if surface and overlays and color[:3] == background[:3]:\n for ov in overlays:\n if i in range(0, ov.rows) and j in range(0, ov.cols):\n px = ov.table[i][j]\n if px.color != background:\n px.draw(surface)\n self.paint_history.append(history_obj)\n if len(self.paint_history) > 100:\n self.paint_history.pop(0)\n # return max(0, startrow - 1), max(0, startcol - 1), min(self.rows, endrow + 1), min(self.cols, endcol + 1)\n\n def undo(self):\n if self.paint_history:\n history_obj = self.paint_history.pop()\n # each entry is a dict mapping a pixel Rectangle to its previous color\n for obj in history_obj:\n for pixel, color in obj.items():\n pixel.color = color\n\n def line(self, p0, p1):\n points = []\n n = funcs.diagonal_distance(p0, p1)\n for step in range(n + 1):\n t = step / n if n else 0\n lerp_x, lerp_y = funcs.lerp_point(p0, p1, t)\n row, col = funcs.grid_snap(lerp_x, lerp_y, self.x, self.y, self.row_size, self.col_size)\n points.append((int(row), int(col)))\n return points\n\n def show_selected_set(self, surface):\n if self.old_selected_set:\n for px in self.old_selected_set:\n px.draw_border(surface, up=True, bottom=True, right=True, left=True, color=white)\n self.old_selected_set = []\n for px in 
self.selected_pixel_set:\n px.draw(surface)\n px.draw_set_borders(surface, self.selected_pixel_set)\n\n def get_at_mouse_pos(self, mouse_x, mouse_y):\n mouse_x, mouse_y = mouse_x - self.x_offset, mouse_y - self.y_offset\n row, col = funcs.grid_snap(mouse_x, mouse_y, self.x, self.y, self.row_size, self.col_size)\n return self.table[row][col]\n\n def line_paint(self, old_mpos, new_mpos, color=black, brush=None, size=(1, 1), alpha=255, surface=None, overlays=None):\n if brush == NO_DRAW:\n return\n if brush == MOVE_TOOL:\n old_r, old_c = funcs.grid_snap(*old_mpos, self.x + self.x_offset,\n self.y + self.y_offset, self.row_size, self.col_size)\n new_r, new_c = funcs.grid_snap(*new_mpos, self.x + self.x_offset, self.y + self.y_offset, self.row_size,\n self.col_size)\n dr, dc = new_r - old_r, new_c - old_c\n funcs.move_selected_pixels(self, dr, dc)\n if brush == LASSO and self.lasso_clear_flag:\n self.old_selected_set = self.selected_pixel_set\n self.selected_pixel_set = []\n self.lasso_clear_flag = False\n old_mpos = old_mpos[0] - self.x_offset, old_mpos[1] - self.y_offset\n new_mpos = new_mpos[0] - self.x_offset, new_mpos[1] - self.y_offset\n line = self.line(old_mpos, new_mpos)\n if min(size) == 0:\n return\n if brush and isinstance(brush, pygame.Surface):\n step = max(min(size) // 3, 1)\n else:\n step = 1\n for i in range(1, len(line), step):\n row, col = line[i]\n if brush == LASSO:\n if self.table[row][col] not in self.selected_pixel_set:\n self.selected_pixel_set.append(self.table[row][col])\n self.pixel_paint(col, row, color=color, size=size, brush=brush, alpha=alpha, surface=surface, overlays=overlays)\n\n def draw(self, surface, col_override=None, area=None):\n if area:\n startrow, startcol, endrow, endcol = area\n for i in range(startrow, endrow + 1):\n for j in range(startcol, endcol + 1):\n self.table[i][j].draw(surface)\n return\n draw.rect(surface, background, ((max(self.x - self.col_size, 0), max(self.y - self.row_size, 0)),\n (self.width + self.col_size * 2, self.height + self.row_size * 2)), max(1, min(self.row_size, self.col_size)) // 2)\n for row_i, row in enumerate(self.table):\n if self.height >= self.y_offset + row_i * self.row_size >= -self.row_size:\n for col_i, px in enumerate(row):\n if self.width >= self.x_offset + col_i * self.col_size >= -self.col_size:\n if px.color[:3] != background[:3]:\n px.draw(surface, color_override=col_override)\n if px in self.selected_pixel_set:\n px.draw_set_borders(surface, self.selected_pixel_set)\n x_frame_pos, y_frame_pos = max(self.x, self.x + self.x_offset), max(self.y, self.y + self.y_offset)\n draw.rect(surface, black, (x_frame_pos, y_frame_pos, self.cols * self.col_size, self.rows * self.row_size), 1)\n draw.rect(surface, black, ((self.x - 2, self.y - 2), (self.width + 4, self.height + 4)), 2)\n\n def pan(self, dx, dy):\n if self.col_size * self.cols < self.width and (self.x_offset + dx < 0 or self.x_offset + self.col_size * self.cols > self.width):\n self.x_offset = funcs.constrain(self.x_offset, 0, self.width - self.cols * self.col_size)\n return\n if self.row_size * self.rows < self.height and (self.y_offset < 0 or self.y_offset + self.row_size * self.rows > self.height):\n self.y_offset = funcs.constrain(self.y_offset, 0, self.height - self.rows * self.row_size)\n return\n self.x_offset += dx\n self.y_offset += dy\n x, y = self.x + self.x_offset, self.y + self.y_offset\n self.table = [[Rectangle((i * self.col_size + x), (j * self.row_size + y),\n self.col_size, self.row_size, color=self.table[j][i].color)\n for i in 
range(self.cols)] for j in range(self.rows)]\n\n\n def scale(self, amt, mouse_x, mouse_y):\n mouse_x = funcs.constrain(mouse_x, self.x + self.x_offset + 1, self.x + self.x_offset + self.cols * self.col_size - 1)\n mouse_y = funcs.constrain(mouse_y, self.y + self.y_offset + 1, self.y + self.y_offset + self.rows * self.row_size - 1)\n if self.row_size + amt < 1 or self.col_size + amt < 1:\n return\n if self.row_size + amt > self.height // 20 or self.col_size + amt > self.width // 20:\n return\n row_before_zoom, col_before_zoom = funcs.grid_snap(mouse_x, mouse_y,\n self.x + self.x_offset, self.y + self.y_offset,\n self.row_size, self.col_size)\n mouse_x_rel = (mouse_x - self.table[row_before_zoom][col_before_zoom].x) / self.col_size\n mouse_y_rel = (mouse_y - self.table[row_before_zoom][col_before_zoom].y) / self.row_size\n self.col_size += amt\n self.row_size += amt\n x, y = self.x + self.x_offset, self.y + self.y_offset\n self.table = [[Rectangle((i * self.col_size + x), (j * self.row_size + y),\n self.col_size, self.row_size, color=self.table[j][i].color)\n for i in range(self.cols)] for j in range(self.rows)]\n if self.row_size * self.rows == self.height and self.col_size * self.cols == self.width:\n self.pan(-self.x_offset, -self.y_offset)\n return\n row_after_zoom, col_after_zoom = funcs.grid_snap(mouse_x, mouse_y,\n self.x + self.x_offset, self.y + self.y_offset,\n self.row_size, self.col_size)\n row_after_zoom = funcs.constrain(row_after_zoom, 0, self.rows - 1)\n col_after_zoom = funcs.constrain(col_after_zoom, 0, self.cols - 1)\n d_mouse_x = (mouse_x - self.table[row_after_zoom][col_after_zoom].x)\n d_mouse_y = (mouse_y - self.table[row_after_zoom][col_after_zoom].y)\n needed_mouse_x = int(self.table[row_after_zoom][col_after_zoom].width * mouse_x_rel)\n needed_mouse_y = int(self.table[row_after_zoom][col_after_zoom].height * mouse_y_rel)\n dmx, dmy = d_mouse_x - needed_mouse_x, d_mouse_y - needed_mouse_y\n dr, dc = row_after_zoom - row_before_zoom, col_after_zoom - col_before_zoom\n dy, dx = dr * self.row_size + dmy, dc * self.col_size + dmx\n self.pan(dx, dy)\n\n def clear(self):\n for row in self.table:\n for px in row:\n px.color = background\n self.paint_history = []\n\n def hide(self, surface, on):\n if on:\n draw.rect(surface, background, (self.x, self.y, self.width, self.height))\n\n\nclass UISlider(UIItem):\n def __init__(self, x, y, width, height, max_v, min_v, slider_color=None, color=black, bg_color=light_gray, image=None, scale_w=6, scale_h=None, title_h=20, text=('', black), font=('arial', 15)):\n super().__init__(x, y + title_h, width, height, color=color, text=text, font=font)\n self.title_x = x\n self.title_y = y\n self.title_h = title_h\n self.slider_color = color\n if slider_color:\n self.slider_color = slider_color\n if scale_h:\n self.scale_h = scale_h\n else:\n self.scale_h = self.height - scale_w * 2\n scale_indent = max(2, (self.height - self.scale_h) // 2)\n self.scale_y = self.y + scale_indent\n self.scale_x = x + (width - scale_w) // 2\n self.scale_w = scale_w\n self.min_v = min_v\n self.max_v = max_v\n self.bg_color = bg_color\n self.slider_r = scale_w\n self.slider_y = self.scale_y + self.scale_h - self.slider_r\n self.value = min_v\n self.title = Rectangle(x, y, width + 1, title_h, color=bg_color, image=image, text=text, font=font)\n self.title.text = self.title.text[:self.title.text.find(' ') + 1] + str(self.value)\n\n def move_slider(self):\n mouse_y = pygame.mouse.get_pos()[1]\n self.slider_y = funcs.constrain(mouse_y, self.scale_y, (self.scale_y + 
self.scale_h))\n self.value = self.max_v - funcs.map_value(self.slider_y, self.scale_y, (self.scale_y + self.scale_h), self.min_v, self.max_v)\n text = self.title.text\n self.title.text = text[:text.find(' ') + 1] + str(self.value)\n\n def set_value(self, val):\n val = funcs.constrain(val, self.min_v, self.max_v)\n self.value = val\n self.slider_y = funcs.map_value(val, self.max_v, self.min_v, self.scale_y, self.scale_y + self.scale_h)\n text = self.title.text\n self.title.text = text[:text.find(' ') + 1] + str(self.value)\n\n def draw(self, surface, col_override=None, area=None):\n draw.rect(surface, self.bg_color, ((self.x, self.y), (self.width, self.height)))\n self.title.draw(surface)\n draw.line(surface, self.color, (self.x, self.y), (self.x + self.width, self.y))\n draw.line(surface, self.color, (self.title_x, self.title_y), (self.title_x + self.width, self.title_y))\n draw.line(surface, self.color, (self.title_x, self.title_y), (self.title_x, self.y))\n draw.line(surface, self.color, (self.title_x + self.width, self.title_y), (self.title_x + self.width, self.y))\n draw.line(surface, self.color, (self.x, self.y), (self.x, self.y + self.height))\n draw.line(surface, self.color, (self.x + self.width, self.y), (self.x + self.width, self.y + self.height))\n draw.line(surface, self.color, (self.x, self.y + self.height), (self.x + self.width, self.y + self.height))\n r = self.scale_w // 2\n s_y, s_h = self.scale_y + r, self.scale_h - self.scale_w\n draw.rect(surface, self.color, ((self.scale_x, s_y), (self.scale_w, s_h)))\n draw.circle(surface, self.color, (self.scale_x + r, s_y), r)\n draw.circle(surface, self.color, (self.scale_x + r, s_y + s_h), r)\n draw.circle(surface, self.slider_color, (self.scale_x + r, self.slider_y), self.slider_r)\n\n def get_rectangle(self):\n x = max(0, self.x - 5)\n y = max(0, self.y - 5 - self.title_h)\n return (x, y, self.width + 10, self.height + 10 + self.title_h)\n\n\nclass UITooltip(UIItem):\n def __init__(self, x, y, width, height, rows, cols, color=black, bg_color=light_gray, image=None, title_h=0, text=('', black), font=('arial', 0), show_grid=True, border_w=1):\n super().__init__(x, y + title_h, width, height + title_h, color=color)\n self.title_x = x\n self.title_y = y\n self.tinted = None\n self.clicked = None\n self.selected = None\n self.show_grid = show_grid\n self.rows = rows\n self.cols = cols\n self.title_h = title_h\n self.border_w = border_w\n self.row_size = height // rows\n self.col_size = width // cols\n rs, cs = self.row_size, self.col_size\n self.title = Rectangle(x, y, width + 1, title_h, color=bg_color, image=image, text=text, font=font)\n self.table = [[Rectangle((x + 1 + j * cs), (self.y + 1 + i * rs), cs - 1, rs - 1, color=background) for j in range(cols)] for i in range(rows)]\n\n def draw_grid(self, surface):\n draw.line(surface, self.color, (self.title_x, self.title_y), (self.title_x + self.width, self.title_y), self.border_w)\n draw.line(surface, self.color, (self.title_x, self.title_y), (self.title_x, self.y), self.border_w)\n draw.line(surface, self.color, (self.title_x + self.width, self.title_y), (self.title_x + self.width, self.y), self.border_w)\n hor_space, vert_space = self.col_size, self.row_size\n x, y = self.x, self.y\n for i in range(self.rows + 1):\n draw.line(surface, self.color, (x, y), (x + self.width, y), self.border_w)\n y += vert_space\n for i in range(self.cols + 1):\n draw.line(surface, self.color, (x, self.y), (x, y - self.row_size), self.border_w)\n x += hor_space\n\n def get_rectangle(self):\n x = 
max(0, self.x - 5)\n y = max(0, self.y - 5 - self.title_h)\n return (x, y, self.width + 10, self.height + 10 + self.title_h)\n\n def update_item_sizes(self):\n self.row_size = (self.height - self.title_h) // self.rows\n self.col_size = self.width // self.cols\n for row, line in enumerate(self.table):\n for col, item in enumerate(line):\n item.height = self.row_size - 4\n item.width = self.col_size - 4\n item.x = self.x + 2 + col * self.col_size\n item.y = self.y + 2 + row * self.row_size\n if isinstance(item, UITooltip):\n item.update_item_sizes()\n\n def set_table_size(self, rows1=0, cols1=0):\n if rows1 == 0:\n rows1 = self.rows\n if cols1 == 0:\n cols1 = self.cols\n self.rows = rows1\n self.cols = cols1\n while len(self.table) < rows1:\n self.table.append([None for i in range(self.cols)])\n for i in range(self.cols):\n self.set_item(-1, i, Rectangle(1, 1, 1, 1, white))\n\n while len(self.table[0]) < cols1:\n for i, line in enumerate(self.table):\n line.append(None)\n self.set_item(i, -1, Rectangle(1, 1, 1, 1, white))\n\n while len(self.table) > rows1:\n self.table.pop()\n\n while len(self.table[0]) > cols1:\n for line in self.table:\n line.pop()\n self.update_item_sizes()\n\n def get_item(self, row1, col1):\n return self.table[row1][col1]\n\n def append_item(self, item):\n if not self.table or len([x for x in self.table[-1] if isinstance(x, Rectangle) or x is None]) == 0:\n self.height += self.row_size\n self.rows += 1\n self.table.append([None for i in range(self.cols)])\n self.set_item(-1, 0, item)\n for i in range(1, self.cols):\n self.set_item(-1, i, Rectangle(1, 1, 1, 1, white))\n else:\n occupied = len([x for x in self.table[-1] if isinstance(x, UIButton)])\n self.set_item(-1, occupied, item)\n\n def pop_item(self, index=-1, col=-1):\n if self.table:\n if len(self.table[-1]) == 1:\n self.table.pop(index)\n self.height -= self.row_size\n self.rows -= 1\n self.update_item_sizes()\n else:\n self.table[-1].pop(col)\n\n def set_item(self, row, col, item):\n item.height = self.row_size - 3\n item.width = self.col_size - 3\n xcol, xrow = col, row\n if col < 0:\n xcol = xcol * -1 + 1\n if row < 0:\n xrow = xrow * -1 + 1\n item.x = self.x + 2 + xcol * self.col_size\n item.y = self.y + 2 + xrow * self.row_size\n self.table[row][col] = item\n\n def draw(self, surface):\n for row in self.table:\n for item in row:\n item.draw(surface)\n if self.selected:\n self.selected.draw_frame(surface, red, 3)\n self.title.draw(surface)\n if self.show_grid:\n self.draw_grid(surface)\n\n\nclass Popup(UIItem):\n def __init__(self, x, y, width, height, color=black, starting_items = {}, bg_color=light_gray, image=None, title_h=0, text=('', black), font=('arial', 0)):\n super().__init__(x, y + title_h, width, height + title_h, color=color)\n self.title_x = x\n self.title_y = y\n self.items = starting_items\n self.tinted = None\n self.clicked = None\n self.bg_color = bg_color\n self.image = image\n self.title_h = title_h\n self.title = Rectangle(x, y, width, title_h, color=bg_color, image=image, text=text, font=font)\n\n def add_item(self, item, x, y, name='', size = None, reference_element=None):\n if reference_element:\n if reference_element in self.items.values():\n item.x = reference_element.x + x\n item.y = reference_element.y + y\n else:\n item.x = self.x + x\n item.y = self.y + y\n if size:\n item.width, item.height = size\n else:\n item.width, item.height = min(self.width, item.width), min(self.height, item.height)\n self.items[name] = item\n\n def get_rectangle(self):\n return self.x, self.title_y, 
self.width, self.height + self.title_h\n\n def draw(self, surface, color_override=None):\n draw.rect(surface, self.bg_color, (self.x + 1, self.title_y + 1, self.width - 2, self.height + self.title_h - 2))\n if self.image:\n img = pygame.transform.scale(self.image, (self.width, self.height))\n surface.blit(img, (self.x, self.y))\n for item in self.items.values():\n item.draw(surface)\n if self.title:\n self.title.draw(surface)\n draw.rect(surface, self.color, (self.x + 1, self.title_y + 1, self.width - 2, self.height + self.title_h - 2), 2)\n draw.line(surface, self.color, (self.x, self.y), (self.x + self.width, self.y), 2)\n","repo_name":"PaperPlane0/PixelPy","sub_path":"UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":32570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"7015889198","text":"from django.shortcuts import render\nfrom pdfParse.settings import UPLOAD_DIR, STATIC_URL\nfrom .forms import UploadFileForm\nfrom nltk.corpus import wordnet\nfrom pdfminer.high_level import extract_text # extract text from pdf\nimport yake # natural language processing module\n\n\ndef parse_pdf(request, pdfName, context):\n try:\n # turning entire pdf into a string\n exctext = extract_text(str(pdfName))\n except Exception as e:\n print(e)\n exctext = None # ensure the name exists if extraction fails\n Division_list = [\n {\"division_name\": \"Map Division, Schwarzman\",\n \"tags\": [\"map\", \"Global\", \"US\"],\n 'updated_tags': [\"map\", \"Global\", \"US\"]\n },\n {\"division_name\": \"Manuscripts, Archives & Rare Books, Schomburg\",\n \"tags\": [\"education\", \"learning\", \"schools\", 'student', 'campus,'],\n \"updated_tags\": [\"Education\", \"learning\", \"Schools\"]\n }\n ]\n\n\n # appending synonyms of tags for each division['updated_tags']\n for div in Division_list:\n for tags in div.get('tags', None):\n if tags is not None:\n for syn in wordnet.synsets(tags):\n for l in syn.lemmas():\n if l.name() not in div[\"updated_tags\"]:\n div['updated_tags'].append(l.name().lower())\n else:\n print('invalid synonym')\n\n if exctext:\n # Natural Language Processing\n # kw_extractor = yake.KeywordExtractor() # initializing the keyword extractor\n max_ngram_size = 1 # maximum size of ngrams (size of keywords)\n deduplication_threshold = 0.90 # 90% probability\n numOfKeywords = 200 # number of keywords to extract\n custom_kw_extractor = yake.KeywordExtractor(lan=\"en\", n=max_ngram_size, dedupLim=deduplication_threshold,\n top=numOfKeywords, features=None) # setting the parameters of the keyword extractor\n keywords = custom_kw_extractor.extract_keywords(\n exctext) # extracting keywords from the text\n key_words = []\n for word in keywords:\n try:\n key_words.append(word[0].lower())\n except Exception as e:\n print(e)\n # matching keywords\n matches = {}\n matched_words = []\n for div in Division_list:\n matches[div['division_name']] = 0\n\n for div in Division_list:\n for tag in div['updated_tags']:\n for words in keywords:\n if (tag in words) or (tag == words):\n matches[div['division_name']] += 1\n count = matches[div['division_name']]\n matched_words.append(words[0])\n max_match = max(matches, key=matches.get)\n context['matched_words'] = matched_words\n context['max_match'] = max_match\n context['tag_count'] = matches[context['max_match']]\n else:\n print(\"No text found or error while extracting text from pdf\")\n return render(request, 'classifier/home.html', context)\n\n\n\ndef home(request):\n form = UploadFileForm()\n context = {'form': form}\n if 
request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n file_name = form.instance.file\n parse_pdf(request, file_name, context)\n form = UploadFileForm()\n return render(request, 'classifier/home.html', context)\n\n# Create your views here.\n","repo_name":"amahi2001/pdfProj","sub_path":"pdfParse/classifier/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"15157779372","text":"import unittest\nfrom biothings_explorer.query_graph_handler.batch_edge_query import BatchEdgeQueryHandler\nfrom biothings_explorer.smartapi_kg.metakg import MetaKG\n\n\nclass TestBatchEdgeQueryHandler(unittest.TestCase):\n kg = MetaKG()\n kg.construct_MetaKG_sync()\n\n def test_query_function_subscribe_and_unsubscribe_function(self):\n batch_handler = BatchEdgeQueryHandler(self.kg)\n batch_handler.subscribe(1)\n batch_handler.subscribe(2)\n batch_handler.subscribe(3)\n self.assertIn(2, batch_handler.subscribers)\n batch_handler.unsubscribe(2)\n self.assertNotIn(2, batch_handler.subscribers)\n","repo_name":"newgene/bte-py","sub_path":"biothings_explorer/query_graph_handler/__test__/integration/batch_edge_query_handler_test.py","file_name":"batch_edge_query_handler_test.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"5457445775","text":"from tkinter import *\r\nRoot=Tk()\r\nRoot.title(\"Interfaz grafica\")\r\nFrame=Frame(Root,width=500,height=400)\r\nFrame.pack()\r\nImagen=PhotoImage(file=\"Imagen.png\")\r\n#Label=Label(Frame,text=\"Hola Jaider Ivan\",fg=\"red\",font=(\"Comic Sans Ms\",18))\r\n#Label.place(x=200,y=200)\r\nLabel(Frame,image=Imagen).place(x=200,y=100) #Shorthand form\r\nRoot.mainloop()","repo_name":"ivanveraj/Python","sub_path":"PycharmProjects/Interfaces Graficas/InterfazGrafica2.pyw","file_name":"InterfazGrafica2.pyw","file_ext":"pyw","file_size_in_byte":330,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"71636400408","text":"import numpy as np\nimport cv2\nimport mediapipe as mp\nimport matplotlib.pylab as plt\nimport pytesseract\nimport time\nfrom playsound import playsound\n\nFileNameCounter = int(input(\"Start from: \"))\n\nLOCATIONS = { ## X, Y, W, H\n\t\"Energy\": (10, 6, 96, 25)\n}\n\npytesseract.pytesseract.tesseract_cmd = './tesseract.exe'\ntessdata_dir_config = r'--tessdata-dir \"./tesseract-5.0.0-rc1/tessdata\"'\n\ncap = cv2.VideoCapture('http://192.168.1.56:8080/video')\ncap.set(cv2.CAP_PROP_BUFFERSIZE, 1)\n\ndef crop_minAreaRect(img, rect):\n\tbox = cv2.boxPoints(rect)\n\tbox = np.int0(box)\n\twidth = int(rect[1][0])\n\theight = int(rect[1][1])\n\tsrc_pts = box.astype(\"float32\")\n\tdst_pts = np.array([[0, height-1],\n\t\t\t\t\t\t[0, 0],\n\t\t\t\t\t\t[width-1, 0],\n\t\t\t\t\t\t[width-1, height-1]], dtype=\"float32\")\n\tM = cv2.getPerspectiveTransform(src_pts, dst_pts)\n\twarped = cv2.warpPerspective(img, M, (width, height))\n\tif width > height:\n\t\twarped = cv2.rotate(warped, cv2.ROTATE_90_CLOCKWISE)\n\treturn warped\n\ndef OCRinRect(img, x, y, w, h):\n\t# draws the box in place; the return value of cv2.rectangle is not needed\n\tcv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\tcropped = img[y:y + h, x:x + w]\n\ttext = pytesseract.image_to_string(cropped)\n\treturn text\n\nwhile True:\n\tsuccess, img = cap.read()\n\timgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n\timgray = 
cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tret, thresh = cv2.threshold(imgray, 127, 255, 0)\n\tcontours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n\t# Largest contour\n\tareas = [cv2.contourArea(c) for c in contours]\n\tmax_index = np.argmax(areas)\n\tcnt=contours[max_index]\n\n\trect = cv2.minAreaRect(cnt)\n\timg_croped = crop_minAreaRect(img, rect)\n\n\n\tkey = cv2.waitKey(1) & 0xFF\n\n\t# KEYBOARD INTERACTIONS\n\t# Q is quit\n\n\tif key == ord('q'):\n\t\tcv2.destroyAllWindows()\n\t\tbreak\n\telif key == 32:\n\t\tcv2.imwrite(f\"./Images/Unfiled/{FileNameCounter}.png\", img_croped)\n\t\tprint(\"Wrote file:\", FileNameCounter)\n\t\tFileNameCounter += 1\n\t\tplaysound('C:\\\\Users\\\\jimmy\\\\Desktop\\\\Coding\\\\Python\\\\ML\\\\Ding.mp3')\n\t\tpass\n\telif key != 255:\n\t\tprint(\"No Bindings For\", key, chr(key))\n\n\tcv2.drawContours(img, [cnt], 0, (255,0,0), 3)\n\tcv2.imshow(\"LIVE\", img)\n\tcv2.imshow(\"CROP\", img_croped)\n","repo_name":"code32123/FishML","sub_path":"Characteristics/Scanner.py","file_name":"Scanner.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34851317787","text":"\nimport subprocess\nfrom packaging import version\nimport json\nimport requests\nfrom distutils.version import StrictVersion\nimport os\n\n\ndef F_PipVersionCheck(package_name):\n '''\n Author : Arun Kumar\n Purpose : To install latest version of PIP\n '''\n\n global ver\n global oldVer\n global subprocessOp\n global subprocess_return\n\n url = \"https://pypi.python.org/pypi/{}/json\".format(package_name)\n data = requests.get(url).json()\n ver=sorted(list(data[\"releases\"].keys()), key=StrictVersion, reverse=True)\n\n subprocessOp = subprocess.Popen(\"pip --version\", shell=True, stdout=subprocess.PIPE)\n subprocess_return = subprocessOp.stdout.read()\n oldVer=str(subprocess_return).split(\" \")[1]\n\n if version.parse(oldVer)1]\n split = [1 for i in range(len(r.T)) if r.T[i].sum()>1] \n tp = [1 for i in range(len(r)) if r[i].sum()==1]\n tp = len(tp) + len(split)\n fp = [1 for i in range(len(r)) if r[i].sum()==0]\n fp = len(fp) + len(split)\n fn = len(gt) - tp\n \n return {\n \"tp\": tp, \"fp\": fp, \"fn\": fn, \n \"join\": len(join), \"split\": len(split)\n }\n\ndef get_track_performance_old(pred_t, gt_t, output, pipeline='maskrcnn'):\n '''\n For evaluating tracking performance on benchmark data from \n Yeast Image Toolkit.\n Compares the prediction results with ground truth data. \n Parameters\n ----------\n pred_t : ndarray\n Tracking prediction data array with int type.\n gt_t : ndarray\n Tracking ground truth data array with int type.\n output : dict\n Detecron2 predictor output from the detecron2 Mask R-CNN model.\n pipeline : str, optional\n Can be set to 'maskrcnn' or 'YeaZ'. 
The default is 'maskrcnn'.\n Returns\n -------\n dict\n Performance indicators: true positives (tp), false positives (fp), \n false negatives (fn), joined tracks (join), \n split tracks (split).\n '''\n pred = copy.deepcopy(pred_t)\n gt = copy.deepcopy(gt_t)\n if pipeline == 'YeaZ':\n masks = [mask for mask in output]\n if pipeline == 'maskrcnn': \n masks = [\n m for i in output for m in np.array(\n i['instances'].pred_masks.to('cpu'), dtype=int)\n ]\n r = np.zeros((len(pred),len(gt)))\n c1 = 0\n labels_matched = []\n for pred_frame, pred_lab, mask in zip(pred[:,0], pred[:,1], masks):\n c2 = 0\n for gt_frame, gt_lab, gt_x, gt_y in zip(\n gt[:,0], gt[:,1], gt[:,2], gt[:,3]\n ):\n if (pred_frame==gt_frame) & (mask[gt_y,gt_x]==1) & (pred_lab!=-1):\n r[c1,c2]=1\n labels_matched.append((pred_lab, gt_lab))\n c2+=1\n c1+=1\n \n #n_matched_tracks = len(Counter(labels_matched)) \n tracking_pairs = [i for i in Counter(labels_matched).keys()]\n tracking_pairs = [\n [i for i,j in tracking_pairs], [j for i,j in tracking_pairs]\n ] \n join, c_0 = np.unique(tracking_pairs[0], return_counts=True)\n join = len(join[c_0>1])\n split, c_1 = np.unique(tracking_pairs[1], return_counts=True)\n split = len(split[c_1>1])\n \n #calculate by # of correct links\n n_matched_links = sum(np.array(list(Counter(labels_matched).values()))-1)\n pred_number_of_links = sum(np.array(list(Counter(pred[:,1]).values()))-1)\n gt_number_of_links = sum(np.array(list(Counter(gt[:,1]).values()))-1)\n fn = gt_number_of_links - n_matched_links\n fp = pred_number_of_links - n_matched_links\n \n return {\n \"tp\": n_matched_links, \"fp\": fp, \"fn\": fn,\n \"join\": join, \"split\": split\n }\n\ndef get_track_performance(pred_t, gt_t, output, pipeline='maskrcnn'):\n '''\n For evaluating tracking performance on benchmark data from \n Yeast Image Toolkit.\n Compares the prediction results with ground truth data. \n Parameters\n ----------\n pred_t : ndarray\n Tracking prediction data array with int type.\n gt_t : ndarray\n Tracking ground truth data array with int type.\n output : dict\n Detecron2 predictor output from the detecron2 Mask R-CNN model.\n pipeline : str, optional\n Can be set to 'maskrcnn' or 'YeaZ'. 
The default is 'maskrcnn'.\n Returns\n -------\n dict\n Performance indicators: true positives (tp), false positives (fp), \n false negatives (fn), joined tracks (join), \n split tracks (split).\n '''\n pred = copy.deepcopy(pred_t)\n gt = copy.deepcopy(gt_t)\n if pipeline == 'YeaZ':\n masks = [mask for mask in output]\n if pipeline == 'maskrcnn': \n masks = [\n m for i in output for m in np.array(\n i['instances'].pred_masks.to('cpu'), dtype=int)\n ]\n r = np.zeros((len(pred),len(gt)))\n c1 = 0\n labels_matched = []\n for pred_frame, pred_lab, mask in zip(pred[:,0], pred[:,1], masks):\n c2 = 0\n for gt_frame, gt_lab, gt_x, gt_y in zip(\n gt[:,0], gt[:,1], gt[:,2], gt[:,3]\n ):\n if (pred_frame==gt_frame) & (mask[gt_y,gt_x]==1) & (pred_lab!=-1):\n r[c1,c2]=1\n labels_matched.append((pred_lab, gt_lab))\n c2+=1\n c1+=1\n \n #n_matched_tracks = len(Counter(labels_matched)) \n tracking_pairs = [i for i in Counter(labels_matched).keys()]\n tracking_pairs = [\n [i for i,j in tracking_pairs], [j for i,j in tracking_pairs]\n ] \n join, c_0 = np.unique(tracking_pairs[0], return_counts=True)\n join = len(join[c_0>1])\n split, c_1 = np.unique(tracking_pairs[1], return_counts=True)\n split = len(split[c_1>1])\n \n #calculate by # of correct links\n n_matched_links = sum(np.array(list(Counter(labels_matched).values()))-1) - split - join \n pred_number_of_links = sum(np.array(list(Counter(pred[:,1]).values()))-1)\n gt_number_of_links = sum(np.array(list(Counter(gt[:,1]).values()))-1)\n fn = gt_number_of_links - n_matched_links + split\n fp = pred_number_of_links - n_matched_links + join \n \n return {\n \"tp\": n_matched_links, \"fp\": fp, \"fn\": fn,\n \"join\": join, \"split\": split\n }\n\n\nimport warnings\ndef calculate_metrics(results, *args):\n '''\n Calculate 4 standard performance metrics using performance indicators.\n Parameters\n ----------\n results : dict\n Contains at least 3 performance indicators: \n true positives (tp), false positives (fp), false negatives (fn)\n Returns\n -------\n dict\n Performance metrics outcomes for \n F1-score, accuracy, precision and recall.\n '''\n if len(args) > 0:\n warnings.warn('Using deprecated arguments in calculate_metrics, they will be removed.')\n \n precision = results[\"tp\"]/(results[\"tp\"]+results[\"fp\"])\n recall = results[\"tp\"]/(results[\"tp\"]+results[\"fn\"])\n accuracy = results[\"tp\"]/(results[\"tp\"]+results[\"fp\"]+results[\"fn\"])\n F = 2 * ((precision*recall) / (precision + recall))\n \n return {\n 'F1-score': F, 'Accuracy': accuracy, \n 'Precision': precision, 'Recall': recall\n }\n\n\ndef match_detections_and_ground_truths(ground_truth, detections, masks):\n \"\"\"Considering ground truth coordinates versus segmentation masks,\n yields tuples (ground_truth_index, detection_index) for every\n ground truth sample and detection in the same frame, such that the\n ground truth (x, y) coordinate matches the mask:\n\n # dataframe location, NOT index\n `masks[detection_location, y, x] == True`\n \n `detections` and `masks` must have the same length, as each item of mask is a\n height x width segmentation mask for that detection.\n\n Note that mask indices should match detection locations.\n\n `ground_truths`, `detections` must be dataframes with columns\n [`frame`, `x`, `y`] and [`frame`, `mask`] respectively.\n \n Their indices must be unique.\n\n The `mask` column must point to the index of the mask for that detection,\n usually this is incremental from 0.\n \"\"\"\n matches = []\n # iterate through ground truth and detected cells per 
time frame\n for frame, frame_ground_truths in ground_truth.groupby('frame'):\n frame_detections = detections[detections['frame'] == frame]\n frame_masks = masks[frame_detections['mask'].values]\n x, y = np.round(frame_ground_truths[['x', 'y']].values).astype(int).T\n mask_values_at_yx = frame_masks[:, y, x]\n found = mask_values_at_yx.sum(0) > 0\n detection_indices, ground_truth_indices = np.where(mask_values_at_yx)\n found = found[ground_truth_indices]\n matches.extend(zip(\n frame_ground_truths.index[ground_truth_indices[found]],\n frame_detections.index[detection_indices[found]]))\n return pd.DataFrame(matches, columns=['ground truth index', 'detection index'])\n\n\ndef get_segmention_metrics(ground_truth, detections, masks):\n \"\"\"For the segmentation task, returns how many true positives and true/false\n positives as a dictionary including how many ground truths were detected by\n the same mask (merged).\n \n Arguments the same as `match_detections_and_ground_truths`\"\"\"\n matches = match_detections_and_ground_truths(ground_truth, detections, masks)\n\n # We defined true positives to be those ground truths that are picked up as\n # a detection, but only if this detection has only one ground truths\n # assinged to it.\n\n # figuring out which detections have more than one ground truth, they 'join'\n # ground truths together, and filtering out matches on this detection.\n detection_joining_gt = matches.groupby('detection index').count() > 1\n detection_joining_gt = detection_joining_gt.index[detection_joining_gt['ground truth index']]\n unjoined_matches = matches[~matches['detection index'].isin(detection_joining_gt)]\n # then the amount of true positives, equals the amount of ground truths that\n # still have a detection assigned.\n tp = len(unjoined_matches['ground truth index'].unique())\n\n # Anything that\n split = int((unjoined_matches.groupby('ground truth index').count() - 1).sum())\n metrics = {\n 'tp': tp,\n 'fp': len(set(detections.index) - set(unjoined_matches['detection index'])) + split,\n 'fn': len(set(ground_truth.index) - set(unjoined_matches['ground truth index'])),\n 'join': len(detection_joining_gt), 'split': split,\n }\n return metrics\n\n\ndef compare_links_old(a, b, mapping):\n \"\"\"\n `a` and `b` dataframes with columns `frame` and `cell`.\n\n For every pair of rows in `a`, where the second is one\n frame ahead of the first, and where both have the same `cell` value, count as:\n\n over matching: if the first occurs in several such pairs (except for the first found),\n unmapped: if both can't be mapped as per indices to rows in b,\n unmatched: if the matched rows in `b` have different `cell` values,\n true: if the matched rows in `b` have the `cell` values,\n false counts all pairs that didn't fall into true, e.g. 
\n\n\ndef compare_links_old(a, b, mapping):\n    \"\"\"\n    `a` and `b` are dataframes with columns `frame` and `cell`.\n\n    For every pair of rows in `a`, where the second is one\n    frame ahead of the first, and where both have the same `cell` value, count as:\n\n    over matching: if the first occurs in several such pairs (except for the first found),\n    unmapped: if both can't be mapped as per indices to rows in `b`,\n    unmatched: if the matched rows in `b` have different `cell` values,\n    true: if the matched rows in `b` have the same `cell` values,\n    false: counts all pairs that didn't fall into true, i.e. this sums over matching,\n    unmapped and unmatched.\"\"\"\n    true, over_matching, unmapped, unmatched = 0, 0, 0, 0\n    for cell, rows in a.groupby('cell'):\n        for index0, frame0 in rows['frame'].items():\n            matched = rows.index[(rows['frame'] - frame0) == 1]\n            # should be 1 match; others are assumed falses.\n            over_matching += len(matched[1:])\n            for index1 in matched[:1]: # loops once or not at all\n                if index0 not in mapping or index1 not in mapping:\n                    unmapped += 1\n                elif b['cell'][mapping[index0]] == b['cell'][mapping[index1]]:\n                    assert mapping[index0] != mapping[index1], (\n                        \"Uncanny, different ground truths were mapped to the same detection\")\n                    true += 1\n                else:\n                    unmatched += 1\n    return {'true': true, 'false': over_matching+unmapped+unmatched,\n            'over matching': over_matching, 'unmapped': unmapped,\n            'unmatched': unmatched}\n\n\ndef get_tracking_metrics_old(ground_truth, detections, masks):\n    \"\"\"For the tracking task, returns true positive, false positive and false\n    negative counts as a dictionary, including how many ground truths were detected\n    by the same mask (merged).\n    \n    Arguments are the same as `match_detections_and_ground_truths`.\"\"\"\n    matches = pd.DataFrame(\n        match_detections_and_ground_truths(ground_truth, detections, masks),\n        columns=['ground truth index', 'detection index'])\n    gt_to_det = {gt: rows['detection index'].values[0] for gt, rows in matches.groupby('ground truth index') if len(rows) == 1}\n    det_to_gt = {det: rows['ground truth index'].values[0] for det, rows in matches.groupby('detection index') if len(rows) == 1}\n\n    comparison_gt = compare_links_old(ground_truth, detections, gt_to_det)\n    comparison_det = compare_links_old(detections, ground_truth, det_to_gt)\n    assert comparison_gt['true'] == comparison_det['true'], (\n        \"Uncanny, different link match counts going from ground truth to \"\n        \"detections than vice versa. 
This shouldn't happen\"\n )\n assert comparison_gt['over matching'] == 0, (\n \"Ground truth has marked multiple cells in one frame as the same\")\n return {'tp': comparison_gt['true'], 'fp': comparison_det['false'],\n 'fn': comparison_gt['false'],\n # when a cell was tracked multiple times in a frame.\n 'over matching': comparison_det['over matching'],\n # also specify propagated segmentation errors\n 'segmentation fn': comparison_gt['unmapped'],\n 'segmentation fp': comparison_det['unmapped']}\n\n\ndef compare_links(a, b, mapping):\n a = a[a['cell'] >= 0].copy()\n a['other index'] = [mapping.get(i, -2) for i in a.index]\n a['previous frame'] = a['frame'] - 1\n\n to_other = pd.merge(\n a, a, how='inner',\n left_on=['frame','cell'],\n right_on=['previous frame', 'cell']\n )[['other index_x', 'other index_y']].applymap(\n lambda value: (value if value < 0 else int(b.loc[value]['cell'])))\n to_other.columns = [('other cell', 't'), ('other cell', 't+1')]\n\n propagated = (to_other < 0).max(1)\n to_other = to_other[~propagated]\n outliers = (to_other == -1).max(1)\n to_other = to_other[~outliers]\n\n true = (to_other[('other cell', 't')] == to_other[('other cell', 't+1')]).sum()\n false = (to_other[('other cell', 't')] != to_other[('other cell', 't+1')]).sum()\n\n return {'untracked': outliers.sum(), 'unmapped': propagated.sum(),\n 'true': true, 'false': false + outliers.sum() + propagated.sum()}\n\n\ndef get_tracking_metrics(ground_truth, detections, masks):\n # If multiple detections got the same label in a frame,\n # either select the one with the highest segmentation score or\n # the first one.\n if 'segmentation_score' in detections.columns:\n sorted_detections = detections.sort_values(\n ['frame', 'cell', 'segmentation_score'], ascending=False)\n\n best_detection_in_frame = (\n (sorted_detections.groupby(['frame', 'cell']).cumcount() == 0) |\n (sorted_detections['cell'] < 0))\n\n det = detections.loc[best_detection_in_frame]\n overmatching = (~best_detection_in_frame).sum()\n else:\n first_detection_in_frame = (\n (detections.groupby(['frame', 'cell']).cumcount() == 0) |\n (detections['cell'] < 0))\n det = detections.loc[first_detection_in_frame]\n overmatching = (~first_detection_in_frame).sum()\n\n matches = match_detections_and_ground_truths(ground_truth, det, masks)\n detection_joining_gt = matches.groupby('detection index')['ground truth index'].count() > 1\n detection_joining_gt = detection_joining_gt.index[detection_joining_gt]\n unjoined_matches = matches[~matches['detection index'].isin(detection_joining_gt)]\n\n assert (unjoined_matches.groupby('detection index')['ground truth index']\n .count() > 1).sum() == 0,(\n \"Uncanny, joins should have been removed\")\n\n det_to_gt = unjoined_matches.groupby('detection index')['ground truth index'].first()\n gt_to_det = unjoined_matches.groupby('ground truth index')['detection index'].first()\n comparison_gt = compare_links(ground_truth, det, gt_to_det)\n comparison_det = compare_links(det, ground_truth, det_to_gt)\n assert comparison_det['true'] == comparison_det['true'], (\n \"Uncanny, different links matches going from ground truth to \"\n \"detections as vice versa. 
This shouldn't happen\")\n assert comparison_det['untracked'] == 0, (\n \"Uncanny, ground truth should not have untracked (cell == -1) cell labels\")\n return {'tp': comparison_gt['true'], 'fp': comparison_det['false'],\n 'fn': comparison_gt['false'],\n # when a cell was tracked multiple times in a frame.\n 'over matching': overmatching,\n # ground truth links matched in segmentation but note tracked.\n 'tracking fn': comparison_gt['untracked'],\n # also specify propagated segmentation errors\n 'segmentation fn': comparison_gt['unmapped'],\n 'segmentation fp': comparison_det['unmapped'],\n 'untracked fn': comparison_det['untracked'],}\n","repo_name":"DanKaptijn/yeastcells-detection-maskrcnn","sub_path":"yeastcells/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":17634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1996201342","text":"# 计算线性代数\nimport tensorflow as tf\nimport numpy as np #科学计算模块\n\n# create data, X数据是100\nx_data = np.random.rand(100).astype(np.float32) # 生成100个随机数列\ny_data = x_data*0.5 + 0.8 #线性函数(二元一次方程) 其中0.1为weight, 0.3为biases\n\n# 创建结构\nWeights = tf.Variable(tf.random_uniform([1], -1.0, 1.0)) # tf.Variable定义变量, 生成一个从-1到1的随机数\nbiases = tf.Variable(tf.zeros([1])) # 定义偏值为0\ny = Weights * x_data + biases\nloss = tf.reduce_mean(tf.square(y-y_data)) # 计算 y 和 y_data 的误差:\noptimizer = tf.train.GradientDescentOptimizer(0.1) # 使用优化器减少误差,梯度下降优化器\ntrain = optimizer.minimize(loss)\n\ninit = tf.global_variables_initializer() # 初始化变量\nsess = tf.Session()\nsess.run(init) # Very important\n\nfor step in range(2000):\n sess.run(train)\n if step % 20 == 0:\n print(step, sess.run(Weights), sess.run(biases))\n\n#print(np.random.rand(100).astype(np.float32))\n#print(x_data)\n#print(y_data)\nprint(Weights)","repo_name":"Coralma/tensorflow-sample","sub_path":"basic/math_calculator.py","file_name":"math_calculator.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"36916710719","text":"#min max(제출용)\nimport sys\nsys.stdin = open('4828.txt')\nT = int(input())\n\nfor tc in range(1,T+1):\n N = int(input())\n arr = list(map(int,input().split()))\n maxV = 0\n minV = arr[0]\n for i in range(N):\n if arr[i] > maxV:\n maxV = arr[i]\n if arr[i] < minV:\n minV = arr[i]\n print(f'#{tc} {maxV-minV}')\n","repo_name":"burgerfacegirl/Algorithm","sub_path":"SWEA/4828.py","file_name":"4828.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22107639160","text":"#!/usr/bin/env python\n\n\nfrom dataportal.broker.simple_broker import fill_event\nfrom metadatastore.api import find_events\n\ndef blank_events(header):\n \"Return a list of unfilled events.\"\n from dataportal.broker import DataBroker as db\n raw_events = list(db.fetch_events(header, fill=False))\n raw_events.reverse()\n return raw_events\n\n\ndef events_generator(header):\n \"Return a generator of Events. 
Large (nonscalar) data is lazy-loaded.\"\n    for e in blank_events(header):\n        fill_event(e)\n        yield e\n\n\ndef ui_imagearray(header, index=0):\n    \"Return image array from the header object and event index.\"\n    e = blank_events(header)[index]\n    fill_event(e)\n    nm = [k for k in e.data if k.endswith('image_lightfield')][0]\n    rv = e.data[nm][0]\n    return rv\n\n\ndef ui_nonzerofraction(header, index=0):\n    x = ui_imagearray(header, index)\n    cnz = (x.flatten() != 0).sum()\n    rv = float(cnz) / x.size\n    return rv\n\n\ndef has_pe1signal(header):\n    \"\"\"Return True if header contains a non-trivial pe1_image_lightfield.\n\n    pe1_image_lightfield must exist and it must have at least 10 unique\n    values.\n    \"\"\"\n    from dataportal import get_images, get_events\n    from filestore.handlers import IntegrityError\n    detname = 'pe1_image_lightfield'\n    if not header.descriptors: return False\n    e = next(get_events(header, fill=False))\n    if detname not in e.data:\n        return False\n    try:\n        imgs = get_images(header, 'pe1_image_lightfield')\n    except (IntegrityError, IOError):\n        return False\n    A = imgs.get_frame(0)\n    uniquevalues = set(A.flatten())\n    rv = (len(uniquevalues) >= 10)\n    return rv\n","repo_name":"NSLS-II-XPD/ipython_ophyd","sub_path":"archived/uiophyd/brokerutils.py","file_name":"brokerutils.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"7896032989","text":"from flask import Flask, request, jsonify\nfrom flask_cors import CORS, cross_origin\nfrom layout import load_layout, load_csv_embeddings, t_sne_coordinates, hierarchical_coordinates, fit_range\nfrom random import random, seed\nfrom sklearn.metrics.pairwise import euclidean_distances\nimport numpy as np\nRANDOM_SEED = 123451251\nDEFAULT_RANGE = (0,100)\nDIM = 300\n\napp = Flask(__name__)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\nwords,vecs = load_csv_embeddings('embeddings/glove.840B.300d.txt', ' ')\nvecs = np.array(vecs)\nmask = np.random.choice(len(vecs) - 1, 5000)\navg_distance = np.mean(euclidean_distances(vecs[mask]))\nprint('avg_distance: {}'.format(avg_distance))\n\nembeddings = { word: embedding for word, embedding in zip(words, vecs) }\n\ndef generate_similarity_layout(words, embeddings, target_range=DEFAULT_RANGE, layout='t-sne'):\n    \"\"\"\n    Generates a layout for the given words from the given embeddings dictionary.\n    Coordinates are based on the similarity between words.\n    Returns a list of coordinates, one for each given word.\n\n    If a word is not in the dictionary, a semi-random coordinate is returned instead.\n    The upper bound of `target_range` determines the maximum coordinate value.\n    \"\"\"\n    np.random.seed(RANDOM_SEED)\n    X = [embeddings.get(word, np.random.random(DIM) * target_range[1]) for word in words]\n    if layout == 't-sne':\n        points = t_sne_coordinates(X)\n    else:\n        points = hierarchical_coordinates(X, avg_distance)\n    return fit_range(points, target_range[0], target_range[1])\n    \ndef json_friendly(coordinates):\n    \"\"\"Flask does not support numpy data structures for json conversion.\"\"\"\n    return [[float(v) for v in coord] for coord in coordinates]
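\n\n# Illustration (not part of the original service): json_friendly turns nested\n# numpy values into plain Python floats that Flask can serialize, e.g.\n#\n#     json_friendly(np.array([[0.5, 1.0]]))  # -> [[0.5, 1.0]]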
\n \"\"\"\n seed(RANDOM_SEED) \n points = []\n for word in words:\n coordinate = list(layout.get(word, [random() * target_range[1], random() * target_range[1]]))\n points.append(coordinate)\n return fit_range(points, target_range[0], target_range[1])\n\n@app.route('/')\n@cross_origin()\ndef hello_world():\n return 'Service is Running!'\n\n@app.route('/layout/embeddings', methods=[ 'POST' ])\n@cross_origin()\ndef generate_layout():\n if (request.is_json):\n req = request.get_json()\n else:\n req = request.form\n words = req.get('words')\n target_range = req.get('range', DEFAULT_RANGE)\n print('generating coordinates...', end='')\n coords = generate_similarity_layout(words, embeddings, target_range)\n print('done!')\n return { 'points': json_friendly(coords) }\n\n@app.route('/layout/clustered', methods=[ 'POST' ])\n@cross_origin()\ndef generate_hierarchical_layout():\n if (request.is_json):\n req = request.get_json()\n else:\n req = request.form\n words = req.get('words')\n target_range = req.get('range', DEFAULT_RANGE)\n print('generating coordinates...', end='')\n coords = generate_similarity_layout(words, embeddings, target_range, layout='hierarchical')\n print('done!')\n return { 'points': json_friendly(coords) }\n","repo_name":"simulacrum6/comment-vis","sub_path":"layout/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71008576727","text":"import matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nfrom matplotlib.offsetbox import AnchoredText\nfrom cartopy.io.shapereader import Reader\nfrom cartopy.feature import ShapelyFeature\nimport geopandas as gpd\nimport pandas as pd\nimport matplotlib.patches as mpatches\nfrom scipy.interpolate import griddata\nimport numpy as np\nimport matplotlib.patheffects as pe\nimport rioxarray\nimport scipy\n\n# bathymetry\n#fname = '/Users/jeanmensa/_Sync/Tanzania_data/shapefiles/GEBCO_2014_contours.shp'\n#shape_bathymetry = ShapelyFeature(Reader(fname).geometries(), ccrs.PlateCarree(), facecolor='none', edgecolor='silver',linewidth=0.6)\n\n# coastline\nfname = '/Users/jeanmensa/_Sync/Tanzania_data/shapefiles/coastlines-split-4326/poly_Africa.shp'\nshape_coastline = ShapelyFeature(Reader(fname).geometries(), ccrs.PlateCarree(), facecolor='silver', edgecolor='gray', linewidth=0.6)\n\n# mangroves\nfname = '/Users/jeanmensa/My Drive (jmensa@wcs.org)/WCS Tanzania Marine Program/GIS/Global Mangrove Distribution (USGS)/data/commondata/data0/usgs_mangroves2_TZ.shp'\nshape_mangroves = ShapelyFeature(Reader(fname).geometries(), ccrs.PlateCarree(), facecolor='green', edgecolor='darkgreen', linewidth=0.4)\n#mangrove_label = mpatches.Rectangle((0, 0), 1, 0.1, linewidth=1, facecolor='green', edgecolor='green' )\n\n# coral reefs\nfname = '/Users/jeanmensa/My Drive (jmensa@wcs.org)/WCS Tanzania Marine Program/Data/CoralReef/East_Africa/Tanzania_GCRMN.shp'\nshape_coralreefs = ShapelyFeature(Reader(fname).geometries(), ccrs.PlateCarree(), facecolor=[(0,191/255,1)], edgecolor=[(0,140/255,1)], linewidth=0.2)\n#coral_label = mpatches.Rectangle((0, 0), 1, 0.1, linewidth=1, facecolor=[(0,191/255,1)], edgecolor=[(0,140/255,1)])\n\n# MPA\nfname = '/Users/jeanmensa/My Drive (jmensa@wcs.org)/WCS Tanzania Marine Program/GIS/MCA/TUMCA.shp'\nshape_MPA = ShapelyFeature(Reader(fname).geometries(), ccrs.PlateCarree(), facecolor=[(0,191/255,1)], edgecolor=[(0,140/255,1)], linewidth=0.2)\n\n# features layers \n# 
# fname = '/Users/jeanmensa/My Drive (jmensa@wcs.org)/WCS Tanzania Marine Program/Data/Pemba/coral_reef/CoralNet_Coordinates_24may22.shp' # Misali\nfname = '/Users/jeanmensa/My Drive (jmensa@wcs.org)/WCS Tanzania Marine Program/Data/CoralReef/HB_CR_Survey/TUMCA/CoralNet_percentages_TUMCA.shp'\ndata = gpd.read_file(fname)\n\n# deep stations to fill gaps in stations > 50; this should be taken from the original survey design\nfname = '/Users/jeanmensa/My Drive (jmensa@wcs.org)/WCS Tanzania Marine Program/Data/CoralReef/HB_CR_Survey/TUMCA/Deep_coords_TUMCA.shp'\ncoords = gpd.read_file(fname)\ncoords = coords.to_crs('EPSG:4326')\n#coords = coords[coords['Area'] == 'TUMCA']\n\ndata = pd.concat([coords, data])\ndata = data.drop(['id','Area','Area_ID','UID'],axis=1)\n\ndata.loc[data['Row Labels'] != data['Row Labels'],'Sand'] = 1 # fill deep stations with Sand (NaN != NaN marks missing labels)\ndata = data.fillna(0) # fill everything else with 0\n\ndata['Depth'] = abs(data['Depth'].astype('float')) # set positive depth values\ndata['Labels'] = data.index+1 # create label field for Marxan\n\n# villages\n#fname = '/Users/jeanmensa/My Drive (jmensa@wcs.org)/WCS Tanzania Marine Program/GIS/hotosm_tz_villages_Pemba.shp'\nfname = '/Users/jeanmensa/My Drive (jmensa@wcs.org)/WCS Tanzania Marine Program/Data/Admin/ZNZ/places/places.shp'\nvillages = gpd.read_file(fname)\n\n#villages_Z3=['Wesha', 'Tundaua', 'Wambaa', 'Mkoani'] # Misali\nvillages_Z3=['Bumbwini', 'Mkokotoni', 'Kilindi', 'Nungwi','Gomani'] #TUMCA\nvillages_Z3=['Bumbwini', 'Mkokotoni', 'Gomani'] #TUMCA (overrides the previous list)\n\n\n# final grid; features are to be projected onto this grid\n\nfname = '/Users/jeanmensa/My Drive (jmensa@wcs.org)/WCS Tanzania Marine Program/Research/Analysis/Marxan/grids/TUMCA/marxan_grid_TUMCA_500m.shp'\nmarxan_grid = gpd.read_file(fname)\n\n# Misali\n\nlabel = 'Misali'\n\nfeatures = ['HC','SG','RB','Depth']\ncolor_features = {'HC':'blue','SG':'green','RB':'gray','Depth':'pink'}\ncoeff = {'HC':200,'SG':200,'RB':200,'Depth':2}\n\nxmin = 39.4\nymin = -5.5\nxmax = 39.7\nymax = -5.0\n\n# TUMCA (overrides the Misali settings above)\nlabel = 'TUMCA'\n\nfeatures = ['HC','Soft coral', 'Seagrass','Rubble','Depth']\ncolor_features = {'HC':'darkblue','Soft coral':'blue','Seagrass':'green','Rubble':'gray','Depth':'pink'}\ncoeff = {'HC':100,'Soft coral':100,'Seagrass':100,'Rubble':100,'Depth':1}\n\nxmin = 39.11\nymin = -6.05\nxmax = 39.33\nymax = -5.7\n\nxmin = 39.12\nymin = -6.02\nxmax = 39.31\nymax = -5.74\n\nextent = [xmin,xmax,ymin,ymax]\n\n# interpolation to 500m grid\n\nilons = marxan_grid.geometry.x\nilats = marxan_grid.geometry.y\n\ndata = data.to_crs('epsg:21037')\n\nlons = data.geometry.x\nlats = data.geometry.y\n\ndata_i = data.iloc[:0]\ndata_i['geometry'] = marxan_grid['geometry']\n\nfor var in features:\n    var_i = griddata((lons,lats), data[var], (ilons, ilats), method='linear')\n    data_i[var] = var_i
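\n\n# Quick sanity check of the interpolation call above (synthetic data, safe to\n# remove): on the plane f(x, y) = x + y through (0,0)=0, (1,0)=1 and (0,1)=1,\n# the midpoint (0.5, 0.5) should interpolate to 1.0.\nprint(griddata(([0, 1, 0], [0, 0, 1]), [0.0, 1.0, 1.0], ([0.5], [0.5]), method='linear'))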
\n\nfor var in features:\n    fig = plt.figure()\n    ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())\n    ax.set_extent(extent, crs=ccrs.PlateCarree())\n#    ax.add_feature(shape_bathymetry)\n    ax.add_feature(shape_coastline)\n    ax.add_feature(shape_mangroves)\n#    ax.add_feature(shape_coralreefs)\n\n    for z3 in villages_Z3:\n        village = villages[villages.name == z3]\n        plt.text(float(village.geometry.x)+0.001, float(village.geometry.y)+0.001, z3, fontsize=8, path_effects=[pe.withStroke(linewidth=1, foreground=\"white\")], transform=ccrs.PlateCarree())\n        plt.scatter(x=village.geometry.x, y=village.geometry.y, color='gray', s=12, edgecolor='black', linewidths=0.5, alpha=1, zorder=100, transform=ccrs.PlateCarree())\n\n    data_n = data_i.dropna(subset=var)\n    data_n = data_n.to_crs('epsg:4326')\n\n#    plt.tricontourf(data_n.geometry.x, data_n.geometry.y, data_n[var].values, 30, transform=ccrs.PlateCarree(),zorder=0)\n    plt.scatter(x=data_n.geometry.x, y=data_n.geometry.y, color=color_features[var], s=data_n[var].astype('float')*coeff[var], edgecolor='black', linewidths=0.5, alpha=1, zorder=100, transform=ccrs.PlateCarree())\n    #plt.scatter(x=data.geometry.x, y=data.geometry.y, color=color_features[var], s=data[var].astype('float')*coeff[var], edgecolor='black', linewidths=0.5, alpha=1, zorder=100, transform=ccrs.PlateCarree())\n\n    # dummy off-map points to build a size legend\n    plt.scatter(x=-99, y=-99, color=color_features[var], s=0.6*coeff[var], edgecolor='black', linewidths=0.5, alpha=1, zorder=0, transform=ccrs.PlateCarree(), label='60%')\n    plt.scatter(x=-99, y=-99, color=color_features[var], s=0.5*coeff[var], edgecolor='black', linewidths=0.5, alpha=1, zorder=0, transform=ccrs.PlateCarree(), label='50%')\n    plt.scatter(x=-99, y=-99, color=color_features[var], s=0.4*coeff[var], edgecolor='black', linewidths=0.5, alpha=1, zorder=0, transform=ccrs.PlateCarree(), label='40%')\n    plt.scatter(x=-99, y=-99, color=color_features[var], s=0.3*coeff[var], edgecolor='black', linewidths=0.5, alpha=1, zorder=0, transform=ccrs.PlateCarree(), label='30%')\n    plt.scatter(x=-99, y=-99, color=color_features[var], s=0.2*coeff[var], edgecolor='black', linewidths=0.5, alpha=1, zorder=0, transform=ccrs.PlateCarree(), label='20%')\n    plt.scatter(x=-99, y=-99, color=color_features[var], s=0.1*coeff[var], edgecolor='black', linewidths=0.5, alpha=1, zorder=0, transform=ccrs.PlateCarree(), label='10%')\n    plt.legend(loc='lower right', title=var)\n    #plt.legend([coral_label, mangrove_label],['coral areas','mangrove areas'])\n    plt.savefig('img/'+label+'/features_'+var+'_'+label+'.png',dpi=300,bbox_inches='tight')\n    print('img/'+label+'/features_'+var+'_'+label+'.png')\n    plt.close()\n\n\n''' features from FPM '''\n\ngears = ['Jarife','Juya','Madema','Mishipi','Mtando','Pweza']\n\n# attributes 'f_KuN', 'f_KaN', 'Mammals', 'Sea turtles', 'Sharks', 'Rays', 'Permanent', 'Temporary', 'None'\n\n# prepare for interpolation\n\nf_S_t = np.zeros(len(data_i.geometry.y))\nf_R_t = np.zeros(len(data_i.geometry.y))\nf_T_t = np.zeros(len(data_i.geometry.y))\nf_M_t = np.zeros(len(data_i.geometry.y))\n\nfor g in range(len(gears)):\n    f_gear = rioxarray.open_rasterio('/Users/jeanmensa/My Drive (jmensa@wcs.org)/WCS Tanzania Marine Program/Research/Analysis/FPM/'+gears[g]+'.nc')\n\n    interp = scipy.interpolate.RegularGridInterpolator((f_gear.y.values, f_gear.x.values), np.squeeze(f_gear['Sharks'].values), bounds_error=False, fill_value=0, method='linear')\n    f_S = interp((data_i.geometry.y, data_i.geometry.x))\n\n    interp = scipy.interpolate.RegularGridInterpolator((f_gear.y.values, f_gear.x.values), np.squeeze(f_gear['Rays'].values), bounds_error=False, fill_value=0, method='linear')\n    f_R = interp((data_i.geometry.y, data_i.geometry.x))\n\n    interp = scipy.interpolate.RegularGridInterpolator((f_gear.y.values, f_gear.x.values), np.squeeze(f_gear['Sea turtles'].values), bounds_error=False, fill_value=0, method='linear')\n    f_T = interp((data_i.geometry.y, data_i.geometry.x))\n\n    interp = scipy.interpolate.RegularGridInterpolator((f_gear.y.values, f_gear.x.values), np.squeeze(f_gear['Mammals'].values), bounds_error=False, fill_value=0, method='linear')\n    f_M = interp((data_i.geometry.y, data_i.geometry.x))\n\n    # NaN != NaN: zero out NaNs returned by the interpolator\n    f_S[f_S != f_S] = 0\n    f_R[f_R != f_R] = 0\n    f_T[f_T != f_T] = 0\n    f_M[f_M != f_M] = 0\n\n    # running average over gears\n    f_S_t = f_S_t +
 f_S/len(gears)\n    f_R_t = f_R_t + f_R/len(gears)\n    f_T_t = f_T_t + f_T/len(gears)\n    f_M_t = f_M_t + f_M/len(gears)\n\ndef scale(var):\n    return (var-np.min(var))/np.max(var-np.min(var))\n\n# NOTE: the scaled values below are immediately overwritten by the raw totals.\ndata_i['Sharks'] = scale(f_S_t)\ndata_i['Rays'] = scale(f_R_t)\ndata_i['Turtles'] = scale(f_T_t)\ndata_i['Mammals'] = scale(f_M_t)\n\ndata_i['Sharks'] = f_S_t\ndata_i['Rays'] = f_R_t\ndata_i['Turtles'] = f_T_t\ndata_i['Mammals'] = f_M_t\n\nspecies_l = ['Sharks','Rays','Turtles','Mammals']\n\ndata_i = data_i.to_crs('epsg:4326')\n\nfor var in species_l:\n    fig = plt.figure()\n    ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())\n    ax.set_extent(extent, crs=ccrs.PlateCarree())\n#    ax.add_feature(shape_bathymetry)\n    ax.add_feature(shape_coastline)\n    ax.add_feature(shape_mangroves)\n#    ax.add_feature(shape_coralreefs)\n\n    for z3 in villages_Z3:\n        village = villages[villages.name == z3]\n        plt.text(float(village.geometry.x)+0.001, float(village.geometry.y)+0.001, z3, fontsize=6, path_effects=[pe.withStroke(linewidth=1, foreground=\"white\")], transform=ccrs.PlateCarree())\n        plt.scatter(x=village.geometry.x, y=village.geometry.y, color='gray', s=12, edgecolor='black', linewidths=0.5, alpha=1, zorder=100, transform=ccrs.PlateCarree())\n\n    data_n = data_i.loc[data_i[var]>0,:]\n\n    plt.scatter(x=data_n.geometry.x, y=data_n.geometry.y, c=data_n[var].values, s=13, marker='s', transform=ccrs.PlateCarree(), zorder=0, cmap='magma', vmin=0, vmax=5.5e-07)\n    plt.savefig('img/'+label+'/features_'+var+'_'+label+'.png',dpi=300,bbox_inches='tight')\n    print('img/'+label+'/features_'+var+'_'+label+'.png')\n    plt.close()\n\n","repo_name":"jungla/marxan_analysis","sub_path":"plot_features.py","file_name":"plot_features.py","file_ext":"py","file_size_in_byte":10532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"20361840087","text":"\"\"\"\n\nView separated aerial phases and steps produced in dryft.signal.\n\nLicensed under an MIT License (c) Ryan Alcantara 2019\n\nDistributed here: https://github.com/alcantarar/dryft\n\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom dryft import signal\n\n\ndef aerial(force, aerial_values, aerial_loc, stance_begin, stance_end, good_stances, colormap=plt.cm.viridis):\n    \"\"\"Plot aerial phase waveforms with middle identified and separated aerial phase values.\n\n    Visualizes the aerial phase values used to correct for drift in `dryft.signal.detrend`.\n\n    Parameters\n    ----------\n    force : `ndarray`\n        Filtered vertical ground reaction force (vGRF) signal [n,]. Using an unfiltered signal will cause unreliable results.\n    aerial_values : `ndarray`\n        Array of force signal measured at middle of each aerial phase. Output from `signal.aerialforce()`\n    aerial_loc : `ndarray`\n        Array of frame indexes for values in aerial_values. Output from `signal.aerialforce()`\n    stance_begin : `ndarray`\n        Array of frame indexes for start of each stance phase. Output from `signal.splitsteps()`\n    stance_end : `ndarray`\n        Array of frame indexes for end of each stance phase. Same size as `begin`. 
Output from `signal.splitsteps()`\n    good_stances : `ndarray`\n        Boolean array of which stance phases meet min_tc & max_tc requirements.\n    colormap : `colormap`\n        Default is `matplotlib.plt.cm.viridis`\n\n    \"\"\"\n    # define beginning/end of aerial phases\n    if False in good_stances:\n        begin, end = signal.findgoodaerial(stance_begin, stance_end, good_stances)\n    else:\n        begin = stance_end[good_stances][:-1]\n        end = stance_begin[good_stances][1:]\n\n    if aerial_values.shape[0] == begin.shape[0] == end.shape[0]:\n        colors = colormap(np.linspace(0, 1, aerial_values.shape[0]))\n        plt.fig, (plt1, plt2) = plt.subplots(2, 1, figsize=(15, 7))\n\n        # plot of aerial phases\n        plt1.set_title('Aerial phases (black dot is middle)')\n        plt1.set_ylabel('force (N)')\n        plt1.grid()\n        for i in range(begin.shape[0]):\n            plt1.plot(force[begin[i]:end[i]],\n                      color=colors[i])\n            plt1.plot(aerial_loc[i]-begin[i], aerial_values[i],'k.')\n        # plot all the aerial phase values separately\n        plt2.set_title('Force measured at middle of aerial phases')\n        plt2.set_xlabel('Frame')\n        plt2.set_ylabel('force (N)')\n        plt2.grid()\n        for i, n in enumerate(aerial_loc):\n            plt2.plot(n, aerial_values[i],\n                      marker='o',\n                      color=colors[i])\n        plt.show(block=False)\n    else: raise IndexError(\"Number of aerial_values isn't number of steps - 1.\")
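\n\n\n# Illustration (not part of dryft): the expected call shape for `stance` below,\n# with entirely made-up force data:\n#\n#     force = np.r_[np.zeros(10), np.hanning(30) * 1000, np.zeros(10)]\n#     stance(force, begin=np.array([10]), end=np.array([40]))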
\n\n\ndef stance(force, begin, end, colormap=plt.cm.viridis):\n    \"\"\"Plots separated steps on top of each other.\n\n    Requires an `ndarray` of beginning/end of stance phase indexes and 1d force data. Use to confirm `step.split`.\n\n    Parameters\n    ----------\n    force : `ndarray`\n        Filtered vertical ground reaction force (vGRF) signal [n,]. Using an unfiltered signal will cause unreliable results.\n    begin : `ndarray`\n        Array of frame indexes for start of each stance phase.\n    end : `ndarray`\n        Array of frame indexes for end of each stance phase. Same size as `begin`.\n    colormap : `colormap`\n        Default is `matplotlib.plt.cm.viridis`\n    \"\"\"\n    colors = colormap(np.linspace(0,1,begin.shape[0]))\n\n    fig, ax = plt.subplots()\n    ax.set_title('All separated steps')\n    ax.grid()\n    ax.set_xlabel('frames')\n    ax.set_ylabel('force (N)')\n    for i,n in enumerate(end): plt.plot(force[begin[i]:end[i]], color=colors[i])\n    plt.tight_layout()\n    plt.show(block=False)","repo_name":"alcantarar/dryft","sub_path":"dryft/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"31"}
+{"seq_id":"69869689047","text":"#\n# @lc app=leetcode.cn id=19 lang=python3\n#\n# [19] Remove the Nth node from the end of a linked list\n#\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n        \"\"\"Use a queue of length n+1: take the first three items and delete the second. Single pass, memory-optimal.\"\"\"\n        from queue import Queue\n        q = Queue(maxsize=n+1)\n        q.put(head)\n        node = head.next\n        while node:\n            if q.full():\n                q.get()\n            q.put(node)\n            node = node.next\n        if not q.full():\n            return head.next  # type: ignore\n        a = q.get()\n        q.get()\n        c = q.get() if not q.empty() else None\n        a.next = c\n        return head\n\n# @lc code=end\nS = Solution()\nhead_list = [1, 2]\nn = 1\nhead = ListNode(head_list[0])\nnode = head\nfor i in head_list[1:]:\n    node.next = ListNode(i)\n    node = node.next\nL = S.removeNthFromEnd(head, n)\nwhile L:\n    print(L.val)\n    L = L.next\n","repo_name":"mkroen/mkleetcode","sub_path":"leetcode/19.删除链表的倒数第-n-个结点.py","file_name":"19.删除链表的倒数第-n-个结点.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"}
+{"seq_id":"42293151681","text":"\"\"\"\nLRCNN network training\n\"\"\"\nimport os\nimport argparse\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nfrom torch import nn\nfrom torch.utils.data import Dataset\nfrom torch.utils.data.dataloader import DataLoader\nfrom dataset import MyDataset\nimport torchvision\nimport numpy as np\nimport cv2\nfrom PIL import Image\n# from utils import AverageMeter\nfrom lrcnn_model import LRCNN\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--HR-path', type=str, default='DATA/T91/sub_LR/1')\n    parser.add_argument('--LR-path', type=str, default='DATA/T91/sub_LR/2')\n    parser.add_argument('--output-dir', type=str, default='DATA/T91/out')\n    parser.add_argument('--batch-size', type=int, default=16)\n    parser.add_argument('--seed', type=int, default=18374288)\n    parser.add_argument('--num-epoch', type=int, default=5)\n    parser.add_argument('--lr', default=0.001)\n    args = parser.parse_args()\n\n    if not os.path.exists(args.output_dir):\n        os.makedirs(args.output_dir)\n\n    cudnn.benchmark = True\n    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n    torch.manual_seed(args.seed)  # set the random seed\n    model = LRCNN().to(device)\n    criterion = nn.MSELoss()  # loss function\n    optimizer = optim.Adam([  # optimizer\n        {'params': model.conv1.parameters()},\n        {'params': model.conv2.parameters()},\n        {'params': model.conv3.parameters(), 'lr': args.lr*0.1}\n    ], lr=args.lr)\n\n    transforms_imag = torchvision.transforms.ToTensor()\n    train_input_root = './DATA/T91/sub_LR/1'\n    train_label_root = './DATA/T91/sub_LR/2'\n    eval_input_root =
 './DATA/Set14/sub_LR/1'\n    eval_label_root = './DATA/Set14/sub_LR/2'\n    dataset_train = MyDataset(train_input_root, train_label_root, transform=transforms_imag)\n    trainloader = DataLoader(dataset_train, shuffle=False)\n\n    for epoch in range(args.num_epoch):\n        model.train()\n        print(epoch)\n        print('*'*8)\n        # epoch_losses = AverageMeter()\n        for b_index, (data, label) in enumerate(trainloader):\n            x = data.to(device)\n            y = label.to(device)\n            preds = model(x)\n            # vis: dump the current prediction as an image\n            temp = (255*preds).permute(0,2,3,1).detach().cpu().numpy().astype(np.uint8)[0]\n            cv2.imwrite('./1.jpg', temp)\n            loss = criterion(preds, y)\n            # epoch_losses.update(loss.item(), len(x))\n            print(loss.item())\n\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n        torch.save(model.state_dict(), os.path.join(args.output_dir, 'epoch_{}.pth'.format(epoch)))  # save a checkpoint every epoch\n\n    model.eval()\n\n    # epoch_psnr = AverageMeter()\n\n    # for data in ev\n\n\n\n\n\n\n\n","repo_name":"chuanshuogushi/SR","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"23767172873","text":"from skimage.util import random_noise\nimport numpy as np\nfrom scipy import misc\n\n\ndef apply_noise(img, ratio, mode='s&p', scale=1):\n    \"\"\"\n    :param img:\n    :param ratio:\n    :param mode:\n    :param scale: the noise function by default normalizes the image between 0 and 1, so set the scale to 255 if you want.\n    :return:\n    \"\"\"\n    img = random_noise(img, mode=mode, amount=ratio) * scale\n    return img\n\n\ndef main():\n    path = '/media/user/research_data1/test.png'\n    img = misc.imread(path)/255\n    print(img)\n    img = apply_noise(img, 1)\n    print(img)\n    misc.imsave('test.png', img)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"eslambakr/Future_Imitiation","sub_path":"Imitation_Learning/Training/RGB/utils/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"}
+{"seq_id":"545503081","text":"import requests\nimport sys\nimport json\nimport os\nimport time\nimport logging\nimport tabulate\nimport yaml\nimport pandas as pd\nfrom pandas import ExcelWriter\nfrom logging.handlers import TimedRotatingFileHandler\n\nrequests.packages.urllib3.disable_warnings()\n\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\ndef get_logger(logfile, level):\n    '''\n    Create a logger\n    '''\n    if logfile is not None:\n\n        '''\n        Create the log directory if it doesn't exist\n        '''\n\n        fldr = os.path.dirname(logfile)\n        if not os.path.exists(fldr):\n            os.makedirs(fldr)\n\n        logger = logging.getLogger()\n        logger.setLevel(level)\n        \n        log_format = '%(asctime)s | %(levelname)-8s | %(funcName)-20s | %(lineno)-3d | %(message)s'\n        formatter = logging.Formatter(log_format)\n        \n        file_handler = TimedRotatingFileHandler(logfile, when='midnight', backupCount=7)\n        file_handler.setFormatter(formatter)\n        file_handler.setLevel(level)\n        logger.addHandler(file_handler)\n\n        return logger\n\n    return None
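\n\n# Illustration (not from the original script): typical get_logger usage; the\n# log path is made up.\n#\n#     logger = get_logger('log/example.txt', logging.INFO)\n#     logger.info('started')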
\n\n\nclass Authentication:\n\n    @staticmethod\n    def get_jsessionid(vmanage_host, vmanage_port, username, password):\n        api = \"/j_security_check\"\n        base_url = \"https://%s:%s\"%(vmanage_host, vmanage_port)\n        url = base_url + api\n        payload = {'j_username' : username, 'j_password' : password}\n        \n        response = requests.post(url=url, data=payload, verify=False)\n        try:\n            cookies = response.headers[\"Set-Cookie\"]\n            jsessionid = cookies.split(\";\")\n            return jsessionid[0]\n        except:\n            if logger is not None:\n                logger.error(\"No valid JSESSION ID returned\\n\")\n            exit()\n\n    @staticmethod\n    def get_token(vmanage_host, vmanage_port, jsessionid):\n        headers = {'Cookie': jsessionid}\n        base_url = \"https://%s:%s\"%(vmanage_host, vmanage_port)\n        api = \"/dataservice/client/token\"\n        url = base_url + api\n        response = requests.get(url=url, headers=headers, verify=False)\n        if response.status_code == 200:\n            return response.text\n        else:\n            return None\n\nif __name__ == '__main__':\n\n    try:\n\n        log_level = logging.DEBUG\n        logger = get_logger(\"log/app_route_report.txt\", log_level)\n\n        try: \n            start_date = input(\"Please enter start date(YYYY-MM-DD): \")\n            time.strptime(start_date, '%Y-%m-%d')\n        except ValueError:\n            raise ValueError(\"Incorrect start date format, please enter in YYYY-MM-DD\") \n        try: \n            end_date = input(\"Please enter end date(YYYY-MM-DD): \")\n            time.strptime(end_date, '%Y-%m-%d')\n        except ValueError:\n            raise ValueError(\"Incorrect end date format, please enter in YYYY-MM-DD\") \n\n        if logger is not None:\n            logger.info(\"Loading vManage login details from YAML\\n\")\n        with open(\"vmanage_login.yaml\") as f:\n            config = yaml.safe_load(f.read())\n\n        vmanage_host = config[\"vmanage_host\"]\n        vmanage_port = config[\"vmanage_port\"]\n        username = config[\"vmanage_username\"]\n        password = config[\"vmanage_password\"]\n\n        Auth = Authentication()\n        jsessionid = Auth.get_jsessionid(vmanage_host,vmanage_port,username,password)\n        token = Auth.get_token(vmanage_host,vmanage_port,jsessionid)\n\n        if token is not None:\n            headers = {'Content-Type': \"application/json\",'Cookie': jsessionid, 'X-XSRF-TOKEN': token}\n        else:\n            headers = {'Content-Type': \"application/json\",'Cookie': jsessionid}\n\n        base_url = \"https://%s:%s/dataservice\"%(vmanage_host,vmanage_port)\n\n        # Get Device Inventory details\n\n        api_url = \"/device\"\n\n        url = base_url + api_url\n\n        response = requests.get(url=url, headers=headers, verify=False)\n\n        device_inv = dict()\n\n        if response.status_code == 200:\n            temp = response.json()[\"data\"]\n            for item in temp:\n                if item[\"personality\"] == \"vedge\":\n                    device_inv[item[\"system-ip\"]] = [{'hostname' : item[\"host-name\"]} , {'siteid' : item[\"site-id\"]}]\n        else:\n            if logger is not None:\n                logger.error(\"Failed to retrieve device inventory\\n\")\n\n        # Get app route statistics for tunnels between Hub routers and Spoke routers.\n\n        # open excel file\n        filename = 'Tunnel Statistics %s.xlsx'%time.strftime(\"%Y-%m-%d\")\n        writer = ExcelWriter(filename)\n\n        for hub in config[\"hub_routers\"]:\n\n            api_url = \"/statistics/approute/fec/aggregation\"\n\n            payload = {\n                \"query\": {\n                    \"condition\": \"AND\",\n                    \"rules\": [\n                        {\n                            \"value\": [\n                                start_date+\"T00:00:00 UTC\",\n                                end_date+\"T23:59:59 UTC\" \n                            ],\n                            \"field\": \"entry_time\",\n                            \"type\": \"date\",\n                            \"operator\": \"between\"\n                        },\n                        {\n                            \"value\": [\n                                hub[\"system_ip\"]\n                            ],\n                            \"field\": \"local_system_ip\",\n                            \"type\": \"string\",\n                            \"operator\": \"in\"\n                        }\n                    ]\n                },\n                \"aggregation\": {\n                    \"field\": [\n                        {\n                            \"property\": \"name\",\n                            \"sequence\": 1,\n                            \"size\": 6000\n                        },\n                        {\n                            \"property\": \"proto\",\n                            \"sequence\": 2\n                        },\n                        {\n                            \"property\": \"local_system_ip\",\n                            \"sequence\": 3\n                        },\n                        {\n                            \"property\": \"remote_system_ip\",\n                            \"sequence\": 4\n                        }\n                    ],\n                    \"histogram\": {\n                        \"property\": \"entry_time\",\n                        \"type\": \"hour\",\n                        \"interval\": 24,\n                        \"order\": \"asc\"\n                    },\n                    \"metrics\": [\n                        {\n                            \"property\": \"latency\",\n                            \"type\": \"avg\"\n                        },\n                        {\n                            \"property\": \"jitter\",\n                            
\"type\": \"avg\"\n },\n {\n \"property\": \"loss_percentage\",\n \"type\": \"avg\"\n },\n {\n \"property\": \"vqoe_score\",\n \"type\": \"avg\"\n }\n ]\n }\n }\n\n url = base_url + api_url\n\n response = requests.post(url=url, headers=headers, data=json.dumps(payload), verify=False)\n\n if response.status_code == 200:\n app_route_stats = response.json()[\"data\"]\n app_route_stats_headers = [\"Date\", \"Hub\", \"Hub Siteid\", \"Spoke\", \"Spoke Siteid\", \"Tunnel name\", \"vQoE score\", \"Latency\", \"Loss percentage\", \"Jitter\"]\n table = list()\n\n date_list = list()\n hub_list = list()\n hub_siteid_list = list()\n spoke_list = list()\n spoke_siteid_list = list()\n tunnel_name_list = list()\n vqoe_list = list()\n latency_list = list()\n loss_list = list()\n jitter_list = list()\n\n print(\"\\nAverage App route statistics between %s and spokes for %s and %s\\n\"%(device_inv[hub[\"system_ip\"]][0]['hostname'],start_date,end_date))\n\n for item in app_route_stats:\n tr = [time.strftime('%m/%d/%Y', time.gmtime(item['entry_time']/1000.)), device_inv[item['local_system_ip']][0]['hostname'], device_inv[item['local_system_ip']][1]['siteid'], device_inv[item['remote_system_ip']][0]['hostname'], device_inv[item['remote_system_ip']][1]['siteid'], item['name'], item['vqoe_score'], item['latency'], item['loss_percentage'], item['jitter']]\n table.append(tr)\n\n date_list.append(time.strftime('%m/%d/%Y', time.gmtime(item['entry_time']/1000.)))\n hub_list.append(device_inv[item['local_system_ip']][0]['hostname'])\n hub_siteid_list.append(device_inv[item['local_system_ip']][1]['siteid'])\n spoke_list.append(device_inv[item['remote_system_ip']][0]['hostname'])\n spoke_siteid_list.append(device_inv[item['remote_system_ip']][1]['siteid'])\n tunnel_name_list.append(item['name'])\n vqoe_list.append(item['vqoe_score'])\n latency_list.append(item['latency'])\n loss_list.append(item['loss_percentage'])\n jitter_list.append(item['jitter'])\n\n try:\n #print(tabulate.tabulate(table, app_route_stats_headers, tablefmt=\"fancy_grid\"))\n excel_content = dict()\n excel_content[\"Date\"] = date_list\n excel_content[\"Hub\"] = hub_list\n excel_content[\"Hub Siteid\"] = hub_siteid_list\n excel_content[\"Spoke\"] = spoke_list\n excel_content[\"Spoke Siteid\"] = spoke_siteid_list\n excel_content[\"Tunnel name\"] = tunnel_name_list\n excel_content[\"vQoE score\"] = vqoe_list\n excel_content[\"Latency\"] = latency_list\n excel_content[\"Loss percentage\"] = loss_list\n excel_content[\"Jitter\"] = jitter_list\n\n df = pd.DataFrame(excel_content)\n df.to_excel(writer, device_inv[hub[\"system_ip\"]][0]['hostname'] ,index=False)\n \n except UnicodeEncodeError:\n print(tabulate.tabulate(table, app_route_stats_headers, tablefmt=\"grid\"))\n \n else:\n if logger is not None:\n logger.error(\"Failed to retrieve app route statistics %s\\n\"%response.text)\n\n writer.save()\n print(\"\\nCreated report %s\"%filename)\n\n except Exception as e:\n print('Exception line number: {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)\n ","repo_name":"CiscoDevNet/sdwan-application-route-policy","sub_path":"monitor-app-route-report.py","file_name":"monitor-app-route-report.py","file_ext":"py","file_size_in_byte":11478,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"27008972573","text":"import torch\nfrom torch import nn\nimport pandas as pd\n\nclass SimpleNN(nn.Module):\n def __init__(self) -> None:\n super(SimpleNN, self).__init__()\n self.linear = nn.Linear(300, 4)\n 
self.softmax = nn.Softmax(dim=1)\n        \n    def forward(self, X):\n        X = self.linear(X)\n        X = self.softmax(X)\n        return X\n    \nif __name__ == '__main__':\n    train_X = pd.read_csv('./data/train_X.csv', encoding='utf-8', index_col=0)\n    train_X = torch.tensor(train_X.values).float()\n    \n    model = SimpleNN()  # avoid shadowing the torch.nn import\n    with torch.no_grad():\n        print(model(train_X))","repo_name":"wadaka0821/nlp100","sub_path":"chapter8/ex71.py","file_name":"ex71.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"33641697475","text":"import json\n\npath = '../datasets/bitly_usagov/example.txt'\n# print(open(path).readline())\n\nrecords = [json.loads(line) for line in open(path, encoding='utf-8')]\n\nprint(records[0], '\\n')\n\n# 14.1.1 Counting time zones in pure Python\ntime_zones = [rec['tz'] for rec in records if 'tz' in rec]\nprint(time_zones[:10], '\\n')\n\ndef get_counts(sequence):\n    counts = {}\n    for x in sequence:\n        if x in counts:\n            counts[x] += 1\n        else:\n            counts[x] = 1\n    return counts\n\nfrom collections import defaultdict\ndef get_counts2(sequence):\n    counts = defaultdict(int) # values are initialized to 0\n    for x in sequence:\n        counts[x] += 1\n    return counts\n\ncounts = get_counts(time_zones)\nprint(counts, '\\n')\n\ndef top_counts(count_dict, n=10):\n    value_key_pairs = [(count, tz) for tz, count in count_dict.items()]\n    value_key_pairs.sort()\n    return value_key_pairs[-n:]\n\nprint(top_counts(counts), '\\n')\n\n# Using the Python standard library\nfrom collections import Counter\ncounts = Counter(time_zones)\nprint(counts.most_common(10), '\\n')\n\n\n# 14.1.2 Counting time zones with pandas\nimport pandas as pd\nframe = pd.DataFrame(records)\nprint(frame.info(), '\\n')\nprint(frame['tz'][:10], '\\n')\n\ntz_counts = frame['tz'].value_counts()\nprint(tz_counts[:10], '\\n')\n\nclean_tz = frame['tz'].fillna('Missing')\nclean_tz[clean_tz == ''] = 'Unknown'\ntz_counts = clean_tz.value_counts()\nprint(tz_counts[:10], '\\n')\n\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n# subset = tz_counts[:10]\n# sns.barplot(y=subset.index, x=subset.values)\n# plt.show()\n\nresults = pd.Series([x.split()[0] for x in frame.a.dropna()])\nprint(results[:5], '\\n')\nprint(results.value_counts()[:8], '\\n')\n\nimport numpy as np\n\ncframe = frame[frame.a.notnull()]\ncframe['os'] = np.where(cframe['a'].str.contains('Windows'), 'Windows', 'Not Windows')\nprint(cframe['os'][:5], '\\n')\n\nby_tz_os = cframe.groupby(['tz', 'os'])\n# for k, v in by_tz_os:\n#     print(k)\n#     print(v, '\\n')\nagg_counts = by_tz_os.size().unstack().fillna(0)\nprint(agg_counts[:10], '\\n')\n\nindexer = agg_counts.sum(1).argsort()\nprint(indexer[:10], '\\n')\n\n\ncount_subset = agg_counts.take(indexer[-10:])\nprint(count_subset, '\\n')\n\n\n\n# Rearrange the data for plotting\ncount_subset = count_subset.stack()\ncount_subset.name = 'total'\ncount_subset = count_subset.reset_index()\ncount_subset[:10]\n# sns.barplot(x='total', y='tz', hue='os', data=count_subset)\n# plt.show()\n\ndef norm_total(group):\n    group['normed_total'] = group.total / group.total.sum()\n    return group\nresults = count_subset.groupby('tz').apply(norm_total)\n\ng = count_subset.groupby('tz')\n\nprint('count_subset.groupby(\\'tz\\')=\\n', count_subset.groupby('tz'), '\\n')\nfor k, v in g:\n    print(k)\n    print(v, '\\n')\n\nprint(g.total)\n# print(g.total.transform('sum'))\n\n\n# sns.barplot(x='normed_total', y='tz', hue='os', data=results)\n# 
plt.show()\n\n\n\n\n","repo_name":"Thinktan/pydata_train","sub_path":"chapter14/train_14_1.py","file_name":"train_14_1.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"8722183072","text":"# Copied from: https://medium.com/python-data/effient-frontier-in-python-34b0c3043314\n# This program was created for my own educational purposes to truly understand Markowitz Portfolio Theory\n# No part of this program is used for commercial purposes\n\nimport quandl\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nquandl.ApiConfig.api_key = '3qNsf3VUNsvytS7sS_7W'\nselected = ['TSLA', 'FB', 'GOOGL', 'ADBE', 'AAPL']\ndata = quandl.get_table('WIKI/PRICES', ticker = selected, qopts = { 'columns': ['date', 'ticker', 'adj_close'] }, date = { 'gte': '2018-1-1', 'lte': '2018-12-31' }, paginate=True)\n\nclean = data.set_index('date')\ntable = clean.pivot(columns='ticker')\n\n# Calculate daily & annual returns of the stocks\nreturns_daily = table.pct_change()\nreturns_annual = returns_daily.mean() * 252 # number of trading days: 252\n\n# print(returns_daily.head())\n# print(returns_annual.head())\n\n# Calculate daily & annual covariance\ncov_daily = returns_daily.cov() # generate covariance matrix\ncov_annual = cov_daily * 252 # annualize covmat\n\n# Risk Free Return Rate\nrisk_free = 0.03\n\n# Lists to store portfolio attributes\nport_returns = []\nport_volatility = []\nstock_weight = []\nsharpe_ratio = []\n\n# set the number of combinations for imaginary portfolios\nnum_assets = len(selected)\nnum_portfolio = 50000 # number of imaginary portfolios\n\n# Populate empty lists with each portfolio's attributes\nfor port in range(num_portfolio):\n    weights = np.random.random(num_assets) # returned as a length-n array\n    weights /= np.sum(weights) # normalize weights to sum to 1\n    returns = np.dot(weights, returns_annual) # portfolio expected return\n    volatility = np.sqrt(np.dot(weights.T, np.dot(cov_annual, weights))) # portfolio standard deviation\n\n    # Calculate Sharpe Ratio\n    sharpe = (returns - risk_free) / volatility\n    sharpe_ratio.append(sharpe)\n    \n    # Add calculated values into appropriate lists\n    port_returns.append(returns)\n    port_volatility.append(volatility)\n    stock_weight.append(weights)\n    \nportfolio = {\n    'Returns': port_returns,\n    'Volatility': port_volatility,\n    'Sharpe-Ratio': sharpe_ratio\n}\n\n# extend portfolio to accommodate ticker & weight\nfor counter, symbol in enumerate(selected):\n    portfolio[symbol + ' Weight'] = [Weight[counter] for Weight in stock_weight]\n\ndf = pd.DataFrame(portfolio)\n\ncolumn_order = ['Returns', 'Volatility', 'Sharpe-Ratio'] + [stock + ' Weight' for stock in selected]\n\ndf = df[column_order]\n\n# Calculate min_vol and max_SR\nmin_volatility = df['Volatility'].min()\nmax_sharpe = df['Sharpe-Ratio'].max()\n\nsharpe_portfolio = df.loc[df['Sharpe-Ratio'] == max_sharpe]\nmin_variance_portfolio = df.loc[df['Volatility'] == min_volatility]
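\n\n# Worked check of the capital allocation line drawn below (illustrative numbers,\n# not taken from the data): the line is r = risk_free + max_sharpe * sigma, so\n# with risk_free = 0.03 and a maximum Sharpe ratio of 1.2, a portfolio at\n# volatility 0.20 sits at an expected return of 0.03 + 1.2 * 0.20 = 0.27.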
\n\nplt.style.use('seaborn')\ndf.plot.scatter(x='Volatility', y='Returns', c='Sharpe-Ratio', cmap='RdYlGn', edgecolors='black', grid=True)\n# Plot the maximum Sharpe (tangency) portfolio\nplt.scatter(x=sharpe_portfolio['Volatility'], y=sharpe_portfolio['Returns'], c='red', marker='D', s=200)\n# Plot the minimum variance portfolio\nplt.scatter(x=min_variance_portfolio['Volatility'], y=min_variance_portfolio['Returns'], c='blue', marker='D', s=200)\n# Plot the Capital Allocation Line\naxes = plt.gca()\nx_vals = np.array(axes.get_xlim())\nplt.plot(x_vals, risk_free + (max_sharpe * x_vals), label='Capital Allocation Line ' + '(Risk Free Rate: ' + str(risk_free) + ')', linestyle='--')\nplt.legend()\nplt.xlabel('Volatility')\nplt.ylabel('Expected Returns')\nplt.title('Efficient Frontier of ' + str(num_portfolio) + ' imaginary portfolios consisting of ' + str(selected).strip('[]'))\nplt.tight_layout()\nplt.show()\n","repo_name":"andrewhwanpark/efficient-portfolio","sub_path":"eport.py","file_name":"eport.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6948593445","text":"import numpy as np\nimport tensorflow as tf\nimport keras.backend as K\nimport pandas as pd \n\nfrom os.path import join\nfrom processing import load_data, normalize_inputs\n\nfrom keras.callbacks import TensorBoard\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\n\nSAVE_DIR = 'models/Sequential_Keras/'\nMODEL_NAME = 'MNST.h5'\nMODEL_DIR = join(SAVE_DIR, MODEL_NAME)\n\ntensorboard = TensorBoard(log_dir=SAVE_DIR)\n\n\ndef build_sequential():\n\n    model = Sequential([\n        Dense(512, input_dim=784, kernel_initializer='normal', activation='relu'),\n        Dropout(0.2),\n        Dense(512, kernel_initializer='normal', activation='relu'),\n        Dropout(0.2),\n        Dense(10, kernel_initializer='normal', activation='softmax')\n    ])\n\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n    return model\n\ntrain = pd.read_csv('../data/train.csv')\n\ntrain_X, valid_X, train_y, valid_y = load_data(train)\n\ntrain_X, train_y = normalize_inputs(train_X, train_y)\nvalid_X, valid_y = normalize_inputs(valid_X, valid_y)\n\nmodel = build_sequential()\nhx = model.fit(\n    train_X,\n    train_y,\n    validation_data=(valid_X, valid_y),\n    epochs=20,\n    callbacks=[tensorboard],\n    batch_size=None,\n    steps_per_epoch=10,\n    validation_steps=10)\n\n#model.save(MODEL_DIR)\n\n\ntest = pd.read_csv('../data/test.csv')\n\n\n\ntest_X = tf.cast(test / 255, tf.float32)\n\ntest_pred = pd.DataFrame(model.predict(test_X, steps=1))\ntest_pred = pd.DataFrame(test_pred.idxmax(axis=1))\ntest_pred.index.name = 'ImageId'\ntest_pred = test_pred.rename(columns={0: 'Label'}).reset_index()\ntest_pred['ImageId'] = test_pred['ImageId'] + 1\ntest_pred.to_csv('predictions/Sequential_Keras/submission.csv', index=False)\n\n\n","repo_name":"snugdad/MNST_Dataset_Analysis","sub_path":"trainer/train_eval_sequential.py","file_name":"train_eval_sequential.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73527099921","text":"\"\"\"Using the sorted() function and a lambda function, sort a list of tuples\n by the last character of their second element.\"\"\"\n\nl = [(1, 8, 57, 20, 25),\n     (58, 20, 6, 2, 8),\n     (6, 35, 87, 15, 3)]\n\nres = sorted(l, key=lambda i: str(i[1])[-1])\n\nprint(res)\n","repo_name":"Zyoger/My-First-Repository","sub_path":"Python/UDEMI lesson/35/35.1.py","file_name":"35.1.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"23863784285","text":"from functools import partial\nimport warnings\n\nimport holoviews as _hv\nimport numpy as _np\n\nfrom packaging.version import Version\n\nfrom ..backend_transforms import _transfer_opts_cur_backend\nfrom ..converter import HoloViewsConverter\nfrom ..util import with_hv_extension, _convert_col_names_to_str\n\n\n@with_hv_extension
\ndef scatter_matrix(data, c=None, chart='scatter', diagonal='hist',\n                   alpha=0.5, nonselection_alpha=0.1,\n                   tools=None, cmap=None, colormap=None,\n                   diagonal_kwds=None, hist_kwds=None, density_kwds=None,\n                   datashade=False, rasterize=False, dynspread=False, spread=False,\n                   **kwds):\n    \"\"\"\n    Scatter matrix of numeric columns.\n\n    A scatter_matrix shows all the pairwise relationships between the columns.\n    Each non-diagonal plot shows the corresponding columns against each other,\n    while the diagonal plot shows the distribution of each individual column.\n\n    This function is closely modelled on :func:`pandas.plotting.scatter_matrix`.\n\n    Parameters:\n    -----------\n    data: DataFrame\n        The data to plot. Every column is compared to every other column.\n    c: str, optional\n        Column to color by\n    chart: str, optional\n        Chart type for the off-diagonal plots (one of 'scatter', 'bivariate', 'hexbin')\n    diagonal: str, optional\n        Chart type for the diagonal plots (one of 'hist', 'kde')\n    alpha: float, optional\n        Transparency level for the off-diagonal plots\n    nonselection_alpha: float, optional\n        Transparency level for nonselected objects in the off-diagonal plots\n    tools: list of str, optional\n        Interaction tools to include\n        Defaults are 'box_select' and 'lasso_select'\n    cmap/colormap: str or colormap object, optional\n        Colormap to use when ``c`` is set.\n        Default is `Category10`.\n    diagonal_kwds/hist_kwds/density_kwds: dict, optional\n        Keyword options for the diagonal plots\n    datashade (default=False):\n        Whether to apply rasterization and shading (colormapping) using\n        the Datashader library, returning an RGB object instead of\n        individual points\n    rasterize (default=False):\n        Whether to apply rasterization using the Datashader library,\n        returning an aggregated Image (to be colormapped by the\n        plotting backend) instead of individual points\n    dynspread (default=False):\n        For plots generated with datashade=True or rasterize=True,\n        automatically increase the point size when the data is sparse\n        so that individual points become more visible.\n        kwds supported include ``max_px``, ``threshold``, ``shape``, ``how`` and ``mask``.\n    spread (default=False):\n        Make plots generated with datashade=True or rasterize=True\n        increase the point size to make points more visible, by\n        applying a fixed spreading of a certain number of cells/pixels. kwds\n        supported include: ``px``, ``shape``, ``how`` and ``mask``.\n    kwds: Keyword options for the off-diagonal plots and datashader's spreading, optional\n\n    Returns:\n    --------\n    obj : HoloViews object\n        The HoloViews representation of the plot.\n\n    See Also\n    --------\n    :func:`pandas.plotting.scatter_matrix` : Equivalent pandas function.\n    \"\"\"\n\n    data = _hv.Dataset(_convert_col_names_to_str(data))\n    supported = list(HoloViewsConverter._kind_mapping)\n    if diagonal not in supported:\n        raise ValueError(f'diagonal type must be one of: {supported}, found {diagonal}')\n    if chart not in supported:\n        raise ValueError(f'Chart type must be one of: {supported}, found {chart}')\n    diagonal = HoloViewsConverter._kind_mapping[diagonal]\n    chart = HoloViewsConverter._kind_mapping[chart]\n\n    if rasterize or datashade:\n        try:\n            import datashader # noqa\n        except ImportError:\n            raise ImportError(\"rasterize and datashade require \"\n                              \"datashader to be installed.\")\n        from ..util import hv_version\n        if hv_version <= Version('1.14.6'):\n            warnings.warn(\n                \"Versions of holoviews before 1.14.7 did not support \"\n                \"dynamic update of rasterized/datashaded scatter matrix. 
\"\n \"Update holoviews to a newer version.\"\n )\n\n if rasterize and datashade:\n raise ValueError(\"Choose to either rasterize or \"\n \"datashade the scatter matrix, not both.\")\n\n if not rasterize and not datashade and (spread or dynspread):\n raise ValueError(\"dynspread or spread need rasterize \"\n \"or datashade to be set to True.\")\n\n if rasterize:\n import holoviews.operation.datashader as hd\n if dynspread or spread:\n if hd.ds_version < Version('0.12.0'):\n raise RuntimeError(\n 'Any version of datashader less than 0.12.0 does '\n 'not support rasterize with dynspread or spread.')\n\n #remove datashade kwds\n if datashade or rasterize:\n import holoviews.operation.datashader as hd\n\n ds_kwds = {}\n if 'aggregator' in kwds:\n ds_kwds['aggregator'] = kwds.pop('aggregator')\n\n #remove dynspread kwds\n sp_kwds = {}\n if dynspread:\n if 'max_px' in kwds:\n sp_kwds['max_px'] = kwds.pop('max_px')\n if 'threshold' in kwds:\n sp_kwds['threshold'] = kwds.pop('threshold')\n if 'shape' in kwds:\n sp_kwds['shape'] = kwds.pop('shape')\n if 'how' in kwds:\n sp_kwds['how'] = kwds.pop('how')\n if 'mask' in kwds:\n sp_kwds['mask'] = kwds.pop('mask')\n if spread:\n if 'px' in kwds:\n sp_kwds['px'] = kwds.pop('px')\n if 'shape' in kwds:\n sp_kwds['shape'] = kwds.pop('shape')\n if 'how' in kwds:\n sp_kwds['how'] = kwds.pop('how')\n if 'mask' in kwds:\n sp_kwds['mask'] = kwds.pop('mask')\n\n tools = tools or ['box_select', 'lasso_select']\n chart_opts = dict(alpha=alpha, tools=tools,\n nonselection_alpha=nonselection_alpha, **kwds)\n if c:\n if cmap and colormap:\n raise TypeError(\"Only specify `cmap` or `colormap`.\")\n ncolors = len(_np.unique(data.dimension_values(c)))\n cmap = cmap or colormap or 'Category10'\n cmap = _hv.plotting.util.process_cmap(cmap, ncolors=ncolors, categorical=True)\n chart_opts['cmap'] = cmap\n\n #get initial scatter matrix. 
No color.\n    grid = _hv.operation.gridmatrix(data, diagonal_type=diagonal, chart_type=chart)\n\n    if c:\n        #change colors for scatter matrix\n        chart_opts['color'] = c\n        # Add color vdim to each plot.\n        grid = grid.map(lambda x: x.clone(vdims=x.vdims+[c]), 'Scatter')\n        # create a new scatter matrix with groups for each category, so now the histogram will\n        # show separate colors for each group.\n        groups = _hv.operation.gridmatrix(data.groupby(c).overlay(),\n                                          chart_type=chart,\n                                          diagonal_type=diagonal)\n        # take the correct layer from each Overlay object within the scatter matrix.\n        grid = (grid * groups).map(lambda x: x.get(0) if isinstance(x.get(0), chart) else x.get(1),\n                                   _hv.Overlay)\n\n    if (diagonal_kwds and hist_kwds) or \\\n       (diagonal_kwds and density_kwds) or \\\n       (hist_kwds and density_kwds):\n        raise TypeError('Specify at most one of `diagonal_kwds`, `hist_kwds`, or '\n                        '`density_kwds`.')\n\n    diagonal_opts = diagonal_kwds or hist_kwds or density_kwds or {}\n    # set the histogram colors\n    if c:\n        diagonal_opts['fill_color'] = _hv.Cycle(cmap)\n    # actually changing to the same color scheme for both scatter and histogram plots.\n    grid = grid.options(\n        {chart.__name__: chart_opts, diagonal.__name__: diagonal_opts},\n        backend='bokeh',\n    )\n\n    # Perform datashade options after all the coloring is finished.\n    if datashade or rasterize:\n        aggregatefn = hd.datashade if datashade else hd.rasterize\n        grid = grid.map(partial(aggregatefn, **ds_kwds), specs=chart)\n        if spread or dynspread:\n            spreadfn = hd.dynspread if dynspread else (hd.spread if spread else lambda z, **_: z)\n            eltype = _hv.RGB if datashade else _hv.Image\n            grid = grid.map(partial(spreadfn, **sp_kwds), specs=eltype)\n\n    grid = _transfer_opts_cur_backend(grid)\n    return grid\n","repo_name":"holoviz/hvplot","sub_path":"hvplot/plotting/scatter_matrix.py","file_name":"scatter_matrix.py","file_ext":"py","file_size_in_byte":8676,"program_lang":"python","lang":"en","doc_type":"code","stars":822,"dataset":"github-code","pt":"3"} +{"seq_id":"17097352813","text":"from Student import Student\n\nif __name__ == \"__main__\":\n    mahasiswa1 = Student(\"Andre Nathaniel\", 19, \"140810200042\")\n\n    mahasiswa1.setNilaiPenguji1(70)\n    mahasiswa1.setNilaiPenguji2(80)\n    mahasiswa1.setNilaiPembimbing(90)\n\n    mahasiswa1.output()\n\n    mahasiswa2 = Student(\"Dian Maulida\", 18, \"140810200084\")\n\n    mahasiswa2.setNilaiPenguji1(50)\n    mahasiswa2.setNilaiPenguji2(55)\n    mahasiswa2.setNilaiPembimbing(60)\n\n    mahasiswa2.output()","repo_name":"nielraja123/Tugas-Kelompok-PBO-Inheritance","sub_path":"Soal1/Python/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1976701337","text":"# Coding test practice / 2022 KAKAO BLIND RECRUITMENT / Parking Fee Calculation\n\nimport math\n\ndef solution(fees, records):\n    answer = []\n    basic_time, basic_fee = fees[0], fees[1]\n    per_time, per_fee = fees[2], fees[3]\n    \n    # parking records / key = car number: value = entry time\n    park_dict = {}\n    hsum_dict = {}\n    for rec in records:\n        time, num, state = rec.split(' ')\n        # OUT\n        if num in park_dict:\n            # store the difference of the times converted to minutes\n            cur_h, cur_m = int(time[0:2]), int(time[-2:])\n            in_h, in_m = int(park_dict[num][0:2]), int(park_dict[num][-2:])\n            cur_m += cur_h * 60\n            in_m += in_h * 60\n            hsum_dict[num] += cur_m - in_m\n            del park_dict[num]\n        # IN\n        else:\n            park_dict[num] = time\n            if num not in hsum_dict:\n                hsum_dict[num] = 0\n\n    # handle cars that never checked OUT\n    for num, time in park_dict.items():\n        cur_h, cur_m = 23, 59\n        in_h, in_m = int(park_dict[num][0:2]), int(park_dict[num][-2:])\n        cur_m += cur_h * 60\n        in_m += in_h * 60\n        hsum_dict[num] += cur_m - in_m\n    \n    # fee calculation\n    hsum_dict = dict(sorted(hsum_dict.items()))\n    for num, sum in hsum_dict.items():\n        total = basic_fee\n        if sum > basic_time:\n            total = basic_fee + math.ceil((sum-basic_time)/per_time) * per_fee\n        answer.append(total)\n    \n    return answer\n\n# test case / answer = [14600, 34400, 5000]\nprint(solution([180, 5000, 10, 600],[\"05:34 5961 IN\", \"06:00 0000 IN\", \"06:34 0000 OUT\", \"07:59 5961 OUT\", \"07:59 0148 IN\", \"18:59 0000 IN\", \"19:09 0148 OUT\", \"22:59 5961 IN\", \"23:00 5961 OUT\"]))","repo_name":"whitem4rk/2023-algorithm-study","sub_path":"LEVEL 2/fee.py","file_name":"fee.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12579070029","text":"from conans import ConanFile, CMake\n\n\nclass MainConan(ConanFile):\n    settings = (\n        \"os\",\n        \"compiler\",\n        \"build_type\",\n        \"arch\"\n    )\n\n    requires = (\n        \"entt/3.10.3\",\n        \"fmt/9.1.0\",\n        \"freetype/2.12.1\",\n        \"glad/0.1.36\",\n        \"glfw/3.3.8\",\n        \"glm/0.9.9.8\",\n        \"gtest/1.12.1\",\n        \"spdlog/1.10.0\",\n        \"stb/cci.20210910\",\n        # \"glslang/11.7.0\",\n        # \"imgui/1.88\",\n        # \"sfml/2.5.1\",\n        # \"tinygltf/2.5.0\",\n        # \"toml11/3.7.1\",\n    )\n\n    options = {\n        \"build_benchmarks\": [True, False],\n        \"build_tests\": [True, False],\n    }\n    default_options = {\n        \"build_benchmarks\": False,\n        \"build_tests\": True,\n    }\n\n    generators = \"cmake\"\n\n    def configure(self):\n        if self.settings.os == \"Linux\" and self.settings.compiler.libcxx == \"libstdc++\":\n            raise Exception(\"Main is not compatible with libstdc++. \"\n                            \"Please change the 'compiler.libcxx'-setting \"\n                            \"(e.g. to libstdc++11).\")\n\n        # Configure GLAD options\n        # self.options['glad'].shared = False\n        # self.options['glad'].fPIC = True\n        # self.options['glad'].no_loader = False\n        self.options['glad'].spec = 'gl'\n        # self.options['glad'].gl_profile = 'core'\n        self.options['glad'].gl_version = '4.6'\n        self.options['glad'].extensions = 'GL_EXT_texture_filter_anisotropic'\n\n    def requirements(self):\n        if self.options.build_benchmarks:\n            self.requires(\"benchmark/1.6.2\")\n        if self.options.build_tests:\n            self.requires(\"gtest/1.12.1\")\n\n    def imports(self):\n        # Copies all dll files from packages bin folder to my \"bin\" folder (win)\n        self.copy(\"*.dll\", dst=\"bin\", src=\"bin\")\n\n        # Copies all dylib files from packages lib folder to my \"lib\" folder (macosx)\n        self.copy(\"*.dylib*\", dst=\"lib\", src=\"lib\")\n\n        # Copies all so files from packages lib folder to my \"lib\" folder (linux)\n        self.copy(\"*.so*\", dst=\"lib\", src=\"lib\")\n\n    def build(self):\n        cmake = CMake(self)\n        cmake.configure()\n        cmake.build()\n\n# conanfile.txt:\n#\n# [requires]\n# glfw/3.3.3\n# glad/0.1.34\n#\n# [generators]\n# cmake_find_package\n#\n# [options]\n# glad:gl_profile=core\n# glad:gl_version=4.6\n","repo_name":"bhudiaxyz/procedural-voxel-worlds","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28725665985","text":"import random\nl=['samovar','spring','summer']\na= random.choice(l)\nb=random.choice(a)\nlst=list(a)\nc=lst.index(b)\nlst.insert(c,'?')\nlst.remove(b)\n \nprint(''.join(lst))\nd= input(\"Enter a letter: \")\nif d==b:\n    print(\"You win!\")\n    print(\"Word: \",a)\nelse:\n    print(\"Alas! Better luck next time\")\n    print(\"Word: \",a)\n","repo_name":"twigtmm/my_python","sub_path":"random_letter1.py","file_name":"random_letter1.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33550727143","text":"from models.gert_noeqv import make_model\nfrom models.heads import LBAFeedForward\nfrom utils import get_mask, compute_global_correlations\nimport data.lba.lba_dataloader as dl\nimport torch\nimport torch.nn as nn\nimport pandas as pd\nimport numpy as np\nimport os\nimport time\n\nPROT_ATOMS = ('C', 'O', 'N', 'S', 'P', 'H')\nRES_LABEL = ('LEU', 'ILE', 'VAL', 'TYR', 'ARG', 'GLU', 'PHE', 'ASP', 'THR', 'LYS', \n             'ALA', 'GLY', 'TRP', 'SER', 'PRO', 'ASN', 'GLN', 'HIS', 'MET', 'CYS', 'LIG')\nBASE_LABEL = ('A', 'U', 'G', 'C')\n\ndef train(epoch, transformer_model, ff_model, loader, criterion, optimizer, device, max_train_iter, print_frequency):\n    transformer_model.train()\n    ff_model.train()\n\n    start = time.time()\n\n    losses = []\n    for it, subunit in enumerate(loader):\n        tick = time.time()\n        subunit.pos = subunit.pos.to(device)\n        subunit.label = subunit.label.to(device)\n        optimizer.zero_grad()\n        mask = get_mask(subunit.pos)\n        out = transformer_model(subunit.pos, mask)\n        output = ff_model(out)\n        loss = criterion(output, subunit.label.float())\n        loss.backward()\n        losses.append(loss.item())\n        optimizer.step()\n\n        if it % print_frequency == 0:\n            elapsed = time.time() - start\n            print(f'Epoch {epoch}, iter {it}, train loss {np.mean(losses)}, avg it/sec {print_frequency / elapsed}')\n            start = time.time()\n        if it == max_train_iter:\n            return np.mean(losses)\n\n    return np.mean(losses)\n\n\n@torch.no_grad()\ndef test(transformer_model, ff_model, loader, criterion, device, max_test_iter, print_frequency):\n    transformer_model.eval()\n    ff_model.eval()\n\n    losses = []\n    y_true = []\n    y_pred = []\n    for it, subunit in enumerate(loader):\n        subunit.pos = subunit.pos.to(device)\n        subunit.label = subunit.label.to(device)\n        mask = get_mask(subunit.pos)\n        out = transformer_model(subunit.pos, mask)\n        output = ff_model(out, subunit.pos)\n        loss = criterion(output, subunit.label)\n        losses.append(loss.item())\n        y_true.extend(subunit.label.tolist())\n        y_pred.extend(output.tolist())\n        if it % print_frequency == 0:\n            print(f'iter {it}, loss {np.mean(losses)}')\n        if it == max_test_iter:\n            break\n    \n    test_df = pd.DataFrame(\n        np.array([y_true, y_pred]).T,\n        columns=['true', 'pred'],\n    )\n    \n    res = compute_global_correlations(test_df)\n    print(test_df)\n    print('RMSD:', np.sqrt(np.mean(losses)))\n    return np.mean(losses), res, test_df\n\n\ndef train_noneqv_lba(ex, use_attention, labels_dir, data_dir, device, log_dir, checkpoint, num_epochs, batch_size,\n                     hidden_dim, learning_rate, workers, betas, eps, d_ff, d_atom,\n                     eta, max_radius, num_atoms, num_heads, max_train_iter, max_test_iter,\n                     print_frequency, test_mode=False):\n    train_set = dl.LBA_Dataset(os.path.join(data_dir, 'lba_train@10'), labels_dir, max_radius=max_radius)\n    train_loader = dl.DataLoader(train_set, batch_size=batch_size, num_workers=workers)\n    val_set = dl.LBA_Dataset(os.path.join(data_dir, 'lba_val@10'), labels_dir, max_radius=max_radius)\n    val_loader = dl.DataLoader(val_set, batch_size=batch_size, num_workers=workers)\n\n    transformer_model = make_model(\n        num_heads=num_heads, \n        d_model=hidden_dim, \n        d_ff=d_ff, \n        d_atom=d_atom,\n        eta=eta, \n        Rc=max_radius, \n        num_atoms=num_atoms, \n        N=1,\n        num_dense=1,\n        use_attention=use_attention).to(device)\n    
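# LBAFeedForward is the regression head; judging by its use in train() above, it\n    # maps the transformer output to a single predicted binding-affinity value per subunit.\n    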
ff_model = LBAFeedForward(hidden_dim).to(device)\n\n model_parameters = filter(lambda p: p.requires_grad, transformer_model.parameters())\n num_parameters = sum([np.prod(p.size()) for p in model_parameters])\n print('Number of trainable parameters:', num_parameters)\n \n best_val_loss = 999\n best_val_corr = 0\n \n params = [x for x in transformer_model.parameters()] + [x for x in ff_model.parameters()]\n\n criterion = nn.SmoothL1Loss().to(device)\n optimizer = torch.optim.Adam(params, lr=learning_rate, betas=betas, eps=eps)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',\n factor=0.7, patience=3,\n min_lr=1e-6)\n\n if checkpoint:\n cpt = torch.load(checkpoint, map_location=device)\n transformer_model.load_state_dict(cpt['transformer_state_dict'])\n ff_model.load_state_dict(cpt['ff_state_dict'])\n optimizer.load_state_dict(cpt['optimizer_state_dict'])\n print('loaded model from checkpoint')\n\n print(f'Training for {num_epochs} epochs')\n print('---------------------------------')\n for epoch in range(1, num_epochs+1):\n #lr = scheduler.optimizer.param_groups[0]['lr']\n start = time.time()\n train_loss = train(\n epoch, \n transformer_model, \n ff_model, \n train_loader, \n criterion, \n optimizer,\n device,\n max_train_iter, \n print_frequency)\n print('Validating...')\n val_loss, res, _ = test(\n transformer_model, \n ff_model, \n val_loader, \n criterion,\n device,\n max_test_iter,\n print_frequency)\n scheduler.step(val_loss)\n if val_loss < best_val_loss:\n torch.save({\n 'epoch': epoch,\n 'transformer_state_dict': transformer_model.state_dict(),\n 'ff_state_dict': ff_model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': train_loss,\n }, os.path.join(log_dir, f'best_weights.pt'))\n best_val_loss = val_loss\n best_val_corr = res['all_spearman']\n elapsed = (time.time() - start)\n ex.log_scalar('val loss', val_loss)\n ex.log_scalar('all pearson', res['all_pearson'])\n ex.log_scalar('all kendall', res['all_kendall'])\n ex.log_scalar('all spearman', res['all_spearman'])\n print('Epoch: {:03d}, Time: {:.3f} s'.format(epoch, elapsed))\n print(\n '\\nVal Correlations (Pearson, Kendall, Spearman)\\n'\n ' all averaged: ({:.3f}, {:.3f}, {:.3f})'.format(\n float(res[\"all_pearson\"]),\n float(res[\"all_kendall\"]),\n float(res[\"all_spearman\"])))\n\n if test_mode:\n test_set = dl.LBA_Dataset(os.path.join(data_dir, 'lba_test@10'), labels_dir, max_radius=max_radius)\n test_loader = dl.DataLoader(test_set, batch_size=batch_size, num_workers=workers)\n cpt = torch.load(os.path.join(log_dir, f'best_weights.pt'))\n transformer_model.load_state_dict(cpt['transformer_state_dict'])\n ff_model.load_state_dict(cpt['ff_state_dict'])\n test_criterion = nn.MSELoss().to(device) # switch to MSELoss from Huber for reporting RMSD\n test_loss, res, _ = test(\n transformer_model, \n ff_model, \n test_loader, \n test_criterion,\n device,\n max_test_iter,\n print_frequency)\n ex.log_scalar('test loss', test_loss)\n ex.log_scalar('all pearson test', res['all_pearson'])\n ex.log_scalar('all kendall test', res['all_kendall'])\n ex.log_scalar('all spearman test', res['all_spearman'])\n print(\n '\\nTest Correlations (Pearson, Kendall, Spearman)\\n'\n ' all averaged: ({:.3f}, {:.3f}, {:.3f})'.format(\n float(res[\"all_pearson\"]),\n float(res[\"all_kendall\"]),\n float(res[\"all_spearman\"])))\n\n return 
best_val_loss","repo_name":"drorlab/gert","sub_path":"src/atom3d_combined/tasks/lba/train_lba.py","file_name":"train_lba.py","file_ext":"py","file_size_in_byte":7751,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"37992113513","text":"def findArrayIntersection(arr: list, n: int, brr: list, m: int):\n i = j = 0\n lst = []\n while (i DataFrame:\n return df.alias('tweet').groupBy(\n f.to_date('created_at').alias('created_date')\n ).agg(\n f.count_distinct('author_id').alias('n_tweet'),\n f.sum('like_count').alias('n_like'),\n f.sum('quote_count').alias('n_quote'),\n f.sum('reply_count').alias('n_reply'),\n f.sum('retweet_count').alias('n_retweet')\n ).withColumn(\n 'weekday',\n f.date_format('created_date', 'E')\n ).orderBy('created_date')\n\ndef export_json(df: DataFrame, dst: str) -> None:\n df.coalesce(1).write.json(\n path=dst,\n mode='overwrite'\n )\n\ndef twitter_insight(\n spark: SparkSession,\n src: str,\n dst: str,\n process_date: str\n) -> None:\n tweet_df = spark.read.json(join(src, 'tweet'))\n tweet_conversation_df = get_tweet_conversations(tweet_df)\n export_json(tweet_conversation_df, join(dst, f'process_date={process_date}'))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Spark Twitter Transformation Silver'\n )\n\n parser.add_argument('--src', required=True)\n parser.add_argument('--dst', required=True)\n parser.add_argument('--process-date', required=True)\n\n args = parser.parse_args()\n\n spark = SparkSession\\\n .builder\\\n .appName(\"twitter_transformation\")\\\n .getOrCreate()\n\n twitter_insight(spark, args.src, args.dst, args.process_date)","repo_name":"AdautoDCJunior/apache-transformacao-dados-spark","sub_path":"src/scripts/insight_twitter.py","file_name":"insight_twitter.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25341019213","text":"import os\ndef find_files(filename,path):\n for root,dirs,files in os.walk(path):\n for name in files:\n if name==filename:\n print(\"exist\")\n break\n else:\n print(\"not exist\")\nname=input()\ndir=\"c://HCL\"\nfind_files(name,dir)","repo_name":"backu2201/TODAY1","sub_path":"file_access4.py","file_name":"file_access4.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29863568351","text":"import csv\nimport one_truck\n\n\ndef make_list_from_csv(path: str):\n with open(path) as file:\n input_data = [list(map(int, rec)) for rec in csv.reader(file)]\n\n return input_data\n\n\ndef score_solutions_three_trucks(solutions: list):\n scores = []\n for solution in solutions:\n scores.append([solution, one_truck.find_weighted_dist(solution), one_truck.find_total_dist(solution)])\n\n return scores\n\n\ndef sort_pareto_one_truck(solutions_scores: list):\n optimal_solutions = []\n not_optimal_solutions = []\n for i in range(len(solutions_scores)):\n opt = True\n for j in range(len(solutions_scores)):\n if solutions_scores[j][1] < solutions_scores[i][1] and solutions_scores[j][2] < solutions_scores[i][2]:\n opt = False\n if opt:\n optimal_solutions.append(solutions_scores[i][0])\n else:\n not_optimal_solutions.append(solutions_scores[i][0])\n\n return optimal_solutions, not_optimal_solutions\n\n\nif __name__ == '__main__':\n data = make_list_from_csv(\"../results/To Keep/One/all_data\")\n data, bad_data = 
sort_pareto_one_truck(score_solutions_three_trucks(data))\n\n    one_truck.save_csv(data, \"../results/To Keep/One/all_data_filtered\")\n    one_truck.save_csv(bad_data, \"../results/To Keep/One/filtered_out\")\n","repo_name":"iQuad427/RO_Genetic_MTSP","sub_path":"one_truck/filer_one_truck.py","file_name":"filer_one_truck.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21971705448","text":"# implement a program that prompts the user for the name of a variable in camel case and outputs the corresponding name in snake case. \n# Assume that the user’s input will indeed be in camel case.\n\n\ndef main():\n    camelCase = input(\"camelCase: \")\n\n\n    # iterate through each character of string\n    for letter in camelCase:\n        # if an upper case letter is found then print \"_\" and then print letter in lowercase\n        if letter.isupper():\n            print(\"_\" + letter.lower(), end=\"\")\n        # otherwise print letter as it is\n        else:\n            print(letter, end=\"\")\n    print()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Ank1taS/CS50-python","sub_path":"pset2/camel.py","file_name":"camel.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36576842475","text":"# importing everything\r\nfrom tkinter import *\r\n\r\n# setting up the screen\r\nroot = Tk()\r\nroot.title('GUI calculator')\r\n\r\n# making the display box\r\ne = Entry(root, width=35, borderwidth=5)\r\ne.grid(row=0, column=0, columnspan=3, padx=10, pady=10)\r\n\r\n# making the buttons\r\nbutton_1 = Button(root, text=\"1\", padx=40, pady=20, bg=\"#1477DA\")\r\nbutton_2 = Button(root, text=\"2\", padx=40, pady=20, bg=\"#1477DA\")\r\nbutton_3 = Button(root, text=\"3\", padx=40, pady=20, bg=\"#1477DA\")\r\nbutton_4 = Button(root, text=\"4\", padx=40, pady=20, bg=\"#1477DA\")\r\nbutton_5 = Button(root, text=\"5\", padx=40, pady=20, bg=\"#1477DA\")\r\nbutton_6 = Button(root, text=\"6\", padx=40, pady=20, bg=\"#1477DA\")\r\nbutton_7 = Button(root, text=\"7\", padx=40, pady=20, bg=\"#1477DA\")\r\nbutton_8 = Button(root, text=\"8\", padx=40, pady=20, bg=\"#1477DA\")\r\nbutton_9 = Button(root, text=\"9\", padx=40, pady=20, bg=\"#1477DA\")\r\nbutton_0 = Button(root, text=\"0\", padx=40, pady=20, bg=\"#1477DA\")\r\nbutton_clear = Button(root, text=\"Clear\", padx=79, pady=20, bg=\"green\")\r\nbutton_add = Button(root, text=\"+\", padx=39, pady=20, bg=\"red\")\r\nbutton_equal = Button(root, text=\"=\", padx=91, pady=20, bg=\"yellow\")\r\n\r\n# displaying the buttons on the screen\r\n# row 3 buttons\r\nbutton_1.grid(row=3, column=0)\r\nbutton_2.grid(row=3, column=1)\r\nbutton_3.grid(row=3, column=2)\r\n# row 2 buttons\r\nbutton_4.grid(row=2, column=0)\r\nbutton_5.grid(row=2, column=1)\r\nbutton_6.grid(row=2, column=2)\r\n# row 1 buttons\r\nbutton_7.grid(row=1, column=0)\r\nbutton_8.grid(row=1, column=1)\r\nbutton_9.grid(row=1, column=2)\r\nbutton_0.grid(row=4, column=0)\r\n# special buttons\r\nbutton_clear.grid(row=4, column=1, columnspan=2)\r\nbutton_add.grid(row=5, column=0)\r\nbutton_equal.grid(row=5, column=1, columnspan=2)\r\n\r\n\r\n","repo_name":"Yuvarajkatyal/tk-calculater","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27668326435","text":"from fastai.vision import *\n\nimport models.cs_v2 as cs\nimport 
pose\nimport utils\nfrom utils import DataTime\n\n# mean head size of LIP validation set\ndefault_threshold = 0.3314\n\n\nclass SelfCorrect:\n    def __init__(self):\n        self.detect_target = None\n        self.is_wrong = None\n\n    def correction_loss_func(self, error_out, heatmaps, targets):\n        n = targets.shape[0]\n        preds = pose.output_to_scaled_pred(heatmaps)\n        first_td_preds = preds[:, :16]\n        is_visible = targets[..., 2] > 0\n        gt = targets[..., :2]\n\n        head_sizes = torch.norm(gt[:, 8] - gt[:, 9], dim=1)\n        thresholds = head_sizes / 2\n        has_head = (is_visible[:, 8:10]).all(1)\n        thresholds[~has_head] = default_threshold\n        distances = torch.norm(first_td_preds - gt, dim=2)\n        under_threshold = (distances < thresholds[:, None])\n        is_correct = under_threshold * is_visible\n        self.is_wrong = (~under_threshold) * is_visible\n\n        detect_target = torch.zeros(n, 16, dtype=torch.long)\n        detect_target[self.is_wrong] = 1\n        detect_target[is_correct] = 2\n        self.detect_target = detect_target.reshape(-1).to(targets.device)\n\n        error_detect_loss = F.cross_entropy(error_out.reshape(-1, 3), self.detect_target)\n\n        first_targets = gt[is_visible]\n        first_td = heatmaps[:, :16][is_visible]\n        pred_detect = error_out.reshape(-1, 16, 3).argmax(dim=2)\n        pred_wrong = pred_detect == 1\n        wrong = pred_wrong * is_visible\n        second_targets = gt[wrong]\n        second_td = heatmaps[:, 16:][wrong]\n        td = torch.cat((first_td, second_td))\n        td_targets = torch.cat((first_targets, second_targets))\n        keypoints_loss = pose.ce_loss(td, td_targets)\n        return error_detect_loss + keypoints_loss\n\n    def loss_func(self, outputs, targets):\n        error_out, td_out = outputs\n        return self.correction_loss_func(error_out, td_out, targets)\n\n    def accuracy(self, outputs, targets):\n        bu_out = outputs[0].reshape(-1, 3)\n        return accuracy(bu_out, self.detect_target)\n\n    def heatmap_func(self, outputs):\n        heatmaps = outputs[1]\n        n, _, h, w = heatmaps.shape\n        combined = torch.empty(n, 16, h, w).to(heatmaps.device)\n        combined[self.is_wrong] = heatmaps[:, 16:][self.is_wrong]\n        combined[~self.is_wrong] = heatmaps[:, :16][~self.is_wrong]\n        return torch.cat((heatmaps, combined), dim=1)\n\n\ndef main(args):\n    print(args)\n    arch = pose.nets[args.resnet]\n\n    if args.cnn_fix:\n        instructor = CNNObserver()\n        bu_c = 0\n    else:\n        instructor = SelfObserveInstructor()\n        bu_c = 16 * 3\n\n    root = Path(__file__).resolve().parent.parent / 'LIP'\n    db = pose.get_data(root, args.size, bs=args.bs)\n\n    self_correct = SelfCorrect()\n    pckh = partial(pose.Pckh, niter=3, mean=False, heatmap_func=self_correct.heatmap_func)\n    learn = cs.cs_learner(db, arch, instructor, td_c=16, bu_c=bu_c, pretrained=False, embedding=None,\n                          add_td_out=True, loss_func=self_correct.loss_func, metrics=self_correct.accuracy,\n                          callback_fns=[pckh, DataTime])\n\n    monitor = 'Total_2'\n    utils.fit_and_log(learn, args, monitor)\n\n\nif __name__ == '__main__':\n    parser = utils.basic_train_parser()\n    parser.add_argument('--cnn-fix', action='store_true')\n    main(parser.parse_args())\n","repo_name":"omriKramer/csPose","sub_path":"heatmap_analyze.py","file_name":"heatmap_analyze.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"44449407241","text":"'''\nPython Exercise 73: Create a tuple filled with the top 20 teams of the Brazilian Football Championship table, in order of standing. Then show:\n\na) The first 5 teams.\n\nb) The last 4 teams.\n\nc) The teams in alphabetical order.\n\nd) The position of the Chapecoense team.\n\n'''\n\ntabela_brasileirao_2017 = ('Corinthians',\n                           'Palmeiras',\n                           'Santos',\n                           'Grêmio',\n                           'Cruzeiro',\n                           'Flamengo',\n                           'Vasco da Gama',\n                           'Chapecoense',\n                           'Atlético - MG',\n                           'Botafogo',\n                           'Atlético - PR',\n                           'Bahia',\n                           'São Paulo',\n                           'Fluminense',\n                           'Sport - PE',\n                           'Vitória - BA',\n                           'Coritiba',\n                           'Avaí',\n                           'Ponte Preta',\n                           'Atlético - GO',\n                           )\n\nprint('=-' * 20)\nprint('{:-^40}'.format('BRASILEIRÃO 2017 STANDINGS'))\nprint('=-' * 20)\n\nprint(f'The first 5 teams in the table were {tabela_brasileirao_2017[0:5]}')\nprint('=-' * 20)\nprint(f'The last 4 teams in the table were {tabela_brasileirao_2017[-4:]}')\nprint('=-' * 20)\nprint(f'The teams in alphabetical order: {sorted(tabela_brasileirao_2017)}')\nprint('=-' * 20)\nprint(\n    f'Chapecoense finished the championship in position {tabela_brasileirao_2017.index(\"Chapecoense\") + 1}')\n","repo_name":"jandersoncoelho/expython","sub_path":"ex073.py","file_name":"ex073.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26159906412","text":"from kafka import KafkaProducer\nfrom faker import Faker\nimport random\nimport json\nimport os\n\nclass Producer():\n    '''\n    Kafka Producer based on \"Getting Started with Aiven for Apache Kafka\"\n    https://help.aiven.io/en/articles/489572-getting-started-with-aiven-for-apache-kafka\n    '''\n    def __init__(self, topic, host, port):\n        self.topic = topic\n        self.fake = Faker()\n        self.producer = KafkaProducer(\n            bootstrap_servers=\"{}:{}\".format(host, port),\n            security_protocol=\"SSL\",\n            ssl_cafile=os.getenv(\"HOME\") + \"/aiven-ssl/ca.pem\",\n            ssl_certfile=os.getenv(\"HOME\") + \"/aiven-ssl/service.cert\",\n            ssl_keyfile=os.getenv(\"HOME\") + \"/aiven-ssl/service.key\",\n        )\n\n    def produce_account_data(self, iter=0):\n        # Simulate account data\n        account_data = {\n            \"first\": self.fake.first_name(),\n            \"last\": self.fake.last_name(),\n            \"address\": self.fake.address().replace(\"\\n\", \" \"),\n            \"balance\": self.fake.pricetag()[1:].replace(\",\", \"\"),\n            \"account_type\": \"chequings\" if random.uniform(0,1) > 0.5 else \"savings\",\n        }\n        print(\"KAFKA PRODUCER: account produced for\", account_data[\"first\"])\n        self.producer.send(self.topic, json.dumps(account_data).encode(\"utf-8\"))\n        return account_data\n\n    def close_session(self):\n        self.producer.flush()\n","repo_name":"raph2314/aiven-demo","sub_path":"kafka_objects/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73769855122","text":"import inspect\n\nfrom .common import placeholder\n\n\ndef curry(fn, received=None):\n    \"\"\"\n    Makes curried version of a function\n\n    These are equivalents:\n    @curry\n    def f(x, y, z):\n        return x, y, z\n\n    - f(1, 2, 3)\n    - f(1)(2, 3)\n    - f(1)(2)(3)\n    - f(__, 2, 3)(1)\n    - f(__, 2, __)(1, 3)\n\n    Thanks ramda.js and MIT license for this (almost) copy-paste\n    :type received: Already taken arguments\n    :type fn: A function being curried\n    \"\"\"\n\n    length = len(inspect.signature(fn).parameters)\n    if received is None:\n        received = []\n\n    def inner(*args):\n        combined = []\n        combined_idx = 0\n        args_idx = 0\n        left = length\n\n        while combined_idx < len(received) or args_idx < len(args):\n            if (\n                combined_idx < len(received)\n                and 
received[combined_idx] is not placeholder\n or args_idx >= len(args)\n ):\n result = received[combined_idx]\n else:\n result = args[args_idx]\n args_idx += 1\n\n combined.append(result)\n\n if result is not placeholder:\n left -= 1\n combined_idx += 1\n\n return fn(*combined) if left <= 0 else curry(fn, combined)\n\n return inner\n","repo_name":"deliro/pyfn","sub_path":"pyfn/curry.py","file_name":"curry.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"71784332562","text":"'''Trains a simple convnet on the MNIST dataset.\n\nGets to 99.25% test accuracy after 12 epochs\n(there is still a lot of margin for parameter tuning).\n16 seconds per epoch on a GRID K520 GPU.\n'''\n\nfrom __future__ import print_function\nimport sys\nimport os\nsys.path.insert(0,'/home/xmli/test_code/livertumor/Keras-2.0.8')\nsys.path.insert(0,'/home/xmli/test_code/livertumor/mylib')\nsys.path.insert(0,'/home/xmli/test_code/livertumor/')\nimport keras\nimport numpy as np\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\nimport matplotlib.pyplot as plt\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nbatch_size = 128\nnum_classes = 10\nepochs = 12\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n\n# the data, shuffled and split between train and test sets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nif K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\nimg = x_train[7,:,:,0]\nimg_rotate = np.rot90(img, k = 3)\n# plt.figure(1)\n# plt.imshow(img,'gray')\n# plt.figure(2)\n# plt.imshow(img_rotate,'gray')\n# # plt.show()\n\n\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\n# model.add(MaxPooling2D(pool_size=(2, 2)))\n# model.add(Dropout(0.25))\n# model.add(Flatten())\n# model.add(Dense(128, activation='relu'))\n# model.add(Dropout(0.5))\n# model.add(Dense(num_classes, activation='softmax'))\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\n# model.fit(x_train, y_train,\n# batch_size=batch_size,\n# epochs=epochs,\n# verbose=1,\n# validation_data=(x_test, y_test))\nmodel.load_weights('mymodel.h5', by_name=True)\nimg_test = np.zeros((1,28,28,1),dtype='float32')\nimg_test[0,:,:,0] = img_rotate\nscore = model.predict(img_test)\nscore = score[0,:,:,5]\nplt.figure(1)\nplt.imshow(score,'gray')\n# plt.figure(2)\n# plt.imshow(img_rotate,'gray')\nplt.show()\nexit(0)\n\n# score = model.evaluate(x_test, y_test, verbose=0)\n# print('Test 
loss:', score[0])\n# print('Test accuracy:', score[1])\n","repo_name":"xmengli/H-DenseUNet","sub_path":"Keras-2.0.8/examples/mnist_cnn.py","file_name":"mnist_cnn.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","stars":509,"dataset":"github-code","pt":"3"} +{"seq_id":"41028749605","text":"import os\n\nimport chemicals\nimport pytest\nfrom numpy.testing import assert_allclose, assert_approx_equal\nfrom thermo import Chemical\n\nimport openpnm as op\nfrom openpnm.utils import get_mixture_model_args\n\ncondition = int(os.environ.get('NUMBA_DISABLE_JIT', 0)) != 0\n\n\nclass DensityTest:\n def setup_class(self):\n self.net = op.network.Cubic(shape=[3, 3, 3])\n self.phase = op.phase.Species(network=self.net, species='h2o')\n self.phase['pore.salinity'] = 0.0 # ppt\n\n def test_standard(self):\n # Liquid water\n self.phase.add_model(propname='pore.density',\n model=op.models.phase.density.liquid_pure_COSTALD)\n assert_approx_equal(self.phase['pore.density'].mean(), 993.327626112987)\n\n def test_ideal_gas(self):\n # Water vapor\n self.phase.add_model(propname='pore.density',\n model=op.models.phase.density.ideal_gas)\n self.phase.regenerate_models()\n assert_approx_equal(self.phase['pore.density'].mean(), 0.7367280065145)\n\n def test_water(self):\n # Liquid water\n self.phase.add_model(propname='pore.density',\n model=op.models.phase.density.water_correlation)\n self.phase.regenerate_models()\n assert_approx_equal(self.phase['pore.density'].mean(), 996.9522)\n\n def test_chemicals_for_pure_gas_molar_volume(self):\n mods = [\n # numba version not working for any\n chemicals.virial.BVirial_Pitzer_Curl,\n chemicals.virial.BVirial_Abbott,\n chemicals.virial.BVirial_Tsonopoulos,\n chemicals.virial.BVirial_Tsonopoulos_extended,\n ]\n n2 = op.phase.Species(network=self.net, species='nitrogen')\n n2['pore.temperature'] = 400\n Vm = []\n for f in mods:\n Vm.append(op.models.phase.chemicals_wrapper(n2, f=f).mean())\n assert_allclose(Vm, 8.795e-6, rtol=.3)\n\n @pytest.mark.skipif(condition, reason=\"Strange error coming from numba/chemicals\")\n def test_chemicals_wrapper_for_pure_liq_molar_volume(self):\n mods = [\n chemicals.volume.Yen_Woods_saturation,\n chemicals.volume.Rackett,\n chemicals.volume.Yamada_Gunn,\n chemicals.volume.Townsend_Hales,\n chemicals.volume.Bhirud_normal,\n chemicals.volume.COSTALD,\n # chemicals.volume.Campbell_Thodos, # numba version not working\n # chemicals.volume.SNM0, # numba version not working\n # chemicals.volume.CRC_inorganic, # requires rho\n # chemicals.volume.COSTALD_compressed, # requires Psat\n ]\n h2o = op.phase.Species(network=self.net, species='water')\n Vm = []\n for f in mods:\n Vm.append(op.models.phase.chemicals_wrapper(h2o, f=f).mean())\n assert_allclose(Vm, 1.88e-5, rtol=0.2)\n\n def test_chemicals_wrapper_for_pure_liq_with_args(self):\n h2o = op.phase.Species(network=self.net, species='water')\n # Using kwargs to map args to custom propnames\n temp = Chemical('h2o')\n h2o['pore.density'] = temp.rhol\n Vm = op.models.phase.chemicals_wrapper(\n phase=h2o,\n f=chemicals.volume.CRC_inorganic,\n rho0='pore.density',\n k=1,\n )\n assert_allclose(Vm, 1.85309071e-05, rtol=1e-4)\n # Put args directly in target\n h2o['pore.Psat'] = temp.Psat\n h2o['pore.Vs'] = temp.Vms\n Vm = op.models.phase.chemicals_wrapper(\n phase=h2o,\n f=chemicals.volume.COSTALD_compressed,\n rho='pore.density',\n )\n assert_allclose(Vm, 1.62975733e-05, rtol=1e-4)\n\n def test_chemicals_wrapper_for_liquid_mixture(self):\n h2o = 
op.phase.Species(network=self.net, species='h2o')\n        etoh = op.phase.Species(network=self.net, species='ethanol')\n        vodka = op.phase.LiquidMixture(network=self.net, components=[h2o, etoh])\n        vodka.x(h2o.name, 0.60)\n        vodka.x(etoh.name, 0.40)\n        Vm = op.models.phase.chemicals_wrapper(\n            phase=vodka,\n            f=chemicals.volume.COSTALD_mixture,\n        )\n        h2o['param.Zr'] = 0.001\n        etoh['param.Zr'] = 0.001\n        Vm = op.models.phase.chemicals_wrapper(\n            phase=vodka,\n            f=chemicals.volume.Rackett_mixture,\n            Zrs='param.Zr',\n        )\n\n    def test_liquid_pure_and_mixture(self):\n        pn = op.network.Demo()\n        h2o = op.phase.Species(network=pn, species='water')\n        h2o.add_model(propname='pore.density',\n                      model=op.models.phase.density.liquid_pure_COSTALD)\n        Vm = chemicals.COSTALD(\n            T=h2o['pore.temperature'][0],\n            Tc=h2o['param.critical_temperature'],\n            Vc=h2o['param.critical_volume'],\n            omega=h2o['param.acentric_factor'],\n        )\n        rho_ref = chemicals.Vm_to_rho(Vm, h2o['param.molecular_weight'])\n        rho_calc = h2o['pore.density'][0]\n        assert_allclose(rho_ref, rho_calc, rtol=1e-10, atol=0)\n\n        etoh = op.phase.Species(network=pn, species='ethanol')\n        etoh.add_model(propname='pore.density',\n                       model=op.models.phase.density.liquid_pure_COSTALD)\n\n        vodka = op.phase.LiquidMixture(network=pn, components=[h2o, etoh])\n        vodka.x(h2o.name, 0.5)\n        vodka.x(etoh.name, 0.5)\n        vodka.add_model(propname='pore.density',\n                        model=op.models.phase.density.liquid_mixture_COSTALD)\n        args = get_mixture_model_args(\n            phase=vodka,\n            composition='xs',\n            args={\n                'Tcs': 'param.critical_temperature',\n                'Vcs': 'param.critical_volume',\n                'omegas': 'param.acentric_factor',\n            })\n        Vm = chemicals.COSTALD_mixture(T=vodka['pore.temperature'][0], **args)\n        rho_ref = chemicals.Vm_to_rho(Vm, vodka.get_mix_vals('param.molecular_weight')[0])\n        rho_calc = vodka['pore.density'][0]\n        assert_allclose(rho_ref, rho_calc, rtol=1e-10, atol=0)\n\n\nif __name__ == '__main__':\n\n    t = DensityTest()\n    self = t\n    t.setup_class()\n    for item in t.__dir__():\n        if item.startswith('test'):\n            print(f\"Running test: {item}\")\n            t.__getattribute__(item)()\n","repo_name":"PMEAL/OpenPNM","sub_path":"tests/unit/models/phase/DensityTest.py","file_name":"DensityTest.py","file_ext":"py","file_size_in_byte":6289,"program_lang":"python","lang":"en","doc_type":"code","stars":404,"dataset":"github-code","pt":"3"} +{"seq_id":"2043166021","text":"import re\n\nre_email1 = re.compile(r'^[\\w.]+@[\\w.]+\\.com$')\nre_email2 = re.compile(r'^(?:<([\\w\\s]+)>\\s+)?([\\w.]+)@\\w+\\.\\w+$')\ndef is_valid_email(addr):\n\tif re_email1.match(addr):\n\t\treturn True\n\telse:\n\t\treturn False\n\n\nassert is_valid_email('someone@gmail.com')\t\t#assert condition (if false,raise error)\nassert is_valid_email('bill.gates@microsoft.com')\nassert not is_valid_email('bob#example.com')\nassert not is_valid_email('mr-bob@example.com')\nprint('ok')\n\n\ndef name_of_email(addr):\n\tm = re_email2.match(addr)\n\tif m.group(1):\n\t\treturn m.group(1)\n\telse:\n\t\treturn m.group(2)\n\n\nassert name_of_email('<Tom Paris> tom@voyager.org') == 'Tom Paris'\nassert name_of_email('tom@voyager.org') == 'tom'\nprint('ok')","repo_name":"Yuanie/Python","sub_path":"regulartion1.py","file_name":"regulartion1.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3225434472","text":"import ndspy\nimport ndspy.rom\nimport ndspy.narc\nimport code \nimport io\nimport codecs\nimport os\nimport json\nimport sys\nimport msg_reader2\nfrom 
header_reader import output_headers_json\nfrom msg_reader2 import output_texts\nfrom msg_reader2 import output_scripts\n\n\n# code.interact(local=dict(globals(), **locals()))\n\n\n#################### CREATE FOLDERS #############################\n\nrom_name = \"projects/\" + sys.argv[1].split(\".\")[0] \n\n# code.interact(local=dict(globals(), **locals()))\n\nif not os.path.exists(f'{rom_name}'):\n os.makedirs(f'{rom_name}')\n\nfor folder in [\"narcs\", \"texts\", \"json\", \"message_texts\", \"story_texts\", \"scripts\"]:\n\tif not os.path.exists(f'{rom_name}/{folder}'):\n\t\tos.makedirs(f'{rom_name}/{folder}')\n\n################# HARDCODED ROM INFO ##############################\n\nNARCS = [[\"a/0/1/2\", \"headers\"],[\"a/0/0/2\", \"message_texts\"], [\"a/0/0/3\", \"story_texts\"],[\"a/0/5/6\", \"scripts\"]]\nBW_MSG_BANKS = [[89, \"locations\"]]\nBW2_MSG_BANKS = [[109, \"locations\"]]\nMSG_BANKS = []\n\n\n################### EXTRACT RELEVANT BW_NARCS AND ARM9 #######################\n\nnarc_info = {} ##store narc names and file id pairs\n\nwith open(f'{rom_name.split(\"/\")[-1]}.nds', 'rb') as f:\n data = f.read()\n\nprint(\"decompressing arm9\")\nrom = ndspy.rom.NintendoDSRom(data)\narm9 = ndspy.codeCompression.decompress(rom.arm9)\narm9_sample = int.from_bytes(arm9[14:16], 'little')\nversion_identifier = {15395: [\"B2\",\"BW2\"], 63038: [\"W2\",\"BW2\"], 43676: [\"B\",\"BW\"], 4581: [\"W\",\"BW\"]}\n\nnarc_info[\"base_version\"] = version_identifier[arm9_sample][0]\nnarc_info[\"base_rom\"] = version_identifier[arm9_sample][1]\n\nwith open(f'{rom_name}/arm9.bin', 'wb') as f:\n\tf.write(arm9)\n\n\nif narc_info[\"base_rom\"] == \"BW2\":\n\tMSG_BANKS = BW2_MSG_BANKS\nelse:\n\tMSG_BANKS = BW_MSG_BANKS\n\tNARCS[3][0] = \"a/0/5/7\"\n\n\n\nfor narc in NARCS:\n\tfile_id = rom.filenames[narc[0]]\n\tfile = rom.files[file_id]\n\tnarc_file = ndspy.narc.NARC(file)\n\n\t# extract text banks\n\tif narc[1][-5:] == \"texts\":\n\t\toutput_texts(f\"{rom_name}/{narc[1]}\", narc_file)\n\n\tif narc[1] == \"scripts\":\n\t\toutput_scripts(f\"{rom_name}/scripts\", narc_file)\n\n\n\n\n\tnarc_info[narc[1]] = file_id # store file ID for later\n\n\n\t\n\t# with open(f'{rom_name}/narcs/{narc[1]}-{file_id}.narc', 'wb') as f:\n\t# f.write(file)\n\n\n#############################################################\n\n################### EXTRACT RELEVANT TEXTS ##################\n\nmsg_file_id = narc_info['message_texts']\n\nwith open(f'{rom_name}/message_texts/texts.json', 'r') as f:\n\tmessages = json.load(f)\n\t\n\tfor msg_bank in MSG_BANKS:\n\t\ttext = messages[msg_bank[0]]\n\n\t\twith open(f'{rom_name}/texts/{msg_bank[1]}.txt', 'w+') as outfile:\n\t\t\tfor line in text:\n\t\t\t\tline[1] = line[1].replace(\"―\", \"\").replace(\"⑮\", \" F\").replace(\"⑭\", \" M\").replace(\"⒆⒇\", \"PkMn\").replace(\"é\", \"e\").encode(\"ascii\", \"ignore\").decode()\n\n\t\t\t\toutfile.write(line[1] + \"\\n\")\n\n\n##############################################################\n################### WRITE SESSION SETTINGS ###################\n\nsettings = {}\nsettings[\"rom_name\"] = rom_name\nsettings.update(narc_info)\n\nwith open(f'session_settings.json', \"w\") as outfile: \n\tjson.dump(settings, outfile) \n\tprint(settings)\n\n\n\n\n\n#############################################################\n################### CONVERT TO JSON #########################\n\nheaders_narc_data = 
ndspy.narc.NARC(rom.files[narc_info[\"headers\"]])\noutput_headers_json(headers_narc_data)\n\n\n\n\n","repo_name":"hzla/Pokeweb","sub_path":"python/header_loader.py","file_name":"header_loader.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"29024212948","text":"import os\nimport speech_recognition as sr\nfrom tqdm import tqdm\nfrom multiprocessing.dummy import Pool\npool = Pool(8) # Number of concurrent threads\n\nwith open(\"api-key.json\") as f:\n GOOGLE_CLOUD_SPEECH_CREDENTIALS = f.read()\n\nr = sr.Recognizer()\nfiles = sorted(os.listdir('parts/'))\n\ndef transcribe(data):\n idx, file = data\n name = \"parts/\" + file\n print(name + \" started\")\n # Load audio file\n with sr.AudioFile(name) as source:\n audio = r.record(source)\n # Transcribe audio file\n text = r.recognize_google_cloud(audio, credentials_json=GOOGLE_CLOUD_SPEECH_CREDENTIALS)\n print(name + \" done\")\n return {\n \"idx\": idx,\n \"text\": text\n }\n\nall_text = pool.map(transcribe, enumerate(files))\npool.close()\npool.join()\n\ntranscript = \"\"\nfor t in sorted(all_text, key=lambda x: x['idx']):\n total_seconds = t['idx'] * 30\n # Cool shortcut from:\n # https://stackoverflow.com/questions/775049/python-time-seconds-to-hms\n # to get hours, minutes and seconds\n m, s = divmod(total_seconds, 60)\n h, m = divmod(m, 60)\n\n # Format time as h:m:s - 30 seconds of text\n transcript = transcript + \"{:0>2d}:{:0>2d}:{:0>2d} {}\\n\".format(h, m, s, t['text'])\n\nprint(transcript)\n\nwith open(\"transcript.txt\", \"w\") as f:\n f.write(transcript)\n","repo_name":"akras14/speech-to-text","sub_path":"fast.py","file_name":"fast.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":166,"dataset":"github-code","pt":"3"} +{"seq_id":"74199359122","text":"# VSP2WOPWOP Blade Loading Analysis for Hover and Axial Flight\n\n# Author: Daniel Weitsman\n\n# This function trims the rotor to the desired thrust condition, which is specified in the input file, and computes\n# the aerodynamic loads using BEMT. 
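Depending on UserIn['trim'], either the rotor speed or the collective\n# pitch is iterated with scipy's least_squares until the thrust residual vanishes. 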
These quantities are then assembled into a dictionary, which is returned to the\n# user.\n\n\n# %%\ndef loadingHover(UserIn, geomParams, XsecPolar, T, omega, Vz):\n\n import numpy as np\n from scipy.optimize import least_squares\n import bisect\n\n def rpm_residuals(omega):\n '''\n This function computes the residuals based on the percentage difference between the computed and target thrust.\n The rotational rate is adjusted until this residual is minimized.\n :param omega: rotational rate [rad/s]\n :return:\n :param res: percentage error between the target and computed T\n '''\n\n trim_out = rpm_trim(omega)\n res = np.abs((T - trim_out[0]*rho*np.pi*R**2*(omega*R)**2)/T)\n print(res)\n return res\n\n def coll_residuals(th0):\n '''\n This function computes the residuals based on the percentage difference between the computed and target CT.\n The collective pitch is adjusted until this residual is minimized.\n :param th0: collective pitch setting\n :return:\n :param res: percentage error between the target and computed CT\n '''\n th = th0+twistDist\n trim_out = coll_trim(th)\n # res = targCT - trim_out[0]\n res = np.abs((targCT - trim_out[0]) / targCT)\n print(res)\n return res\n\n def rpm_trim(omega):\n \"\"\"\n This function computes the radial loading distribution based on the thrust coefficient\n :param lam: Initial guess for the radial inflow distribution\n :param th: Radial twist distribution\n :return:\n :param CT: Radially integrated thrust coefficient\n :param dCT: Incremental thrust coefficient\n :param dCL: Radial distribution of the lift coefficient\n :param dCD: Radial distribution of the drag coefficient\n :param AoA: Radial angle of attack distribution\n \"\"\"\n CT_init = T / (rho * np.pi * R ** 2 * (omega * R) ** 2)\n lam_init = np.sqrt(CT_init / 2)\n th = th0+twistDist\n err = 1\n while np.any(err > 0.0005):\n lam = TipLoss(lam_init, th)\n AoA = th - lam / r\n dCL, dCD = PolarLookup(AoA)\n dCT = 0.5 * solDist * dCL * r ** 2\n CT = np.trapz(dCT, r)\n err = np.abs((lam - lam_init) / lam)\n lam_init = lam\n return CT, dCT, dCL, dCD, lam, AoA\n\n def coll_trim(th):\n \"\"\"\n This function computes the radial loading distribution based on the thrust coefficient\n :param th: collective pitch setting+twist distribution [rad]\n :return:\n :param CT: integrated thrust coefficient\n :param dCT: radial thrust coefficient distribution\n :param dCL: radial lift coefficient distribution\n :param dCD: radial drag coefficient distribution\n :param lam: radial inflow coefficient distribution\n :param AoA: radial angle of attack distribution [rad]\n \"\"\"\n lam = TipLoss(lamInit, th)\n AoA = th - lam / r\n dCL, dCD = PolarLookup(AoA)\n dCT = 0.5 * solDist * (dCL*np.cos(lam/r)-dCD*np.sin(lam/r))* r ** 2\n # dCT = 0.5 * solDist * dCL * r ** 2\n CT = np.trapz(dCT, r)\n\n return CT, dCT, dCL, dCD, lam, AoA\n\n def TipLoss(lambdaInit, ThetaDist):\n \"\"\"\n This function applies the fixed point iteration method to compute the inflow distribution and applies\n Prandtl's tip loss formulation, if specified for in the input module\n :param lambdaInit: Initial guess for the inflow ratio\n :param ThetaDist: collective pitch angle + twist distribution (rad)\n :return:\n :param: lam: radial inflow distribution\n \"\"\"\n if tipLoss == 1:\n iter = 0\n err = np.ones(len(r))\n while np.any(err > 0.005):\n # froot = 0.5*Nb*(r/((1 - r)*lam/r))\n f = 0.5 * Nb * ((1 - r) / lambdaInit)\n F = (2 / np.pi) * np.arccos(np.e ** (-f))\n lam = np.sqrt(1/4*(solDist*XsecPolarExp['Lift 
Slope']/(8*F)-lam_c)**2+solDist*XsecPolarExp['Lift Slope']*ThetaDist*r/(8*F))-(solDist*XsecPolarExp['Lift Slope']/(16*F)-lam_c/2)\n                err = np.abs((lam - lambdaInit) / lam)\n                err[np.where(np.isnan(err) == 1)] = 0\n                lambdaInit = lam\n                iter = iter + 1\n        else:\n            F = 1\n            lam = np.sqrt(1/4*(solDist*XsecPolarExp['Lift Slope']/(8*F)-lam_c)**2+solDist*XsecPolarExp['Lift Slope']*ThetaDist*r/(8*F))-(solDist*XsecPolarExp['Lift Slope']/(16*F)-lam_c/2)\n\n        lam[np.where(np.isnan(lam) == 1)] = 0\n        # lam[0] = lam[1]\n        # lam[-1] = lam[-2]\n        return lam\n\n    def PolarLookup(AoA):\n        \"\"\"\n        This function linearly interpolates the sectional blade load coefficients from the XFoil polar based on the\n        computed angle of attack distribution. If the blade section is stalled CL at that section is linearly\n        interpolated between the maximum and minimum CL, while CD is simply set to its maximum value for the\n        respective airfoil.\n        :param AoA: angle of attack distribution\n        :return:\n        :param dCL: radial lift coefficient distribution\n        :param dCD: radial drag coefficient distribution\n        \"\"\"\n        dCL = np.zeros(len(AoA))\n        dCD = np.zeros(len(AoA))\n        for i, alpha in enumerate(AoA):\n            if alpha > XsecPolar[polarInd[i]]['alphaMax']:\n                dCL[i] = np.interp(alpha ,xp = [XsecPolar[polarInd[i]]['alphaMax'],XsecPolar[polarInd[i]]['Alpha0']%(2*np.pi)],fp = [XsecPolar[polarInd[i]]['ClMax'],XsecPolar[polarInd[i]]['ClMin']])\n                dCD[i] = XsecPolar[polarInd[i]]['CdMax']\n            else:\n                AoA_ind = bisect.bisect(XsecPolar[polarInd[i]]['Polar'][:, 0], alpha)\n                dCL[i] = np.interp(alpha ,xp = [XsecPolar[polarInd[i]]['Polar'][AoA_ind-1,0],XsecPolar[polarInd[i]]['Polar'][AoA_ind,0]],fp = [XsecPolar[polarInd[i]]['Polar'][AoA_ind-1,1],XsecPolar[polarInd[i]]['Polar'][AoA_ind,1]])\n                dCD[i] = np.interp(alpha ,xp = [XsecPolar[polarInd[i]]['Polar'][AoA_ind-1,0],XsecPolar[polarInd[i]]['Polar'][AoA_ind,0]],fp = [XsecPolar[polarInd[i]]['Polar'][AoA_ind-1,2],XsecPolar[polarInd[i]]['Polar'][AoA_ind,2]])\n        return dCL, dCD\n\n    # %%\n    # This block of code defines parameters that are used throughout the remainder of the module\n    Nb = UserIn['Nb']\n    R = geomParams['R']\n    chordDist = geomParams['chordDist']\n    twistDist = geomParams['twistDist']\n    solDist = geomParams['solDist']\n    XsecLocation = UserIn['XsecLocation']\n    rho = UserIn['rho']\n    tipLoss = UserIn['tipLoss']\n    r = geomParams['r']\n    Adisk = geomParams['diskArea']\n    sol = geomParams['solidity']\n    # converts rotational rate from rev/min to rad/s\n    omega = omega / 60 * 2 * np.pi\n    # Sectional free stream velocity\n    UP = omega*geomParams['rdim']\n    # Target thrust coefficient\n    targCT = T / (rho * Adisk * (omega * R) ** 2)\n    # Converts initial guess for the collective pitch setting from degrees to radians\n    th0 = UserIn['thetaInit'] * np.pi / 180\n    # Initial guess for the radial inflow distribution\n    lamInit = np.ones(len(r))*np.sqrt(targCT / 2)\n    # Axial climb/descent inflow ratio\n    lam_c = Vz / (omega * R)\n\n    if -2 < Vz/np.sqrt(T/(2*rho*Adisk)) < 0:\n        raise ValueError('Non-physical solution, 1D assumption of momentum theory is violated')\n\n#%% This section of the code populates an array of the airfoil names based on their radial location along the blade span\n\n# initializes the expanded Xsect polar dictionary, which will store all the airfoil parameters corresponding to their\n    # radial location\n    XsecPolarExp = {}\n    polarInd = []\n    # if multiple airfoil sections are used along the blade span, this section of the code is executed\n    if len(XsecLocation) > 1:\n        ind = np.zeros((len(XsecLocation) + 
1))\n # creates an array of size r that is filled with the indices corresponding to the radial location of each airfoil\n for i, Xsec in enumerate(XsecLocation):\n ind[i] = bisect.bisect(r, Xsec)\n ind[0] = 0\n ind[-1] = len(r)\n # loops through each airfoil section and their parameters, populating an array of size r, with these parameters.\n # These arrays are then written to the XsecPolarExp dictionary.\n for i, Xsec in enumerate(XsecPolar.keys()):\n polarInd.extend([Xsec] * int(ind[i + 1] - ind[i]))\n for ii, param in enumerate(list(XsecPolar[Xsec].keys())[1:]):\n if i == 0:\n XsecPolarExp = {**XsecPolarExp, **{param:XsecPolar[Xsec][param]*np.ones(len(r))}}\n else:\n XsecPolarExp[param][int(ind[i]):] = XsecPolar[Xsec][param]\n # if only a single airfoil section is used along the blade span the section's parameters are looped over,\n # expanded to correspond to each blade section, and assembled into the XsecPolarExp dictionary.\n else:\n polarInd = list(XsecPolar.keys())*len(r)\n for i,key in enumerate(list(XsecPolar[list(XsecPolar.keys())[0]].keys())[1:]):\n XsecPolarExp[key] = np.ones(len(r))*XsecPolar[list(XsecPolar.keys())[0]][key]\n\n # %%\n\n # This function employs the non-linear least square optimization method (LM) to compute the necessary rotational rate or collective\n # pitch angle to meet the target thrust or thrust coefficient, respectively.\n if UserIn['trim'] == 1:\n trim_sol = least_squares(rpm_residuals, omega, method='lm')\n CT, dCT, dCL, dCD, lam, AoA = rpm_trim(trim_sol.x)\n omega = trim_sol.x\n th = np.array([th0, 0, 0])\n else:\n trim_sol = least_squares(coll_residuals, th0, method='lm')\n CT, dCT, dCL, dCD, lam, AoA = coll_trim(trim_sol.x+twistDist)\n th = np.array([np.squeeze(trim_sol.x), 0, 0])\n\n#%%\n U =np.sqrt((omega*geomParams['rdim'])**2+(omega*R*lam)**2)\n # Integrated lift and drag coefficients\n CL = np.trapz(dCL, r)\n CD = np.trapz(dCD, r)\n\n # Distribution and integrated of the power/torque coefficient\n dCP = 0.5 * solDist * (lam / r * dCL + dCD) * r ** 3\n CP = np.trapz(dCP, r)\n\n # Power required by the rotor\n P = CP * rho * Adisk * (omega * R) ** 3\n\n # Distribution and integrated thrust\n dT = dCT * rho * Adisk * (omega * R) ** 2\n T = np.trapz(dT, r)\n\n # Distribution and integrated torque\n dQ = dCP * rho * Adisk * (omega * R) ** 2 * R\n Q = np.trapz(dQ, r)\n\n # Rotates the normal force component by the collective pitch setting, so that a single change of base (CB) can be\n # applied to the blade geometry and loading vector in the namelist file. If the collective pitch CB is\n # unnecessary, then dFz = dT/Nb.\n dFz = dT/Nb*np.cos(-th[0])-dQ/(Nb*r*R)*np.sin(-th[0])\n\n # Rotates the inplane force component by the collective pitch setting, so that a single change of base (CB) can be\n # applied to the blade geometry and loading vector in the namelist file. 
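This rotation mirrors the dFz\n    # expression above, with the sine and cosine factors exchanged. 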
If the collective pitch CB is\n # unnecessary, then dFx =dQ/(Nb*r*R).\n dFx = dT/Nb*np.sin(-th[0])+dQ/(Nb*r*R)*np.cos(-th[0])\n\n # Figure of merit, induced power factor = 1.15\n FM = CP / (1.15 * CP + sol / 8 * CD)\n\n # Sets any infinite values of the computed force components (primarily near the blade root) equal to zero.\n dFx[np.where(np.isnan(dFx) == 1)] = 0\n dFz[np.where(np.isnan(dFz) == 1)] = 0\n dFy = np.zeros(len(r))\n\n # if the rotor is rotating CW the force distributions are flipped along the longitudinal axis of the rotor disk.\n if UserIn['rotation'] == 2:\n dFx = -dFx\n\n #%%\n # Assembles all computed load parameters into a dictionary\n loadParams = {'coll_residuals':trim_sol.fun,'th': th, 'beta': [0, 0, 0], 'CT': CT, 'T': T, 'dCT': dCT, 'dT': dT, 'CP': CP, 'P': P,\n 'Q': Q, 'dCP': dCP, 'dQ': dQ, 'dCL': dCL, 'dCD': dCD, 'CL': CL, 'CD': CD, 'FM': FM, 'AoA': AoA,'ClaDist':XsecPolarExp['Lift Slope'], 'lambda': lam,\n 'dFx': dFx, 'dFy': dFy, 'dFz': dFz, 'omega': omega,'U':U}\n return loadParams\n\n#\n# %% # figdir = os.path.abspath(os.path.join(input.dirDataFile,'Figures/CL.png')) # with cbook.get_sample_data(figdir) as\n#\n# #\n# import matplotlib.pyplot as plt\n# fig = plt.figure(figsize=[6.4, 4.5], )\n# ax = fig.gca()\n# plt.plot(r, dCL)\n# ax.set_ylabel('Lift Coefficient')\n# ax.set_xlabel('Nondimensional radial position, r/R')\n# ax.set_title('CT/$\\sigma$=0.01')\n# plt.grid()\n#\n# fig = plt.figure(figsize=[6.4, 4.5], )\n# ax = fig.gca()\n# plt.plot(r, dCD)\n# ax.set_ylabel('Drag Coefficient')\n# ax.set_xlabel('Nondimensional radial position, r/R')\n# ax.set_title('CT/$\\sigma$=0.01')\n# plt.grid()\n# plt.axes([0.25 ,1, 0.4 ,0.9])\n# [lam, F, err, i] = TipLoss(lamInit,thInit,r)\n# lam[np.where(np.isnan(lam) == 1)] = 0\n# AoA = th_init-lam/r\n# [Cl,Cd]=PolarLookup(AoA)\n# dCT = 0.5*sig*Cl*r**2*dr\n# CT_temp = np.trapz(dCT)\n\n#\n# # errCT= abs((CT_temp-CT)/CT_temp)\n# # th_init = 6*CT_temp/(np.mean(chord)*a)+3/2*np.sqrt(CT_temp/2)\n# # ii = ii+1\n","repo_name":"DanWeitsman/VSP2WOPWOP","sub_path":"loadingHover.py","file_name":"loadingHover.py","file_ext":"py","file_size_in_byte":13484,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"31039412919","text":"import time\nimport json\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom bs4 import BeautifulSoup\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nclass bot():\n\n def __init__(self):\n # load credentials to log into the textbook\n self.credentials, self.paths = self.loadCredentials()\n\n # make a driver instance to get to the textbook\n self.driver = self.makeDriver()\n\n # url of the textbook\n url = 'https://newconnect.mheducation.com/flow/connect.html'\n\n # navigate to the textbook\n self.navigate(self.driver, url)\n\n time.sleep(15)\n\n # login to textbook\n self.login(self.driver, self.credentials, self.paths)\n\n def loadCredentials(self):\n\n # load login credentials for mcgraw hill connect\n with open('credentials.json') as f:\n credentials = json.load(f)\n\n # load the paths to buttons whose locations we need\n with open('paths.json') as f:\n paths = json.load(f)\n\n return credentials, paths\n\n # make a driver\n def makeDriver(self):\n driver = webdriver.Firefox()\n return driver\n\n # navigate driver to a page\n def navigate(self, driver, destination):\n self.driver.get(destination)\n\n 
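# A minimal sketch of the flow login() is meant to implement, assuming the\n    # Selenium 3 find_element_by_css_selector API used below and the selectors\n    # stored in paths.json:\n    #     field = self.driver.find_element_by_css_selector(self.paths['username'])\n    #     field.send_keys(self.credentials['username'])\n    #     self.driver.find_element_by_css_selector(self.paths['submit']).click()\n\n    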
def login(self, driver, credentials, paths):\n\n # load all of our paths\n userPath = paths['username']\n pwPath = paths['password']\n submitButton = paths['submit']\n\n # get individual credentials\n user = credentials['username']\n pw = credentials['password']\n\n driver.implicitly_wait(20)\n\n # send username and password\n sendPW = driver.find_element_by_css_selector('#ember575')\n # sendPW.sendKeys(user)\n print(sendPW)\n\n def getVideos():\n yield None\n\nif __name__ == '__main__':\n bot = bot()\n","repo_name":"ethinallen/doNotGetDropped","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"16521579834","text":"import math\n\n\ndef find_min_dist(queue, dist):\n sorted_nodes = sorted(queue, key=dist.get)\n return queue.pop(sorted_nodes[0])\n\n\nclass Dijkstra:\n def __init__(self, gr, start_node):\n self.graph = gr\n self.start_node = start_node\n\n def solve(self, target=None):\n # print(f\"Solving for {self.start_node} -> {target}\")\n queue = {} # name:node\n dist = {}\n prev = {}\n for node in self.graph.nodes:\n dist[node] = math.inf\n queue[node] = self.graph.get_node(node)\n dist[self.start_node] = 0\n\n while queue:\n u = find_min_dist(queue, dist)\n for v in u.connections:\n alt = dist[u.name] + u.get_weight(v)\n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n if u.name == target:\n return dist[u.name]\n return dist, prev\n\n\nif __name__ == \"__main__\":\n from graph import _graph_for_testing\n\n graph = _graph_for_testing()\n dijkstra = Dijkstra(graph, \"A\")\n distance = dijkstra.solve(\"D\")\n assert distance == 3\n\n","repo_name":"mikedowney-co-uk/Advent_of_code_2022","sub_path":"aoc/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"44234282561","text":"from datetime import datetime\nfrom html.parser import HTMLParser\nfrom threading import Thread\nfrom time import sleep\n\nimport requests\nimport xmltodict\n\nfrom bot import Bot\nfrom config import Config\nfrom log import Log\nfrom users import Users\n\n\nclass News(object):\n\n def __init__(self, configpath, newspath, userspath, dispatcher):\n self.newsconfig = Config(newspath)\n self.users = Users(userspath)\n self.log = Log(configpath)\n self.bot = Bot(configpath, userspath)\n self.stopflag = False\n self.dispatcher = dispatcher\n\n def get_rss_content(self):\n url = self.newsconfig.get_value(\"NEWS\", \"url\")\n headers = {\n \"Connection\": \"keep-alive\",\n \"Cache-Control\": \"max-age=0\",\n \"DNT\": \"1\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36\",\n \"Sec-Fetch-User\": \"?1\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"Sec-Fetch-Mode\": \"navigate\",\n \"Referer\": \"https://www.heise.de/news-extern/news.html\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"de-DE,de;q=0.9,en-GB;q=0.8,en;q=0.7\"\n }\n return requests.get(url, headers=headers).text\n\n def start_thread(self):\n thread = Thread(target=self.run_thread, args=())\n thread.start()\n\n def check_for_news(self):\n xml = self.get_rss_content()\n news = xmltodict.parse(xml)\n news 
= news[\"feed\"][\"entry\"]\n last_news_id = self.newsconfig.get_value(\"NEWS\", \"last_id\")\n if last_news_id == \"\":\n last_news_id = news[0][\"id\"]\n self.newsconfig.set_value(\"NEWS\", \"last_id\", last_news_id)\n if news[0][\"id\"] != last_news_id:\n self.send_news(news, last_news_id)\n self.newsconfig.set_value(\"NEWS\", \"last_id\", news[0][\"id\"])\n\n def send_news(self, newsdict, last_news_id):\n for news in newsdict:\n if news[\"id\"] == last_news_id:\n break\n else:\n for chat_id in self.users.get_user_list():\n title = news[\"title\"][\"#text\"]\n published = datetime.fromisoformat(news[\"published\"])\n published = \"{0:%d.%m.%Y %H:%M}\".format(published)\n description = news[\"summary\"][\"#text\"]\n url_article = \"[To Article](\" + news[\"link\"][\"@href\"] + \")\"\n html_parser = ImgParse()\n html_parser.feed(news[\"content\"][\"#text\"])\n url_image = html_parser.img_src\n url_image = \"[To Image](\" + url_image + \")\"\n text = \"*\" + title + \"*\\n\" + published + \"\\n\\n\" + description + \"\\n\\n\" + url_image + \" - \" + url_article\n self.bot.send_message(\n chat_id, text, self.dispatcher.bot.send_message,\n \"Markdown\", False)\n\n def run_thread(self):\n interval = self.newsconfig.get_value(\"NEWS\", \"interval\") * 60\n slept = interval\n while True:\n if not self.stopflag:\n sleep(1)\n slept = slept + 1\n if slept >= interval:\n self.check_for_news()\n slept = 0\n else:\n break\n\n\nclass ImgParse(HTMLParser):\n img_src = \"\"\n\n def handle_starttag(self, tag, attrs):\n if tag == \"img\":\n self.img_src = dict(attrs)[\"src\"]\n\n def error(self, message):\n raise message\n","repo_name":"jpylypiw/heise-online-news-telegram-bot","sub_path":"heise_online_bot/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24506538103","text":"from typing import Optional\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\n\napp = FastAPI()\n\nclass items(BaseModel):\n id : int\n name: str\n address: str\n price: int\n tax: float\n@app.put(\"/items/item_id\") \nasync def put_data(item_id: int, item:items,q: Optional[str] = None):\n result = {\"item_id\":item_id, **item.dict()}\n if q:\n result.update({\"q\":q})\n return result","repo_name":"HemilGoyani/FastAPI","sub_path":"api program/requestBody.py","file_name":"requestBody.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20813402129","text":"import pytest\nfrom pages.contact_us import ContactUs\nfrom time import sleep\nfrom pages.base_page import Base\nfrom selenium.common.exceptions import NoSuchElementException\n\n\n@pytest.mark.usefixtures('set_up')\nclass TestPositive(Base):\n def test_submit(self):\n driver = self.driver\n contact = ContactUs(driver)\n contact.enter_name('Susan')\n contact.enter_email('susan@test.com')\n contact.enter_telephone(4564646)\n contact.submit_button()\n success_message = contact.show_success_message()\n sleep(3)\n try:\n assert success_message.text == \"Feedback has been sent to the administrator\"\n except NoSuchElementException:\n print('Field error')\n\n\n@pytest.mark.parametrize('get_func1, output', [(ContactUs.get_name, ''), (ContactUs.get_email, ''),\n (ContactUs.get_telephone, ''), (ContactUs.get_country, ''),\n (ContactUs.get_company, ''), (ContactUs.get_message, '')])\ndef test_clear_button(self, get_func1, output):\n driver = 
self.driver\n contact = ContactUs(driver)\n contact.enter_name('Susan')\n contact.enter_email('ada@test.com')\n contact.enter_telephone(78787)\n contact.enter_country('8648')\n contact.enter_company('Company name')\n contact.enter_message('Some message here')\n contact.clear_fields()\n sleep(3)\n assert get_func1(contact) == output\n","repo_name":"sus94/bdg_final_project","sub_path":"tests/test_positive.py","file_name":"test_positive.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11075898109","text":"# Defining your own exception and impportance of raise\n\nclass MyExcept(Exception):\n def __init__(self):\n return\n def __str__(self):\n print(\"My Except Occured\")\ndef myfunc():\n raise MyExcept\n\ntry:\n myfunc()\nexcept:\n print(\"Caught an exception\")\n raise\nfinally:\n print(\"Printed Finnally\")\n\nprint(\"Print\")\n","repo_name":"ChocolatePadmanaban/Learning_python","sub_path":"Day5/part8.py","file_name":"part8.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14627518641","text":"class Solution:\r\n def numberOfSubarrays(self, nums: List[int], k: int) -> int:\r\n presum = [1 if n % 2 == 1 else 0 for n in nums]\r\n\r\n for i in range(1, len(nums)):\r\n presum[i] += presum[i-1]\r\n\r\n ans = 0\r\n dic = defaultdict(int)\r\n\r\n dic[0] = 1\r\n\r\n for num in presum:\r\n ans += dic[num-k] \r\n dic[num] += 1\r\n\r\n return ans","repo_name":"nmktad/A2SV","sub_path":"count-number-of-nice-subarrays.py","file_name":"count-number-of-nice-subarrays.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23015554560","text":"#!/usr/bin/env python\n\nimport sys, os\nimport unittest\nimport subprocess\n\nsys.path.append(os.path.join(\"..\", \"lib\"))\nimport utils\n\nclass TestInstalliTerm2(utils.ConfigInstallTest):\n def test_config(self):\n Home = os.environ[\"HOME\"]\n PkgDir = os.path.dirname(__file__)\n Hash1 = subprocess.check_output(\n [\"sha1sum\", os.path.join(PkgDir, \"files\", \"com.googlecode.iterm2.plist\")])\n Hash2 = subprocess.check_output(\n [\"sha1sum\", os.path.join(os.environ[\"HOME\"], \"Library\", \"Preferences\",\n \"com.googlecode.iterm2.plist\")])\n self.assertEqual(Hash1.split()[0], Hash2.split()[0])\n\n def test_app_install(self):\n self.assertTrue(os.path.exists(\"/Applications/iTerm.app\"))\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"MetroWind/dotfiles-mac","sub_path":"iterm2/test-mac.py","file_name":"test-mac.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"12468118366","text":"import numpy as np\nfrom Object import Object\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nimport Utils\n'''\n@author Truong Huy Cuong\n'''\nclass Wall(Object):\n def getVelocity(self):\n return np.array([0,0])\n def getShape(self):\n return 1#shape.SQUARE\n def __init__(self,x,y,width,height,parent=None):\n super(Wall, self).__init__()\n self.rsize = np.array([width,height])\n self.pos = np.array([x,y])\n self.normal = np.array([0,0])\n\n self.dots = []\n self.dots.append(np.array([x-width/2,y-height/2]))\n self.dots.append(np.array([x+width/2,y-height/2]))\n self.dots.append(np.array([x-width/2,y+height/2]))\n 
self.dots.append(np.array([x+width/2,y+height/2]))\n\n def getDot(self,i):\n return self.dots[i]\n\n def getNormal(self,point):\n normal = np.zeros(2)\n TOP_LEFT = 0\n TOP_RIGHT = 1\n BOT_LEFT = 2\n BOT_RIGHT = 3\n if point[0] >= self.dots[TOP_LEFT][0] and point[0]<= self.dots[TOP_RIGHT][0]:\n if point[1] == self.dots[TOP_LEFT][1]:\n normal[1] = -1\n elif point[1] == self.dots[BOT_LEFT][1]:\n normal[1] = 1\n if point[1] >= self.dots[TOP_LEFT][1] and point[1] <= self.dots[BOT_LEFT][1]:\n if point[0] == self.dots[TOP_LEFT][0]:\n normal[0] = -1\n elif point[0] ==self.dots[BOT_RIGHT][0]:\n normal[0] = 1\n\n return normal\n\n def checkOnRect(self,point):\n # True when the point lies on or inside the wall's bounding box\n rtl = self.dots[0]\n rbr = self.dots[3]\n return not(point[0] < rtl[0] or point[0] > rbr[0] or\n point[1] < rtl[1] or point[1] > rbr[1]\n )\n\n def setSize(self,width,height):\n self.rsize = np.array([width,height])\n\n def draw(self,qp):\n origin = np.array([self.pos[0] - self.rsize[0] / 2, self.pos[1] - self.rsize[1] / 2])\n pen = QPen(Qt.red, 1.5, Qt.SolidLine)\n qp.setPen(pen)\n qp.drawRect(origin[0],origin[1],self.rsize[0],self.rsize[1])\n\n def draw3(self,screen):\n import pygame as pg # local import: pygame is only needed by this alternate renderer\n origin = np.array([self.pos[0] - self.rsize[0] / 2, self.pos[1] - self.rsize[1] / 2])\n pg.draw.rect(screen, Utils.BLACK, [origin[0], origin[1], self.rsize[0], self.rsize[1]], 2)\n\n","repo_name":"cuongth95/MRS","sub_path":"Wall.py","file_name":"Wall.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"452937390","text":"from Chessboard import Chessboard\n\ncb = Chessboard()\n\n\ndef test_1():\n cb.move(5 + 2j, 5 + 4j)\n cb.move(5 + 7j, 5 + 5j)\n assert not cb.move(5 + 4j, 5 + 5j)[0]\n\n\ndef test_2():\n cb.move(2 + 1j, 3 + 3j)\n cb.move(6 + 8j, 1 + 3j)\n assert cb.move(2 + 2j, 1 + 3j)[0]\n assert not cb.chess_board[2 + 2j]\n\n\ndef test_3():\n cb.move(4 + 8j, 8 + 4j)\n cb.move(8 + 2j, 8 + 3j)\n assert cb.move(8 + 4j, 8 + 3j)[0]\n cb.move(8 + 1j, 8 + 3j)\n assert not cb.chess_board[8 + 1j]\n assert cb.chess_board[8 + 3j].name == 'r' and cb.chess_board[8 + 3j].fraction\n\n\ndef test_4():\n cb.move(4 + 7j, 4 + 6j)\n cb.move(3 + 3j, 2 + 5j)\n cb.move(1 + 7j, 1 + 6j)\n assert cb.move(2 + 5j, 3 + 7j)[0]\n assert cb.chess_board[3 + 7j].name == 'n'\n\n\ndef test_is_king_safe():\n cb.restart()\n assert cb.is_king_safe()[0]\n cb.move(6 + 2j, 6 + 4j)\n cb.move(5 + 7j, 5 + 6j)\n cb.move(1 + 2j, 1 + 3j)\n assert cb.move(4 + 8j, 8 + 4j)[1] == 'Шах от фигуры с позиции h4'\n assert not cb.is_king_safe()[0]\n assert not cb.check_move(1 + 3j, 1 + 4j)\n assert cb.check_move(7 + 2j, 7 + 3j)\n assert cb.move(1 + 3j, 1 + 4j)[1] == \"Вы не можете так ходить, пока вам стоит шах\"\n assert cb.move(7 + 2j, 7 + 3j)[0]\n assert cb.move(1 + 7j, 1 + 6j)[1] == 'Ход завершен'\n assert cb.is_king_safe()[0]\n\n\ndef test_check_checkmate_1():\n cb.restart()\n cb.move(7 + 2j, 7 + 4j)\n cb.move(5 + 7j, 5 + 5j)\n cb.move(6 + 2j, 6 + 3j)\n cb.move(4 + 8j, 8 + 4j)\n assert cb.check_checkmate()[0]\n assert cb.check_checkmate()[1] == set()\n\n\ndef test_check_checkmate_2():\n cb.restart()\n cb.move(6 + 2j, 6 + 3j)\n cb.move(5 + 7j, 5 + 5j)\n cb.move(1 + 2j, 1 + 3j)\n cb.move(4 + 8j, 8 + 4j)\n\n assert not cb.is_king_safe()[0]\n assert not cb.check_checkmate()[0]\n assert cb.check_checkmate()[1] == {(7 + 2j, 7 + 3j)}\n\n\ndef test_chessboard_with_checkmate():\n cb.restart()\n cb.move(5 + 2j, 5 + 4j)\n cb.move(5 + 7j, 5 + 5j)\n cb.move(6 + 1j, 3 + 4j)\n cb.move(2 + 8j, 3 + 6j)\n cb.move(4 + 1j, 8 + 5j)\n cb.move(7 + 8j, 6 + 6j)\n assert 
cb.move(8 + 5j, 6 + 7j)[1] == 'Игра завершена, Чёрным поставлен мат'\n\ndef test_update_pawn():\n cb.restart()\n assert cb.update_pawn(1 + 8j, 'Ферзь')[0]\n assert cb.chess_board[1 + 8j].name == 'q'\n assert not cb.chess_board[1 + 8j].fraction\n\n cb.restart()\n cb.move(1 + 2j, 1 + 3j)\n assert cb.update_pawn(2 + 1j, 'Ладья')[0]\n assert cb.chess_board[2 + 1j].name == 'r'\n assert cb.chess_board[2 + 1j].fraction\n\n assert not cb.update_pawn(2 + 3j, 'sdf')[0]\n\n\ndef test_make_an_passant():\n cb.restart()\n cb.move(2 + 2j, 2 + 4j)\n cb.move(8 + 7j, 8 + 5j)\n cb.move(2 + 4j, 2 + 5j)\n cb.move(1 + 7j, 1 + 5j)\n assert cb.make_en_passant(2 + 5j, 1 + 6j)\n assert not cb.chess_board[1 + 5j]\n assert cb.chess_board[1 + 6j]\n\n cb.restart()\n\n cb.move(4 + 2j, 4 + 4j)\n cb.move(3 + 7j, 3 + 5j)\n cb.move(4 + 4j, 4 + 5j)\n cb.move(8 + 7j, 8 + 5j)\n assert not cb.make_en_passant(4 + 5j, 3 + 6j)\n cb.move(1 + 2j, 1 + 3j)\n cb.move(5 + 7j, 5 + 5j)\n assert cb.make_en_passant(4 + 5j, 5 + 6j)\n\n cb.restart()\n cb.move(4 + 2j, 4 + 4j)\n cb.move(5 + 7j, 5 + 5j)\n cb.move(4 + 4j, 4 + 5j)\n cb.move(3 + 7j, 3 + 5j)\n assert not cb.move(4 + 5j, 5 + 6j)[0]\n assert cb.move(4 + 5j, 3 + 6j)[0]\n\ndef test_castling():\n cb.restart()\n cb.move(1 + 2j, 1 + 4j)\n assert cb.castling[True]\n cb.move(5 + 7j, 5 + 5j)\n assert cb.castling[False]\n cb.move(1 + 1j, 1 + 2j)\n assert cb.castling[True]\n assert cb.castling[False]\n cb.move(5 + 8j, 5 + 7j)\n assert not cb.castling[False]\n\n cb.restart()\n cb.move(7 + 2j, 7 + 4j)\n cb.move(8 + 7j, 8 + 5j)\n cb.move(6 + 1j, 7 + 2j)\n cb.move(8 + 5j, 8 + 4j)\n cb.move(7 + 1j, 8 + 3j)\n cb.move(1 + 7j, 1 + 5j)\n assert cb.move(5 + 1j, 7 + 1j)[0]\n assert cb.chess_board[7 + 1j].name == 'k'\n assert cb.chess_board[6 + 1j].name == 'r'\n\ndef test_short_notation_to_complex_numbers():\n cb.restart()\n assert cb.find_chesspiece_by_name('p') == {1 + 2j, 2 + 2j, 3 + 2j, 4 + 2j, 5 + 2j, 6 + 2j, 7 + 2j, 8 + 2j}\n assert cb.find_chesspiece_by_name('q') == {4 + 1j}\n assert cb.find_chesspiece_by_name('n') == {2 + 1j, 7 + 1j}\n cb.move(2 + 2j, 2 + 4j)\n assert cb.find_chesspiece_by_name('k') == {5 + 8j}\n assert cb.short_notation_to_complex_numbers('O-O') == (5 + 8j, 7 + 8j)\n cb.move(3 + 7j, 3 + 5j)\n assert cb.short_notation_to_complex_numbers('O-O') == (5 + 1j, 7 + 1j)\n cb.restart()\n assert cb.short_notation_to_complex_numbers('b4') == (2 + 2j, 2 + 4j)\n cb.move(1 + 2j, 1 + 4j)\n assert cb.short_notation_to_complex_numbers('Nh6') == (7 + 8j, 8 + 6j)\n cb.restart()\n cb.move(1 + 2j, 1 + 4j)\n cb.move(2 + 8j, 1 + 6j)\n cb.move(8 + 2j, 8 + 4j)\n cb.move(1 + 6j, 3 + 5j)\n cb.move(1 + 1j, 1 + 3j)\n cb.move(3 + 5j, 2 + 3j)\n cb.move(8 + 1j, 8 + 3j)\n cb.move(7 + 7j, 7 + 6j)\n assert cb.short_notation_to_complex_numbers('Rhxb3') == (8 + 3j, 2 + 3j)\n assert cb.short_notation_to_complex_numbers('Raxb3') == (1 + 3j, 2 + 3j)\n cb.restart()\n cb.move(2 + 2j, 2 + 4j)\n cb.move(3 + 7j, 3 + 5j)\n cb.move(4 + 2j, 4 + 4j)\n cb.move(8 + 7j, 8 + 6j)\n assert cb.short_notation_to_complex_numbers('bxc5') == (2 + 4j, 3 + 5j)\n assert cb.short_notation_to_complex_numbers('dxc5') == (4 + 4j, 3 + 5j)\n cb.restart()\n cb.move(2 + 2j, 2 + 4j)\n cb.move(2 + 7j, 2 + 6j)\n cb.move(2 + 4j, 2 + 5j)\n cb.move(3 + 7j, 3 + 5j)\n assert cb.short_notation_to_complex_numbers('bxc6') == (2 + 5j, 3 + 
6j)\n","repo_name":"Wodey/chess_university_project","sub_path":"tests/test_Chessboard.py","file_name":"test_Chessboard.py","file_ext":"py","file_size_in_byte":5695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"31480043067","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom _MOM.import_MOM import *\nfrom _FFM import FFM\n\nfrom _GTW._OMP._DNS.Attr_Type import A_DNS_Label\n\nimport _FFM.Net_Device\nimport _FFM.Belongs_to_Net_Device\nimport _FFM.Belongs_to_Node\n\nfrom _GTW._OMP._NET import NET\nimport _GTW._OMP._NET.Attr_Type\n\n_Ancestor_Essence = FFM.Link1\n_Mixin_1 = FFM.Belongs_to_Node_Left\n_Mixin_2 = FFM.Belongs_to_Net_Device_Left\n\nclass Net_Interface (_Mixin_1, _Mixin_2, _Ancestor_Essence) :\n \"\"\"Model a network interface of a FFM device\"\"\"\n\n is_partial = True\n is_relevant = True\n\n class _Attributes \\\n ( _Mixin_1._Attributes\n , _Mixin_2._Attributes\n , _Ancestor_Essence._Attributes\n ) :\n\n _Ancestor = _Ancestor_Essence._Attributes\n\n ### Primary attributes\n\n class left (_Ancestor.left) :\n \"\"\"Network device the interface is connected to.\"\"\"\n\n role_type = FFM.Net_Device\n role_name = \"device\"\n Kind_Mixins = (Attr.Init_Only_Mixin, )\n show_in_ui_selector= False\n\n # end class left\n\n class mac_address (NET.A_MAC_Address) :\n \"\"\"MAC address of interface.\"\"\"\n\n kind = Attr.Primary_Optional\n\n # end class mac_address\n\n class name (A_DNS_Label) :\n \"\"\"Name of the interface.\"\"\"\n\n kind = Attr.Primary_Optional\n completer = Attr.Completer_Spec (2, Attr.Selector.primary)\n\n # end class name\n\n ### Non-primary attributes\n\n class is_active (A_Boolean) :\n \"\"\"Indicates if this interface is active.\"\"\"\n\n kind = Attr.Optional\n\n # end class is_active\n\n class desc (A_Text) :\n \"\"\"Description of interface\"\"\"\n\n kind = Attr.Optional\n ui_name = \"Description\"\n\n # end class desc\n\n # end class _Attributes\n\n# end class Net_Interface\n\nif __name__ != \"__main__\" :\n FFM._Export (\"*\")\n### __END__ FFM.Net_Interface\n","repo_name":"FFM/FFM","sub_path":"_FFM/Net_Interface.py","file_name":"Net_Interface.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"6319822010","text":"from flask import Blueprint, redirect, render_template, request, flash, make_response, url_for, abort\nfrom flask_login import login_required, current_user\nfrom flaskapp.database import db, Credential, Objective\nfrom flaskapp.auth import admin_required, current_user\nfrom flaskapp.forms import ObjectiveForm\nfrom flaskapp.views.home import home\nfrom stravalib import Client\nfrom datetime import datetime\n\nimport os\nimport requests\n\nobjectives = Blueprint('objectives', __name__)\n\nDATASERVICE = os.environ['OBJECTIVE_SERVICE']\n\n\n@objectives.route('/objectives', methods=['GET'])\n@login_required\ndef _objectives():\n reply = requests.get(DATASERVICE + '/objectives?user_id=' + str(current_user.dataservice_user_id))\n if reply.status_code == 200:\n return render_template(\"objectives.html\", objectives=reply.json())\n else:\n return render_template(\"objectives.html\")\n\n\n@objectives.route('/create_objective', methods=['GET', 'POST'])\n@login_required\ndef create_objective():\n status = 200\n \n form = ObjectiveForm()\n if request.method == 'POST':\n\n if form.validate_on_submit():\n new_objective = Objective()\n 
form.populate_obj(new_objective)\n new_objective.user_id = current_user.dataservice_user_id\n json = new_objective.to_json()\n\n reply = requests.post(DATASERVICE + '/objectives', json=json)\n return redirect('/objectives'), status\n else:\n # Bad data were sent\n status = 400\n \n return render_template('create_objective.html', form=form), status\n","repo_name":"ytbeepbeep/flask-app","sub_path":"flaskapp/views/objectives.py","file_name":"objectives.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34078825254","text":"\"\"\"add state history and summary\n\nRevision ID: 7c4641194d0e\nRevises: 68c5abc6c517\nCreate Date: 2023-12-14 23:27:36.892610\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '7c4641194d0e'\ndown_revision = '68c5abc6c517'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('equipment_state_history',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('equipment_id', sa.Integer(), nullable=True),\n sa.Column('previous_quantity', sa.Integer(), nullable=True),\n sa.Column('new_quantity', sa.Integer(), nullable=True),\n sa.Column('previous_state', sa.String(), nullable=True),\n sa.Column('new_state', sa.String(), nullable=True),\n sa.Column('changed_at', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['equipment_id'], ['equipments.id'], name=op.f('fk_equipment_state_history_equipment_id_equipments')),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('equipment_state_summaries',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('equipment_history_id', sa.Integer(), nullable=True),\n sa.Column('date', sa.Date(), nullable=True),\n sa.Column('state', sa.String(), nullable=True),\n sa.Column('total_quantity', sa.Integer(), nullable=True),\n sa.Column('total_idle', sa.Integer(), nullable=True),\n sa.Column('total_rented_out', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['equipment_history_id'], ['equipment_state_history.id'], name=op.f('fk_equipment_state_summaries_equipment_history_id_equipment_state_history')),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('equipment_state_summaries')\n op.drop_table('equipment_state_history')\n # ### end Alembic commands ###\n","repo_name":"mako314/EquipMe","sub_path":"server/migrations/versions/7c4641194d0e_add_state_history_and_summary.py","file_name":"7c4641194d0e_add_state_history_and_summary.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"33356218623","text":"# ------------------------------------------------- #\r\n# Title: Assignment 07\r\n# Description: Adds donation information to donationList by utilizing pickling and structured error handling\r\n# ChangeLog: (Who, When, What)\r\n# DTil, 03/01/2023, Created Script\r\n# ------------------------------------------------- #\r\n\r\n\r\n# Data ---------------------------------------------------------------------- #\r\nimport pickle\r\n\r\nfile_name_str = \"donationList.dat\" # Name of the data file\r\ndonorInfo = {} # A row of data separated into elements of a dictionary {Name, Donation}\r\ndonatorList = [] # list that acts as a table of rows\r\n\r\n\r\n# Processing --------------------------------------------------------------- #\r\n\r\n\r\nclass Processor:\r\n \"\"\" Performs Processing Tasks \"\"\"\r\n\r\n @staticmethod\r\n def read_data_from_file(file_name, list_of_rows):\r\n \"\"\" Reads binary data from file into list of dictionary rows\r\n\r\n :param file_name: (string) with name of file\r\n :param list_of_rows: (list) you want filled with file data\r\n :return: (list) of dictionary rows\r\n \"\"\"\r\n list_of_rows = [] # Clears current data\r\n try:\r\n objFile = open(file_name, \"rb\") # open file in binary format\r\n except FileNotFoundError: # Exception if no file is found, typical for initial program start\r\n print(\"There is no existing donation list file.\\n\"\r\n \"Starting new file.\")\r\n return list_of_rows # return empty list\r\n\r\n tempfile = [] # temp list\r\n try:\r\n tempfile = pickle.load(objFile) # load a dictionary row of data into temp list\r\n for row in tempfile: # iterate through each dict in file\r\n name, donation = row.values()\r\n list_of_rows.append({\"Name\": str(name).title(), \"Donation\": int(donation)}) # add dict to list\r\n objFile.close()\r\n print(\"Data has been uploaded from the file.\")\r\n except pickle.UnpicklingError: # Exception handling for non-binary file\r\n print(\"This file is not in binary format\\n\"\r\n \"Starting new file.\")\r\n return list_of_rows\r\n\r\n @staticmethod\r\n def write_data_to_file(file_name, list_of_rows):\r\n \"\"\" Write data from list of dictionary rows to file in binary\r\n\r\n :param file_name: (string) with name of file\r\n :param list_of_rows: (list) you want to save to file\r\n :return: nothing\r\n \"\"\"\r\n objFile = open(file_name, \"wb\") # open file to write in binary\r\n pickle.dump(list_of_rows, objFile) # write entire list to file\r\n objFile.close()\r\n\r\n @staticmethod\r\n def add_entry_to_list(donorName, donorValue, list_of_rows):\r\n \"\"\" Adds donor and donation to the list\r\n\r\n :param donorName: (string) with donor's full name\r\n :param donorValue: (int) with donation value\r\n :param list_of_rows: (list) you want to add more data to\r\n :return: (list) of dictionary rows\r\n \"\"\"\r\n row = {\"Name\": str(donorName), \"Donation\": int(donorValue)} # convert input info into dict object\r\n list_of_rows.append(row) # add dictionary object to list\r\n print(\"%s has made a donation of $%i!\\n\" % (donorName, donorValue))\r\n 
return list_of_rows # return list with new data\r\n\r\n\r\n# Presentation (Input/Output) -------------------------------------------- #\r\n\r\nclass IO:\r\n \"\"\" Performs Input and Output tasks\"\"\"\r\n\r\n @staticmethod\r\n def input_entry_name():\r\n \"\"\" Gets full name to be added to the list\r\n\r\n :return: (string) with name\r\n \"\"\"\r\n # Exception Handling for First Name Input\r\n while True:\r\n try:\r\n nameFirst = input(\"First name? \") # User input first name\r\n if any(char == \" \" for char in nameFirst.strip()): # for cases of entering whole name\r\n raise Exception(\"Enter only the first name.\")\r\n elif not nameFirst.strip().isalpha(): # exception for non-alphabetical input\r\n raise Exception(\"First name must be of letters.\")\r\n except Exception as e:\r\n print(e) # Print exception\r\n else:\r\n break\r\n print()\r\n # Exception Handling for Last name Input\r\n while True:\r\n try:\r\n nameLast = input(\"Last name? \") # User input last name\r\n if any(char == \" \" for char in nameLast.strip()): # for cases of entering a space\r\n raise Exception(\"Enter only the last name.\")\r\n if not nameLast.strip().isalpha(): # exception for non-alphabetical input\r\n raise Exception(\"Last name must be of letters.\")\r\n else: # return whole name as single string if no exceptions\r\n return (nameFirst.strip().capitalize() + \" \" + nameLast.strip().capitalize())\r\n except Exception as e:\r\n print(e) # Print exception\r\n print()\r\n\r\n @staticmethod\r\n def input_entry_donation():\r\n \"\"\" Gets donation value to be added to the list\r\n\r\n :return: (int) of donation value\r\n \"\"\"\r\n while True:\r\n try:\r\n donation = int(input(\"Enter donation amount - $\")) # User int input\r\n except ValueError as e: # Exception for non numerical data\r\n print(\"Enter a valid whole dollar amount.\")\r\n else:\r\n return donation # return value if no exception\r\n print()\r\n\r\n @staticmethod\r\n def display_donator_list(list_of_rows):\r\n \"\"\" Shows current list in the list of dictionary rows\r\n\r\n :param list_of_rows: (list) of rows you want to display\r\n :return: nothing\r\n \"\"\"\r\n print(\"\\n******* The current donators are: *******\")\r\n for row in list_of_rows:\r\n print(row[\"Name\"] + \" - $\" + str(row[\"Donation\"]))\r\n print(\"*******************************************\")\r\n print() # Add an extra line for looks\r\n\r\n @staticmethod\r\n def exit_option():\r\n \"\"\" Gets user input to continue or exit the program\r\n\r\n :return: Boolean\r\n \"\"\"\r\n while True:\r\n try:\r\n exit = input(\"Add another donation? 
[Y/N] \") # Prompt user input\r\n if not (exit.lower() == \"y\" or exit.lower() == \"n\"): # Raise Exception if input is neither Y or N\r\n raise Exception(\"Choose Y or N\")\r\n elif exit.lower() == \"n\": # Exit program prompt\r\n IO.display_donator_list(donatorList) # Display current list one last time\r\n print(\"Thank you all for the donation!\")\r\n return True\r\n elif exit.lower() == \"y\": # Continue program prompt\r\n break\r\n except Exception as e:\r\n print(e) # print exception\r\n print()\r\n return False\r\n\r\n\r\n# Main Body of Script ------------------------------------------------------ #\r\n\r\n\r\n# When program starts, load data from donationList.dat\r\ndonatorList = Processor.read_data_from_file(file_name=file_name_str, list_of_rows=donatorList)\r\n\r\nwhile True:\r\n IO.display_donator_list(donatorList) # displays current list\r\n\r\n name = IO.input_entry_name() # gets name input from user\r\n donation = IO.input_entry_donation() # gets donation input from user\r\n print()\r\n Processor.add_entry_to_list(donorName=name, donorValue=donation, list_of_rows=donatorList) # Adds data to list\r\n\r\n Processor.write_data_to_file(file_name=file_name_str, list_of_rows=donatorList) # Save data to file\r\n\r\n exit = IO.exit_option() # Prompts to continue or exit program\r\n if exit == True:\r\n break\r\n\r\n","repo_name":"dtil-gh/IntroToProg-Python-Mod07","sub_path":"Assignment07.py","file_name":"Assignment07.py","file_ext":"py","file_size_in_byte":7808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4222006678","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\n# This file is only used if you use `make publish` or\n# explicitly specify it as your config file.\n\nimport os\nimport sys\n\nsys.path.append(os.curdir)\nfrom pelicanconf import *\n\nSITEURL = \"//minchin.ca\"\nRELATIVE_URLS = False\nLOAD_CONTENT_CACHE = False\n# OUTPUT_PATH = '../minchinweb.github.io-master/' # default is 'output/'\n\n# FEED_ALL_ATOM = 'feeds/all.atom.xml'\n# CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'\n\n# DELETE_OUTPUT_DIRECTORY = True\n\n# Following items are often useful when publishing\n\n# DISQUS_SITENAME = \"\"\nGOOGLE_ANALYTICS_UNIVERSAL = \"UA-384291-3\"\nGOOGLE_ANALYTICS_UNIVERSAL_PROPERTY = \"minchin.ca\"\n\nPLUGINS = PLUGINS + [\n # 'assets',\n \"minify\", # pelican-minify\n \"extended_sitemap\", # pelican-extended-sitemap\n # \"minchin.pelican.plugins.optimize_images\", # need executables for Linux to do this on Travis-CI\n # \"minchin.pelican.plugins.cname\",\n # \"minchin.pelican.plugins.nojekyll\",\n]\n","repo_name":"MinchinWeb/minchinweb.github.io","sub_path":"publishconf.py","file_name":"publishconf.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"1648209300","text":"import io\nimport json\nfrom dataclasses import dataclass\n\nimport pandas as pd\nimport requests\nimport streamlit as st\n\n\n@dataclass\nclass Pokemon:\n \"\"\"\n Class representation of a Pokemon\n \"\"\"\n\n name: str\n base_experience: int\n sprite_front_default: str\n sprite_back_default: str\n stats: dict\n\n @property\n def front_image(self):\n picture_resp = requests.get(self.sprite_front_default)\n in_memory_picture = io.BytesIO(picture_resp.content)\n return in_memory_picture\n\n @property\n def back_image(self):\n picture_resp = requests.get(self.sprite_back_default)\n in_memory_picture = 
io.BytesIO(picture_resp.content)\n return in_memory_picture\n\n @property\n def stats_series(self) -> pd.Series:\n \"\"\"\n Renders a pokemons stats into a more readable format\n\n Returns\n -------\n dict\n Rendered dict with all pokmeon fighting stats\n \"\"\"\n stats_dict = {stat[\"stat\"][\"name\"]: stat[\"base_stat\"] for stat in self.stats}\n return pd.Series(stats_dict)\n\n @classmethod\n def from_dict(cls, payload: dict) -> \"Pokemon\":\n \"\"\"\n Creates a Pokemon instance from a json payload\n\n Parameters\n ----------\n payload : dict\n A dictionary of parameters required to create an instance of Pokemon\n\n Returns\n -------\n Pokemon\n Returns an instance of a Pokemon\n \"\"\"\n return cls(\n name=payload[\"name\"],\n base_experience=payload[\"base_experience\"],\n sprite_front_default=payload[\"sprites\"][\"front_default\"],\n sprite_back_default=payload[\"sprites\"][\"back_default\"],\n stats=payload[\"stats\"],\n )\n\n\ndef load_pokemon(pokemon_name: str) -> dict:\n \"\"\"\n Loads a pokemon from pokeapi\n\n Parameters\n ----------\n pokemon_name : str\n Name of the pokemon in lower case\n\n Returns\n -------\n dict\n The resp from pokeapi\n \"\"\"\n poke_url = f\"https://pokeapi.co/api/v2/pokemon/{pokemon_name}/\"\n resp = requests.get(poke_url)\n return json.loads(resp.text)\n\n\n@st.cache\ndef load_all_pokemons() -> list:\n \"\"\"\n Get's a list of all pokemons\n\n Returns\n -------\n list\n List with all pokemon names\n \"\"\"\n resp = requests.get(\"https://pokeapi.co/api/v2/pokemon?limit=100000&offset=0\").text\n resp_to_json = json.loads(resp)\n return [pokemon[\"name\"] for pokemon in resp_to_json[\"results\"]]\n\n\ndef pokemon_template(pokemon_to_render: Pokemon):\n \"\"\"\n Renders a simple template for a given pokemon\n\n Parameters\n ----------\n pokemon_to_render : Pokemon\n The pokemon you want to show in the dashboard\n \"\"\"\n st.subheader(pokemon_to_render.name)\n st.image([pokemon_to_render.front_image, pokemon_to_render.back_image])\n st.bar_chart(pokemon_to_render.stats_series)\n\n\n# --------------\n# Streamlit app\n# --------------\nst.title(\"Online Pokedex\")\n\npokemon_choice = st.sidebar.selectbox(\"Select Pokemon\", load_all_pokemons())\n\nbulbasaur = Pokemon.from_dict(load_pokemon(pokemon_choice))\n\npokemon_template(bulbasaur)\n","repo_name":"faurholdt/pokemon_dashboard","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18332794581","text":"import sys\nimport math\nfrom collections import defaultdict\nfrom collections import deque\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\n\n\ndef main():\n N, M, K, Q = NMI()\n C = [NLI() for _ in range(N)]\n C.sort()\n now = 0\n T_cans = deque()\n ans = 10**20\n nt = 0\n for i, (p, t) in enumerate(C):\n if i < M:\n now += p\n if t:\n T_cans.append(p)\n nt += 1\n\n if i == M-1:\n ans = min(ans, now + Q * ((nt + K - 1) // K))\n\n else:\n if t == 1: continue\n if nt == 0: break\n now -= T_cans.pop()\n now += p\n nt -= 1\n ans = min(ans, now + Q * ((nt+K-1) // K))\n\n print(ans)\n\n\nif __name__ == \"__main__\":\n 
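# main() does a greedy sweep: take the M cheapest cans first, then swap the priciest\n # selected type-1 cans for cheap type-0 ones to shrink the Q * ceil(nt / K) surcharge\n 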
main()\n","repo_name":"Mao-beta/AtCoder","sub_path":"PAST/PAST06H.py","file_name":"PAST06H.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"69883944723","text":"from src.screen_processor import MapleScreenCapturer\nimport cv2, imutils, os\nfrom src.monster_detector import MonsterTemplateDetector\nfrom src.player_medal_detector import PlayerMedalDetector\nimport numpy as np\nos.chdir(\"../src\")\nwincap = MapleScreenCapturer()\ndetector = MonsterTemplateDetector(\"img/ArcaneRiver/ChewChew/츄츄 아일랜드.json\")\nplayerdetector = PlayerMedalDetector()\ndetector.create_template(\"mob1.png\")\nplayerdetector.create_template(\"medal1.png\")\ncapture_width = 700\ncapture_height = 200\nwhile True:\n img = wincap.capture(set_focus=False)\n grayscale = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2GRAY)\n\n playerloc = playerdetector.find(grayscale)\n detected = []\n if playerloc:\n bbox = grayscale[max((playerloc[1] - int(capture_height / 2)), 0):max((playerloc[1] + int(capture_height / 2)), 0),\n (playerloc[0] - int(capture_width / 2)):(playerloc[0] + int(capture_width / 2))]\n cv2.rectangle(grayscale, (playerloc[0] - int(capture_width / 2), playerloc[1] - int(capture_height / 2)), (playerloc[0] + int(capture_width / 2), playerloc[1] + int(capture_height / 2)),(0,0,255), 3)\n detected = detector.find(grayscale)\n cv2.circle(grayscale, playerloc, 15, (0,0,255), -1)\n\n if detected:\n for point in detected:\n cv2.circle(grayscale, (playerloc[0] - int(capture_width / 2)+point[0], playerloc[1] - int(capture_height / 2)+point[1]), 20, (0,0,255), -1)\n\n cv2.imshow(\"\", imutils.resize(grayscale, width=500))\n inp = cv2.waitKey(1)\n if inp == ord('q'):\n cv2.destroyAllWindows()\n break\n","repo_name":"Dashadower/MS-Visionify","sub_path":"tests/non-unittests/src.monster_detector.py","file_name":"src.monster_detector.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"17617787113","text":"\nfrom convolveDistributionWithLineOfSight import *\n\ndef main():\n\n dataDir = '/Users/DavidHarvey/Documents/Work/TimeDelay/output/CDM/z_0.25'\n jsonFile = dataDir+'/B002_cluster_0_2_total_sph.fits.py.raw.json'\n\n\n \n ax = plt.gca()\n \n hubbleParameter = 70.\n cluster = timeDelayDistribution( jsonFile, \\\n newHubbleParameter=hubbleParameter, \\\n timeDelayBins=np.linspace(1,2,50), \\\n outputPklFile='../output/CDM/singleSourcePlane/singleSourcePlane_%i.pkl' % hubbleParameter)\n sourcePlane = cluster.finalPDF['finalLoS'][-1]\n\n \n plt.plot(sourcePlane.timeDelayPDF['x'], sourcePlane.timeDelayPDF['y'],\\\n label='Lens Plane')\n \n plt.plot(sourcePlane.timeDelayWithLineOfSightPDF['x'], \\\n sourcePlane.timeDelayWithLineOfSightPDF['y'], \\\n label='los')\n\n\n ax.set_xlabel(r'log($\\Delta t$/ days)')\n ax.set_ylabel(r'P(log[$\\Delta t$])')\n ax.set_ylim(0., 8.)\n ax.set_xlim(0.85, 2.3)\n \n ax.legend()\n plt.show()\n\n\n \n z = [i.data['z'] for i in cluster.finalPDF['finalLoS']]\n w = [i.data['weight'] for i in cluster.finalPDF['finalLoS']]\n \n \n plt.plot(z,w)\n plt.yscale('log')\n plt.show()\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"davidharvey1986/timeDelay","sub_path":"plotSingleHaloMultipleSource.py","file_name":"plotSingleHaloMultipleSource.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"21741855790","text":"import numpy as np\nimport pandas as pd\nimport os, subprocess, filetype, re\n\n# python3 /data/project/Alzheimer/YSscript/cle/3.BioData_Moore_2D.py\n\n# 2D 가능한 모든 Moore dataset을 돈다\n# 총 52개의 sample\n# monoclonal (colon_crypt, small_bowel_crypt_, appendix_crypt) : 10개\n# biclonal : (prostate_Acinus, testis_seminiferous_tubule) : 13개\n# polyclonal (oesophagus_epithelium, bladder ,ureter, skin, thyroid_follicle, bronchus) : 29개\n\ndef out(command): \n result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True) \n return result.stdout.rstrip(\"\\n\")\n\n\n\n########################################################################################################################################################################\n\ndef Other_tissues (SCRIPT_DIR, DIR ) :\n inputdf = pd.read_csv(\"/data/project/Alzheimer/EM_cluster/EM_input/summary/Moore_2_sample.txt\", sep = \"\\t\")\n TISSUE_set = set([])\n\n\n for k in range (inputdf.shape[0]):\n DONOR, TISSUE, SAMPLE = inputdf.iloc[k][\"DONOR\"], inputdf.iloc[k][\"TISSUE\"], inputdf.iloc[k][\"SAMPLE\"]\n INPUT_TSV = \"/\".join([\"/data/project/Alzheimer/EM_cluster/EM_input/Moore_2_sample\", DONOR, TISSUE, SAMPLE+\"_input.txt\"])\n\n # if TISSUE not in [\"adrenal_gland_zona_glomerulosa\", \"visceral_fat\", \"bronchus_epithelium\"]:\n # continue\n\n kwargs = {\"INPUT_TSV\" : INPUT_TSV, \"MODE\" : \"Both\", \"NUM_CLONE_TRIAL_START\" : 1, \"NUM_CLONE_TRIAL_END\" : 5, \n \"TRIAL_NO\" : 5, \"DEPTH_CUTOFF\" : 10, \"KMEANS_CLUSTERNO\" : 6, \"MIN_CLUSTER_SIZE\" : 10, \"MAKEONE_STRICT\" : 3,\n \"RANDOM_PICK\" : 0, \"AXIS_RATIO\": -1, \"PARENT_RATIO\": 0, \"NUM_PARENT\" : 0, \"FP_RATIO\":0, \"FP_USEALL\" : \"False\", \n \"RANDOM_SEED\" : 0, \"SAMPLENAME\" : \"\", \"BENCHMARK_NO\" : 0, \n \"NPVAF_DIR\" : \"\", \"SIMPLE_KMEANS_DIR\" : \"\", \"CLEMENT_DIR\" : \"\", \"SCICLONE_DIR\" : \"\", \"PYCLONEVI_DIR\" : \"\", \"COMBINED_OUTPUT_DIR\" : \"\",\n \"SCORING\" : False, \"MAXIMUM_NUM_PARENT\" : 1, \"VERBOSE\" : 1 }\n \n if int ( inputdf.iloc[k][\"SHARED\"] ) < 60:\n continue\n elif int ( inputdf.iloc[k][\"TOTAL\"] ) < 350:\n continue\n else:\n kwargs [\"RANDOM_PICK\"] = 300\n\n INPUT_FILETYPE, NUM_BLOCK = filetype.main (INPUT_TSV)\n kwargs[\"NUM_BLOCK_INPUT\"] = kwargs[\"NUM_BLOCK\"] = NUM_BLOCK\n SAMPLENAME = kwargs[\"SAMPLENAME\"] = re.split(r'[_ .]', INPUT_TSV.split(\"/\")[-1])[0]\n\n\n kwargs[\"NPVAF_DIR\"] = \"/data/project/Alzheimer/CLEMENT/02.npvaf/3.BioData/Moore_2D/\" + TISSUE + \"/\" + DONOR + \"-\" + SAMPLENAME\n kwargs[\"COMBINED_OUTPUT_DIR\"] = \"/data/project/Alzheimer/CLEMENT/03.combinedoutput/3.BioData/Moore_2D/\" + TISSUE + \"/\" + DONOR + \"-\" + SAMPLENAME\n kwargs[\"SIMPLE_KMEANS_DIR\"] = \"/data/project/Alzheimer/YSscript/cle/data/SIMPLE_KMEANS/3.BioData/Moore_2D/\" + TISSUE + \"/\" + DONOR + \"-\" + SAMPLENAME\n kwargs[\"CLEMENT_DIR\"] = \"/data/project/Alzheimer/YSscript/cle/data/CLEMENT/3.BioData/Moore_2D/\" + TISSUE + \"/\" + DONOR + \"-\" + SAMPLENAME\n kwargs[\"PYCLONEVI_DIR\"] = \"/data/project/Alzheimer/YSscript/cle/data/pyclone-vi/3.BioData/Moore_2D/\" + TISSUE + \"/\" + DONOR + \"-\" + SAMPLENAME\n kwargs[\"SCICLONE_DIR\"] = \"/data/project/Alzheimer/YSscript/cle/data/sciclone/3.BioData/Moore_2D/\" + TISSUE + \"/\" + DONOR + \"-\" + SAMPLENAME\n kwargs[\"QUANTUMCLONE_DIR\"] = \"/data/project/Alzheimer/YSscript/cle/data/quantumclone/3.BioData/Moore_2D/\" + TISSUE + \"/\" + DONOR + \"-\" + SAMPLENAME\n \n\n print (\"k = {}\\t{}/{}-{} (SAMPLE = 
{})\\tTOTAL : {}\\tSHARED : {}\".format (k, TISSUE, DONOR, SAMPLENAME, SAMPLE, int ( inputdf.iloc[k][\"TOTAL\"] ), int ( inputdf.iloc[k][\"SHARED\"] )) )\n\n os.system (\"mkdir -p \" + kwargs[\"NPVAF_DIR\"]) # create the output directory\n os.system (\"mkdir -p \" + kwargs[\"SIMPLE_KMEANS_DIR\"]) # create the output directory\n os.system (\"mkdir -p \" + kwargs[\"CLEMENT_DIR\"]) # create the output directory\n os.system (\"mkdir -p \" + kwargs[\"SCICLONE_DIR\"]) # create the output directory\n os.system (\"mkdir -p \" + kwargs[\"PYCLONEVI_DIR\"]) # create the output directory\n os.system (\"mkdir -p \" + kwargs[\"QUANTUMCLONE_DIR\"]) # create the output directory\n os.system (\"mkdir -p \" + kwargs[\"COMBINED_OUTPUT_DIR\"]) # create the output directory\n\n\n logPath = \"/data/project/Alzheimer/YSscript/cle/log/3.BioData/Moore_2D/\" + TISSUE + \"/\" + DONOR + \"-\" + SAMPLE\n os.system (\"rm -rf \" + logPath)\n os.system (\"mkdir -p \" + logPath)\n hold_j = TISSUE + \"_\" + DONOR + \"_\" + SAMPLE\n command = \" \".join ( [ \"qsub -pe smp 1\", \"-e\", logPath, \"-o\", logPath, \n \"-N\", TISSUE + \"_\" + DONOR + \"_\" + SAMPLE, \n SCRIPT_DIR + \"/2.CellData_pipe1_CLEMENT_bm.sh\", \n str(SCRIPT_DIR), str(INPUT_TSV), str(kwargs[\"MODE\"]), str(kwargs[\"NUM_CLONE_TRIAL_START\"]), str(kwargs[\"NUM_CLONE_TRIAL_END\"]), \n str(kwargs[\"RANDOM_PICK\"]), str(kwargs[\"AXIS_RATIO\"]), str(kwargs[\"PARENT_RATIO\"]), str(kwargs[\"NUM_PARENT\"]), str(kwargs[\"FP_RATIO\"]), str(kwargs[\"FP_USEALL\"]),\n str(kwargs[\"TRIAL_NO\"]), str(kwargs[\"DEPTH_CUTOFF\"]), str(kwargs[\"MIN_CLUSTER_SIZE\"]), str(kwargs[\"VERBOSE\"]),\n str(kwargs[\"KMEANS_CLUSTERNO\"]), str(kwargs[\"RANDOM_SEED\"]), str(kwargs[\"SAMPLENAME\"]), str(kwargs[\"BENCHMARK_NO\"]), \n str(kwargs[\"NPVAF_DIR\"]), str(kwargs[\"SIMPLE_KMEANS_DIR\"]), str(kwargs[\"CLEMENT_DIR\"]), str(kwargs[\"SCICLONE_DIR\"]), str(kwargs[\"PYCLONEVI_DIR\"]) , str(kwargs[\"QUANTUMCLONE_DIR\"]), str(kwargs[\"COMBINED_OUTPUT_DIR\"]), \n str(kwargs[\"SCORING\"]), str(kwargs[\"MAKEONE_STRICT\"]), str(kwargs[\"MAXIMUM_NUM_PARENT\"]) ] )\n \n #os.system (command)\n #print (command)\n\n\n########################################################################################################################################################################\n\ndef adrenal_gland_continuous ( SCRIPT_DIR, DIR ):\n import glob\n\n DONOR_LIST = sorted(glob.glob(DIR + \"/*\"))\n DONOR_LIST = [i.split(\"/\")[-1] for i in DONOR_LIST]\n n = 0\n\n for DONOR in DONOR_LIST: # PD28690\n AG_TISSUE_LIST = sorted(glob.glob(DIR + \"/\" + DONOR + \"/adrenal_gland_zona/*\" ))\n AG_TISSUE_LIST = [i.split(\"/\")[-1] for i in AG_TISSUE_LIST]\n \n ind_list = []\n for i, AG_TISSUE in enumerate (AG_TISSUE_LIST):\n if AG_TISSUE.split (\"_\")[1] == AG_TISSUE.split (\"_\")[3]:\n ind_list.append ( i )\n\n AG_TISSUE_LIST = [ AG_TISSUE_LIST[i].split(\"_input.txt\")[0] for i in ind_list ]\n\n\n\n for AG_TISSUE in AG_TISSUE_LIST: # fasciculata_L1_glomerulosa_L1\n if \"reticularis\" in AG_TISSUE:\n continue\n\n INPUT_TSV = DIR + \"/\" + DONOR + \"/adrenal_gland_zona/\" + AG_TISSUE + \"_input.txt\"\n\n kwargs = {\"INPUT_TSV\" : INPUT_TSV, \"MODE\" : \"Both\", \"NUM_CLONE_TRIAL_START\" : 1, \"NUM_CLONE_TRIAL_END\" : 7, \n \"TRIAL_NO\" : 10, \"DEPTH_CUTOFF\" : 10, \"KMEANS_CLUSTERNO\" : 8, \"MIN_CLUSTER_SIZE\" : 5, \"MAKEONE_STRICT\" : 3,\n \"RANDOM_PICK\" : 0, \"AXIS_RATIO\": -1, \"PARENT_RATIO\": 0, \"NUM_PARENT\" : 0, \"FP_RATIO\":0, \"FP_USEALL\" : \"False\", \n \"RANDOM_SEED\" : 0, \"SAMPLENAME\" : \"\", \"BENCHMARK_NO\" : 0, \n \"NPVAF_DIR\" : \"\", \"SIMPLE_KMEANS_DIR\" : \"\", \"CLEMENT_DIR\" : \"\", 
\"SCICLONE_DIR\" : \"\", \"PYCLONEVI_DIR\" : \"\", \"COMBINED_OUTPUT_DIR\" : \"\",\n \"SCORING\" : False, \"MAXIMUM_NUM_PARENT\" : 1, \"VERBOSE\" : 1 }\n kwargs[\"RANDOM_PICK\"] = -1\n\n\n INPUT_FILETYPE, NUM_BLOCK = filetype.main (INPUT_TSV)\n kwargs[\"NUM_BLOCK_INPUT\"] = kwargs[\"NUM_BLOCK\"] = NUM_BLOCK\n SAMPLENAME = kwargs[\"SAMPLENAME\"] = AG_TISSUE\n TISSUE = \"adrenal_gland_zona\"\n\n kwargs[\"NPVAF_DIR\"] = \"/data/project/Alzheimer/CLEMENT/02.npvaf/3.BioData/Moore_2D_AG/\" + AG_TISSUE\n kwargs[\"COMBINED_OUTPUT_DIR\"] = \"/data/project/Alzheimer/CLEMENT/03.combinedoutput/3.BioData/Moore_2D_AG/\" + AG_TISSUE\n kwargs[\"SIMPLE_KMEANS_DIR\"] = \"/data/project/Alzheimer/YSscript/cle/data/SIMPLE_KMEANS/3.BioData/Moore_2D_AG/\" + AG_TISSUE\n kwargs[\"CLEMENT_DIR\"] = \"/data/project/Alzheimer/YSscript/cle/data/CLEMENT/3.BioData/Moore_2D_AG/\" + AG_TISSUE\n kwargs[\"PYCLONEVI_DIR\"] = \"/data/project/Alzheimer/YSscript/cle/data/pyclone-vi/3.BioData/Moore_2D_AG/\" + TISSUE + \"/\" + DONOR + \"-\" + AG_TISSUE\n kwargs[\"SCICLONE_DIR\"] = \"/data/project/Alzheimer/YSscript/cle/data/sciclone/3.BioData/Moore_2D_AG/\" + AG_TISSUE\n kwargs[\"QUANTUMCLONE_DIR\"] = \"/data/project/Alzheimer/YSscript/cle/data/quantumclone/3.BioData/Moore_2D_AG/\" + AG_TISSUE\n \n\n print (\"n = {}\\t{}/{}-{}\".format ( n, TISSUE, DONOR, AG_TISSUE ) )\n\n os.system (\"mkdir -p \" + kwargs[\"NPVAF_DIR\"]) # 출력 디렉토리 만들기\n os.system (\"mkdir -p \" + kwargs[\"SIMPLE_KMEANS_DIR\"]) # 출력 디렉토리 만들기\n os.system (\"mkdir -p \" + kwargs[\"CLEMENT_DIR\"]) # 출력 디렉토리 만들기\n os.system (\"mkdir -p \" + kwargs[\"SCICLONE_DIR\"]) # 출력 디렉토리 만들기\n os.system (\"mkdir -p \" + kwargs[\"PYCLONEVI_DIR\"]) # 출력 디렉토리 만들기\n os.system (\"mkdir -p \" + kwargs[\"QUANTUMCLONE_DIR\"]) # 출력 디렉토리 만들기\n os.system (\"mkdir -p \" + kwargs[\"COMBINED_OUTPUT_DIR\"]) # 출력 디렉토리 만들기\n\n\n logPath = \"/data/project/Alzheimer/YSscript/cle/log/3.BioData/Moore_2D_AG/\" + AG_TISSUE\n os.system (\"rm -rf \" + logPath)\n os.system (\"mkdir -p \" + logPath)\n hold_j = \"Moore_2D_AG_\" + AG_TISSUE\n command = \" \".join ( [ \"qsub -pe smp 1\", \"-e\", logPath, \"-o\", logPath, \n \"-N\", \"Moore_2D_AG_\" + AG_TISSUE, \n SCRIPT_DIR + \"/2.CellData_pipe1_CLEMENT_bm.sh\", \n str(SCRIPT_DIR), str(INPUT_TSV), str(kwargs[\"MODE\"]), str(kwargs[\"NUM_CLONE_TRIAL_START\"]), str(kwargs[\"NUM_CLONE_TRIAL_END\"]), \n str(kwargs[\"RANDOM_PICK\"]), str(kwargs[\"AXIS_RATIO\"]), str(kwargs[\"PARENT_RATIO\"]), str(kwargs[\"NUM_PARENT\"]), str(kwargs[\"FP_RATIO\"]), str(kwargs[\"FP_USEALL\"]),\n str(kwargs[\"TRIAL_NO\"]), str(kwargs[\"DEPTH_CUTOFF\"]), str(kwargs[\"MIN_CLUSTER_SIZE\"]), str(kwargs[\"VERBOSE\"]),\n str(kwargs[\"KMEANS_CLUSTERNO\"]), str(kwargs[\"RANDOM_SEED\"]), str(kwargs[\"SAMPLENAME\"]), str(kwargs[\"BENCHMARK_NO\"]), \n str(kwargs[\"NPVAF_DIR\"]), str(kwargs[\"SIMPLE_KMEANS_DIR\"]), str(kwargs[\"CLEMENT_DIR\"]), str(kwargs[\"SCICLONE_DIR\"]), str(kwargs[\"PYCLONEVI_DIR\"]) , str(kwargs[\"QUANTUMCLONE_DIR\"]), str(kwargs[\"COMBINED_OUTPUT_DIR\"]), \n str(kwargs[\"SCORING\"]), str(kwargs[\"MAKEONE_STRICT\"]), str(kwargs[\"MAXIMUM_NUM_PARENT\"]) ] )\n #print (command)\n os.system (command)\n n = n + 1\n \n \n #2. 
MatrixFormation + SigProfiler\n logPath = \"/data/project/Alzheimer/YSscript/cle/log/3.BioData/Moore_1D/\" + TISSUE + \"/\" + DONOR + \"-\" + SAMPLENAME\n os.system (\"rm -rf \" + logPath)\n os.system (\"mkdir -p \" + logPath)\n\n kwargs[\"OUTPUT_DIR\"] = kwargs[\"COMBINED_OUTPUT_DIR\"] + \"/SigProfiler\"\n os.system (\"rm -rf \" + kwargs[\"OUTPUT_DIR\"])\n os.system (\"rm -rf \" + kwargs[\"OUTPUT_DIR\"] + \"MatrixGenerator\")\n os.system (\"mkdir -p \" + kwargs[\"OUTPUT_DIR\"])\n command = \" \".join([\"qsub -pe smp 1\", \"-e\", logPath, \"-o\", logPath, \n \"-N\", \"Sig_AG_\" + AG_TISSUE,\n \"-hold_jid\", str( hold_j ), \n SCRIPT_DIR + \"/3.BioData_pipe1_Signature.sh\",\n \"--SCRIPT_DIR\", str(SCRIPT_DIR), \n \"--DECISION_MEMBERSHIP_PATH\", kwargs[\"COMBINED_OUTPUT_DIR\"] + \"/result/CLEMENT_decision.membership.txt\" , \n \"--NPVAF_PATH\", kwargs[\"NPVAF_DIR\"] + \"/npvaf.txt\", \n \"--DONOR\", DONOR,\n \"--TISSUE\", TISSUE,\n \"--OUTPUT_DIR\", str( kwargs[\"OUTPUT_DIR\"] ) ])\n os.system (command)\n\n\n\n\n\nif __name__ == \"__main__\":\n SCRIPT_DIR = os.path.dirname(__file__)\n print (SCRIPT_DIR, \"\\n\")\n\n DIR = \"/data/project/Alzheimer/CLEMENT/01.INPUT_TSV/3.BioData/Moore_2D/2.all_woMosaic\" # AG : 3.woMosaic_ver2\n\n\n #Other_tissues (SCRIPT_DIR, DIR ) \n adrenal_gland_continuous (SCRIPT_DIR, DIR ) ","repo_name":"goldpm1/cle","sub_path":"3.BioData_Moore_2D.py","file_name":"3.BioData_Moore_2D.py","file_ext":"py","file_size_in_byte":13271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30387701403","text":"\"\"\"\r\nDisplays the multiplication table of a number entered by the user,\r\nand prints an asterisk right after each product that is a multiple of multiple_de\r\n\"\"\"\r\nnombre=int(input(\"Enter the multiplication factor: \"))\r\nmultiple_de=int(input(\"Enter the 'multiple of' factor: \"))\r\ni=1\r\nwhile i<=20:\r\n result=nombre*i\r\n print(result, end=' ')\r\n if result%multiple_de==0:\r\n print('*', end=' ')\r\n i+=1\r\n","repo_name":"damien-d-bzh/SillagePython3","sub_path":"Swinnen_4.7.py","file_name":"Swinnen_4.7.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71116109521","text":"from xml.sax import parseString\nfrom bs4 import BeautifulSoup\nimport re\nimport os\nimport csv\nimport unittest\n\n\ndef get_listings_from_search_results(html_file):\n \"\"\"\n Write a function that creates a BeautifulSoup object on html_file. Parse\n through the object and return a list of tuples containing:\n a string of the title of the listing,\n an int of the cost to rent for one night,\n and a string of the listing id number\n in the format given below. Make sure to turn costs into ints.\n\n The listing id is found in the URL of a listing. 
For example, for\n https://www.airbnb.com/rooms/1944564\n the listing id is 1944564.\n\n [\n ('Title of Listing 1', 'Cost 1', 'Listing ID 1'), # format\n ('Loft in Mission District', 210, '1944564'), # example\n ]\n \"\"\"\n base_path = os.path.abspath(os.path.dirname(__file__))\n full_path = os.path.join(base_path, html_file)\n with open(full_path, 'r') as f:\n content = f.read()\n soup = BeautifulSoup(content, 'html.parser')\n\n titles = soup.find_all('div', class_=\"t1jojoys dir dir-ltr\")\n # gives a list of the title tags we're searching for;\n # the class name is the same for all 20 listings\n costs = soup.find_all('span', class_=\"_tyxjp1\")\n # list of the cost tags\n\n\n position = 0\n L = []\n # L is a list of tuples\n for item in titles: \n title = titles[position].text\n cost = int(costs[position].text.lstrip(\"$\"))\n # strip the leading '$' so the cost can be cast to an int\n id = titles[position][\"id\"][len(\"title_\"):]\n # slice off the 'title_' prefix; lstrip('title_') would strip a character set, not a prefix\n t = (title, cost, id)\n L.append(t)\n position += 1 \n # print(t)\n return L\n \n \n\n\ndef get_listing_information(listing_id):\n \"\"\"\n Write a function to return relevant information in a tuple from an Airbnb listing id.\n NOTE: Use the static files in the html_files folder, do NOT send requests to the actual website.\n Information we're interested in:\n string - Policy number: either a string of the policy number, \"Pending\", or \"Exempt\"\n This field can be found in the section about the host.\n Note that this is a text field the lister enters, this could be a policy number, or the word\n \"pending\" or \"exempt\" or many others. Look at the raw data, decide how to categorize them into\n the three categories.\n string - Place type: either \"Entire Room\", \"Private Room\", or \"Shared Room\"\n Note that this data field is not explicitly given from this page. Use the\n following to categorize the data into these three fields.\n \"Private Room\": the listing subtitle has the word \"private\" in it\n \"Shared Room\": the listing subtitle has the word \"shared\" in it\n \"Entire Room\": the listing subtitle has neither the word \"private\" nor \"shared\" in it\n int - Number of bedrooms\n
\n (\n policy number,\n place type,\n number of bedrooms\n )\n \"\"\"\n source_dir = os.path.dirname(__file__)\n full_path = os.path.join(source_dir, f\"html_files/listing_{listing_id}.html\")\n with open(full_path, 'r') as f:\n r = f.read()\n soup = BeautifulSoup(r, \"html.parser\")\n\n policy_number = soup.find_all(\"li\", class_ = \"f19phm7j dir dir-ltr\")[0].text\n if \"pending\" in policy_number.lower():\n # lowercase the search so every capitalization is caught\n policy_number = \"Pending\"\n elif \"exempt\" in policy_number.lower():\n policy_number = \"Exempt\"\n else: \n # keep only the text after the label; lstrip('policy_number: ') would strip a character set, not a prefix\n policy_number = policy_number.split(\":\")[-1].strip()\n \n type_of_stay = soup.find_all(\"h2\", class_ = \"_14i3z6h\")[0].text\n # the first h2 holds the listing subtitle\n if \"private\" in type_of_stay.lower():\n type_of_stay = \"Private Room\" \n elif \"shared\" in type_of_stay.lower():\n type_of_stay = \"Shared Room\"\n else:\n type_of_stay = \"Entire Room\"\n \n number_rooms = soup.find_all(\"li\", class_ = \"l7n4lsf dir dir-ltr\")[1]\n # get the second item in the list \n number_bedrooms = number_rooms.find_all(\"span\")[2].text\n # the third span gives the number of bedrooms\n if \"studio\" in number_bedrooms.lower(): \n number_bedrooms = 1\n else: \n number_bedrooms = int(number_bedrooms.split(\" \")[0])\n # split the string into words and cast the leading count to an int\n return (policy_number, type_of_stay, number_bedrooms)\n\n\ndef get_detailed_listing_database(html_file):\n \"\"\"\n Write a function that calls the above two functions in order to return\n the complete listing information using the functions you’ve created.\n This function takes in a variable representing the location of the search results html file.\n The return value should be in this format:\n\n\n [\n (Listing Title 1,Cost 1,Listing ID 1,Policy Number 1,Place Type 1,Number of Bedrooms 1),\n (Listing Title 2,Cost 2,Listing ID 2,Policy Number 2,Place Type 2,Number of Bedrooms 2),\n ...\n ]\n \"\"\"\n get_listing = get_listings_from_search_results(html_file)\n listings_list = []\n for listing in get_listing: \n listing_info = get_listing_information(listing[2])\n # listing[2] is the listing id\n listings_list.append(listing + listing_info)\n return listings_list\n \n\n\n\n\ndef write_csv(data, filename):\n \"\"\"\n Write a function that takes in a list of tuples (called data, i.e. the\n one that is returned by get_detailed_listing_database()), sorts the tuples in\n ascending order by cost, writes the data to a csv file, and saves it\n to the passed filename. The first row of the csv should contain\n \"Listing Title\", \"Cost\", \"Listing ID\", \"Policy Number\", \"Place Type\", \"Number of Bedrooms\",\n respectively as column headers. 
\n\ndef get_detailed_listing_database(html_file):\n \"\"\"\n Write a function that calls the above two functions in order to return\n the complete listing information using the functions you’ve created.\n This function takes in a variable representing the location of the search results html file.\n The return value should be in this format:\n\n\n [\n (Listing Title 1,Cost 1,Listing ID 1,Policy Number 1,Place Type 1,Number of Bedrooms 1),\n (Listing Title 2,Cost 2,Listing ID 2,Policy Number 2,Place Type 2,Number of Bedrooms 2),\n ...\n ]\n \"\"\"\n search_listings = get_listings_from_search_results(html_file)\n listings_list = []\n for listing in search_listings:\n listing_info = get_listing_information(listing[2])\n # index 2 of each search tuple holds the listing id\n listings_list.append(listing + listing_info)\n return listings_list\n\n\ndef write_csv(data, filename):\n \"\"\"\n Write a function that takes in a list of tuples (called data, i.e. the\n one that is returned by get_detailed_listing_database()), sorts the tuples in\n ascending order by cost, writes the data to a csv file, and saves it\n to the passed filename. The first row of the csv should contain\n \"Listing Title\", \"Cost\", \"Listing ID\", \"Policy Number\", \"Place Type\", \"Number of Bedrooms\",\n respectively as column headers. For each tuple in data, write a new\n row to the csv, placing each element of the tuple in the correct column.\n\n When you are done your CSV file should look like this:\n\n Listing Title,Cost,Listing ID,Policy Number,Place Type,Number of Bedrooms\n title1,cost1,id1,policy_number1,place_type1,num_bedrooms1\n title2,cost2,id2,policy_number2,place_type2,num_bedrooms2\n title3,cost3,id3,policy_number3,place_type3,num_bedrooms3\n ...\n\n In order of least cost to most cost.\n\n This function should not return anything.\n \"\"\"\n sorted_list = sorted(data, key=lambda c: c[1])\n # index 1 is the cost; sort ascending\n with open(filename, 'w') as out:\n csv_out = csv.writer(out)\n csv_out.writerow(['Listing Title', 'Cost', 'Listing ID', 'Policy Number', 'Place Type', 'Number of Bedrooms'])\n for row in sorted_list:\n csv_out.writerow(row)\n # open the csv, write the header row, then append the sorted rows\n\n\ndef check_policy_numbers(data):\n \"\"\"\n Write a function that takes in a list of tuples called data, (i.e. the one that is returned by\n get_detailed_listing_database()), and parses through the policy number of each, validating the\n policy number matches the policy number format. Ignore any pending or exempt listings.\n Return the listing numbers with respective policy numbers that do not match the correct format.\n Policy numbers are a reference to the business license San Francisco requires to operate a\n short-term rental. These come in two forms, where # is a number from [0-9]:\n 20##-00####STR\n STR-000####\n\n Return value should look like this:\n [\n listing id 1,\n listing id 2,\n ...\n ]\n\n \"\"\"\n wrong_listing_id = []\n for listing in data:\n if listing[3] != \"Pending\" and listing[3] != \"Exempt\":\n if (re.match(r\"STR-000[0-9]{4}\", listing[3]) is None) and (re.match(r\"20[0-9]{2}-00[0-9]{4}STR\", listing[3]) is None):\n wrong_listing_id.append(listing[2])\n # collect the ids whose policy number matches neither format\n return wrong_listing_id\n\n\ndef extra_credit(listing_id):\n \"\"\"\n There are a few exceptions to the requirement of listers obtaining licenses\n before listing their property for short term leases. 
One specific exception\n is if the lister rents the room for less than 90 days of a year.\n\n Write a function that takes in a listing id, scrapes the 'reviews' page\n of the listing id for the months and years of each review (you can find two examples\n in the html_files folder), and counts the number of reviews the apartment had each year.\n If for any year, the number of reviews is greater than 90 (assuming very generously that\n every reviewer only stayed for one day), return False, indicating the lister has\n gone over their 90 day limit, else return True, indicating the lister has\n never gone over their limit.\n \"\"\"\n \n filename = (\"html_files/listing_\" + listing_id + \"_reviews.html\")\n with open(filename, 'r') as f:\n content = f.read()\n soup = BeautifulSoup(content, 'html.parser')\n\n reviews = soup.find_all('li', class_='_1f1oir5')\n\n count_dic = {}\n for review in reviews: \n if review.text[-4:] in count_dic: \n count_dic[review.text[-4:]] += 1\n else: \n count_dic[review.text[-4:]] = 1\n\n for year in count_dic.items():\n if year[1] > 90:\n return False \n return True\n\n\nclass TestCases(unittest.TestCase):\n\n def test_get_listings_from_search_results(self):\n # call get_listings_from_search_results(\"html_files/mission_district_search_results.html\")\n # and save to a local variable\n listings = get_listings_from_search_results(\"html_files/mission_district_search_results.html\")\n # check that the number of listings extracted is correct (20 listings)\n self.assertEqual(len(listings), 20)\n # check that the variable you saved after calling the function is a list\n self.assertEqual(type(listings), list)\n # check that each item in the list is a tuple\n\n # check that the first title, cost, and listing id tuple is correct (open the search results html and find it)\n\n # check that the last title is correct (open the search results html and find it)\n \n\n def test_get_listing_information(self):\n html_list = [\"1623609\",\n \"1944564\",\n \"1550913\",\n \"4616596\",\n \"6600081\"]\n # call get_listing_information for i in html_list:\n listing_informations = [get_listing_information(id) for id in html_list]\n # check that the number of listing information is correct (5)\n self.assertEqual(len(listing_informations), 5)\n for listing_information in listing_informations:\n # check that each item in the list is a tuple\n self.assertEqual(type(listing_information), tuple)\n # check that each tuple has 3 elements\n self.assertEqual(len(listing_information), 3)\n # check that the first two elements in the tuple are string\n self.assertEqual(type(listing_information[0]), str)\n self.assertEqual(type(listing_information[1]), str)\n # check that the third element in the tuple is an int\n self.assertEqual(type(listing_information[2]), int)\n # check that the first listing in the html_list has policy number 'STR-0001541'\n\n # check that the last listing in the html_list is a \"Private Room\"\n\n # check that the third listing has one bedroom\n\n \n\n def test_get_detailed_listing_database(self):\n # call get_detailed_listing_database on \"html_files/mission_district_search_results.html\"\n # and save it to a variable\n detailed_database = get_detailed_listing_database(\"html_files/mission_district_search_results.html\")\n # check that we have the right number of listings (20)\n self.assertEqual(len(detailed_database), 20)\n for item in detailed_database:\n # assert each item in the list of listings is a tuple\n self.assertEqual(type(item), tuple)\n # check that each tuple has a 
length of 6\n\n # check that the first tuple is made up of the following:\n # 'Loft in Mission District', 210, '1944564', '2022-004088STR', 'Entire Room', 1\n\n # check that the last tuple is made up of the following:\n # 'Guest suite in Mission District', 238, '32871760', 'STR-0004707', 'Entire Room', 1\n\n \n\n def test_write_csv(self):\n # call get_detailed_listing_database on \"html_files/mission_district_search_results.html\"\n # and save the result to a variable\n detailed_database = get_detailed_listing_database(\"html_files/mission_district_search_results.html\")\n # call write csv on the variable you saved\n write_csv(detailed_database, \"test.csv\")\n # read in the csv that you wrote\n csv_lines = []\n with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'test.csv'), 'r') as f:\n csv_reader = csv.reader(f)\n for i in csv_reader:\n csv_lines.append(i)\n # check that there are 21 lines in the csv\n self.assertEqual(len(csv_lines), 21)\n # check that the header row is correct\n\n # check that the next row is Private room in Mission District,82,51027324,Pending,Private Room,1\n\n # check that the last row is Apartment in Mission District,399,28668414,Pending,Entire Room,2\n\n \n\n def test_check_policy_numbers(self):\n # call get_detailed_listing_database on \"html_files/mission_district_search_results.html\"\n # and save the result to a variable\n detailed_database = get_detailed_listing_database(\"html_files/mission_district_search_results.html\")\n # call check_policy_numbers on the variable created above and save the result as a variable\n invalid_listings = check_policy_numbers(detailed_database)\n # check that the return value is a list\n self.assertEqual(type(invalid_listings), list)\n # check that there is exactly one element in the string\n\n # check that the element in the list is a string\n\n # check that the first element in the list is '16204265'\n pass\n\n\nif __name__ == '__main__':\n database = get_detailed_listing_database(\"html_files/mission_district_search_results.html\")\n write_csv(database, \"airbnb_dataset.csv\")\n check_policy_numbers(database)\n unittest.main(verbosity=2)\n","repo_name":"SI206-UMich/proj2-fall2022-tanaydixon","sub_path":"f22_Project2.py","file_name":"f22_Project2.py","file_ext":"py","file_size_in_byte":14732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35934228545","text":"#Python Program to count unique values inside a list\r\nlst = [1,'a',24,'sachin',1,'sachin',2,3]\r\ncount = 0 \r\nfor i in (lst):\r\n if lst.count(i)==1:\r\n count += 1\r\nprint(count)\r\n\r\noutput = len([i for i in lst if lst.count(i) == 1])\r\nprint(output) ","repo_name":"choubeyji-git/Python-Coding-Interview-questions","sub_path":"python_practice/abhishek/strings/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29266501166","text":"import asyncio\nfrom asyncio import AbstractEventLoop\nfrom typing import AsyncGenerator\n\nimport pytest\nimport pytest_asyncio\nfrom faker import Faker\nfrom httpx import AsyncClient\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom src.app import app\nfrom src.core.db.db import async_session\nfrom src.rest.managers import CategoryManager\nfrom src.rest.managers.product_manager import ProductManager\nfrom src.rest.managers.user_manager import UserManager\nfrom src.models import User, Product, Category\nfrom 
src.services.auth_service import AuthService\n\nfaker = Faker()\n\n\n@pytest.fixture(scope='session')\ndef event_loop() -> AbstractEventLoop:\n    \"\"\"\n    This should be here to escape the following exception:\n    ScopeMismatch: You tried to access the function scoped fixture event_loop with a session scoped request object,\n    involved factories:\n    def session() -> sqlalchemy.ext.asyncio.session.AsyncSession\n    \"\"\"\n    return asyncio.get_event_loop()\n\n\n@pytest.fixture(scope='session')\nasync def session() -> AsyncSession:\n    \"\"\"A fixture that gives a session.\"\"\"\n    async with async_session() as sess:\n        yield sess\n\n\n@pytest_asyncio.fixture\nasync def client():\n    async with AsyncClient(app=app, base_url=\"https:///api/v1\") as client:\n        yield client\n\n\n@pytest_asyncio.fixture\nasync def patched_client(test_user):\n    \"\"\"The same as the usual client, but with a user and bearer token.\"\"\"\n    async with AsyncClient(app=app, base_url=\"https:///api/v1\") as client:\n        token = AuthService.create_token(user_id=test_user.id, token_type=\"access\")\n        client.headers[\"Authorization\"] = f\"Bearer {token}\"\n        client.user = test_user\n        yield client\n\n\n@pytest.fixture\ndef fake_user_data() -> dict:\n    \"\"\"Generate fake data for User.\"\"\"\n    return {\n        \"email\": faker.email(),\n        \"password\": faker.password(),\n        \"full_name\": faker.first_name() + \" \" + faker.last_name(),\n        \"phone_number\": faker.phone_number(),\n        \"company_name\": faker.company(),\n        \"unp\": faker.pystr(),\n        \"legal_address\": faker.address(),\n        \"IBAN\": faker.iban(),\n        \"BIC\": faker.pystr(),\n        \"serving_bank\": faker.company(),\n        \"is_active\": faker.boolean(),\n        \"is_admin\": faker.boolean(),\n    }\n\n\n@pytest.fixture\ndef fake_product_data() -> dict:\n    \"\"\"Generate fake data for Product.\"\"\"\n    return {\n        \"vendor_code\": faker.zipcode(),\n        \"name\": faker.name(),\n        \"base_unit\": faker.pystr(),\n        \"images\": faker.file_path(),\n        \"tax\": faker.pyint(),\n        \"description\": faker.text(),\n        \"price\": faker.pyfloat(),\n    }\n\n\n@pytest.fixture\ndef fake_category_data() -> dict:\n    \"\"\"Generate fake data for Category.\"\"\"\n    return {\n        \"name\": faker.name()\n    }\n\n\n@pytest.fixture\nasync def test_user(\n        session: AsyncSession,\n        fake_user_data: dict,\n) -> AsyncGenerator[User, None]:\n    \"\"\"Generate a test user and delete it once the test has finished.\"\"\"\n    user = User(**fake_user_data)\n\n    session.add(user)\n    await session.commit()\n\n    yield user\n\n    await UserManager.delete(id=user.id, session=session)\n\n\n@pytest.fixture\nasync def test_product(\n        session: AsyncSession,\n        fake_product_data: dict,\n) -> AsyncGenerator[Product, None]:\n    \"\"\"Generate a test product and delete it once the test has finished.\"\"\"\n    product = Product(**fake_product_data)\n\n    session.add(product)\n    await session.commit()\n\n    yield product\n\n    await ProductManager.delete(id=product.id, session=session)\n\n\n@pytest.fixture\nasync def test_category(\n        session: AsyncSession,\n        fake_category_data: dict,\n) -> AsyncGenerator[Category, None]:\n    \"\"\"Generate a test category and delete it once the test has finished.\"\"\"\n    category = Category(**fake_category_data)\n\n    session.add(category)\n    await session.commit()\n\n    yield category\n\n    await CategoryManager.delete(id=category.id, 
session=session)\n","repo_name":"dmitryzhurkovsky/cabel_torg","sub_path":"backend/src/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} {"seq_id":"8097102338","text":"import string\nimport random\nfrom django.conf import settings\nfrom r3s_cms.apps.system.models import SystemExceptionLog\nfrom collections import OrderedDict\nfrom r3s_cms.apps.system.models import ProxyUser\n\ndef getUser(request):\n\tuser = request.user or None\n\tif user:\n\t\tuser = ProxyUser.objects.filter(username = user.get_username()).first()\n\tif user:\n\t\tif user.is_active is False:\n\t\t\tuser = None\n\treturn user\n\ndef log_exception(exception = None , traceback = None , note = None):\n\terror_message = None\n\tif exception:\n\t\ttry:\n\t\t\terror_message = exception.message\n\t\texcept AttributeError:\n\t\t\terror_message = str(exception)\n\t\tif not settings.DEBUG:\n\t\t\t# the original passed an undefined name `cls` as the first argument\n\t\t\tlog = SystemExceptionLog.log_error(message = error_message , traceback = traceback , note = note)\n\t\t\tif log:\n\t\t\t\terror_message = log.message\n\treturn error_message\n\ndef setForm(fields = []):\n\tfor field in fields:\n\t\tfields[field].widget.attrs['class'] = 'form-control'\n\treturn fields\n\ndef randomStringGenerator(size=6, chars=string.ascii_uppercase + string.digits):\n\treturn ''.join(random.choice(chars) for _ in range(size))\n\ndef lambdaFunction(**kwargs):\n\tpass\n\ndef required_fields(fields , required = []):\n\tfor field in fields:\n\t\tif field in required:\n\t\t\tfields[field].required = True\n\t\telse:\n\t\t\tfields[field].required = False\n\treturn fields\n\ndef reorder_fields(fields, order):\n    \"\"\"Reorder form fields by order, removing items not in order.\n\n    >>> reorder_fields(\n    ...     OrderedDict([('a', 1), ('b', 2), ('c', 3)]),\n    ...     
['b', 'c', 'a'])\n    OrderedDict([('b', 2), ('c', 3), ('a', 1)])\n    \"\"\"\n    # iterate over a copy of the keys: deleting from a dict while iterating it\n    # raises RuntimeError in Python 3\n    for key in list(fields):\n        if key not in order:\n            del fields[key]\n\n    return OrderedDict(sorted(fields.items(), key=lambda k: order.index(k[0])))","repo_name":"R3SWebDevelopment/polls","sub_path":"r3s_cms/lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"15304610414","text":"import cv2\r\nimport glob\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport config\r\n\r\nfrom moviepy.editor import VideoFileClip\r\nfrom scipy import stats\r\nfrom lane_functions import *\r\n\r\n# Initialize global variables\r\nconfig.falseCount = 0  # Checks for false positive histogram transform calls\r\nconfig.curveDist = 0  # Distance between fitted left and right curves\r\nconfig.videoType = 'project'  # Options: project, challenge, harder_challenge\r\n\r\n# Set image directories for calibration of camera\r\nimageDir = 'camera_cal/calibration*.jpg'\r\ntestCalibImage = 'test_images/straight_lines1.jpg'\r\n# Set image directory for test images\r\ntestLaneImages = 'test_images/test*.jpg'\r\n\r\n# Calibrate Camera\r\nundist, mtx, dist = calibrate_camera(imageDir, testCalibImage)\r\n\r\n# Setup clip initialization\r\nclip = VideoFileClip(\"test_videos/project_video.mp4\")\r\n\r\n# Run initial frame configuration\r\nfirstFrame = clip.get_frame(0)\r\n\r\n# Apply undistortion, warp and gradient operations\r\ngradImageFirst, vertices = grad_image(firstFrame)\r\nwarpImageFirst, warp_mtx = warp_perspective(gradImageFirst, mtx, dist, vertices, 'forward')\r\n# Search for lane pixels and fit first polyline\r\nresultFirst = search_around_poly(warpImageFirst, None, None, True)\r\n\r\n\r\n# Define process function in main py file to avoid passing above\r\n# calibration steps to lane_functions.py\r\ndef process_image(image):\r\n    # Apply undistortion, warp and gradient operations\r\n    gradImage, vertices = grad_image(image)\r\n    warpImage, warp_mtx = warp_perspective(gradImage, mtx, dist, vertices, 'forward')\r\n\r\n    # Use previous line to find lane pixels and fit polyline\r\n    laneCurvesWarp = search_around_poly(warpImage, config.left_fit_global, config.right_fit_global, False)\r\n\r\n    # Reverse warped image\r\n    laneCurves, _ = warp_perspective(laneCurvesWarp, mtx, dist, vertices, 'reverse')\r\n\r\n    # Overlay onto original image\r\n    overlayImage = cv2.addWeighted(image, 1, laneCurves, 0.3, 0)\r\n\r\n    concatImage = cv2.hconcat([overlayImage, laneCurvesWarp])\r\n\r\n    # Measure Curvature\r\n    left_R_curve, right_R_curve, offset = measure_curvature_real()\r\n    if (left_R_curve > 5000) and (right_R_curve > 5000):\r\n        textMeasure = 'Vehicle is moving along a straight line'\r\n    else:\r\n        textMeasure = 'Left Lane Radius: ' + str(round(left_R_curve, 2)) + \\\r\n                      '(m), Right Lane Radius: ' + str(round(right_R_curve, 2)) + '(m)'\r\n    addCurveVal = cv2.putText(concatImage, textMeasure, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), thickness=2)\r\n    result = cv2.putText(addCurveVal, offset, (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), thickness=2)\r\n\r\n    return result\r\n\r\n\r\n# Run moviepy on remaining frames where first frame is redundant\r\nwhite_output = 'test_videos_output/project_final.mp4'\r\nwhite_clip = clip.fl_image(process_image)  # NOTE: this function expects color images!!\r\nwhite_clip.write_videofile(white_output, 
audio=False)\r\n","repo_name":"SeymurD/Udacity-Self-Driving-Car-Engineer-ND","sub_path":"Projects/Project - Advanced Lane Finding/src/advanced_lane_finding.py","file_name":"advanced_lane_finding.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34125496608","text":"# Import \nimport os\nimport numpy as np\nimport glob\nimport PIL.Image as Image\nfrom tqdm.notebook import tqdm\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.datasets as datasets\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport torchvision.models as models\n\nimport pdb\n\n# settings\ntorch.manual_seed(1234)\nwith_augs = False\nlr = 0.0001\nwith_norm = False\nnum_epochs = 100\n\n\n# ISIC dataloader\nclass ISIC_classifier(torch.utils.data.Dataset):\n def __init__(self, transform, data_path):\n 'Initialization'\n self.transform = transform\n image_paths_background = sorted(glob.glob(data_path + 'background/*.jpg'))\n \n image_paths_style0 = sorted(glob.glob(data_path + 'train_style0/Images/*.jpg'))\n image_paths_style1 = sorted(glob.glob(data_path + 'train_style1/Images/*.jpg'))\n image_paths_style2 = sorted(glob.glob(data_path + 'train_style2/Images/*.jpg'))\n\n image_paths_style2_names = []\n for path in image_paths_style2:\n image_paths_style2_names.append(path.split(\"/\")[-1])\n\n # Used for task 4 weak annotations\n image_paths_20 = [path for path in image_paths_style0 if path.split(\"/\")[-1] in image_paths_style2_names]\n\n image_paths_20_names = []\n for path in image_paths_20:\n image_paths_20_names.append(path.split(\"/\")[-1])\n\n image_paths_0n20 = [path for path in image_paths_style0 if path.split(\"/\")[-1] not in image_paths_20_names]\n image_paths_1n20 = [path for path in image_paths_style1 if path.split(\"/\")[-1] not in image_paths_20_names]\n image_paths_2n20 = [path for path in image_paths_style2 if path.split(\"/\")[-1] not in image_paths_20_names]\n \n\n image_paths = []\n image_paths.extend(image_paths_2n20)\n image_paths.extend(image_paths_1n20)\n image_paths.extend(image_paths_0n20)\n\n self.labels = [0]*len(image_paths_background)\n self.labels.extend([1]*len(image_paths))\n\n # Make final list of image paths (should match with labels)\n self.image_paths = image_paths_background\n self.image_paths.extend(image_paths)\n \n def __len__(self):\n 'Returns the total number of samples'\n return len(self.image_paths)\n\n def __getitem__(self, idx):\n 'Generates one sample of data'\n image_path = self.image_paths[idx]\n label = self.labels[idx]\n \n image = Image.open(image_path)\n image = transforms.functional.crop(image,36,114,image.size[1]-36-37,image.size[0]-114-102)\n X = self.transform(image)\n return X, label\n\n\n# Get CUDA\nif torch.cuda.is_available():\n print(\"The code will run on GPU.\")\nelse:\n print(\"The code will run on CPU. 
Go to Edit->Notebook Settings and choose GPU as the hardware accelerator\")\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\n# Load training data\ndataset = torch.load('datasets/classifier_dataset.pt')\ntrain_len = int(dataset.__len__()*0.7)\ntrain_dataset, val_dataset = torch.utils.data.random_split(dataset,[train_len,dataset.__len__()-train_len], generator=torch.Generator().manual_seed(42))\ntrain_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=0)\nval_loader = DataLoader(val_dataset, batch_size=32, shuffle=True, num_workers=0)\n\n\n# Get model\nmodel = models.vgg16(pretrained=True)\nfor param in model.parameters():\n    param.requires_grad = False\n\nnum_ftrs = model.classifier[6].in_features\n\n# Here the size of each output sample is set to 2.\n# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).\nmodel.classifier[6] = nn.Linear(num_ftrs, 2)\n# Set optimizer and model\nmodel = model.to(device)\noptimizer = torch.optim.AdamW(model.parameters(), lr=lr)\n\ndef loss_fun(output, target):\n    crit = nn.CrossEntropyLoss()\n    return crit(output, target)\n\nout_dict = {'train_acc': [],\n            'test_acc': [],\n            'train_loss': [],\n            'test_loss': []}\n\nfor epoch in tqdm(range(num_epochs), unit='epoch'):\n    # For each epoch\n    train_correct = 0\n    train_loss = []\n    model.train()\n    for minibatch_no, (data, target) in tqdm(enumerate(train_loader), total=len(train_loader)):\n        data, target = data.to(device), target.to(device)\n        # Zero the gradients computed for each weight\n        optimizer.zero_grad()\n        # Forward pass your image through the network\n        output = model(data)\n        # Compute the loss\n        loss = loss_fun(output, target)\n\n        # Backward pass through the network\n        loss.backward()\n        # Update the weights\n        optimizer.step()\n\n        train_loss.append(loss.cpu().item())\n        # Compute how many were correctly classified\n        predicted = output.argmax(dim=1)\n\n        train_correct += (target==predicted).sum().cpu().item()\n    # Compute the test accuracy\n    model.eval()\n    test_loss = []\n    test_correct = 0\n    for data, target in val_loader:\n        data, target = data.to(device), target.to(device)\n        with torch.no_grad():\n            output = model(data)\n        test_loss.append(loss_fun(output, target).cpu().item())\n        predicted = output.argmax(dim=1)\n        test_correct += (target==predicted).sum().cpu().item()\n    train_acc = train_correct/len(train_dataset)\n    test_acc = test_correct/len(val_dataset)\n\n    out_dict['train_acc'].append(train_correct/len(train_dataset))\n    out_dict['test_acc'].append(test_correct/len(val_dataset))\n    out_dict['train_loss'].append(np.mean(train_loss))\n    out_dict['test_loss'].append(np.mean(test_loss))\n    print(\"Accuracy train: {train:.1f}%\\t test: {test:.1f}%\".format(test=100*test_acc, train=100*train_acc))\n\n# Save model\nname = \"ClassifierModel\"\ntorch.save(model.state_dict(), os.getcwd()+\"/models/\"+name+\".pt\")\n\n# Make plot of training curves\nfig, ax = plt.subplots(1,2,figsize=(15,7))\nax[0].plot(out_dict['train_acc'])\nax[0].plot(out_dict['test_acc'])\nax[0].legend(('Train accuracy','Test accuracy'))\nax[0].set_xlabel('Epoch number')\nax[0].set_ylabel('Accuracy')\nax[0].set_title('Training and test accuracy')\nax[1].plot(out_dict['train_loss'])\nax[1].plot(out_dict['test_loss'])\nax[1].legend(('Train loss','Test loss'))\nax[1].set_xlabel('Epoch number')\nax[1].set_ylabel('Loss')\nax[1].set_title('Training and test loss')\nfig.savefig(os.getcwd()+'/figures/training_curve_'+name+'.png')","repo_name":"NWeis97/DLICV_project3","sub_path":"src/classifier_model.py","file_name":"classifier_model.py","file_ext":"py","file_size_in_byte":6670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
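Editor's note on the fine-tuning pattern in the classifier record above: the VGG16 backbone is frozen with requires_grad = False, so passing every parameter to AdamW works (frozen tensors simply receive no gradients), but an equivalent and slightly leaner setup hands the optimizer only the trainable head. A sketch under those assumptions, not the record's exact script:

import torch
import torch.nn as nn
import torchvision.models as models

model = models.vgg16(pretrained=True)
for param in model.parameters():
    param.requires_grad = False  # freeze the convolutional backbone
# read in_features (4096) from the old head, then swap in a fresh 2-class layer
model.classifier[6] = nn.Linear(model.classifier[6].in_features, 2)
trainable = [p for p in model.parameters() if p.requires_grad]  # just the new head
optimizer = torch.optim.AdamW(trainable, lr=1e-4)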
{"seq_id":"35688160818","text":"n = int(input())\npoints = []\nmin_max = 1000000\nfor i in range(n):\n    x, y = map(int, input().split())\n    for xx, yy in points:\n        max_dist = max(abs(xx - x), abs(yy - y))\n        if max_dist < min_max:\n            min_max = max_dist\n    points.append((x, y))\nprint(min_max * min_max)","repo_name":"nathanlo99/dmoj_archive","sub_path":"done/hkccc15j2.py","file_name":"hkccc15j2.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"5820146544","text":"class Heap:\n    def __init__(self, comparator):\n        self._adt = []\n        self._comparator = comparator\n\n    def insert(self, item):\n        self._adt.append(item)\n        self.heapify_up()\n\n    @staticmethod\n    def get_parent(el_idx):\n        if el_idx % 2 == 0:\n            parent = (el_idx - 2) // 2\n        else:\n            parent = (el_idx - 1) // 2\n\n        return parent\n\n    @staticmethod\n    def get_left_child(parent):\n        return 2 * parent + 1\n\n    @staticmethod\n    def get_right_child(parent):\n        return 2 * parent + 2\n\n    def heapify_up(self):\n        # Now heapify the arr by starting with the last element which just got inserted\n        last_el = len(self._adt) - 1\n        parent = Heap.get_parent(last_el)\n\n        while parent >= 0 and self._comparator(self._adt[last_el], self._adt[parent]):\n            self._adt[parent], self._adt[last_el] = self._adt[last_el], self._adt[parent]\n            last_el = parent\n            parent = Heap.get_parent(last_el)\n\n    def empty(self):\n        if len(self._adt) > 0:\n            return False\n\n        return True\n\n    def remove_max(self):\n        max_el = self._adt[0]\n\n        # Now pick up the last element and put it on the top\n        self._adt[0] = self._adt[len(self._adt) - 1]\n        self._adt = self._adt[:len(self._adt) - 1]\n\n        top_el_idx = 0\n        left_child = Heap.get_left_child(0)\n        right_child = Heap.get_right_child(0)\n\n        while left_child < len(self._adt):\n            # pick the dominant child; the right child may not exist\n            if right_child >= len(self._adt) or self._comparator(self._adt[left_child], self._adt[right_child]):\n                gt = left_child\n            else:\n                gt = right_child\n\n            if self._comparator(self._adt[gt], self._adt[top_el_idx]):\n                self._adt[gt], self._adt[top_el_idx] = self._adt[top_el_idx], self._adt[gt]\n                top_el_idx = gt\n                left_child = Heap.get_left_child(top_el_idx)\n                right_child = Heap.get_right_child(top_el_idx)\n            else:\n                # neither child dominates (possible with duplicate values); the old\n                # code left `gt` unbound here and raised NameError instead of stopping\n                break\n\n        return max_el\n\n    def get_max(self):\n        return self._adt[0]\n\n    def __str__(self):\n        return str(self._adt)\n\ndef main():\n    arr = [15, 10, 9, 8, 9, 6, 3, 4, 2]\n\n    gt = lambda x, y: x > y\n    lt = lambda x, y: x < y\n\n    heap = Heap(gt)\n    for item in arr:\n        heap.insert(item)\n\n    heap.insert(12)\n\n    while not heap.empty():\n        print(heap.remove_max())\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"ssarangi/algorithms","sub_path":"epi/heaps/heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
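Editor's note: the stdlib reaches the same max-heap behaviour by negating values, which makes for a quick cross-check of the Heap record above -- including duplicate values, the case where the original remove_max could leave gt unbound:

import heapq

values = [15, 10, 9, 8, 9, 6, 3, 4, 2, 12]  # the record's arr plus the inserted 12
heap = [-v for v in values]                  # heapq is a min-heap; negate for max-heap order
heapq.heapify(heap)
drained = [-heapq.heappop(heap) for _ in values]
assert drained == sorted(values, reverse=True)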
{"seq_id":"21481885980","text":"import urllib.request\nimport urllib.parse\nimport string\n\n\n\ndef load_baidu():\n    url = \"http://www.baidu.com\"\n\n    response = urllib.request.urlopen(url)\n    print(\"Response header info:\")\n    print(response.headers)\n\n    print(\"-\"*100)\n    # create a request object\n    req = urllib.request.Request(url)\n    # fetch the page through it\n    res = urllib.request.urlopen(req)\n    print(\"Request header info:\")\n    print(req.headers)  # at this point the request headers are empty: {}\n    print(\"Response header info:\")\n    print(res.headers)\n\n    data = response.read().decode(\"utf-8\")\n    with open(\"2-baidu.html\", \"w\", encoding=\"utf-8\") as f:\n        f.write(data)\n\n\nif __name__ == '__main__':\n    load_baidu()","repo_name":"MingruiWang2017/python-web-crawler","sub_path":"day2/2_request_header.py","file_name":"2_request_header.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
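Editor's note: the record above demonstrates that a bare urllib Request starts with empty headers. A minimal follow-on sketch (same stdlib API; the User-Agent value is just an illustrative choice) sets one explicitly:

import urllib.request

req = urllib.request.Request(
    "http://www.baidu.com",
    headers={"User-Agent": "Mozilla/5.0"},  # req.headers is no longer empty
)
with urllib.request.urlopen(req) as res:
    body = res.read().decode("utf-8")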
{"seq_id":"16966731743","text":"import tensorflow as tf\nfrom models import sub_units\n\n\nclass EncoderLayer(tf.keras.layers.Layer):\n    \"\"\"\n    single block of the encoder model, to be stacked multiple\n    times and fed with the positional encoding of language 1\n    \"\"\"\n    def __init__(self, *, d_model, num_heads, dff, dropout_rate=0.1):\n        super().__init__()\n\n        self.self_attention = sub_units.GlobalSelfAttention(\n            num_heads=num_heads,\n            key_dim=d_model,\n            dropout=dropout_rate)\n\n        self.ffn = sub_units.FeedForward(d_model, dff)\n\n    def call(self, x):\n        x = self.self_attention(x)\n        x = self.ffn(x)\n        return x\n\n    def get_config(self):\n        config = super().get_config().copy()\n        config.update({\n            'self_attention': self.self_attention,\n            'ffn': self.ffn\n        })\n\n        return config\n\n\nclass DecoderLayer(tf.keras.layers.Layer):\n    \"\"\"\n    single block of the decoder model, to be stacked multiple\n    times and fed with the decoder output as context and the\n    positional encoding of the inputs of language 2\n    \"\"\"\n    def __init__(self,\n                 *,\n                 d_model,\n                 num_heads,\n                 dff,\n                 dropout_rate=0.1):\n        super().__init__()\n\n        self.causal_self_attention = sub_units.CausalSelfAttention(\n            num_heads=num_heads,\n            key_dim=d_model,\n            dropout=dropout_rate)\n\n        self.cross_attention = sub_units.CrossAttention(\n            num_heads=num_heads,\n            key_dim=d_model,\n            dropout=dropout_rate)\n\n        self.ffn = sub_units.FeedForward(d_model, dff)\n\n    def call(self, x, context):\n        x = self.causal_self_attention(x=x)\n        x = self.cross_attention(x=x, context=context)\n\n        x = self.ffn(x)  # Shape `(batch_size, seq_len, d_model)`.\n\n        return x\n\n    def get_config(self):\n        config = super().get_config().copy()\n        config.update({\n            'causal_self_attention': self.causal_self_attention,\n            'cross_attention': self.cross_attention,\n            'ffn': self.ffn\n        })\n\n        return config\n\n\nclass Encoder(tf.keras.layers.Layer):\n    def __init__(self, *, num_layers, d_model, num_heads,\n                 dff, vocab_size, dropout_rate=0.1):\n        super().__init__()\n\n        self.d_model = d_model\n        self.num_layers = num_layers\n\n        self.pos_embedding = sub_units.PositionalEmbedding(\n            vocab_size=vocab_size, d_model=d_model)\n\n        self.enc_layers = [EncoderLayer(d_model=d_model,\n                                        num_heads=num_heads,\n                                        dff=dff, dropout_rate=dropout_rate)\n                           for _ in range(num_layers)]\n\n        self.dropout = tf.keras.layers.Dropout(dropout_rate)\n\n    def call(self, x):\n        # `x` is token-IDs shape: (batch, seq_len)\n        x = self.pos_embedding(x)  # Shape `(batch_size, seq_len, d_model)`.\n\n        x = self.dropout(x)\n\n        for i in range(self.num_layers):\n            x = self.enc_layers[i](x)\n\n        return x  # Shape `(batch_size, seq_len, d_model)`.\n\n    def get_config(self):\n        config = super().get_config().copy()\n        config.update({\n            'd_model': self.d_model,\n            'num_layers': self.num_layers,\n            'pos_embedding': self.pos_embedding,\n            'enc_layers': self.enc_layers,\n            'dropout': self.dropout\n        })\n\n        return config\n\n\nclass Decoder(tf.keras.layers.Layer):\n    def __init__(self, *, num_layers, d_model, num_heads, dff, vocab_size,\n                 dropout_rate=0.1):\n\n        super().__init__()\n\n        self.d_model = d_model\n        self.num_layers = num_layers\n\n        self.pos_embedding = sub_units.PositionalEmbedding(vocab_size=vocab_size,\n                                                           d_model=d_model)\n\n        self.dropout = tf.keras.layers.Dropout(dropout_rate)\n        self.dec_layers = [DecoderLayer(d_model=d_model,\n                                        num_heads=num_heads,\n                                        dff=dff,\n                                        dropout_rate=dropout_rate)\n                           for _ in range(num_layers)]\n\n    def call(self, x, context):\n        # `x` is token-IDs shape (batch, target_seq_len)\n        x = self.pos_embedding(x)  # (batch_size, target_seq_len, d_model)\n\n        x = self.dropout(x)\n\n        for i in range(self.num_layers):\n            x = self.dec_layers[i](x, context)\n\n        return x  # (batch_size, target_seq_len, d_model)\n\n    def get_config(self):\n        config = super().get_config().copy()\n        config.update({\n            'd_model': self.d_model,\n            'num_layers': self.num_layers,\n            'pos_embedding': self.pos_embedding,\n            'dropout': self.dropout,\n            'dec_layers': self.dec_layers\n        })\n\n        return config\n","repo_name":"niccolot/ENG-ITA_NMT","sub_path":"models/encoder_decoder.py","file_name":"encoder_decoder.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"71542556248","text":"import os\r\nimport sys\r\n\r\nsys.path.insert(0, os.path.abspath(\"..\"))\r\n\r\nproject = \"VAE mixin PyTorch\"\r\ncopyright = \"2023, yuanx749\"\r\nauthor = \"yuanx749\"\r\n\r\nextensions = [\r\n    \"sphinx.ext.autodoc\",\r\n    \"sphinx.ext.intersphinx\",\r\n    \"sphinx.ext.viewcode\",\r\n    \"myst_parser\",\r\n]\r\ntemplates_path = [\"_templates\"]\r\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\r\ntoc_object_entries_show_parents = \"hide\"\r\n\r\nhtml_theme = \"furo\"\r\nhtml_css_files = [\"custom.css\"]\r\nhtml_static_path = [\"_static\"]\r\n\r\nautodoc_member_order = \"bysource\"\r\nautodoc_mock_imports = [\"torch\"]\r\nautodoc_typehints = \"none\"\r\n\r\nintersphinx_mapping = {\r\n    \"python\": (\"https://docs.python.org/3\", None),\r\n    \"torch\": (\"https://pytorch.org/docs/stable\", None),\r\n}\r\n\r\nmyst_enable_extensions = [\"dollarmath\"]\r\n\r\nif os.getenv(\"doc_format\", \"numpydoc\") == \"napoleon\":\r\n    extensions.append(\"sphinx.ext.napoleon\")\r\n    napoleon_google_docstring = False\r\n    napoleon_use_param = False\r\n    napoleon_use_rtype = False\r\n    napoleon_preprocess_types = True\r\nelse:\r\n    extensions.append(\"numpydoc\")\r\n    numpydoc_show_class_members = False\r\n    numpydoc_xref_param_type = True\r\n    numpydoc_xref_ignore = {\"optional\", \"default\"}\r\n","repo_name":"yuanx749/vae-mixin-pytorch","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"24559141149","text":"#! /usr/bin/env python3\n\n\"\"\"\nsample.txt:\nIn this list, the two entries that sum to 2020 are 1721 and 299. 
Multiplying them together produces 1721 * 299 = 514579, so the correct answer is 514579.\n\"\"\"\n\nimport sys\n\nfrom typing import Set\n\n\ndef find_sum_to(entries: Set[int], limit: int) -> int:\n # if two entries in `entries` sum to `limit`, return their product\n for e in entries:\n if (limit - e) in entries:\n return e * (limit - e)\n\n return 0\n\n\ndef calc(content) -> int:\n entries = set(map(lambda e: int(e.strip()), content))\n for e in entries:\n result = find_sum_to(entries - {e}, 2020 - e)\n if result:\n return e * result\n\n return 0\n\n\ndef main():\n with open(sys.argv[1], \"r\") as f:\n content = f.readlines()\n\n result = calc(content)\n print(result)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Amar1729/aoc-2020","sub_path":"python/day1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38036146060","text":"import collections\r\nimport os\r\nimport numpy as np\r\nfrom torch import Tensor\r\nimport librosa\r\nfrom torch.utils.data import Dataset\r\n\r\n___author__ = \"Hemlata Tak\"\r\n__email__ = \"tak@eurecom.fr\"\r\n\r\n\r\ndef genSpoof_list(dir_meta, is_train=False, is_eval=False):\r\n d_meta = {}\r\n file_list = []\r\n with open(dir_meta, 'r') as f:\r\n l_meta = f.readlines()\r\n\r\n if is_train:\r\n for line in l_meta:\r\n _, key, _, _, label = line.strip().split(' ')\r\n file_list.append(key)\r\n d_meta[key] = 1 if label == 'bonafide' else 0\r\n return d_meta, file_list\r\n\r\n elif is_eval:\r\n for line in l_meta:\r\n key = line.strip()\r\n file_list.append(key)\r\n return file_list\r\n else:\r\n for line in l_meta:\r\n _, key, _, _, label = line.strip().split(' ')\r\n file_list.append(key)\r\n d_meta[key] = 1 if label == 'bonafide' else 0\r\n return d_meta, file_list\r\n\r\n\r\ndef pad(x, max_len=64600):\r\n x_len = x.shape[0]\r\n if x_len >= max_len:\r\n return x[:max_len]\r\n # need to pad\r\n num_repeats = int(max_len / x_len) + 1\r\n\r\n # padded_x = np.tile(x, (1, num_repeats))[:, :max_len][0]\r\n\r\n x_repeat = np.repeat(x, num_repeats)\r\n padded_x = x_repeat[:max_len]\r\n return padded_x\r\n\r\n\r\nASVFile = collections.namedtuple('ASVFile',\r\n ['speaker_id', 'file_name', 'path', 'sys_id', 'key'])\r\n\r\n\r\nclass Dataset_ASVspoof2019_train(Dataset):\r\n def __init__(self, list_IDs, labels, base_dir, transform=None, is_train=True,\r\n protocols_dir=None):\r\n '''self.list_IDs\t: list of strings (each string: utt key),\r\n self.labels : dictionary (key: utt key, value: label integer)'''\r\n self.list_IDs = list_IDs\r\n self.labels = labels\r\n self.base_dir = base_dir\r\n self.transform = transform\r\n self.protocols_fname = 'train.trn' if is_train else 'dev.trl'\r\n self.protocols_dir = protocols_dir\r\n self.sysid_dict = {\r\n '-': 0, # bonafide speech\r\n 'A01': 1, # Wavenet vocoder\r\n 'A02': 2, # Conventional vocoder WORLD\r\n 'A03': 3, # Conventional vocoder MERLIN\r\n 'A04': 4, # Unit selection system MaryTTS\r\n 'A05': 5, # Voice conversion using neural networks\r\n 'A06': 6, # transform function-based voice conversion\r\n 'A07': 7,\r\n 'A08': 8,\r\n 'A09': 9,\r\n 'A10': 10,\r\n 'A11': 11,\r\n 'A12': 12,\r\n 'A13': 13,\r\n 'A14': 14,\r\n 'A15': 15,\r\n 'A16': 16,\r\n 'A17': 17,\r\n 'A18': 18,\r\n 'A19': 19,\r\n # For PA:\r\n 'AA': 27,\r\n 'AB': 28,\r\n 'AC': 29,\r\n 'BA': 30,\r\n 'BB': 31,\r\n 'BC': 32,\r\n 'CA': 33,\r\n 'CB': 34,\r\n 'CC': 35\r\n }\r\n self.protocols_fname = 
os.path.join(self.protocols_dir,\r\n 'ASVspoof2019.{}.cm.{}.txt'.format('LA', self.protocols_fname))\r\n self.files_meta = self.parse_protocols_file(self.protocols_fname)\r\n data = list(map(self.read_file, self.files_meta))\r\n self.data_x, self.data_y, self.data_sysid = map(list, zip(*data))\r\n if self.transform:\r\n self.data_x = [self.transform(x) for x in self.data_x]\r\n\r\n def __len__(self):\r\n return len(self.list_IDs)\r\n\r\n def __getitem__(self, idx):\r\n x = self.data_x[idx]\r\n y = self.data_y[idx]\r\n return x, y, self.files_meta[idx]\r\n\r\n def read_file(self, meta):\r\n data_x, fs = librosa.load(meta.path, sr=16000)\r\n data_y = meta.key\r\n return data_x, float(data_y), meta.sys_id\r\n\r\n def _parse_line(self, line):\r\n tokens = line.strip().split(' ')\r\n return ASVFile(speaker_id=tokens[0],\r\n file_name=tokens[1],\r\n path=os.path.join(os.path.join(self.base_dir, 'flac'), tokens[1] + '.flac'),\r\n sys_id=self.sysid_dict[tokens[3]],\r\n key=int(tokens[4] == 'bonafide'))\r\n\r\n def parse_protocols_file(self, protocols_fname):\r\n lines = open(protocols_fname).readlines()\r\n files_meta = map(self._parse_line, lines)\r\n return list(files_meta)\r\n\r\n\r\nclass Dataset_ASVspoof2021_eval(Dataset):\r\n def __init__(self, list_IDs, base_dir):\r\n '''self.list_IDs\t: list of strings (each string: utt key),\r\n '''\r\n\r\n self.list_IDs = list_IDs\r\n self.base_dir = base_dir\r\n\r\n def __len__(self):\r\n return len(self.list_IDs)\r\n\r\n def __getitem__(self, index):\r\n self.cut = 64600 # take ~4 sec audio (64600 samples)\r\n key = self.list_IDs[index]\r\n X, fs = librosa.load(self.base_dir + 'flac/' + key + '.flac', sr=16000)\r\n X_pad = pad(X, self.cut)\r\n x_inp = Tensor(X_pad)\r\n return x_inp, key\r\n","repo_name":"Shenkailai/asvspoof-GCN","sub_path":"data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31478367142","text":"import os\nfrom datetime import datetime\n\nimport zpp_serpent\nfrom fastapi import Depends, FastAPI\nfrom crypto.ecc import scalar_mult\nfrom crypto.ecdh import make_keypair\nfrom src.db import User, create_db_and_tables, get_async_session\nfrom src.schemas import UserCreate, UserRead, UserUpdate, Note, Key\nfrom src.service import NoteService, UserService\nfrom src.settings import KEY_EXPIRATION_TIME\nfrom src.users import auth_backend, current_active_user, fastapi_users\n\napp = FastAPI()\n\napp.include_router(\n fastapi_users.get_auth_router(auth_backend), prefix=\"/auth/jwt\", tags=[\"auth\"]\n)\napp.include_router(\n fastapi_users.get_register_router(UserRead, UserCreate),\n prefix=\"/auth\",\n tags=[\"auth\"],\n)\napp.include_router(\n fastapi_users.get_reset_password_router(),\n prefix=\"/auth\",\n tags=[\"auth\"],\n)\napp.include_router(\n fastapi_users.get_verify_router(UserRead),\n prefix=\"/auth\",\n tags=[\"auth\"],\n)\napp.include_router(\n fastapi_users.get_users_router(UserRead, UserUpdate),\n prefix=\"/users\",\n tags=[\"users\"],\n)\n\n\n@app.get(\"/\")\nasync def authenticated_route(user: User = Depends(current_active_user)):\n return {\"message\": f\"Hello {user.email}!\"}\n\n\n@app.get(\"/get_public_key\")\nasync def exchange_public_keys(alice_public_key: Key, user: User = Depends(current_active_user),\n session=Depends(get_async_session)):\n await UserService.save_public_key(session, user.id, alice_public_key)\n return {\"public_key\": 
os.getenv(\"public_key\")}\n\n\n@app.post(\"/create_note\")\nasync def create_note(note: Note, user: User = Depends(current_active_user), session=Depends(get_async_session)):\n if (datetime.now() - user.pk_updated_at).seconds > KEY_EXPIRATION_TIME:\n return {\"message\": \"handshake required\"}\n note_name, note_message = note.name, note.message\n try:\n note.name, note.message = await NoteService.decrypt_note(user, note)\n await NoteService.create_note(session, user.id, note)\n note.name, note.message = note_name, note_message\n except BaseException as e:\n print(e)\n return {\"message\": \"ECDH error\"}\n return {\"message\": note}\n\n\n@app.get(\"/get_notes\")\nasync def get_notes(user: User = Depends(current_active_user), session=Depends(get_async_session)):\n if (datetime.now() - user.pk_updated_at).seconds > KEY_EXPIRATION_TIME:\n return {\"message\": \"handshake required\"}\n notes = await NoteService.get_user_notes(session, user.id)\n shared_secret = scalar_mult(int(os.getenv('private_key')), eval(user.public_key))\n password = shared_secret[0].to_bytes(32, 'big')\n try:\n for note in notes:\n note.name = str(zpp_serpent.encrypt_CFB(note.name.encode(), password))\n note.message = str(zpp_serpent.encrypt_CFB(note.message.encode(), password))\n except BaseException:\n return {\"message\": \"ECDH error\"}\n return {\"message\": notes}\n\n\n@app.post(\"/edit_note\")\nasync def edit_note(note: Note, user: User = Depends(current_active_user),\n session=Depends(get_async_session)):\n if (datetime.now() - user.pk_updated_at).seconds > KEY_EXPIRATION_TIME:\n return {\"message\": \"handshake required\"}\n try:\n note_name, note_message = note.name, note.message\n note.name, note.message = await NoteService.decrypt_note(user, note)\n await NoteService.update_note(session, note.name, note.message, user.id)\n note.name, note.message = note_name, note_message\n except BaseException:\n return {\"message\": \"ECDH error\"}\n return {\"message\": note}\n\n\n@app.delete(\"/delete_note\")\nasync def delete_note(note: Note, user: User = Depends(current_active_user), session=Depends(get_async_session)):\n if (datetime.now() - user.pk_updated_at).seconds > KEY_EXPIRATION_TIME:\n return {\"message\": \"handshake required\"}\n try:\n note.name, note.message = await NoteService.decrypt_note(user, note)\n deleted_note = await NoteService.delete_note(session, user.id, note)\n except BaseException:\n return {\"message\": \"ECDH error\"}\n return {\"message\": deleted_note}\n\n\n@app.on_event(\"startup\")\nasync def on_startup():\n if 'private_key' not in os.environ:\n bob_private_key, bob_public_key = make_keypair()\n os.environ['private_key'] = str(bob_private_key)\n os.environ['public_key'] = str(bob_public_key)\n print('keys were created')\n\n await create_db_and_tables()\n","repo_name":"akimich11/super-safe-evernote","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"30660486085","text":"import os\nimport shutil\nimport cv2\nfrom torchvision.utils import make_grid\nimport numpy as np\nfrom concurrent import futures\n\n\"\"\"\n This cleaning script iterates through folders and finds textures of different types\n Textures are then renamed with prefix (albedo, normal, metallic, roughness, ao)\n\"\"\"\n\nINPUT_FOLDER = \"PBR_dataset\"\nOUTPUT_FOLDER = \"PBR_dataset_256\"\n\n\nfolders = [x for x in os.listdir(INPUT_FOLDER) if 
os.path.isdir(os.path.join(INPUT_FOLDER,x))]\n\ntexture_dict = dict()\n\ndef string_contains(string, sub_strings):\n    # return True if ANY substring matches; the original returned False after\n    # checking only the first substring because of a misplaced else branch\n    for sub in sub_strings:\n        if sub in string.lower():\n            return True\n    return False\n\nfor folder in folders:\n    files = os.listdir(os.path.join(INPUT_FOLDER,folder))\n    files = [x for x in files if x.endswith(\".png\")]\n\n    texture_dict[folder] = dict()\n\n    for file_name in files:\n        file_name_replace = file_name.replace('_', '-')\n        ending = file_name_replace.split(\"-\")[-1]\n\n        # find albedo\n        if string_contains(file_name, [\"color\", \"albedo\", \"alb\"]):\n            texture_dict[folder][\"albedo\"] = os.path.join(INPUT_FOLDER,folder,file_name)\n        # find metallic\n        if string_contains(file_name, [\"metal\"]):\n            texture_dict[folder][\"metallic\"] = os.path.join(INPUT_FOLDER,folder,file_name)\n        # find roughness\n        if string_contains(file_name, [\"rough\"]):\n            texture_dict[folder][\"roughness\"] = os.path.join(INPUT_FOLDER,folder,file_name)\n        # find normal map\n        if string_contains(file_name, [\"normal\"]):\n            texture_dict[folder][\"normal\"] = os.path.join(INPUT_FOLDER,folder,file_name)\n        # find ambient occlusion\n        if string_contains(file_name, [\"ao\", \"ambient\", \"occ\"]):\n            texture_dict[folder][\"ao\"] = os.path.join(INPUT_FOLDER,folder,file_name)\n        # find height map\n        if string_contains(file_name, [\"height\",]):\n            texture_dict[folder][\"height\"] = os.path.join(INPUT_FOLDER,folder,file_name)\n\n# create output folder if needed\nif not os.path.exists(OUTPUT_FOLDER):\n    os.makedirs(OUTPUT_FOLDER)\n\ndef save_material(material):\n    folder = os.path.join(OUTPUT_FOLDER,material)\n    crop_size = 256\n    num_crops = 10\n    for crop_idx in range(num_crops):\n\n        for idx, texture_type in enumerate(texture_dict[material]):\n            src = texture_dict[material][texture_type]\n\n            out_folder = \"{}_{}\".format(folder,crop_idx)\n            material_name = \"{}_{}\".format(material,crop_idx)\n            if not os.path.exists(out_folder):\n                os.makedirs(out_folder)\n            dst = os.path.join(out_folder,\"{}-{}.png\".format(material_name,texture_type))\n            print(dst)\n            im = cv2.imread(src)\n            if idx == 0:\n                # pick one random crop origin per crop_idx so every texture type\n                # of a material is cropped at the same location\n                h, w, d = im.shape\n                rx = np.random.randint(0,w-crop_size)\n                ry = np.random.randint(0,h-crop_size)\n\n            # rows come from ry, columns from rx (the original used rx for both)\n            crop = im[ry:ry+crop_size,rx:rx+crop_size]\n            cv2.imwrite(dst,crop)\n\n\n# serial fallback:\n# for material in texture_dict:\n#     save_material(material)\n\n# parallel processing\nwith futures.ProcessPoolExecutor() as executor:\n    executor.map(save_material, texture_dict)\n","repo_name":"maitek/ml-experimental","sub_path":"texture2pbr/parse_dataset.py","file_name":"parse_dataset.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
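Editor's note: the record above fans save_material out with ProcessPoolExecutor.map; note that mapping over texture_dict iterates its keys, i.e. the material names. A minimal self-contained sketch of the same pattern (the square function is purely illustrative):

from concurrent import futures

def square(n: int) -> int:  # the mapped callable must be a picklable, top-level function
    return n * n

if __name__ == "__main__":  # guard required on platforms that spawn worker processes
    with futures.ProcessPoolExecutor() as executor:
        results = list(executor.map(square, range(8)))  # results arrive in input order
    assert results == [n * n for n in range(8)]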
{"seq_id":"12045602783","text":"# Scott McElfresh sme1d1 ICP 9\n\nfrom keras import Sequential\nfrom keras.datasets import mnist\nimport numpy as np\nfrom keras.layers import Dense\nfrom keras.utils import to_categorical\nimport matplotlib.pyplot as plt\n\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# process the data\n# 1. convert each image of shape 28*28 to 784 dimensional which will be fed to the network as a single feature\ndimData = np.prod(train_images.shape[1:])\ntrain_data = train_images.reshape(train_images.shape[0], dimData)\ntest_data = test_images.reshape(test_images.shape[0], dimData)\n\n# convert data to float and scale values between 0 and 1\ntrain_data = train_data.astype('float')\ntest_data = test_data.astype('float')\n# scale data\ntrain_data /= 255.0\ntest_data /= 255.0\n# change the labels from integer to one-hot encoding; to_categorical one-hot encodes\n# integer labels (sklearn's LabelEncoder, by contrast, maps classes to integer codes)\ntrain_labels_one_hot = to_categorical(train_labels)\ntest_labels_one_hot = to_categorical(test_labels)\n\n# creating network\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_shape=(dimData,)))\nmodel.add(Dense(10, activation='softmax'))\n# Compile model\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n# Create validation set\nhistory = model.fit(train_data, train_labels_one_hot, batch_size=256, epochs=8, verbose=2,\n                    validation_data=(test_data, test_labels_one_hot))\n\n# evaluate model's error\nscore = model.evaluate(test_data, test_labels_one_hot, verbose=0)\nprint(\"Baseline error: %.2f%%\" % (100 - score[1] * 100))\n\n# Plot a single image, its label, and inference (model prediction for label)\ninfer = str(np.argmax(model.predict(test_data[0].reshape(1, 784))))\nlabelsAndTestImages = list(zip(test_images, test_labels))\n\n# Plot single image and model estimation\nplot4 = plt.figure(4)\nplt.axis('off')\nplt.imshow(test_images[0], cmap='gray', interpolation='nearest', aspect='equal')\nplt.title(\"Label \" + str(test_labels[0]) + \"\\n\" + \"Infer \" + infer)\n\n# Plot first twenty images, labels, and inferences\nplot1 = plt.figure(figsize=(10, 10))\nfor index, (image, label) in enumerate(labelsAndTestImages[:20]):\n    plt.subplot(4, 5, index + 1)\n    plt.axis('off')\n    plt.imshow(image, cmap='gray', interpolation='nearest', aspect='equal')\n    plt.title(\"Label \" + '%i' % label + \"\\n\" + \"Infer \" +\n              str(np.argmax(model.predict(test_data[index].reshape(1, 784)))))\n\n# Plot accuracy\nplot2 = plt.figure(2)\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='lower right')\n\n# Loss plot\nplot3 = plt.figure(3)\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()\n","repo_name":"sme1d1/UMKC_DeepLearning2021","sub_path":"ICP/ICP9/source/ICP9.py","file_name":"ICP9.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
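Editor's note: since the record's original comment conflated the two, here is what keras.utils.to_categorical actually produces -- one-hot rows -- whereas sklearn's LabelEncoder would return integer codes:

import numpy as np
from keras.utils import to_categorical

labels = np.array([0, 2, 1])
one_hot = to_categorical(labels, num_classes=3)
# one_hot:
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]
assert one_hot.shape == (3, 3)
assert one_hot.argmax(axis=1).tolist() == [0, 2, 1]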
{"seq_id":"7075270950","text":"import os\n\nimport uvicorn\n\n__ignore_list = [\"venv\"]\n\n\nif __name__ == \"__main__\":\n    log_level = os.environ.get(\"FASTAPI_LOG\", \"info\")\n    is_dev = os.environ.get(\"DEPLOY_STATUS\", \"prod\") == \"dev\"\n    uvicorn.run(\n        \"captain.main:app\",\n        port=5392,\n        log_level=log_level,\n        reload=is_dev,\n        reload_excludes=[\n            os.path.join(os.getcwd(), p)\n            for p in __ignore_list\n            if os.path.exists(os.path.join(os.getcwd(), p))\n        ],\n    )\n","repo_name":"flojoy-io/studio","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} {"seq_id":"821018352","text":"import math\nimport os\nimport random\nimport re\nimport sys\n\ndef larrysArray(A):  # e.g. A=[3,1,2]\n    # Each allowed move rotates three consecutive elements, which is an even\n    # permutation and therefore never changes the parity of the inversion count.\n    # The sorted array has 0 inversions (even), so A is sortable iff the count is even.\n    count = 0\n    for i in range(1,len(A)):\n        for j in range(i):\n            if A[j] > A[i]:\n                count += 1\n    if count%2 == 0:\n        return \"YES\"\n    return \"NO\"\n\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    t = int(input().strip())\n\n    for t_itr in range(t):\n        n = int(input().strip())\n\n        A = list(map(int, input().rstrip().split()))\n\n        result = larrysArray(A)\n\n        fptr.write(result + '\\n')\n\n    fptr.close()\n","repo_name":"vijayvignesh021/Hackerrank","sub_path":"Larry's_Array.py","file_name":"Larry's_Array.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"26795012749","text":"from tool.runners.python import SubmissionPy\n\nclass DidipSubmission(SubmissionPy):\n    def run(self, s):\n        \"\"\"\n        :param s: input in string format\n        :return: solution flag\n        \"\"\"\n        # Your code goes here\n        grid = s.splitlines()\n        seen = [[False for _ in range(len(grid[0]))] for _ in range(len(grid))]\n        counter = 0\n\n        for i in range(len(grid)):\n            max_left = -1\n            j = 0\n            while max_left < 9 and j < len(grid[0]):\n                if int(grid[i][j]) > max_left:\n                    max_left = int(grid[i][j])\n                    if not seen[i][j]:\n                        seen[i][j] = True\n                        counter += 1\n                j += 1\n\n            max_right = -1\n            j = -1\n            while max_right < 9 and -j <= len(grid[0]):\n                if int(grid[i][j]) > max_right:\n                    max_right = int(grid[i][j])\n                    if not seen[i][j]:\n                        seen[i][j] = True\n                        counter += 1\n                j -= 1\n\n\n        for j in range(len(grid[0])):\n            max_top = -1\n            i = 0\n            while max_top < 9 and i < len(grid[0]):\n                if int(grid[i][j]) > max_top:\n                    max_top = int(grid[i][j])\n                    if not seen[i][j]:\n                        seen[i][j] = True\n                        counter += 1\n                i += 1\n\n            max_bottom = -1\n            i = -1\n            while max_bottom < 9 and -i <= len(grid[0]):\n                if int(grid[i][j]) > max_bottom:\n                    max_bottom = int(grid[i][j])\n                    if not seen[i][j]:\n                        seen[i][j] = True\n                        counter += 1\n                i -= 1\n\n        return counter\n\ndef test_didip():\n    \"\"\"\n    Run `python -m pytest ./day-08/part-1/didip.py` to test the submission.\n    \"\"\"\n    assert (\n        DidipSubmission().run(\n            \"\"\"30373\n25512\n65332\n33549\n35390\n\"\"\".strip()\n        )\n        == 21\n    )\n\n","repo_name":"badouralix/adventofcode-2022","sub_path":"day-08/part-1/didip.py","file_name":"didip.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"}
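Editor's note: the sweep solution above assumes a square grid (len(grid[0]) also bounds the row loops), and it can be validated against a direct per-cell reference; on the record's own sample both yield 21:

def count_visible(grid):
    # a tree is visible if at least one of the four rays to the edge is strictly shorter
    n, m = len(grid), len(grid[0])
    visible = 0
    for i in range(n):
        for j in range(m):
            h = grid[i][j]
            if (all(grid[i][k] < h for k in range(j))
                    or all(grid[i][k] < h for k in range(j + 1, m))
                    or all(grid[k][j] < h for k in range(i))
                    or all(grid[k][j] < h for k in range(i + 1, n))):
                visible += 1
    return visible

assert count_visible(["30373", "25512", "65332", "33549", "35390"]) == 21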
{"seq_id":"69917695450","text":"#coding=utf-8\nimport sys\nimport os\nimport pygame\n\nfrom src.constants import *\nfrom src.Game import *\nfrom src.pieces.Pieces import *\n\npygame.init()\n\nclock = pygame.time.Clock()  # similar to Unity's Time.deltaTime()\nwindow = pygame.display.set_mode((1200, 900))  # create the window (a Surface)\n\ndef resource_path(relative_path):\n    try:\n        # PyInstaller creates a temp folder and stores path in _MEIPASS\n        base_path = sys._MEIPASS\n    except Exception:\n        base_path = os.path.abspath(\".\")\n\n    return os.path.join(base_path, relative_path)\n\ndef getPosition(x,y):  # converts the mouse click position into a board square\n    row = y//square  # row = click position divided by the square size\n    col = x//square  # same, but for columns\n    return row, col\n\ndef main():\n    run = True\n    gameOver = False\n    FPS = 60\n    game = Game(widthS, heightS, rows, cols, square, window)\n\n    while run:\n        clock.tick(FPS)  # cap at 60 FPS (fine for a chess-like game)\n\n        #window.blit(imagesPiecesRegnant[\"roi\"], (50, 50))  # draw the king at position 50, 50\n        #window.blit(imagesPiecesOpposant[\"generalDeJade\"], (150, 150))\n\n        game.updateWindow()\n        if game.checkGame():\n            gameOver = True\n\n        for event in pygame.event.get():  # handle the events that can occur during a game\n            if event.type == pygame.QUIT:  # if the player quits\n                run = False\n                quit()\n\n            if event.type == pygame.KEYDOWN:  # on space bar while game over, reset the game\n                if event.key == pygame.K_SPACE and gameOver:  # gameOver is like run but does not terminate the application\n                    game.reset()\n\n            if event.type == pygame.MOUSEBUTTONDOWN and not gameOver:  # on mouse press while not game over\n                if pygame.mouse.get_pressed()[0]:  # left click\n                    location = pygame.mouse.get_pos()  # grab the click coordinates to check what sits underneath\n                    if location[0] <= widthS and location[1] <= heightS:\n                        game.select(location)\n                    else:\n                        game.selectPara(location)\nmain()","repo_name":"enzo1000/Shogi","sub_path":"Shogi/Shogi/Shogi.py","file_name":"Shogi.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"2827282223","text":"#!/usr/bin/env python\nimport os\n\ndef log(var):\n    print(var)\n    return var\n\nflatten = lambda l: (item for sublist in l for item in sublist)\nstrip_nl_char = lambda val: val.rstrip('\\n')\ncli_run = lambda cmd: map(strip_nl_char, os.popen(cmd).readlines())\n\nfind_c_files_str = 'find {} -name \\*.h -print -o -name \\*.cpp -print -o -name \\*.c -print -o -name \\*.cxx -print -o -name \\*.hpp -print -o -name \\*.hxx -print'.format\nfind_all_c_files = lambda path: cli_run(find_c_files_str(path))\n\ndef build_db(path):\n    files = None\n    print('Searching files to index')\n    with open(path, 'r') as f:\n        splited_lines = (line.rstrip('\\n').split(' ') for line in f if not line.startswith('#'))\n        files = set(flatten(map(find_all_c_files, (line[1] for line in splited_lines if len(line) > 1))))\n\n    if files:\n        with open('./cscope.files', 'w+') as f:\n            f.write('\\n'.join(files))\n        print('start build DB')\n        print(list(cli_run('cscope -RUbq 2>/dev/null')))  # list() so the lazy map is consumed and printed\n        print('finished')\n\nbuild_db('./.BookMarks')\n\n\n","repo_name":"amberik/dotFiles","sub_path":"vim/cscope_build.py","file_name":"cscope_build.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"10218038392","text":"from django.shortcuts import render, HttpResponse\nimport datetime as pt\n\n# Create your views here.\ndef timeDis(request):\n    # compute the timestamp per request; a module-level now() would freeze it at import time\n    n = pt.datetime.now()\n    context = {\n        'date': n.strftime(\"%b-%d-%y\"),\n        'time': n.strftime(\"%I:%M %p\")\n    }\n\n    return render(request, 'index.html', context)","repo_name":"rashidjama/displayTime-assignment","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
urllib.request\nimport requests\n\nfrom slacker import Slacker\nfrom bs4 import BeautifulSoup\nfrom slackclient import SlackClient\nfrom flask import Flask, request, make_response, render_template, jsonify\nfrom operator import itemgetter\nfrom math import floor\n\napp = Flask(__name__)\n\nslack_token = \"xoxb-503818135714-507351131987-GyVOyX3uF6Pzv1N4yT5X9dKn\"\nslack_client_id = \"503818135714.507348823507\"\nslack_client_secret = \"8194f60277b7af2f46584293915e356b\"\nslack_verification = \"hgtt6xtPYcb4Oq5TzDPhYFOr\"\nsc = SlackClient(slack_token)\nslack = Slacker(slack_token)\n\ndef crawl_opgg_recommend(intent):\n if intent == 'Default Fallback Intent':\n return ''\n\n keywords = []\n statistics = []\n\n sourcecode1 = urllib.request.urlopen(\n 'http://www.op.gg/champion/ajax/statistics/trendChampionList/type=winratio&').read()\n soup1 = BeautifulSoup(sourcecode1, \"html.parser\")\n sourcecode2 = urllib.request.urlopen(\n 'http://www.op.gg/champion/ajax/statistics/trendChampionList/type=banratio&').read()\n soup2 = BeautifulSoup(sourcecode2, \"html.parser\")\n\n winpick_champ = [i.get_text() for i in soup1.find_all(\"div\", class_=\"champion-index-table__name\")]\n winpick_champ = winpick_champ[:142]\n winratio = [i.get_text() for i in soup1.find_all(\"td\",\n class_=\"champion-index-table__cell champion-index-table__cell--value champion-index-table__cell--blue\")]\n winratio = winratio[:142]\n pickratio = [i.get_text() for i in\n soup1.find_all(\"td\", class_=\"champion-index-table__cell champion-index-table__cell--value\")]\n pickratio = pickratio[:142]\n\n for i in range(len(winpick_champ)):\n if not winpick_champ[i] in statistics:\n statistics.append((winpick_champ[i], floor(float(winratio[i].strip('%')) * float(pickratio[i].strip('%'))) / 100))\n statistics = sorted(statistics, key=itemgetter(1), reverse=True)\n keywords = [\"OP 챔피언 추천 Top 10\"]\n for i in range(1, 11):\n keywords.append(str(i) + \"위: \" + statistics[i - 1][0] + \"\\t\\t추천지수: \" + str(statistics[i - 1][1]))\n slack.files.upload(statistics[0][0].replace(\" \", \"\") + '.png', channels=\"day4\")\n\n return u'\\n'.join(keywords)\ndef crawl_opgg_top10(intent):\n if intent == 'Default Fallback Intent':\n return ''\n\n keywords = []\n statistics = []\n statistics_ban = []\n\n sourcecode1 = urllib.request.urlopen(\n 'http://www.op.gg/champion/ajax/statistics/trendChampionList/type=winratio&').read()\n soup1 = BeautifulSoup(sourcecode1, \"html.parser\")\n sourcecode2 = urllib.request.urlopen(\n 'http://www.op.gg/champion/ajax/statistics/trendChampionList/type=banratio&').read()\n soup2 = BeautifulSoup(sourcecode2, \"html.parser\")\n\n winpick_champ = [i.get_text() for i in soup1.find_all(\"div\", class_=\"champion-index-table__name\")]\n winpick_champ = winpick_champ[:142]\n winratio = [i.get_text() for i in soup1.find_all(\"td\",\n class_=\"champion-index-table__cell champion-index-table__cell--value champion-index-table__cell--blue\")]\n winratio = winratio[:142]\n pickratio = [i.get_text() for i in\n soup1.find_all(\"td\", class_=\"champion-index-table__cell champion-index-table__cell--value\")]\n pickratio = pickratio[:142]\n\n ban_champ = [i.get_text() for i in soup2.find_all(\"div\", class_=\"champion-index-table__name\")]\n ban_champ = ban_champ[:142]\n banratio = [i.get_text() for i in soup2.find_all(\"td\",\n class_=\"champion-index-table__cell champion-index-table__cell--value champion-index-table__cell--blue\")]\n banratio = banratio[:142]\n\n for i in range(len(winpick_champ)):\n if not winpick_champ[i] in 
statistics:\n statistics.append((winpick_champ[i], float(winratio[i].strip('%')), float(pickratio[i].strip('%'))))\n\n for i in range(len(ban_champ)):\n if not ban_champ[i] in statistics:\n statistics_ban.append((ban_champ[i], float(banratio[i].strip('%'))))\n\n if intent == '승':\n statistics = sorted(statistics, key=itemgetter(1), reverse=True)\n keywords = [\"챔피언 승률 Top 10\"]\n slack.files.upload(statistics[0][0].replace(\" \", \"\") + '.png', channels=\"day4\")\n for i in range(1, 11):\n keywords.append(str(i) + \"위: \" + statistics[i-1][0] + \"\\t\\t승률: \" + str(statistics[i-1][1]) + \"%\")\n if intent == '픽':\n statistics = sorted(statistics, key=itemgetter(2), reverse=True)\n keywords = [\"챔피언 픽률 Top 10\"]\n slack.files.upload(statistics[0][0].replace(\" \", \"\") + '.png', channels=\"day4\")\n for i in range(1, 11):\n keywords.append(str(i) + \"위: \" + statistics[i-1][0] + \"\\t\\t픽률: \" + str(statistics[i-1][2]) + \"%\")\n if intent == '밴':\n statistics_ban = sorted(statistics_ban, key=itemgetter(1), reverse=True)\n print(statistics_ban)\n keywords = [\"챔피언 밴률 Top 10\"]\n slack.files.upload(statistics_ban[0][0].replace(\" \", \"\") + '.png', channels=\"day4\")\n for i in range(1, 11):\n keywords.append(str(i) + \"위: \" + statistics_ban[i-1][0] + \"\\t\\t밴률: \" + str(statistics_ban[i-1][1]) + \"%\")\n\n # Prefix the string with unicode u to support Korean characters.\n return u'\\n'.join(keywords)\n\ndef get_answer(text, user_key):\n data_send = {\n 'query': text,\n 'sessionId': user_key,\n 'lang': 'ko',\n }\n\n data_header = {\n 'Authorization': 'Bearer 37dc131c28f940459853c4ecd1b190bc',\n 'Content-Type': 'application/json; charset=utf-8'\n }\n\n dialogflow_url = 'https://api.dialogflow.com/v1/query?v=20150910'\n res = requests.post(dialogflow_url, data=json.dumps(data_send), headers=data_header)\n\n if res.status_code != requests.codes.ok:\n return '오류가 발생했습니다.'\n\n data_receive = res.json()\n result = {\n \"speech\": data_receive['result']['fulfillment']['speech'],\n \"intent\": data_receive['result']['metadata']['intentName']\n }\n print(result)\n return result\n\n# Function that handles incoming events\ndef _event_handler(event_type, slack_event):\n print(slack_event[\"event\"])\n\n if event_type == \"app_mention\":\n channel = slack_event[\"event\"][\"channel\"]\n text = slack_event[\"event\"][\"text\"]\n text = text[13:]\n answer = get_answer(text, 'session')\n\n if answer['intent'] == '추천':\n keywords = crawl_opgg_recommend(answer['intent'])\n else:\n keywords = crawl_opgg_top10(answer['intent'])\n sc.api_call(\n \"chat.postMessage\",\n channel=channel,\n text= answer['speech'] + \"\\n\" + keywords\n )\n\n return make_response(\"App mention message has been sent\", 200, )\n\n # ============= Event Type Not Found! 
============= #\n # If the event_type does not have a handler\n message = \"You have not added an event handler for the %s\" % event_type\n # Return a helpful error message\n return make_response(message, 200, {\"X-Slack-No-Retry\": 1})\n\n\n@app.route(\"/listening\", methods=[\"GET\", \"POST\"])\ndef hears():\n slack_event = json.loads(request.data)\n\n if \"challenge\" in slack_event:\n return make_response(slack_event[\"challenge\"], 200, {\"content_type\":\n \"application/json\"\n })\n\n if slack_verification != slack_event.get(\"token\"):\n message = \"Invalid Slack verification token: %s\" % (slack_event[\"token\"])\n make_response(message, 403, {\"X-Slack-No-Retry\": 1})\n\n if \"event\" in slack_event:\n event_type = slack_event[\"event\"][\"type\"]\n return _event_handler(event_type, slack_event)\n\n # If our bot hears things that are not events we've subscribed to,\n # send a quirky but helpful error response\n return make_response(\"[NO EVENT IN SLACK REQUEST] These are not the droids\\\n you're looking for.\", 404, {\"X-Slack-No-Retry\": 1})\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef index():\n return \"Server is ready.
\"\n\nif __name__ == '__main__':\n app.run('0.0.0.0', port=8080)","repo_name":"Gomchichit/lol_statistics","sub_path":"Seungmin_module.py","file_name":"Seungmin_module.py","file_ext":"py","file_size_in_byte":8269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18390347160","text":"\"\"\"Training Generation CLI entrypoint.\"\"\"\nfrom argparse import Namespace\n\nimport tqdm\nfrom tunable import Tunable, TunableManager\n\nfrom ...output import Output\nfrom ...parameters import pixel_to_um\nfrom ...random import RRF\nfrom .. import add_output_prefix, initialize_cells, initialize_simulator\n\n\nclass TrainingDataCount(Tunable):\n \"\"\"Training samples to generate\"\"\"\n\n default: int = 16\n\n\nclass TrainingCellCount(Tunable):\n \"\"\"Cells to add to training samples\"\"\"\n\n default: int = 32\n\n\nclass TrainingImageWidth(Tunable):\n \"\"\"Image width in pixels of training images\"\"\"\n\n default: int = 128\n\n\nclass TrainingImageHeight(Tunable):\n \"\"\"Image height in pixels of training images\"\"\"\n\n default: int = 128\n\n\ntqdm.tqdm.monitor_interval = 0\n\n\ndef subcommand_main(args: Namespace) -> None:\n \"\"\"\n Entry point for the 'training' subcommand.\n\n :param args: pre-parsed arguments\n :return: None\n \"\"\"\n\n shape = (TrainingImageHeight.value, TrainingImageWidth.value)\n\n cell_count = TrainingCellCount.value\n\n TunableManager.load(\n {\n 'Width': pixel_to_um(shape[1]),\n 'Height': pixel_to_um(shape[0]),\n 'NewCellRadiusFromCenter': 1,\n },\n reset=False,\n )\n\n ccf = RRF.sequence.integers(0, cell_count * 2)\n next(ccf) # for seed = 1 and PCG64, the first value will be a zero ... skip that\n\n if not args.output:\n raise RuntimeError(\"Output must be set\")\n\n outputs = Output.SelectableGetMultiple()\n\n output_count = 0\n\n for _ in tqdm.tqdm(range(TrainingDataCount.value)):\n simulator = initialize_simulator()\n initialize_cells(simulator, count=next(ccf), cell_type=args.cell)\n\n simulator.step(60.0)\n\n for output in outputs:\n if args.prefix:\n output_name = add_output_prefix(args.output, output=output)\n else:\n output_name = args.output\n\n output.write(\n simulator.simulation.world,\n output_name,\n overwrite=args.overwrite,\n output_count=output_count,\n )\n\n output_count += 1\n","repo_name":"modsim/CellSium","sub_path":"cellsium/cli/training/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23124639388","text":"#([aeiouy]{1}[bcdfgjklmnpqrstvwxz]{1})+[aeiouy]?|([bcdfgjklmnpqrstvwxz]{1}[aeiouy]{1})+[bcdfgjklmnpqrstvwxz]?\n\n\ndef checkio(text):\n\n import re\n\t#replace this for solution\n lower = text.lower()\n tokens = re.split( r'\\W', lower)\n match = [ x for x in tokens if re.search(r'^([aeiouy]{1}[bcdfghjklmnpqrstvwxz]{1})+[aeiouy]?$|^([bcdfghjklmnpqrstvwxz]{1}[aeiouy]{1})+[bcdfgjklmnpqrstvwxz]?$', x)]\n return len(match)\n\n#These \"asserts\" using only for self-checking and not necessary for auto-testing\nif __name__ == '__main__':\n\tassert checkio(\"My name is ...\") == 3, \"bobby\"\n\tassert checkio(\"Hello world\") == 0, \"Hey\"\n\tassert checkio(\"A quantity of striped words.\") == 1, \"Only of\"\n\tassert checkio(\"Dog,cat,mouse,bird.Human.\") == 3, 
\"hahah\"\n","repo_name":"bobbyc/checkio","sub_path":"oreilly/testStriped.py","file_name":"testStriped.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16678756474","text":"\n# coding: utf-8\n\n# In[142]:\n\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set(color_codes=True)\nimport pandas as pd\nfrom sklearn.preprocessing import Imputer\n\n\n# In[143]:\n\n\n\ndataset = pd.read_csv('C:/Users/rithw/Desktop/Sem IX/Python (PP)/Python Project/diamonds.csv')\n\n\n# In[144]:\n\n\nget_ipython().magic('matplotlib inline')\n\n\n# In[145]:\n\n\ndef remove_outlier(df_in, col_name):\n q1 = df_in[col_name].quantile(0.25)\n q3 = df_in[col_name].quantile(0.75)\n iqr = q3-q1 #Interquartile range\n fence_low = q1-1.5*iqr\n fence_high = q3+1.5*iqr\n df_out = df_in.loc[(df_in[col_name] < fence_low) | (df_in[col_name] > fence_high)]\n return df_out[col_name]\n\nm=dataset.drop('cut', axis=1)\nn=m.drop('color', axis=1)\no=n.drop('clarity', axis=1)\nprint(o.info())\n\ncolNames=o.columns\nprint(\"columns with outliers\")\nfor colName in colNames:\n colvalues = o[colName].values\n #print('column12:', colvalues)\n print('column:', colName)\n outValues=remove_outlier(o,colName)\n print(outValues)\n G=outValues.count()\n print(G)\n#print(o.carat.describe())\n\n\n# In[146]:\n\n\nprint(dataset.info())\nprint(dataset.describe())\n\nprint(dataset.isnull().sum())\n#dataset = dataset.fillna(dataset['carat'].value_counts().index[0])\nprint((dataset==0).sum())\n\n#dataset.iloc[:,6:]=dataset.iloc[:,6:].replace(0, np.NaN)\n#dataset = dataset.fillna(dataset['price'].value_counts().index[0])\n\ndataset['cut'].unique()\ndataset['color'].unique()\ndataset['clarity'].unique()\nprint(dataset['clarity'].value_counts())\nprint(dataset['color'].value_counts())\nprint(dataset['cut'].value_counts())\n\n\n# In[147]:\n\n\nsns.boxplot(x=dataset['carat'])\n\n\n# In[102]:\n\n\n\n\n# X = iqrOutCount(dataset['carat'])\n# X\n\n\n# In[148]:\n\n\nsns.boxplot(x=dataset['x'])\n\n\n# In[149]:\n\n\nsns.boxplot(x=dataset['y'])\n\n\n# In[ ]:\n\n\nsns.boxplot(x=dataset['z'])\n\n\n# In[ ]:\n\n\nsns.boxplot(x=dataset['depth'])\n\n\n# In[ ]:\n\n\nsns.boxplot(x=dataset['table'])\n\n\n# In[ ]:\n\n\nsns.boxplot(x=dataset['price'])\n\n\n# In[150]:\n\n\ndataset.drop(dataset.loc[dataset['clarity']=='XXX'].index, inplace=True)\ndataset.drop(dataset.loc[dataset['clarity']==' '].index, inplace=True)\ndataset.drop(dataset.loc[dataset['cut']==0.3].index, inplace=True)\ndataset.drop(dataset.loc[dataset['cut']=='Wonderful'].index, inplace=True)\ndataset.drop(dataset.loc[dataset['color']== 'AAA'].index, inplace=True)\ndataset.drop(dataset.loc[dataset['color']==0.3].index, inplace=True)\ndataset['cut'].fillna('null entry', inplace = True)\ndataset['cut'] = pd.Categorical(dataset['cut'], ['null entry','Fair','Good','Very Good','Ideal','Premium'], ordered = True)\n\ndataset['cut'] = dataset['cut'].cat.codes\ndataset['color'] = pd.Categorical(dataset['color'], ['D','E','F','G','H','I','J'], ordered = True)\ndataset['color'] = dataset['color'].cat.codes\ndataset['clarity'] = pd.Categorical(dataset['clarity'], ['I1','SI2','SI1','VS2','VS1','VVS2','VVS1','IF'], ordered = True)\ndataset['clarity'] = dataset['clarity'].cat.codes\n#dataset = dataset.fillna(0)\n \ndataset['clarity'].fillna(0, inplace = True)\ndataset['carat'].fillna(0, inplace = True) \ndataset['price'].fillna(0, inplace = True)\n\n\n# 
In[151]:\n\n\n#dataset.loc[dataset.price.isnull()==True]\ndf = dataset.loc[dataset.price==0]\ndf\n\n# dataset.loc[dataset.price==np.NaN]\n\n\n# In[153]:\n\n\nx = dataset.drop(dataset.loc[dataset.price==0].index,axis=0)\n\n\n# In[154]:\n\n\n\n\n# dataset.iloc[:,6:]=dataset.iloc[:,6:].replace(0, np.NaN)\n#dataset = dataset.fillna(dataset.price=0)\n\n\n# In[155]:\n\n\n# regression\n\nallCols = dataset.columns.tolist()\nallCols.remove('depth')\nallCols.remove('x')\nallCols.remove('y')\nallCols.remove('z')\nallCols.remove('table')\nallCols.remove('price')\nallCols\nprint(allCols)\n\n\n# In[156]:\n\n\nx1=x[allCols]\ny1=x.price\n#y_pred=df.price\nynew=df[allCols]\n\n#from sklearn.model_selection import train_test_split\n#x_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.25)\nfrom sklearn.linear_model import LinearRegression\nmodel = LinearRegression()\nmodel.fit(x1,y1)\n\n#predict\ny_pred = model.predict(ynew)\nprint(y_pred)\n\n\n\n\n\n\n# In[157]:\n\n\n# classification\nallCols2 = dataset.columns.tolist()\nallCols2.remove('depth')\nallCols2.remove('x')\nallCols2.remove('y')\nallCols2.remove('z')\nallCols2.remove('table')\nallCols2.remove('cut')\nprint(allCols2)\n\n\n# In[158]:\n\n\ndf1 = dataset.loc[dataset['cut']==0]\ndf1\n\n\n# In[159]:\n\n\np = dataset.drop(dataset.loc[dataset.cut==0].index,axis=0)\n\n\n# In[160]:\n\n\nxp=p[allCols2]\nyp=p.cut\ny_pred2=df1.cut\nynew2=df1[allCols2]\n#print(y_pred2)\n#print(ynew2)\n#print(xp)\n#print(yp)\n#x2=dataset[allCols2]\n#y2=dataset.cut\n#from sklearn.model_selection import train_test_split\n#x_train2, x_test2, y_train2, y_test2 = train_test_split(x2,y2, test_size=0.25)\nfrom sklearn.neighbors import KNeighborsClassifier\nmodel2 = KNeighborsClassifier()\nmodel2.fit(xp,yp)\n#predict\ny_pred2 = model2.predict(ynew2)\nprint(y_pred2)\n\n\n# In[161]:\n\n\npd.crosstab(dataset.carat,dataset.cut, values=dataset.price, aggfunc='mean').fillna('-')\n\n\n# In[25]:\n\n\n\n\n# In[162]:\n\n\npd.crosstab(dataset.carat,dataset.clarity, values=dataset.price, aggfunc='mean').fillna('-')\n\n\n# In[26]:\n\n\n\n\n# In[163]:\n\n\npd.crosstab(dataset.carat,dataset.color, values=dataset.price, aggfunc='mean').fillna('-')\n\n\n# In[164]:\n\n\n\n#print(dataset.isnull().sum())\n\nprint((dataset==0).x.sum())\nMean1 = int(dataset['x'].mean())\nprint(\"Mean of column X :\",Mean1)\nprint((dataset==0).y.sum())\nMean2 = int(dataset['y'].mean())\nprint(\"Mean of column Y :\",Mean2)\nprint((dataset==0).z.sum())\nMean3=(dataset['z'].mean())\nprint(\"Mean of column Z :\",Mean3)\n\n\n# In[165]:\n\n\n# dataset['x'] = np.where((dataset==0).x.sum(), Mean1, dataset['x'])\n\ndataset[dataset.x==0] = dataset[dataset.x==0].replace(0,Mean1)\n\nprint((dataset==0).x.sum())\n\ndataset[dataset.y==0] = dataset[dataset.y==0].replace(0,Mean2)\n\nprint((dataset==0).y.sum())\n\ndataset[dataset.z==0] = dataset[dataset.z==0].replace(0,Mean3)\n\nprint((dataset==0).z.sum())\n\n\ndataset['Depth_Percent']=dataset['z']/((dataset['x']+dataset['y'])/2)\n\n\nprint(dataset[dataset['Depth_Percent'] > 5])\n\n\n# In[58]:\n\n\n\n\n\n# In[166]:\n\n\nplt.hist(dataset['price'], bins = 1000, color = 'red', alpha=0.5)\nplt.xlim(xmin=0, xmax = 20000)\nplt.xlabel(\"Price\")\nplt.ylabel(\"Frequency\")\nplt.title(\"Distribution of Price\")\nplt.show()\n\n\n# In[ ]:\n\n\nsns.distplot(dataset['cut'], kde=False, rug=True);\nplt.ylabel(\"Count\")\nplt.xlabel(\"Cut\")\nplt.title(\"Distribution of Cut\")\nplt.show()\n\n# In[ ]:\n\n\nplt.figure()\nsns.lmplot(x='price', y='carat', data=dataset,\n 
fit_reg=True,line_kws={'color':'red'})\n# tweak using matplotlib\nplt.ylim(0, 6)\nplt.xlim(0,20000 )\nplt.title('Relationship between Price and Carat')\n#plt.ylabel('Y axis')\n#plt.xlabel('X axis')\n# good practice\nplt.show()\n\n\n# In[ ]:\n\n\nplt.figure()\nsns.lmplot(x='price', y='x', data=dataset,\n fit_reg=True,line_kws={'color':'red'})\n# tweak using matplotlib\nplt.ylim(0, 12)\nplt.xlim(0,20000)\nplt.title('Relationship between Price and X ')\n#plt.ylabel('Y axis')\n#plt.xlabel('X axis')\n# good practice\nplt.show()\n\n\n# In[ ]:\n\n\nsns.boxplot('color', 'price', data=dataset)\nplt.ylim(0,20000)\n\n","repo_name":"rithwiksarma/EDA-and-Classification---Diamonds-Dataset","sub_path":"Diamond Data Set EDA.py","file_name":"Diamond Data Set EDA.py","file_ext":"py","file_size_in_byte":7026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"22222319323","text":"from django import forms\nfrom .models import Album, Picture\n\n\nclass AlbumForm(forms.ModelForm):\n class Meta:\n model = Album\n fields = (\"name\",)\n widgets = {\n \"name\": forms.TextInput(\n attrs={\n \"class\": \"form-control\",\n \"required\": True,\n \"placeholder\": \"Album's name...\",\n }\n )\n }\n\n\nclass UploadPictureForm(forms.Form):\n album = forms.ModelChoiceField(\n queryset=Album.objects, empty_label=\"Select album\", label=\"Album\"\n )\n picture = forms.ImageField(\n label=\"Pictures\",\n widget=forms.ClearableFileInput(attrs={\"multiple\": True}),\n required=False,\n )\n\n\nclass UpdatePictureForm(forms.Form):\n album = forms.ModelChoiceField(\n queryset=Album.objects, empty_label=\"Select album\", label=\"Album\"\n )\n picture = forms.ImageField(label=\"Picture\")\n description = forms.CharField(label=\"Description\", widget=forms.Textarea(attrs={}))\n tags = forms.CharField(label=\"Tags\")\n","repo_name":"kkosiba/django-gallery","sub_path":"gallery/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"31284437142","text":"# encoding: utf-8\r\nfrom __future__ import absolute_import, unicode_literals\r\n\r\nfrom django.db.models.fields import reverse_related\r\n\r\nfrom apiview import model, admintools, descriptors\r\nfrom django.db import models\r\n\r\nfrom example.celery import async_call\r\nfrom . 
import constants\r\n\r\n\r\nclass DeletedManager(models.Manager):\r\n\r\n def get_queryset(self):\r\n queryset = super(DeletedManager, self).get_queryset()\r\n return queryset.filter(delete_status=constants.DELETE_CODE.NORMAL.code)\r\n\r\n def get_all_queryset(self):\r\n return super(DeletedManager, self).get_queryset()\r\n\r\n\r\nclass BaseModel(model.BaseModel):\r\n id = models.BigAutoField('主键ID', primary_key=True)\r\n create_time = models.DateTimeField('创建时间', auto_now_add=True, db_index=True, editable=False)\r\n modify_time = models.DateTimeField('修改时间', auto_now=True, db_index=True, editable=False)\r\n delete_status = models.BooleanField('删除状态', choices=constants.DELETE_CODE.get_list(),\r\n default=constants.DELETE_CODE.NORMAL.code, null=False, db_index=True)\r\n remark = models.TextField('备注说明', null=True, blank=True, default='')\r\n\r\n default_manager = models.Manager()\r\n objects = DeletedManager()\r\n\r\n def __str__(self):\r\n if hasattr(self, 'name'):\r\n return self.name\r\n else:\r\n return super(BaseModel, self).__str__()\r\n\r\n class Meta:\r\n abstract = True\r\n\r\n @classmethod\r\n def ex_search_fields(cls):\r\n ret = set()\r\n for field in cls._meta.fields:\r\n if not field.db_index and not field.unique \\\r\n and field.name == 'name' and isinstance(field, models.CharField):\r\n ret.add(field.name)\r\n return ret\r\n\r\n @classmethod\r\n def search_fields(cls):\r\n ret = super(BaseModel, cls).search_fields()\r\n return ret.union(cls.ex_search_fields())\r\n\r\n def delete(self, using=None, keep_parents=False):\r\n self.delete_status = constants.DELETE_CODE.DELETED.code\r\n return self.save(using=using, force_update=True, update_fields=['delete_status', ])\r\n\r\n def save_or_update(self):\r\n if self.pk is None:\r\n self.save(force_insert=True)\r\n else:\r\n self.save_changed()\r\n\r\n\r\nclass ManyToManyRel(reverse_related.ForeignObjectRel):\r\n def __init__(self, field, to, related_name=None, related_query_name=None,\r\n limit_choices_to=None, symmetrical=True, through=None,\r\n through_fields=None, db_constraint=False):\r\n super(ManyToManyRel, self).__init__(\r\n field, to,\r\n related_name=related_name,\r\n related_query_name=related_query_name,\r\n limit_choices_to=limit_choices_to,\r\n )\r\n\r\n self.through = through\r\n\r\n if through_fields and not through:\r\n raise ValueError(\"Cannot specify through_fields without a through model\")\r\n self.through_fields = through_fields\r\n\r\n self.symmetrical = symmetrical\r\n self.db_constraint = db_constraint\r\n\r\n def get_related_field(self):\r\n opts = self.through._meta\r\n field = None\r\n if self.through_fields:\r\n field = opts.get_field(self.through_fields[0])\r\n else:\r\n for field in opts.fields:\r\n rel = getattr(field, 'remote_field', None)\r\n if rel and rel.model == self.model:\r\n break\r\n if field is None:\r\n return None\r\n else:\r\n return field.foreign_related_fields[0]\r\n\r\n\r\nclass ManyToManyField(models.ManyToManyField):\r\n\r\n rel_class = ManyToManyRel\r\n\r\n def __init__(self, to, related_name=None, related_query_name=None, limit_choices_to=None, symmetrical=None,\r\n through=None, through_fields=None, db_constraint=False, db_table=None, swappable=True, **kwargs):\r\n super(ManyToManyField, self).__init__(to, related_name, related_query_name, limit_choices_to, symmetrical,\r\n through, through_fields, db_constraint, db_table, swappable, **kwargs)\r\n\r\n\r\nclass ForeignKey(models.ForeignKey):\r\n\r\n forward_related_accessor_class = 
descriptors.ForwardManyToOneCacheDescriptor\r\n\r\n def __init__(self, to, on_delete=models.DO_NOTHING, related_name=None, related_query_name=None,\r\n limit_choices_to=None, parent_link=False, to_field=None, db_constraint=False, **kwargs):\r\n super(ForeignKey, self).__init__(to, on_delete, related_name, related_query_name, limit_choices_to,\r\n parent_link, to_field, db_constraint, **kwargs)\r\n\r\n\r\nclass OneToOneField(models.OneToOneField):\r\n\r\n forward_related_accessor_class = descriptors.ForwardOneToOneCacheDescriptor\r\n\r\n def __init__(self, to, on_delete=models.DO_NOTHING, to_field=None, db_constraint=False, **kwargs):\r\n super(OneToOneField, self).__init__(to, on_delete, to_field, db_constraint=db_constraint, **kwargs)\r\n\r\n\r\nclass ExportMixin(admintools.ExportMixin):\r\n\r\n def async_export_data(self, func, *args, **kwargs):\r\n async_call(func, *args, **kwargs)\r\n\r\n\r\nclass ImportExportMixin(admintools.ImportExportMixin):\r\n\r\n def async_export_data(self, func, *args, **kwargs):\r\n async_call(func, *args, **kwargs)\r\n","repo_name":"007gzs/dingtalk-django-example","sub_path":"core/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"31"} +{"seq_id":"13449029500","text":"import logging\nimport os\n\nimport torch\n\nlogger = logging.getLogger('RotationNet')\n\n\ndef save_model(arch, model, optimizer, fname):\n \"\"\"\n Save the pytorch model\n\n Parameters\n ----------\n arch : Model architecture (str)\n model : Pytorch model\n optimizer : Pytorch optimizer\n fname : file name to save\n\n Returns\n -------\n Nothing\n \"\"\"\n torch.save({\n 'arch': arch,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }, fname)\n\n\ndef load_model(model, path, map_location=None):\n \"\"\"\n Load model weights for evaluation\n\n Parameters\n ----------\n model : Pytorch model to load weights\n path : Path to saved model weights\n\n Returns\n -------\n Nothing, the weights are updated in the given model\n \"\"\"\n if os.path.isfile(path):\n logger.debug(\"Loading model '{}'\".format(path))\n checkpoint = torch.load(path, map_location=map_location)\n model.load_state_dict(checkpoint['state_dict'])\n logger.debug(\"Loaded model ({}) '{}'\".format(checkpoint['arch'], path))\n del checkpoint\n torch.cuda.empty_cache()\n else:\n logger.warning(\"No model found at '{}'\".format(path))\n","repo_name":"raikilon/geometry-recognition","sub_path":"models/rotation_net/utils/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71873864727","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.compose import make_column_transformer\n\nhotel = pd.read_csv(r'hotel.csv')\n\nX = hotel.copy()\ny = X.pop('is_canceled')\n\nX['arrival_date_month'] = \\\n X['arrival_date_month'].map(\n {'January':1, 'February': 2, 'March':3,\n 'April':4, 'May':5, 'June':6, 'July':7,\n 'August':8, 'September':9, 'October':10,\n 'November':11, 'December':12}\n )\nfeatures_num = [\n \"lead_time\", \"arrival_date_week_number\",\n \"arrival_date_day_of_month\", \"stays_in_weekend_nights\",\n \"stays_in_week_nights\", \"adults\", 
\"children\", \"babies\",\n \"is_repeated_guest\", \"previous_cancellations\",\n \"previous_bookings_not_canceled\", \"required_car_parking_spaces\",\n \"total_of_special_requests\", \"adr\",\n]\nfeatures_cat = [\n \"hotel\", \"arrival_date_month\", \"meal\",\n \"market_segment\", \"distribution_channel\",\n \"reserved_room_type\", \"deposit_type\", \"customer_type\",\n]\n\n#checking if there are any null values\nprint(hotel.isnull().sum())\n\n\n# there are a few missing values so impute the missing one's\ntransformer_num = make_pipeline(\n SimpleImputer(strategy=\"constant\"), \n StandardScaler(),\n)\ntransformer_cat = make_pipeline(\n SimpleImputer(strategy=\"constant\", fill_value=\"NA\"),\n OneHotEncoder(handle_unknown='ignore'),\n)\n\n# stratify - make sure classes are evenlly represented across splits\nX_train, X_valid, y_train, y_valid = \\\n train_test_split(X, y, stratify=y, train_size=0.75)\n\nX_train = preprocessor.fit_transform(X_train)\nX_valid = preprocessor.transform(X_valid)\n\ninput_shape = [X_train.shape[1]]\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\n# defining the model \nmodel = keras.Sequential([\n layers.BatchNormalization(input_shape=input_shape),\n layers.Dense(256,activation='relu'),\n layers.BatchNormalization(),\n layers.Dropout(0.3),\n layers.Dense(256,activation='relu'),\n layers.BatchNormalization(),\n layers.Dropout(0.3),\n layers.Dense(1,activation='sigmoid'),\n])\n\n# adding the optimizer and loss\n\nmodel.compile(\noptimizer='adam',loss='binary_crossentropy',metrics=['binary_accuracy'],)\n\n# defining the early stopping so that the model won't overfit or underfit\n\nearly_stopping = keras.callbacks.EarlyStopping(\n patience=5,\n min_delta=0.001,\n restore_best_weights=True,\n)\n\n# Training the above defined model\nhistory = model.fit(\n X_train, y_train,\n validation_data=(X_valid, y_valid),\n batch_size=512,\n epochs=200,\n callbacks=[early_stopping],\n)\n\n#plotting the graphs to check the loss \n\nhistory_df = pd.DataFrame(history.history)\nhistory_df.loc[:, ['loss', 'val_loss']].plot(title=\"Cross-entropy\")\nhistory_df.loc[:, ['binary_accuracy', 'val_binary_accuracy']].plot(title=\"Accuracy\")\n","repo_name":"imvbhuvan/Machine-Learning","sub_path":"Algorithms/Binary Classification using NN/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40908286052","text":"from os import getenv\nimport psycopg2\nfrom psycopg2 import sql\n\n# 0. Decorator (bem mais complexo, overkill)\n# 1. Gerenciador de contexto with\n# 2. Criar uma funcao que retornasse conn\n# 3. 
Use inheritance and OOP for the connection\r\n\r\n\r\nconfigs = {\r\n \"host\": getenv(\"DB_HOST\"),\r\n \"database\": getenv(\"DB_NAME\"),\r\n \"user\": getenv(\"DB_USER\"),\r\n \"password\": getenv(\"DB_PASSWORD\"),\r\n}\r\n\r\n# conn = psycopg2.connect(**configs)\r\n\r\n\r\nclass DatabaseConnector:\r\n @classmethod\r\n def get_conn_cur(cls):\r\n cls.conn = psycopg2.connect(**configs)\r\n cls.cur = cls.conn.cursor()\r\n\r\n @classmethod\r\n def commit_and_close(cls):\r\n cls.conn.commit()\r\n cls.cur.close()\r\n cls.conn.close()\r\n\r\n @classmethod\r\n def insert_into(cls, payload: dict, table_name: str):\r\n cls.get_conn_cur()\r\n\r\n sql_table_name = sql.Identifier(table_name)\r\n columns = [sql.Identifier(key) for key in payload.keys()]\r\n values = [sql.Literal(value) for value in payload.values()]\r\n\r\n query = sql.SQL(\r\n \"\"\"\r\n INSERT INTO\r\n {sql_table_name} ({columns})\r\n VALUES\r\n ({values})\r\n RETURNING *\r\n \"\"\"\r\n ).format(\r\n sql_table_name=sql_table_name,\r\n columns=sql.SQL(\",\").join(columns),\r\n values=sql.SQL(\",\").join(values),\r\n )\r\n\r\n cls.cur.execute(query)\r\n\r\n inserted_data = cls.cur.fetchone()\r\n\r\n print(\"=\" * 100)\r\n print(query.as_string(cls.cur))\r\n print(\"=\" * 100)\r\n\r\n cls.commit_and_close()\r\n\r\n return inserted_data\r\n\r\n @classmethod\r\n def get_column_names(cls, table_name: str):\r\n cls.get_conn_cur()\r\n\r\n query = sql.SQL(\r\n \"\"\"\r\n SELECT\r\n column_name\r\n FROM\r\n information_schema.COLUMNS\r\n WHERE table_name = {table_name}\r\n ORDER BY ordinal_position;\r\n \"\"\"\r\n ).format(table_name=sql.Literal(table_name))\r\n\r\n cls.cur.execute(query)\r\n\r\n column_names = cls.cur.fetchall()\r\n\r\n # print(\"=\" * 100)\r\n # print(column_names)\r\n # print(\"=\" * 100)\r\n\r\n cls.commit_and_close()\r\n\r\n return [row[0] for row in column_names]\r\n\r\n @classmethod\r\n def select_all(cls, table_name: str):\r\n cls.get_conn_cur()\r\n\r\n # SQL column name \"table_name\"\r\n # SELECT * FROM 'table_name';\r\n sql_table_name = sql.Identifier(table_name)\r\n\r\n query = sql.SQL(\r\n \"\"\"\r\n SELECT * FROM {sql_table_name};\r\n \"\"\"\r\n ).format(sql_table_name=sql_table_name)\r\n\r\n cls.cur.execute(query)\r\n\r\n print(\"=\" * 100)\r\n print(query.as_string(cls.cur))\r\n print(\"=\" * 100)\r\n\r\n result = cls.cur.fetchall()\r\n\r\n cls.commit_and_close()\r\n\r\n return result\r\n\r\n @classmethod\r\n def serializer(cls, data: tuple, keys: list[str]):\r\n return dict(zip(keys, data))\r\n","repo_name":"Kenzie-Academy-Brasil-Developers/q3-demos-turma7","sub_path":"sprint4/demo9/app/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"72460958487","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\nimport sys\nimport time\nimport datetime\n\nimport requests\nimport yaml\n\n\ndef write_new(s, setting):\n url = setting['url']\n data = json.dumps({'text': s, 'timestamp': time.time()})\n resp = requests.post(url, data=data)\n return resp.status_code == 200\n\n\ndef retrieve_all(setting):\n url = setting['url']\n params = {'orderBy': '\"timestamp\"'}\n resp = requests.get(url, params=params)\n results = []\n if resp.status_code == 200:\n data = json.loads(resp.text)\n for d in data.values():\n results += [(d['timestamp'], d['text'])]\n results.sort(reverse=True)\n return results\n\n\nif len(sys.argv) < 2:\n exit(1)\n\nwith open('accounts.yml') as fp:\n setting = yaml.load(fp.read())['firebase']\n setting['url'] = setting['databaseURL'] + '/notes.json'\n\nif sys.argv[1] == 'new':\n msg = ' '.join(sys.argv[2:]).strip()\n\n if msg != '':\n if write_new(msg, setting):\n print('Your 
note was saved.')\nelif sys.argv[1] == 'show':\n notes = retrieve_all(setting)\n for ts, txt in notes:\n date_str = datetime.datetime.fromtimestamp(ts)\\\n .strftime('%Y-%m-%d')\n print(f'{date_str} - {txt}')\n","repo_name":"e0en/57_exercises","sub_path":"python/51_mynotes.py","file_name":"51_mynotes.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"29084030499","text":"import csv\nimport openpyxl\nfrom datetime import datetime, timedelta\n\n\nclass Record:\n def __init__(self, product, production_date, barcode, age):\n self.product = product\n self.production_date = production_date\n self.barcode = barcode\n self.age = age\n\n\nfilename = \"c:/Users/c_mil/Desktop/Dev/data.csv\"\n\nrecords = []\n\nwith open(filename, \"r\") as file:\n reader = csv.DictReader(file)\n headers = [header.strip() for header in reader.fieldnames]\n for row in reader:\n if \"product\" in headers and \"production date\" in headers and \"carton barcode\" in headers:\n production_date = datetime.strptime(\n row[\"production date\"], \"%d/%m/%Y\").date()\n age = (datetime.now().date() - production_date).days\n if age > 14:\n record = Record(row[\"product\"], production_date,\n row[\"carton barcode\"], age)\n records.append(record)\n\nif records:\n workbook = openpyxl.Workbook()\n sheet = workbook.active\n sheet.title = str(datetime.now().date())\n\n headers = [\"Product\", \"Production Date\", \"Carton Barcode\", \"Product Age\"]\n sheet.append(headers)\n\n for record in records:\n sheet.append([record.product, record.production_date,\n record.barcode, record.age])\n\n table = openpyxl.worksheet.table.Table(\n ref=f\"A1:{chr(ord('A') + len(headers) - 1)}{len(records) + 1}\", displayName=sheet.title)\n sheet.add_table(table)\n\n workbook.save(f\"{sheet.title}.xlsx\")\nelse:\n print(\"No records found.\")\n","repo_name":"ChrisTheITstudent/data_project","sub_path":"output_excel_sheet_from_csv.py","file_name":"output_excel_sheet_from_csv.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40962839246","text":"from rlbot.utils.logging_utils import get_logger\r\n\r\nfrom framework.input_formatter.base_input_formatter import BaseInputFormatter\r\nfrom framework.output_formatter.base_output_formatter import BaseOutputFormatter\r\nfrom framework.model.base_model import BaseModel\r\nfrom framework.reward_manager.base_reward_manager import BaseRewardManager\r\nfrom framework.utils import get_repo_directory\r\n\r\n\r\nclass BaseModelHolder:\r\n\r\n use_custom_fit = False\r\n use_custom_sample_action = False\r\n model_output = None\r\n logger = None\r\n\r\n def __init__(self, model: BaseModel, input_formatter: BaseInputFormatter, output_formatter: BaseOutputFormatter,\r\n reward_manager: BaseRewardManager=None):\r\n \"\"\"\r\n\r\n :param model:\r\n :param input_formatter:\r\n :param output_formatter:\r\n :param reward_manager:\r\n \"\"\"\r\n self.logger = get_logger(str(type(self).__name__))\r\n self.model = model\r\n self.input_formatter = input_formatter\r\n self.output_formatter = output_formatter\r\n self.reward_manager = reward_manager\r\n\r\n self.use_custom_fit = not hasattr(self.model.fit, 'is_native')\r\n self.use_custom_sample_action = not hasattr(self.model.predict, 'is_native')\r\n\r\n def initialize_model(self, load=False):\r\n input_layer = self.model.create_input_layer(self.input_formatter)\r\n hidden_layer = 
self.model.create_hidden_layers(input_layer=input_layer)\r\n self.model_output = self.model.create_output_layer(self.output_formatter, hidden_layer=hidden_layer)\r\n if load:\r\n self.__load_model_safely()\r\n self.model.finalize_model()\r\n\r\n def train_step(self, input_array, output_array, rewards=None, batch_size=1):\r\n \"\"\"\r\n Performs a single train step on the data given.\r\n All data (input, output, rewards) should end up producing arrays of the same length\r\n :param input_array: Fed as input to the model this is the data that is expected to produce results.\r\n :param output_array: The expected result that the model should produce.\r\n :param rewards: Optional, rewards are weighted values to say how strongly a certain action should be copied.\r\n :param batch_size: How many are in the array\r\n :return:\r\n \"\"\"\r\n formatted_input = self.input_formatter.create_input_array(input_array, batch_size=batch_size)\r\n formatted_output = self.output_formatter.create_array_for_training(output_array, batch_size=batch_size)\r\n if self.reward_manager is not None:\r\n reward_input = input_array if self.reward_manager.has_input_formatter() else formatted_input\r\n reward_output = output_array if self.reward_manager.has_output_formatter() else formatted_output\r\n created_rewards = self.reward_manager.create_reward(reward_input, reward_output,\r\n existing_rewards=rewards, batch_size=batch_size)\r\n else:\r\n created_rewards = None\r\n\r\n if self.use_custom_fit:\r\n self.model.fit(formatted_input, formatted_output, batch_size=batch_size, rewards=created_rewards)\r\n else:\r\n self.__fit(formatted_input, formatted_output, batch_size=batch_size, rewards=created_rewards)\r\n\r\n def predict(self, prediction_input):\r\n \"\"\"\r\n Predicts an output given the input\r\n :param prediction_input: The input, this can be anything as it will go through a BaseInputFormatter\r\n :return:\r\n \"\"\"\r\n arr = self.input_formatter.create_input_array(prediction_input)\r\n if self.use_custom_sample_action:\r\n output = self.model.predict(arr)\r\n else:\r\n output = self.__predict(arr)\r\n return self.output_formatter.format_model_output(output)\r\n\r\n def finish_training(self, save_model=True):\r\n if save_model:\r\n file_path = self.get_file_path()\r\n print('saving model at:', file_path)\r\n self.model.save(file_path)\r\n\r\n def __fit(self, arr, out, rewards=None, batch_size=1):\r\n raise NotImplementedError()\r\n\r\n def __predict(self, arr):\r\n raise NotImplementedError()\r\n\r\n def get_model_name(self):\r\n return str(type(self.model).__name__)\r\n\r\n def get_file_path(self):\r\n return get_repo_directory() + '/trainer/weights/' + self.get_model_name() + '.mdl'\r\n\r\n def __load_model_safely(self):\r\n try:\r\n self.model.load(self.get_file_path())\r\n except Exception as e:\r\n get_logger(str(type(self).__name__)).warn(\"Unable to load model: \" + str(e))\r\n","repo_name":"SaltieRL/Saltie","sub_path":"framework/model_holder/base_model_holder.py","file_name":"base_model_holder.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"31"} +{"seq_id":"15165991936","text":"class Solution:\n def sortColors(self, nums):\n\n length = len(nums)\n index = 0\n count_2_shifted = 0\n count = 0\n\n while index <= length-count_2_shifted-1:\n print(index, count_2_shifted, nums)\n if nums[index] == 0 and index != 0:\n nums.pop(index)\n nums.insert(0, 0)\n index-=1\n elif nums[index] == 2 and index != length-1:\n 
nums.pop(index)\n nums.append(2)\n index-=1\n count_2_shifted+=1\n index+=1\n count+=1\n return nums\n\nnums = [0, 1, 2, 2, 1, 1, 2, 2, 0, 0, 0, 0, 2, 1]\nprint(\"Before Sort: \")\nprint(nums)\nprint()\n# [0, 1, 2, 2, 1, 1, 2, 2, 0, 0, 0, 0, 2, 1]\n\nSolution().sortColors(nums)\nprint()\nprint(\"After Sort: \")\nprint(nums)\n# [0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]\n","repo_name":"aditya109/data-structures-and-algorithms","sub_path":"dailyProblems/sortColors.py","file_name":"sortColors.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7659732636","text":"\"\"\"\nHere is some general information about the calendar (you may research further if needed).\n· 1 January 1900 was a Monday.\n· April, June, September and November have 30 days, while January, March, May, July, August, October and December have 31 days.\n· February has 28 days, but 29 days in a leap year.\n· A leap year is a year divisible by 4; however, every 100th year is not a leap year unless it is also divisible by 400, in which case it is a leap year.\nDuring the 20th century (1 January 1901 ~ 31 December 2000), how many times in total did the first of the month fall on a Sunday?\n\"\"\"\n\n\nimport datetime\n\ndef getDay_c(a,b,c):\n daylist = ['월', '화', '수', '목', '금', '토', '일'] # Korean weekday names, Monday through Sunday\n return daylist[datetime.date(a,b,c).weekday()]\n\nn = 0\nfor i in range(1901,2001):\n\tfor j in range(1,13):\n\t\tif getDay_c(i,j,1)=='일':\n\t\t\tn+=1\n\nprint(n)\n","repo_name":"PIUphil/project_euler","sub_path":"001~025/problem19.py","file_name":"problem19.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23010091206","text":"import discord\nfrom discord.ext import tasks\nimport os\nfrom dotenv import load_dotenv\nfrom itertools import cycle\nimport requests\nimport json\nimport random\nclient = discord.Client() \nload_dotenv() #Used for loading in the TOKEN to keep private \nTOKEN = os.getenv('TOKEN')\n\nglobal A \nA = '\\U0001F1E6'\nglobal I \nI = '\\U0001F1EE'\nglobal R\nR = '\\U0001F1F7'\nglobal E \nE = '\\U0001F1EA'\nglobal D \nD = '\\U0001F1E9'\n\nglobal oldMessage\noldMessage = 0\n\n@client.event\nasync def on_message(message):\n global oldMessage\n if(oldMessage !=0):\n print(oldMessage.content)\n if message.author == client.user:\n return \n if message.content.lower() == (\"!monkey\"):\n print(\"monkey called\")\n randval1 = random.randrange(490,500)\n randval2 = random.randrange(345,350)\n await message.channel.send(f\"https://www.placemonkeys.com/{randval1}/{randval2}?random\")\n if message.content.lower() == (\"!bundar\"):\n print(\"bundar called\")\n randval1 = random.randrange(490,500)\n randval2 = random.randrange(345,350)\n await message.channel.send(f\"https://www.placemonkeys.com/{randval1}/{randval2}?random\")\n if message.content.lower() == (\"!crazymonkey\"):\n print(\"crazymonkey called\")\n randval1 = random.randrange(490,500)\n randval2 = random.randrange(345,350)\n await message.channel.send(f\"https://www.placemonkeys.com/{randval1}/{randval2}?spooky\")\n if message.content.lower() == (\"!oldiemonkey\"):\n print(\"oldiemonkey called\")\n randval1 = random.randrange(490,500)\n randval2 = random.randrange(345,350)\n await message.channel.send(f\"https://www.placemonkeys.com/{randval1}/{randval2}?greyscale\")\n if message.content.lower() == (\"joe\"):\n randInt2 = random.randint(1,2)\n print(\"joe called\")\n if(randInt2 == 1):\n await message.channel.send(\"mama\")\n elif(randInt2 == 2):\n await message.channel.send(\"biden\")\n if message.author.id == 746508832326287431: \n randInt = random.randint(1,20)\n print(randInt)\n if(randInt == 4 ):\n await 
message.channel.send(\"stop it\")\n if message.content.lower() == (\"egg\"):\n print(\"egg called\")\n await message.channel.send(\"egg\")\n if message.content.lower() == (\"donald\"):\n print(\"duck\")\n await message.channel.send(\"https://tenor.com/view/donald-trump-dancing-maga-trump-gif-18842875\")\n if message.content.lower() == (\"dm egg\"):\n print(\"egg was dm'd\")\n await message.author.send(\"egg\")\n if message.content.lower() == (\"!air\"):\n await oldMessage.add_reaction(A)\n await oldMessage.add_reaction(I)\n await oldMessage.add_reaction(R) \n await oldMessage.add_reaction(E)\n await oldMessage.add_reaction(D)\n oldMessage = message\n\n\n\n\n\n@client.event\nasync def on_connect():\n print(\"Bot connected to the server!\")\nclient.run(TOKEN)\n\n","repo_name":"mhamida292/EggBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43100404706","text":"import os\nimport pytest\n\nfrom azure.core.credentials import AccessToken, AzureKeyCredential\nfrom devtools_testutils import (\n AzureTestCase,\n AzureMgmtPreparer,\n FakeResource,\n ResourceGroupPreparer,\n)\nfrom devtools_testutils.cognitiveservices_testcase import CognitiveServicesAccountPreparer\nfrom azure_devtools.scenario_tests import ReplayableTest\n\n\nREGION = 'westus2'\n\n\nclass FakeTokenCredential(object):\n \"\"\"Protocol for classes able to provide OAuth tokens.\n :param str scopes: Lets you specify the type of access needed.\n \"\"\"\n def __init__(self):\n self.token = AccessToken(\"YOU SHALL NOT PASS\", 0)\n\n def get_token(self, *args):\n return self.token\n\nTEST_ENDPOINT = 'https://test-resource.api.cognitive.microsoft.com'\nTEST_KEY = '0000000000000000'\nTEST_PROJECT = 'test-project'\nTEST_WORKFLOW = 'test-workflow'\n\n\nclass ConversationTest(AzureTestCase):\n FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['Ocp-Apim-Subscription-Key']\n\n def __init__(self, method_name):\n super(ConversationTest, self).__init__(method_name)\n self.scrubber.register_name_pair(os.environ.get(\"AZURE_CONVERSATIONS_ENDPOINT\"), TEST_ENDPOINT)\n self.scrubber.register_name_pair(os.environ.get(\"AZURE_CONVERSATIONS_KEY\"), TEST_KEY)\n self.scrubber.register_name_pair(os.environ.get(\"AZURE_CONVERSATIONS_PROJECT\"), TEST_PROJECT)\n self.scrubber.register_name_pair(os.environ.get(\"AZURE_CONVERSATIONS_WORKFLOW_PROJECT\"), TEST_WORKFLOW)\n\n def generate_fake_token(self):\n return FakeTokenCredential()\n\n\nclass GlobalResourceGroupPreparer(AzureMgmtPreparer):\n def __init__(self):\n super(GlobalResourceGroupPreparer, self).__init__(\n name_prefix='',\n random_name_length=42\n )\n\n def create_resource(self, name, **kwargs):\n rg = FakeResource(\n name=\"rgname\",\n id=\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rgname\"\n )\n\n return {\n 'location': REGION,\n 'resource_group': rg,\n }\n\n\nclass GlobalConversationAccountPreparer(AzureMgmtPreparer):\n def __init__(self):\n super(GlobalConversationAccountPreparer, self).__init__(\n name_prefix='',\n random_name_length=42\n )\n\n def create_resource(self, name, **kwargs):\n if self.is_live:\n return {\n 'location': REGION,\n 'resource_group': \"rgname\",\n 'conv_account': os.environ.get(\"AZURE_CONVERSATIONS_ENDPOINT\"),\n 'conv_key': os.environ.get(\"AZURE_CONVERSATIONS_KEY\"),\n 'conv_project': os.environ.get(\"AZURE_CONVERSATIONS_PROJECT\"),\n 'workflow_project': 
os.environ.get(\"AZURE_CONVERSATIONS_WORKFLOW_PROJECT\")\n }\n return {\n 'location': REGION,\n 'resource_group': \"rgname\",\n 'conv_account': TEST_ENDPOINT,\n 'conv_key': TEST_KEY,\n 'conv_project': TEST_PROJECT,\n 'workflow_project': TEST_WORKFLOW\n }\n","repo_name":"mirespace/python-azure","sub_path":"sdk/cognitivelanguage/azure-ai-language-conversations/tests/testcase.py","file_name":"testcase.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"22814937083","text":"from bs4 import BeautifulSoup\nimport requests\nimport random\n\ndef scraping():\n ng_list = [\"wired\"]\n html = requests.get('https://wired.jp')\n soup = BeautifulSoup(html.text, \"html.parser\")\n article = soup.find(class_=\"listed-article-norm\")\n title = article.h3.string\n for i in ng_list:\n if i in title:\n return\n url = article.a.get(\"href\")\n image = article.img.get(\"data-original\")\n html = requests.get(url)\n soup = BeautifulSoup(html.text, \"html.parser\")\n main = soup.find(class_=\"article-detail\")\n head = soup.find(class_=\"contents-main\")\n reporter = ''\n try:\n reporter = head.find(class_=\"post-credit\").text\n reporter = reporter.split(\"\\r\\n\")[0]\n except:\n print(\"reporter not found\")\n text = ''.join([s.text for s in main.find_all('p')])\n return {'article_text':text,'article_title':title, 'article_url':url,'article_reporter':reporter.replace(\"TEXT BY \", \"\"), 'site_name':'wired','article_image': image}\n\nif __name__ == \"__main__\":\n print(scraping())\n","repo_name":"kanazawa-pri/random-admin","sub_path":"random-admin/app/scrapings/wired.py","file_name":"wired.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28497478609","text":"# Importa la nueva estructura\nfrom menu.models import Combo\n\nclass Carro:\n def __init__(self, request):\n self.request = request\n self.session = request.session\n carro = self.session.get('carro')\n\n if not carro:\n carro={}\n carro = self.session['carro'] = {}\n #else:\n self.carro = carro\n\n def agregar(self, combo):\n if str(combo.id) not in self.carro.keys():\n self.carro[str(combo.id)] = {\n 'combo_id': combo.id,\n 'nombre': combo.nombre,\n 'precio': str(combo.precio),\n 'cantidad': 1,\n 'imagen': combo.imagen.url\n }\n else:\n for key, value in self.carro.items():\n if key == str(combo.id):\n value['cantidad'] = value['cantidad'] + 1\n value['precio'] = float(value['precio']) + combo.precio\n break\n self.guardar_carro()\n\n def guardar_carro(self):\n self.session['carro'] = self.carro\n self.session.modified = True\n\n def eliminar(self, combo):\n combo_id = str(combo.id) if isinstance(combo, Combo) else str(combo)\n if combo_id in self.carro:\n del self.carro[combo_id]\n self.guardar_carro()\n\n def restar_producto(self, combo):\n for key, value in self.carro.items():\n if key == str(combo.id):\n value['cantidad'] = value['cantidad'] - 1\n value['precio'] = float(value['precio']) - combo.precio\n if value['cantidad'] < 1:\n self.eliminar(combo.id)\n break\n self.guardar_carro()\n\n def limpiar_carro(self):\n self.session['carro'] = {}\n self.session.modified = True\n","repo_name":"albabernal03/patrones_creacionales","sub_path":"Proyectoweb/carro/carro.py","file_name":"carro.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
+{"seq_id":"3356477850","text":"def get_parent(e):\n if parent[e] != e:\n parent[e] = get_parent(parent[e])\n return parent[e]\ndef union(e1, e2):\n a, b = get_parent(e1), get_parent(e2)\n if a < b:\n parent[b] = a\n else:\n parent[a] = b\n\nN, M = map(int, input().split())\ngender = [''] + input().split()\nedges = [list(map(int, input().split())) for _ in range(M)]\nedges.sort(key=lambda x:x[2])\nparent = [i for i in range(N+1)]\nans = 0\nfor edge in edges:\n if gender[edge[0]] == gender[edge[1]]:\n continue\n if get_parent(edge[0]) != get_parent(edge[1]):\n union(edge[0], edge[1])\n ans += edge[2]\nfor i in range(1, N+1):\n if parent[i] != 1:\n parent[i] = get_parent(i)\nif sum(parent[1:]) != N:\n print(-1)\nelse:\n print(ans)","repo_name":"SSAFY-algamza/ssafy-algorithm-study","sub_path":"f1rstf1y9/BOJ/BOJ_14621_나만 안되는 연애.py","file_name":"BOJ_14621_나만 안되는 연애.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1852286250","text":"# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nimport unittest, time, re\n\nclass ALinks(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Firefox()\n self.driver.implicitly_wait(30)\n self.base_url = \"http://deanza.edu/\"\n self.verificationErrors = []\n \n def test_a_links(self):\n link_title = {\n \"Office of Equity, Social Justice and Multicultural Education\" : \"De Anza College :: Office of Equity :: Home\",\n \"Donors\" : \"De Anza College :: Friends :: Home\",\n \"Dropping Classes\" : \"De Anza College :: Registration :: Adding and Dropping Classes\",\n \"ETS\" : \"Educational Technology Services : Your District IT Department\",\n \"E-mail\" : \"De Anza College :: Current Students :: You Need a Current Email Address\",\n \"Eco Pass\" : \"De Anza College :: Eco Pass :: Overview\",\n \"Economics Department\" : \"De Anza College :: Economics :: Home\",\n \"Educational Diagnostic Center\" : \"De Anza College :: Educational Diagnostic Center :: Home\",\n \"Equity, Social Justice and Multicultural Education\" : \"De Anza College :: Office of Equity :: Home\",\n }\n \n driver = self.driver\n driver.get(self.base_url + \"directory/dir-az.html\")\n\n for link in link_title:\n title = link_title[link]\n driver.find_element_by_link_text(link).click()\n try: self.assertEqual(title, driver.title)\n except AssertionError as e: self.verificationErrors.append(str(e))\n driver.back()\n\n def tearDown(self):\n self.driver.quit()\n self.assertEqual([], self.verificationErrors)\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"robgarcelon/CIS74_SoftwareQA","sub_path":"Selenium_Webdriver/Deanza_edu_DE_Links.py","file_name":"Deanza_edu_DE_Links.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36436968025","text":"class Solution:\n def canReach(self, arr: List[int], start: int) -> bool:\n seen = set()\n def dfs(arr, pos):\n if pos >= 0 and pos < len(arr):\n nonlocal seen\n if arr[pos] == 0:\n return True\n l,r = pos-arr[pos], pos+arr[pos]\n if r in seen and l in seen:\n return False\n seen.add(l)\n seen.add(r)\n return dfs(arr, l) or dfs(arr, r)\n \n return dfs(arr, 
start)","repo_name":"sinoyuco/leetcode_solutions","sub_path":"array/can_reach_index_zero.py","file_name":"can_reach_index_zero.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20546188286","text":"from django.shortcuts import render, redirect\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import UpdateView\nfrom django.db.models import Q, Count, Case, When\nfrom django.contrib import messages\nfrom django.db import connection\nfrom .models import Post\nfrom comments.forms import FormComments\nfrom comments.models import Comments\n\n\nclass PostIndex(ListView):\n model = Post\n template_name = 'posts/index.html'\n paginate_by = 6\n context_object_name = 'posts_objects'\n\n # Override the query set to change the order in which posts are displayed\n def get_queryset(self):\n qs = super().get_queryset()\n qs = qs.select_related('category_post')\n qs = qs.order_by('-id').filter(published_post=True)\n qs = qs.annotate(\n number_comments=Count(\n Case(\n When(comments__published_comment=True, then=1)\n )\n )\n )\n return qs\n\n # To monitor the quantities of database queries\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['connection'] = connection\n return context\n\n\nclass PostSearch(PostIndex):\n template_name = 'posts/post_search.html'\n\n # Filter the query set to search term\n def get_queryset(self):\n qs = super().get_queryset()\n # print(self.request.GET.get('term'))\n term = self.request.GET.get('term')\n\n if not term:\n return qs\n\n qs = qs.filter(\n Q(title_post__icontains=term) |\n Q(author_post__first_name__iexact=term) |\n Q(data_post__icontains=term) |\n Q(content_post__icontains=term) |\n Q(excerpt_post__icontains=term) |\n Q(category_post__name_category__iexact=term)\n )\n return qs\n\n\nclass PostCategory(PostIndex):\n template_name = 'posts/post_category.html'\n\n # Filter the query set to select the category\n def get_queryset(self):\n qs = super().get_queryset()\n # print(self.kwargs)\n category = self.kwargs.get('category', None)\n if not category:\n return qs\n qs = qs.filter(category_post__name_category__iexact=category)\n return qs\n\n\nclass PostDetails(UpdateView):\n template_name = 'posts/post_detail.html'\n model = Post\n form_class = FormComments\n context_object_name = 'post_objects'\n\n # Override the get_context_data method\n def get_context_data(self, **kwargs):\n # Using what you already have in get_context_data\n context = super().get_context_data(**kwargs)\n # Getting which post we're working on\n post = self.get_object()\n # Selecting information from the database and injecting it in context\n comments = Comments.objects.filter(published_comment=True,\n post_comment=post.id)\n context['comments'] = comments\n return context\n\n def form_valid(self, form):\n post = self.get_object()\n comment = Comments(**form.cleaned_data)\n # Complete the comment table fields provided in which post the comment was made\n comment.post_comment = post\n # Checks if the user is logged in to enter it as user_comment\n if self.request.user.is_authenticated:\n comment.user_comment = self.request.user\n # Save the comment, send a message to the user and redirect to the detail page of the post\n comment.save()\n messages.success(self.request, 'Comentario gravado com sucesso!')\n return redirect('post_details', 
pk=post.id)\n","repo_name":"amilcartostes/Blog","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43958866908","text":"\r\nfrom tracemalloc import stop\r\nimport winsound\r\nimport json\r\nimport time\r\nimport os\r\nimport pyautogui as py\r\nclass Settings():\r\n def __init__(self):\r\n json_file_path = \"config.json\"\r\n with open(\"config.json\", \"r\") as f:\r\n self.data = json.load(f)\r\n self.menu()\r\n f.close()\r\n with open(\"config.json\", \"w\") as f:\r\n json.dump(self.data, f)\r\n \r\n print(self.data)\r\n def sound(self):\r\n frequency = 2500 # Set Frequency To 2500 Hertz\r\n duration = 1000 # Set Duration To 1000 ms == 1 second\r\n winsound.Beep(frequency, duration)\r\n\r\n def menu(self):\r\n print(\"1- Kasa bilgilerini düzenle\\n2-Konum ayarla\")\r\n secim = input(\"Seçim yapınız (1/2): \")\r\n if secim == \"1\":\r\n self.setCoinandCollecter()\r\n elif secim == \"2\":\r\n self.setSmallCase()\r\n else:\r\n print(\"Hatalı seçim!\")\r\n self.menu()\r\n # def setLiveButton(self):\r\n # print(\"Lütfen canlı yayın bölümüne geçmek için bulunan butona tıklayınız..\")\r\n # print(\"Bip sesini duyduktan sonra bırakabilirsiniz.\")\r\n # input(\"Hazır olduğunuzda enter tuşuna basınız.\")\r\n # time.sleep(3)\r\n # coordinat = py.position()\r\n # self.data[\"smallCaseX\"] = coordinat[0]\r\n # self.data[\"smallCaseY\"] = coordinat[1]\r\n # self.sound()\r\n # self.setSmallCase()\r\n def setSmallCase(self):\r\n print(\"Lütfen canlı yayındaki kasa simgesinin üstüne geliniz.\")\r\n print(\"Bip sesini duyduktan sonra bırakabilirsiniz.\")\r\n input(\"Hazır olduğunuzda enter tuşuna basınız.\")\r\n time.sleep(3)\r\n coordinat = py.position()\r\n self.data[\"smallCaseX\"] = coordinat[0]\r\n self.data[\"smallCaseY\"] = coordinat[1]\r\n self.sound()\r\n self.setCaseArea()\r\n\r\n \r\n def setCaseArea(self):\r\n print(\"Lütfen canlı yayında bir kasaya tıklayınız, ardından açılan pencerede sol üst köşeyi gösteriniz.\")\r\n print(\"Bip sesini duyduktan sonra bırakabilirsiniz.\")\r\n input(\"Hazır olduğunuzda enter tuşuna basınız.\")\r\n time.sleep(3)\r\n coordinat = py.position()\r\n self.data[\"SS-Top-LeftX\"] = coordinat[0]\r\n self.data[\"SS-Top-LeftY\"] = coordinat[1]\r\n self.sound()\r\n print(\"Şimdi sağ üst köşeyi gösteriniz.\")\r\n print(\"Bip sesini duyduktan sonra bırakabilirsiniz.\")\r\n input(\"Hazır olduğunuzda enter tuşuna basınız.\")\r\n time.sleep(3)\r\n coordinat = py.position()\r\n self.data[\"SS-Top-RightX\"] = coordinat[0]\r\n self.data[\"SS-Top-RightY\"] = coordinat[1]\r\n self.sound()\r\n print(\"Şimdi sol alt köşeyi gösteriniz.\")\r\n print(\"Bip sesini duyduktan sonra bırakabilirsiniz.\")\r\n input(\"Hazır olduğunuzda enter tuşuna basınız.\")\r\n time.sleep(3)\r\n coordinat = py.position()\r\n self.data[\"SS-Down-LeftX\"] = coordinat[0]\r\n self.data[\"SS-Down-LeftY\"] = coordinat[1]\r\n self.sound()\r\n print(\"Şimdi sağ alt köşeyi gösteriniz.\")\r\n print(\"Bip sesini duyduktan sonra bırakabilirsiniz.\")\r\n input(\"Hazır olduğunuzda enter tuşuna basınız.\")\r\n time.sleep(3)\r\n coordinat = py.position()\r\n self.data[\"SS-Down-RightX\"] = coordinat[0]\r\n self.data[\"SS-Down-RightY\"] = coordinat[1]\r\n self.sound()\r\n self.setOpenButton()\r\n \r\n\r\n # def setCloseButton(self):\r\n # print(\"Şimdi uygulamayı kapatmak için sağ üstten çarpıya basınız.\")\r\n # print(\"Açılan penceredeki kapat butonunun üstüne 
geliniz.\")\r\n # print(\"Bip sesini duyduktan sonra bırakabilirsiniz.\")\r\n # input(\"Hazır olduğunuzda enter tuşuna basınız.\")\r\n # time.sleep(3)\r\n # coordinat = py.position()\r\n # self.data[\"closeButtonX\"] = coordinat[0]\r\n # self.data[\"closeButtonY\"] = coordinat[1]\r\n # self.sound()\r\n\r\n \r\n def setOpenButton(self):\r\n print(\"Şimdi kasayı açmak için tıklanan butonu gösteriniz.\")\r\n print(\"Bip sesini duyduktan sonra bırakabilirsiniz.\")\r\n input(\"Hazır olduğunuzda enter tuşuna basınız.\")\r\n time.sleep(3)\r\n coordinat = py.position()\r\n self.data[\"openButtonX\"] = coordinat[0]\r\n self.data[\"openButtonY\"] = coordinat[1]\r\n self.sound()\r\n # json_dump(self.data,\"config.json\")\r\n \r\n # def setWindowMode(self):\r\n # print(\"Şimdi tam ekran yapmamız için pencereyi uzatın ve ortalayın.\")\r\n # print(\"Ve çift tıklayınca tam ekran olacağı şekilde bir konumda bekleyiniz..\")\r\n # print(\"Bip sesini duyduktan sonra bırakabilirsiniz.\")\r\n # input(\"Hazır olduğunuzda enter tuşuna basınız.\")\r\n # time.sleep(3)\r\n # coordinat = py.position()\r\n # self.data[\"windowModeX\"] = coordinat[0]\r\n # self.data[\"windowModeY\"] = coordinat[1]\r\n # self.sound()\r\n # self.setLiveButton()\r\n\r\n def setCoinandCollecter(self):\r\n jeton = int(input(\"Kasada beklediğiniz minimum jeton sayısını giriniz: \"))\r\n collecter = int(input(\"Kasadan beklediğin en fazla toplayıcı sayısını giriniz: \"))\r\n workTime = int(input(\"Saniye cinsinden programın ne kadar çalışacağını giriniz: \"))\r\n stopTime = int(input(\"Saniye cinsinden programın ne kadar bekleyeceğini giriniz: \"))\r\n self.data[\"workTime\"] = workTime\r\n self.data[\"stopTime\"] = stopTime\r\n self.data[\"minJeton\"] = jeton\r\n self.data[\"maxToplayan\"] = collecter\r\n \r\n \r\nSettings()","repo_name":"erensunar/tiktok-bot","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"tr","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"74891130960","text":"import unittest\n\n\"\"\"\nWhich One Is Your Type?\n\nPython has three main types of data structures formed by\nsmaller objects:\n\n* Lists, written with [] square brackets, such as [1, 2, 4, 8].\n* Tuples, written with () parentheses, such as (7, 8, 9).\n* Sets, written with{} curly brackets, such as {2, 3, 5, 7, 11, 13}.\n\nEach of these types has its own special properties and\npeculiarities that are worth knowing, but this challenge\nis only about transforming these data types into each other.\n\nThis can be done as in the following:\n\n* tuple([1,2,4,8]) returns (1,2,4,8)\n* list({2,3,5,7,11}) returns [2, 3, 5, 7, 11, 13]\n* set((1,2,4)) returns {1,2,4}\n\nGiven two data structures, data1 and data2, return data2\nconverted to the type of data1.\n\nExamples\nconvert([1, 2, 4, 8], (7, 8, 9)) ➞ [7, 8, 9]\n\nconvert((7, 8, 9), [1, 2, 4, 8]) ➞ (1, 2, 4, 8)\n\nconvert([1, 2, 4, 8], {2, 3, 5, 7, 11, 13}) ➞ [2, 3, 5, 7, 11, 13]\n\nconvert({2, 3, 5, 7, 11, 13}, [1, 2, 4, 8]) ➞ {8, 1, 2, 4}\n\nNotes\n* You might have noticed that the last example gives {8, 1, 2, 4}\nrather than{1, 2, 4, 8}. 
This has to do with the fact that in\nsets order doesn't matter, so that Python considers {8, 1, 2, 4}\nand {1, 2, 4, 8} to be the same set.\n\n* In the test cases you won't have to worry about orders:\nthe answers will always have the order given by applying the list(),\ntuple(), set() functions.\n\"\"\"\n\n\ndef convert(data1, data2):\n    if type(data1) == type(data2):\n        return data2\n    elif isinstance(data1, list):\n        return list(data2)\n    elif isinstance(data1, tuple):\n        return tuple(data2)\n    else:\n        return set(data2)\n\n\nclass WhichOneIsYourType(unittest.TestCase):\n    def test_convert(self):\n        self.assertEqual(convert([1, 2, 4, 8], [1, 2, 4, 8]), [1, 2, 4, 8])\n        self.assertEqual(convert([1, 2, 4, 8], (7, 8, 9)), [7, 8, 9])\n        self.assertEqual(convert([1, 2, 4, 8], {2, 3, 5, 7, 11, 13}), [\n                         2, 3, 5, 7, 11, 13])\n        self.assertEqual(convert((7, 8, 9), (7, 8, 9)), (7, 8, 9))\n        self.assertEqual(convert((7, 8, 9), [1, 2, 4, 8]), (1, 2, 4, 8))\n        self.assertEqual(\n            convert((7, 8, 9), {2, 3, 5, 7, 11, 13}), (2, 3, 5, 7, 11, 13))\n        self.assertEqual(convert({2, 3, 5, 7, 11, 13}, [\n                         1, 2, 4, 8]), {8, 1, 2, 4})\n        self.assertEqual(convert({2, 3, 5, 7, 11, 13}, (7, 8, 9)), {8, 9, 7})\n        self.assertEqual(convert({2, 3, 5, 7, 11, 13}, {\n                         2, 3, 5, 7, 11, 13}), {2, 3, 5, 7, 11, 13})\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"carlosesh/python_solutions","sub_path":"edabit/VeryEasy/WhichOneIsYourType.py","file_name":"WhichOneIsYourType.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37318868223","text":"import torch\nimport torch.nn as nn \nimport torch.nn.functional as F \nfrom normalization import ActNorm\n\nclass Convolution2D(nn.Module):\n    def __init__(self, in_dim, out_dim, kernel=3, stride=1, padding=1, std=0.05, last_layer=False, actnorm=False, batchnorm=False):\n        super(Convolution2D, self).__init__()\n        # Set the device \n        device = 'cpu'\n\n        self.conv = nn.Conv2d(\n            in_channels=in_dim, \n            out_channels=out_dim, \n            kernel_size=kernel,\n            stride=stride,\n            padding=padding,\n        )\n\n        self.last_layer = last_layer\n        # No trailing comma here: assigning a one-element tuple would make\n        # self.actnorm always truthy, so the batchnorm branch could never run.\n        self.actnorm = actnorm\n        self.batchnorm = batchnorm\n\n        if self.last_layer: \n            self.conv.weight.data.zero_()\n            self.conv.bias.data.zero_()\n            self.logs = nn.Parameter(torch.zeros(1,out_dim, 1, 1, device=device))\n        \n        else: \n            self.conv.weight.data.normal_(mean=0.0, std=std)\n            if self.actnorm:\n                self.actNorm = ActNorm(c=out_dim)\n            elif self.batchnorm:\n                self.batchnorm = nn.BatchNorm2d(out_dim, affine=True) \n            self.conv.bias.data.zero_()\n        self.to(device)\n    \n    def forward(self, x, scale_factor=3.0):\n        if not self.last_layer:\n            # x = F.pad(x, [1,1,1,1], value=1)\n            x = self.conv(x)\n            if self.actnorm:\n                x, _ = self.actNorm(x)\n            elif self.batchnorm:\n                x = self.batchnorm(x)\n            # else:\n            #     x = F.leaky_relu(x, inplace=False)\n            x = F.leaky_relu(x, inplace=False)\n\n        else:\n            x = self.conv(x)\n            x = x * torch.exp(self.logs * scale_factor)\n        return x\n\nclass NN(nn.Module):\n    def __init__(self, in_dim, mid_dim, out_dim, actNorm=True, batchNorm=False):\n        super(NN, self).__init__()\n        # NN shallow ResNet for the affine coupling layer used as the conditioner \n        \n        self.conv1 = Convolution2D(\n            in_dim=in_dim,\n            out_dim=mid_dim,\n            std=0.05,\n            kernel=3,\n            padding=1,\n            actnorm=actNorm, \n            batchnorm=batchNorm \n        )\n        self.conv2 = Convolution2D(\n            in_dim=mid_dim,\n            out_dim=mid_dim,\n            std=0.05,\n            kernel=1, \n            padding=1,\n            actnorm=actNorm, \n            batchnorm=batchNorm \n        )\n        self.conv3 = Convolution2D(\n            
in_dim=mid_dim,\n            out_dim=out_dim,\n            last_layer=True, \n            actnorm=False,\n            batchnorm=False,\n            kernel=3,\n            padding=0\n        )\n\n    def forward(self, x):\n        x = self.conv1.forward(x)\n        x = self.conv2.forward(x)\n        x = self.conv3.forward(x)\n        return x\n\n\n\n\n\n\n\n\n\n\n# TO DO Alternative ResNet\nclass ResNetBlock(nn.Module):\n    def __init__(self, in_dim, out_dim, kernel=3, stride=1, padding=1, bias=True):\n        super(ResNetBlock, self).__init__()\n        self.batch_norm_in = nn.BatchNorm2d(in_dim)\n        self.w_norm_Conv2D_in = nn.utils.weight_norm(\n            nn.Conv2d(\n                in_channels=in_dim, \n                out_channels=out_dim,\n                kernel_size=kernel, \n                stride=stride,\n                padding=padding,\n                bias=False\n            )\n        )\n\n        self.batch_norm_out = nn.BatchNorm2d(out_dim)\n        self.w_norm_Conv2D_out = nn.utils.weight_norm(\n            nn.Conv2d(\n                in_channels=out_dim, \n                out_channels=out_dim,\n                kernel_size=kernel, \n                stride=stride,\n                padding=padding, \n                bias=True\n            )\n        )\n\n    def forward(self, x):\n        # with torch.autograd.set_detect_anomaly(True):\n        skip_connection = x\n        x = self.batch_norm_in(x)\n        # x = F.relu(x, inplace=True)\n        x = F.relu(x)\n        x = self.w_norm_Conv2D_in(x)\n        \n        # Normalize over out_dim channels after the first conv; batch_norm_in\n        # expects in_dim channels and would fail whenever in_dim != out_dim.\n        x = self.batch_norm_out(x)\n        # x = F.relu(x, inplace=True)\n        x = F.relu(x)\n        x = self.w_norm_Conv2D_out(x)\n        x = x + skip_connection \n        return x\n\n\nclass ResNetBatchNorm(nn.Module):\n    def __init__(self, in_dim, mid_dim, out_dim, n_blocks=1):\n        super(ResNetBatchNorm, self).__init__()\n        self.in_batchnorm = nn.BatchNorm2d(in_dim)\n        self.in_conv = nn.utils.weight_norm(\n            nn.Conv2d(\n                in_channels=in_dim,\n                out_channels=mid_dim, \n                kernel_size=3, \n                stride=1, \n                padding=1,\n                bias=True\n            )\n        )\n        self.skip_c = nn.utils.weight_norm(\n            nn.Conv2d(\n                in_channels=mid_dim,\n                out_channels=mid_dim, \n                kernel_size=1, \n                stride=1, \n                padding=0,\n                bias=True\n            )\n        )\n        self.blocks = nn.ModuleList(\n            [ResNetBlock(mid_dim, mid_dim) for i in range(n_blocks)]\n        )\n        # Skip connection Identity matrix \n        self.skipC = nn.ModuleList(\n            [nn.utils.weight_norm(\n            nn.Conv2d(\n                in_channels=mid_dim,\n                out_channels=mid_dim, \n                kernel_size=3, \n                stride=1, \n                padding=1,\n                bias=True\n            )\n        )for i in range(n_blocks)]\n        )\n\n        self.out_batchnorm = nn.BatchNorm2d(mid_dim)\n        self.out_conv = nn.utils.weight_norm(\n            nn.Conv2d(\n                in_channels=mid_dim,\n                out_channels=out_dim, \n                kernel_size=1, \n                stride=1, \n                padding=0,\n                bias=True\n            )\n        )    \n\n    def forward(self, x):\n        # with torch.autograd.set_detect_anomaly(True):\n        x = self.in_batchnorm(x)\n        x *= 2.\n        # x = F.relu(x,inplace=False)\n        x = F.relu(x)\n        x = self.in_conv(x)\n        skip_c = self.skip_c(x)\n\n        for resBlock, skipc in zip(self.blocks, self.skipC):\n            x = resBlock.forward(x)\n            skip_c += skipc(x)\n\n        x = self.out_batchnorm(skip_c)\n        # x = F.relu(x, inplace=True)\n        x = F.relu(x)\n        x = self.out_conv(x)\n        return x ","repo_name":"liverom017/BachelorThesisNormalizingFlows","sub_path":"nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":6410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"37643855349","text":"import pytest\nfrom pytest import fixture\nimport asyncio\nimport psutil\nfrom genesynth.worker import *\n\nworker = WorkerRegistry()\n\nclass Foo:\n    @worker.register\n    def foo(self, a):\n        return a\n\nclass Bar:\n    @worker.register\n    def foo(self, a):\n        return a\n\n    @worker.register\n    async def bar(self, a):\n        return a\n\n    async def buzz(self, a):\n        return a\n\n@worker.register\ndef foo(a):\n    return a\n\ndef test_registry():\n    r = WorkerRegistry()\n    class Bar:\n        @r.register\n        def bar(self, a):\n            return a\n\n    assert 
len(r) == 1\n\n b = Bar()\n assert b.bar(1) == 1\n assert Bar.bar.__qualname__ in r\n assert r[Bar.bar.__qualname__](b, 1) == 1\n\n f = Foo()\n assert f.foo(1) == 1\n assert Foo.foo.__qualname__ in worker\n assert worker[Foo.foo.__qualname__](f, 1) == 1\n\n assert foo(1) == 1\n assert foo.__qualname__ in worker\n assert worker[foo.__qualname__](1) == 1\n\n@pytest.mark.asyncio\nasync def test_runner():\n workers = 2\n assert list(worker.keys()) == ['Foo.foo', 'Bar.foo', 'Bar.bar', 'foo']\n assert len(worker) == 4\n\n runner = Runner(registry=worker, workers=workers)\n assert runner.executor._max_workers == workers\n\n wraps = runner._wraps(Bar.bar)\n assert wraps.__qualname__ == Bar.bar.__qualname__\n assert wraps.__name__ == 'bar'\n\n assert not runner.executor._processes\n with pytest.raises(ValueError):\n assert 1 == await runner.run(Bar().foo, 1)\n assert len(runner.executor._processes) <= workers\n assert psutil.pid_exists(list(set(runner.executor._processes))[0])\n assert psutil.Process(list(set(runner.executor._processes))[0]).name() in ('Python', 'python', 'pytest')\n assert 1 == await runner.run(Bar().bar, 1)\n assert len(runner.executor._processes) <= workers\n assert 1 == await runner.run(Bar().buzz, 1)\n assert len(runner.executor._processes) <= workers\n\n","repo_name":"sterling312/genesynth","sub_path":"tests/test_worker.py","file_name":"test_worker.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"36422727077","text":"import sys\nimport math\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\ndef usage():\n sys.stderr.write(\"Usage: python \" + sys.argv[0] + \" img.jpg\\n\")\n sys.exit(1)\n\ndef grayscale_histogram(image, level, bin):\n \"\"\" Grayscale histogram\n image = np.array of an image file\n level = spatial levels of an image\n bin = number of bins in the histogram\n \"\"\"\n\n if level < 1 or level > 6:\n sys.stderr.write(\"Grayscale Histogram: Level range: [1,6]\\n\")\n sys.exit(1)\n if bin < 1 or bin > 256:\n sys.stderr.write(\"Grayscale Histogram: Bin range: [1,256]\\n\")\n sys.exit(1)\n\n histogram = np.zeros((pow(2,level-1)**2, bin), dtype=int)\n\n # Convert image to grayscale\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n bin_range = 256/bin\n\n intervals = pow(2,level-1)\n x_interval = int(gray.shape[0]/intervals)\n y_interval = int(gray.shape[1]/intervals)\n\n k = 0\n\n for y_level in range(intervals):\n for x_level in range(intervals):\n for y in range(y_level*y_interval, (y_level+1)*y_interval):\n for x in range(x_level*x_interval, (x_level+1)*x_interval):\n bin_index = int(gray[x][y]/bin_range)\n histogram[k][bin_index] += 1\n k += 1\n\n # Normalize the histograms\n return histogram*intervals**2/gray.size\n\ndef plot_grayscale_histogram(histogram):\n \"\"\" Plots grayscale histogram\n histogram = 2D np.array, the first dimension is for level and the second one\n is for normalized histogram counts\n \"\"\"\n\n level = int(math.log(math.sqrt(histogram.shape[0]), 2) + 1)\n bin = histogram.shape[1]\n\n plt.figure(\"Level:\" + str(level) + \" Bin:\" + str(bin))\n\n print(\"Level:\" + str(level) + \" Bin:\" + str(bin))\n\n for i in range(pow(2,level-1)**2):\n plt.subplot(pow(2,level-1), pow(2,level-1), i+1)\n plt.bar(np.arange(bin), histogram[i])\n #plt.plot(histogram[i])\n\n plt.show()\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2: usage()\n\n img = cv2.imread(sys.argv[1])\n hist256 = grayscale_histogram(img, 2, 64)\n\n 
plot_grayscale_histogram(hist256)","repo_name":"berkeracir/ceng483","sub_path":"hw1/grayscale_histogram.py","file_name":"grayscale_histogram.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"14982106207","text":"age = 20\n\nif age >= 6:\n    print(\"teenager\")\n\nif age>=18:\n    print(\"adult\")\nelse:\n    print(\"kid\")\n\nimport time\n\ndef countdown(n):\n    if n<=0:\n        print(\"Blastoff!\")\n    else:\n        print(n)\n        time.sleep(1)\n        countdown(n-1)\n\n\ncountdown(5)","repo_name":"amandalxllll/oim3640","sub_path":"session08/demo01.py","file_name":"demo01.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"40715695386","text":"from django.urls import path\nfrom . import views\n\n\napp_name = 'chatApp'\n\n\nurlpatterns = [\n    path('', views.index, name=\"index\"),\n    path('<str:room_name>/', views.chatroom, name=\"chatroom\"),\n    path('direct/<str:username>/', views.directMessage, name=\"directMessage\"),\n]","repo_name":"2002vini/chatApplication","sub_path":"chat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23649256623","text":"# f -> format\nimport logging\nimport os\n\nto_seconds = 3\nname_of_unit = 'seconds'\n\n\ndef days_to_units(value):\n    print(value * to_seconds)\n\n\ndef validate_input(value):\n    try:\n\n        days_to_units(int(value))\n\n    except:\n        print('Invalid input: enter only positive non-decimal digits.')\n\n\n# validate_input()\n\n# x = \"\"\n# while x != \"exit\":\n#     x = input(\"Please Type :\\n\")\n#     for element in x.split(\",\"):\n#         validate_input(element)\n#\n#\n\n\nlogger = logging.getLogger(\"main\")\nlogger.warning(\"Hi this is test\")\n","repo_name":"ank1tchaudhary/Python","sub_path":"learning/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"22430875080","text":"#!/usr/bin/env python\n\"\"\"\nChecks frequency of updates from server running CVSS.\n\"\"\"\n\nfrom cvss.Controller import Controller\nfrom cvss.client_loop import Config, client_loop\nfrom time import time\n\nclass MyController(Controller):\n    def __init__(self):\n        self.reset()\n\n    def reset(self):\n        self.calls_to_update = 0\n        self.start_time = time()\n        \n    def update(self, sensor_data):\n        self.calls_to_update += 1\n        if self.calls_to_update == 10:\n            elapsed_time = time() - self.start_time\n            freq = 10 / elapsed_time\n            print(\"===================>>> update rate: {}\".format(freq))\n            self.reset()\n\n# Read config file\nconfig = Config('cvss_config.json')\n    \n# Create the controller and apply it in a continuous loop.\nmy_controller = MyController()\nclient_loop(config, my_controller)\n","repo_name":"m-abdulhak/BVC-Controller","sub_path":"test_frequency.py","file_name":"test_frequency.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"28970958937","text":"import collections\n\nclass Solution:\n    def killProcess(self, pid, ppid, kill):\n        \"\"\"\n        :type pid: List[int]\n        :type ppid: List[int]\n        :type kill: int\n        :rtype: List[int]\n        \"\"\"\n        m = collections.defaultdict(list)\n        for idx, p in enumerate(ppid):\n            m[p].append(pid[idx])\n        a = [kill]\n        r = []\n        while a:\n            p = a.pop()\n            if p in m:\n                
a.extend(m[p])\n r.append(p)\n\n return r","repo_name":"aloklal99/naukari","sub_path":"python/leetcode/p582.py","file_name":"p582.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3458014560","text":"import numpy as np\nimport os\n\nclass CheetahData:\n\n def __init__(self,seq_len):\n all_files = sorted([os.path.join(\"cheetah\",d) for d in os.listdir(\"cheetah\") if d.endswith(\".csv\")])\n\n train_files = all_files[5:25]\n valid_files = all_files[:5]\n\n self.seq_len = seq_len\n self.obs_size = 17\n self.action_size = 6\n self.batch_size = 32\n\n self._load_files(all_files)\n self.train_x, self.train_y = self._load_files(train_files)\n self.valid_x, self.valid_y = self._load_files(valid_files)\n\n \n self.batch_size=1024\n all_x = self.sample_training_set()[0].reshape([-1,self.obs_size])\n mean_x = np.mean(all_x,axis=0)\n std_x = np.std(all_x,axis=0)\n print(\"mean_x: \",str(mean_x))\n print(\"std_x: \",str(std_x))\n\n def _sample_set(self,batch_size,set_x,set_y,rng=np.random,seq_len=None):\n if(seq_len is None):\n seq_len = self.seq_len\n obs = np.empty([seq_len,batch_size,self.obs_size])\n actions = np.empty([seq_len,batch_size,self.action_size])\n \n for b in range(batch_size):\n b_i = rng.randint(len(set_y))\n\n t_start = rng.randint(set_y[b_i].shape[0]-seq_len)\n\n obs[:,b] = set_x[b_i][t_start:t_start+seq_len]\n actions[:,b] = set_y[b_i][t_start:t_start+seq_len]\n\n return (obs,actions)\n\n def sample_training_set(self):\n return self._sample_set(self.batch_size,self.train_x,self.train_y)\n\n def sample_validation_set(self):\n return self._sample_set(256,self.valid_x,self.valid_y,rng=np.random.RandomState(12309))\n\n def _load_files(self,files):\n all_x = []\n all_y = []\n all_rewards = []\n for f in files:\n \n arr = np.loadtxt(f,delimiter=',')\n obs = arr[:,:self.obs_size].astype(np.float32)\n actions = arr[:,self.obs_size:-1].astype(np.float32)\n r = arr[-1,-1].astype(np.float32)\n\n all_x.append(obs)\n all_y.append(actions)\n all_rewards.append(r)\n\n print(\"Loaded file '{}' of length {:d}\".format(f,obs.shape[0]))\n print(\"Loaded {:d} files with mean return {:0.2f} +- {:0.2f}\".format(len(all_rewards),np.mean(all_rewards),np.std(all_rewards)))\n return all_x,all_y\n\n\nif __name__ == \"__main__\":\n data = CheetahData(seq_len=64)","repo_name":"mlech26l/icra_lds","sub_path":"cheetah_data.py","file_name":"cheetah_data.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"17030737578","text":"#!/usr/bin/env python3\n\nimport matplotlib.pyplot as plt\n\nresults = dict()\nwith open('results-SLATE.txt', 'r') as lines:\n for line in lines:\n words = line.split(', ')\n count = len(words)\n if count not in results:\n results[count] = 0\n results[count] += 1\n\nfig, ax = plt.subplots(1,1)\n\nlabels = list(results.keys())\nvalues = list(results.values())\nplt.bar(labels, values, color=(96.0/255.0, 160.0/255.0, 94.0/255.0, 1.0))\nax.set_xlabel('Guesses per answer')\nax.set_ylabel('Words solved')\n\n# Get rid of the border and tick marks which look cheap\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\n#ax.spines['bottom'].set_visible(False)\nax.spines['left'].set_visible(False)\nax.get_yaxis().set_ticks([])\n\nrects = ax.patches\nfor rect, label in zip(rects, values):\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width() / 2, height+0.01, 
label,\n ha='center', va='bottom')\n\nplt.savefig(f'results-SLATE.png')\n","repo_name":"joshstephenson/Wordle-Solver-Rust","sub_path":"chart_results.py","file_name":"chart_results.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70029465683","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pickle\nimport seaborn as sns\nimport sys\nimport os\n\nsys.path.append(os.path.dirname(os.path.abspath('')))\nfrom analyse import gather_training_res as gtr\nfrom utils import parse_cfg as cfg\n\n\ndef get_model_evidence(ker_str):\n # string formed kernel\n ind = domain.index(ker_str)\n evidence = evidence_all[ind]\n # Don't forget negative\n return - evidence\n\n\ndef get_model_rmse(ker_str):\n ind = domain.index(ker_str)\n rmse = rmse_all[ind]\n return rmse\n\n\ndef get_time(ker_str):\n ind = domain.index(ker_str)\n train_time = train_time_all[ind]\n return train_time\n\n\ndef pre_trained_res(elbo_file_name):\n results = pd.read_csv(elbo_file_name)\n domain = results['kernel'].values.tolist()\n evidence_all = results['elbo'].values.tolist()\n train_time_all = results['time'].values.tolist()\n rmse_all = results['rmse'].values.tolist()\n\n return domain, evidence_all, train_time_all, rmse_all\n\n\ndef load_data(datafile):\n data = pd.read_csv(datafile)\n data = np.array(data)\n data = data.astype(float)\n dim = data.shape[1]\n X = data[:, 0:dim - 1]\n y = data[:, -1].reshape(-1, 1)\n return X, y\n\n\ndef random_selection(num, randseed):\n np.random.seed(randseed)\n inds = np.arange(len(domain))\n np.random.shuffle(inds)\n rand_ker = np.array(domain)[inds[0:num]]\n rand_elbo = np.array(evidence_all)[inds[0:num]]\n rand_rmse = np.array(rmse_all)[inds[0:num]]\n rand_train_time = np.array(train_time_all)[inds[0:num]]\n\n best_ind = []\n\n for i in range(num):\n best_ind.append(np.argmax(rand_elbo[0:i + 1]))\n\n record_best_ker = rand_ker[best_ind]\n record_elbo = rand_elbo[best_ind]\n record_rmse = rand_rmse[best_ind]\n\n obs_time = []\n acc_time = []\n for ker in rand_ker:\n obs_time.append(get_time(ker))\n acc_time.append(sum(obs_time))\n\n return record_best_ker, record_elbo, record_rmse, obs_time, acc_time\n\n\ndef random_selection_m(iternum, randnum):\n avg_time = np.mean(np.array(train_time_all))\n\n best_ker_elbos = []\n best_ker_rmses = []\n for i in range(randnum):\n _, elbos, rmses, _, _ = random_selection(iternum, i)\n best_ker_elbos.append(elbos)\n best_ker_rmses.append(rmses * datastd)\n\n best_ker_elbos = np.array(best_ker_elbos)\n avg_elbos = np.mean(best_ker_elbos, 0)\n avg_elbos_std = np.std(best_ker_elbos, 0)\n avg_rmses = np.mean(best_ker_rmses, 0)\n avg_rmse_std = np.std(best_ker_rmses, 0)\n acc_time = avg_time * np.array(range(iternum)) + avg_time\n return acc_time, avg_elbos, avg_rmses, avg_elbos_std, avg_rmse_std\n\n\ndef plot_rmse_cmp(elbo_file_name, bayesian_file_name, bo_result_filename, datastd, gpu_num=5):\n x = plt.cm.get_cmap('tab10')\n colors = x.colors[1:5]\n blue = sns.color_palette(\"Set2\")[2]\n with open(bo_result_filename, 'rb') as fin:\n res = pickle.load(fin)\n obs_time = res[\"obs_time\"]\n bo_time = res[\"bo_time\"]\n bomins = res[\"best_ker_seen_elbo\"]\n best_ker_seen = res['best_ker_seen']\n best_ker_rmse = []\n for ker in best_ker_seen:\n best_ker_rmse.append(get_model_rmse(ker))\n total_time = [max(obs_time[0:4]) + bo_time[i] + sum(obs_time[4:i + 4]) for i in range(len(bomins))]\n best_ker_rmse = datastd * 
np.array(best_ker_rmse)\n acc_time, avg_elbos, avg_rmses, avg_elbos_std, avg_rmse_std = random_selection_m(45, 50)\n\n bayesian_results = pd.read_csv(bayesian_file_name)\n bayesian_time = bayesian_results['time'].values / gpu_num\n bayesian_single = bayesian_results['single'].values * datastd\n bayesian_bma = bayesian_results['Bayesian'].values * datastd\n\n f = plt.figure(figsize=(4.2, 3))\n err = plt.errorbar(acc_time, avg_rmses, avg_rmse_std, label='Random', color='cornflowerblue')\n err[-1][0].set_linestyle('-.')\n plt.plot(total_time, best_ker_rmse, label='BO', color=colors[0])\n # plt.plot(acc_time, avg_rmses, label='random')\n plt.plot(bayesian_time, bayesian_single, label='VBKS-s', color=colors[2])\n plt.plot(bayesian_time, bayesian_bma, label='VBKS', color=colors[1])\n\n plt.xlabel('Time elapsed (s)', fontsize=20)\n plt.ylabel('RMSE', fontsize=20)\n plt.legend(fontsize=11)\n plt.xticks(fontsize=12)\n plt.yticks(fontsize=12)\n plt.title('VBKS vs BO', fontsize=20)\n # plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.2f}')) # 2 decimal places\n result_fig = elbo_file_name[0:-4] + '_VBKSvsBOvsRand' + '_rmse.pdf'\n plt.savefig(result_fig, bbox_inches='tight', pad_inches=0)\n print('Results saved in:')\n print(os.path.dirname(elbo_file_name))\n return best_ker_seen\n\n\ndef plot_rmse_cmp_multi(bayesian_file_name, bo_result_filename, datastd, gpu_num=5):\n x = plt.cm.get_cmap('tab10')\n colors = x.colors[1:5]\n with open(bo_result_filename, 'rb') as fin:\n res = pickle.load(fin)\n obs_time = res[\"obs_time\"]\n bo_time = res[\"bo_time\"]\n zero = np.array([0])\n bo_time = np.concatenate((zero, bo_time))\n bomins = res[\"best_ker_seen_elbo\"]\n best_ker_seen = res['best_ker_seen']\n best_ker_rmse = []\n for ker in best_ker_seen:\n best_ker_rmse.append(get_model_rmse(ker))\n total_time = [max(obs_time[0:4]) + bo_time[i] + sum(obs_time[4:i + 4]) for i in range(len(bomins))]\n best_ker_rmse = datastd * np.array(best_ker_rmse)\n\n acc_time, avg_elbos, avg_rmses, avg_elbos_std, avg_rmse_std = random_selection_m(45, 50)\n\n bayesian_results = pd.read_csv(bayesian_file_name)\n bayesian_time = bayesian_results['time'].values / gpu_num\n bayesian_single = bayesian_results['single_mean'].values * datastd\n bayesian_bma = bayesian_results['Bayesian_mean'].values * datastd\n\n bayesian_single_std = bayesian_results['single_std'].values * datastd\n bayesian_bma_std = bayesian_results['Bayesian_std'].values * datastd\n\n f = plt.figure(figsize=(4.2, 3))\n err = plt.errorbar(acc_time, avg_rmses, avg_rmse_std, label='Random', color='cornflowerblue')\n err[-1][0].set_linestyle('-.')\n plt.plot(total_time, best_ker_rmse, label='BO', color=colors[0])\n\n plt.errorbar(bayesian_time, bayesian_single, bayesian_single_std, label='VBKS-s',\n color=colors[2])\n plt.errorbar(bayesian_time, bayesian_bma, bayesian_bma_std, label='VBKS', color=colors[1])\n\n plt.xlabel('Time elapsed (s)', fontsize=20)\n plt.ylabel('RMSE', fontsize=20)\n plt.legend(fontsize=11)\n plt.xticks(fontsize=12)\n plt.yticks(fontsize=12)\n plt.title('VBKS vs BO', fontsize=22)\n result_fig = bayesian_file_name[0:-4] + '_VBKSvsBOvsRand' + '_rmse_multi.pdf'\n plt.savefig(result_fig, bbox_inches='tight', pad_inches=0)\n print('Results saved in:')\n print(os.path.dirname(bayesian_file_name))\n\n return\n\n\nif __name__ == \"__main__\":\n cfg_file = '../../config/swiss_cfg_1.json'\n file_str = 'swiss_top10'\n prior_pg = 0.5\n seed = 10\n\n # cfg_file = '../../config/air_cfg_1.json'\n # file_str = 'temper_top10'\n # prior_pg = 0.5\n # 
seed = 10\n\n settings, paths, wf = cfg.get_settings(cfg_file, 'full')\n elbo_file_name = '../../results/' + wf + '/' + 'fulldata_summary.csv'\n domain, evidence_all, train_time_all, rmse_all = pre_trained_res(elbo_file_name)\n datafile = '../'+paths['datafile']\n\n datastd = 1\n\n _, _, wf_sub = cfg.get_settings(cfg_file, 'sub')\n bayesian_file_name = '../../results/' + wf_sub + '_rnd' + str(seed) + '/' + 'plots_res/' + file_str + \"_RMSE_comparison_\" + 'ss' + str(\n 1000) + '_p' + str(prior_pg) + \"_normalized.csv\"\n subset_size = 200\n bo_res_file = '../../results/' + wf + '/' + 'bo_subsize' + str(subset_size) + '.pkl'\n plot_rmse_cmp(elbo_file_name, bayesian_file_name, bo_res_file, datastd)\n\n # ==averaging multiple runs:\n # saving_folder = '../../results/multiple_results/'\n # if not os.path.exists(saving_folder):\n # os.mkdir(saving_folder)\n # random_seeds = [1, 2, 3]\n # result_folders = ['../../results/' + wf_sub + '_rnd' + str(seed) + '/' for seed in random_seeds]\n # bayesian_file_name = gtr.get_bks_res_multi_seeds(result_folders, file_str, 1000, 0.5, saving_folder)\n # plot_rmse_cmp_multi(bayesian_file_name, bo_res_file, datastd)\n","repo_name":"lemelondeau/VBKS","sub_path":"src/compare_BO/plot_comparison.py","file_name":"plot_comparison.py","file_ext":"py","file_size_in_byte":8212,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"33399831596","text":"\"\"\"Classes and functions for manipulating system internal coordinate trajectories.\"\"\"\n\n# . Currently:\n#\n# - Only distances are supported, although it would be straightforward to add other ICs.\n#\n# - All ICs need to be stated specifically. It would be nice to have also classes of IC too\n# (e.g. all water oxygens within X Angstroms of a given atom) without specific indexation.\n# Would need pairlist and updating mechanism.\n#\n\nimport glob, os, os.path, random\n\nfrom pCore import AttributableObject , \\\n Pickle , \\\n PickleFileExtension , \\\n Unpickle\nfrom .ExportImport import _Exporter , \\\n _Importer\nfrom .TrajectoryMixin import TrajectoryMixin\n\n#===================================================================================================================================\n# . Parameters.\n#===================================================================================================================================\n# . Default block size.\n_DefaultBlockSize = 1024\n\n# . Block naming.\n_BlockPrefix = \"block\"\n_BlockPostfix = PickleFileExtension\n\n# . Header and footer naming.\n_FooterName = \"footer\"\n_HeaderName = \"header\"\n\n#===================================================================================================================================\n# . 
Reader class.\n#===================================================================================================================================\nclass SystemICTrajectoryReader ( AttributableObject ):\n    \"\"\"Class for reading system IC trajectories.\"\"\"\n\n    _attributable = dict ( AttributableObject._attributable )\n    _attributable.update ( { \"blocks\"          : 0    ,\n                             \"blockSize\"       : 0    ,\n                             \"currentBlock\"    : -1   ,\n                             \"current\"         : 0    ,\n                             \"data\"            : None ,\n                             \"distanceCutOff\"  : 0.0  ,\n                             \"distanceIndices\" : None ,\n                             \"isTrajectory\"    : True ,\n                             \"numberOfFrames\"  : -1   ,\n                             \"numberOfReads\"   : 0    ,\n                             \"owner\"           : None ,\n                             \"path\"            : None } )\n\n    def Close ( self ):\n        \"\"\"Close the trajectory.\"\"\"\n        pass\n\n    @classmethod\n    def FromPathAndOwner ( selfClass, path, owner ):\n        \"\"\"Constructor given path, owner and other options.\"\"\"\n        self = selfClass.WithOptions ( owner = owner, path = path )\n        self.Open ( )\n        return self\n\n    def Open ( self ):\n        \"\"\"Open the trajectory.\"\"\"\n        # . Check that the trajectory exists, is a directory and is readable.\n        if os.access ( self.path, os.F_OK ) and os.access ( self.path, os.R_OK ) and os.path.isdir ( self.path ):\n            # . Check for a valid header.\n            if not os.path.exists ( os.path.join ( self.path, _HeaderName + _BlockPostfix ) ): raise IOError ( \"Unable to find trajectory header.\" )\n            # . Find the number of blocks.\n            self.blocks = len ( glob.glob ( os.path.join ( self.path, _BlockPrefix + \"*\" + _BlockPostfix ) ) )\n        # . Invalid trajectory.\n        else: raise IOError ( \"Invalid or non-existent trajectory.\" )\n\n    def ReadBlock ( self ):\n        \"\"\"Read a block of data.\"\"\"\n        if self.currentBlock < self.blocks:\n            self.data = Unpickle ( os.path.join ( self.path, _BlockPrefix + \"{:d}\".format ( self.currentBlock ) + _BlockPostfix ) )\n            self.blockSize = len ( self.data )\n            self.current = 0\n            self.currentBlock += 1\n        else: raise IndexError ( \"Invalid block index.\" )\n\n    def ReadFooter ( self ):\n        \"\"\"Read the footer.\"\"\"\n        pass\n\n    def ReadHeader ( self ):\n        \"\"\"Read the trajectory header.\"\"\"\n        header = Unpickle ( os.path.join ( self.path, _HeaderName + _BlockPostfix ) )\n        for ( key, value ) in header.items ( ): setattr ( self, key, value )\n        return header\n\n    def RestoreOwnerData ( self ):\n        \"\"\"Restore data from a frame to the owner.\"\"\"\n        if self.current >= self.blockSize:\n            if self.currentBlock >= self.blocks:\n                self.numberOfFrames = self.numberOfReads\n                return False\n            else:\n                self.ReadBlock ( )\n        self.owner.scratch.icTerms = self.data[self.current] # . Put data into scratch.\n        self.current += 1\n        self.numberOfReads += 1\n        return True\n\n    def ReturnAllFrameData ( self ):\n        \"\"\"Return all frame data as a list.\"\"\"\n        data = []\n        self.currentBlock = 0\n        for i in range ( self.blocks ):\n            self.ReadBlock ( )\n            data.extend ( self.data )\n        return data\n\n#===================================================================================================================================\n# . 
Writer class.\n#===================================================================================================================================\nclass SystemICTrajectoryWriter ( AttributableObject ):\n    \"\"\"Class for writing system IC trajectories.\"\"\"\n\n    _attributable = dict ( AttributableObject._attributable )\n    _attributable.update ( { \"blocks\"          : 0                 ,\n                             \"blockSize\"       : _DefaultBlockSize ,\n                             \"current\"         : 0                 ,\n                             \"data\"            : None              ,\n                             \"distanceCutOff\"  : 0.0               ,\n                             \"distanceIndices\" : None              ,\n                             \"isAppendable\"    : False             ,\n                             \"isTrajectory\"    : True              ,\n                             \"numberOfFrames\"  : 0                 ,\n                             \"numberOfWrites\"  : 0                 ,\n                             \"owner\"           : None              ,\n                             \"path\"            : None              } )\n\n    def _CheckOptions ( self ):\n        \"\"\"Check options.\"\"\"\n        super ( SystemICTrajectoryWriter, self )._CheckOptions ( )\n        isOK = ( self.distanceCutOff is not None ) and ( self.distanceCutOff > 0.0 ) and \\\n               ( self.distanceIndices is not None ) and ( len ( self.distanceIndices ) > 0 )\n        if not isOK: raise Exception ( \"Invalid options to IC trajectory.\" )\n\n    def Close ( self ):\n        \"\"\"Close the trajectory.\"\"\"\n        self.WriteBlock ( )\n        self.WriteFooter ( )\n\n    @classmethod\n    def FromPathAndOwner ( selfClass, path, owner, append = False, distanceCutOff = None, distanceIndices = None ):\n        \"\"\"Constructor given path, owner and other options.\"\"\"\n        self = selfClass.WithOptions ( distanceCutOff  = distanceCutOff  ,\n                                       distanceIndices = distanceIndices ,\n                                       isAppendable    = append          ,\n                                       owner           = owner           ,\n                                       path            = path )\n        self.Open ( )\n        return self\n\n    def Open ( self ):\n        \"\"\"Open the trajectory.\"\"\"\n        pathExists = os.access ( self.path, os.F_OK )\n        if pathExists:\n            if not os.path.isdir ( self.path ): raise IOError ( \"Trajectory exists that is not a directory.\" )\n        else:\n            os.mkdir ( self.path )\n        if not os.access ( self.path, os.W_OK ): raise IOError ( \"Trajectory is not writeable.\" )\n        if pathExists:\n            if self.isAppendable:\n                self.blocks = len ( glob.glob ( os.path.join ( self.path, _BlockPrefix + \"*\" + _BlockPostfix ) ) )\n            else:\n                for target in glob.glob ( os.path.join ( self.path, \"*\" ) ): os.remove ( target )\n\n    def WriteBlock ( self ):\n        \"\"\"Write a block of data.\"\"\"\n        if self.current > 0:\n            Pickle ( os.path.join ( self.path, _BlockPrefix + \"{:d}\".format ( self.blocks ) + _BlockPostfix ), self.data )\n            self.blocks += 1\n            self.current = 0\n            self.data = []\n\n    def WriteFooter ( self ):\n        \"\"\"Write a footer.\"\"\"\n        pass\n\n    def WriteHeader ( self ):\n        \"\"\"Write the trajectory header.\"\"\"\n        # . Use the same key spelling as the reader's distanceCutOff attribute,\n        # . otherwise ReadHeader restores the cut-off under a different name.\n        header = { \"distanceCutOff\"  : self.distanceCutOff ,\n                   \"distanceIndices\" : self.distanceIndices }\n        Pickle ( os.path.join ( self.path, _HeaderName + _BlockPostfix ), header )\n        self.data = []\n\n    def WriteOwnerData ( self ):\n        \"\"\"Write data from the owner to a frame.\"\"\"\n        if self.current >= self.blockSize: self.WriteBlock ( )\n        crd3 = self.owner.coordinates3\n        ics = []\n        for ( i, j ) in self.distanceIndices:\n            d = crd3.Distance ( i, j )\n            if d <= self.distanceCutOff:\n                ics.append ( ( i, j, d ) )\n        self.data.append ( ics )\n        self.current += 1\n        self.numberOfFrames += 1\n        self.numberOfWrites += 1\n\n#===================================================================================================================================\n# . 
Exporter and importer definitions.\n#===================================================================================================================================\n_Exporter.AddHandler ( { TrajectoryMixin : SystemICTrajectoryWriter.FromPathAndOwner } , [ \"ptIC\" ], \"System Internal Coordinate Trajectory\" )\n_Importer.AddHandler ( { TrajectoryMixin : SystemICTrajectoryReader.FromPathAndOwner } , [ \"ptIC\" ], \"System Internal Coordinate Trajectory\" )\n\n#===================================================================================================================================\n# . Testing.\n#===================================================================================================================================\nif __name__ == \"__main__\":\n pass\n","repo_name":"pdynamo/pDynamo3","sub_path":"pBabel/SystemICTrajectory.py","file_name":"SystemICTrajectory.py","file_ext":"py","file_size_in_byte":10204,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"} +{"seq_id":"37141145376","text":"from parex import ParExForConditionalGeneration, ParExModel\nfrom parex import ParExDataLoader\nfrom transformers import AutoTokenizer\n\ndataloader = ParExDataLoader('facebook/bart-base', 'sentence-transformers/all-MiniLM-L6-v2', 80, 40)\n[train_dataloader] = dataloader.get_dataloader(batch_size=1, types=['train'])\nfrom parex import ParExModel\nmodel = ParExModel(\n 'sentence-transformers/all-MiniLM-L6-v2',\n 'parex_model/v1/gen',\n load_pretrained_mapping=True,\n mapping_ck='parex_model/v1/map')\ngen = ParExForConditionalGeneration(model)\ntokenizer = AutoTokenizer.from_pretrained('facebook/bart-base')\nlimit = 10\nfor i, batch in enumerate(train_dataloader):\n out = gen.generate(batch)[0]\n print(tokenizer.decode(out, skip_special_tokens=True))\n if i > limit:\n break","repo_name":"TokisakiKurumi2001/parex","sub_path":"infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37044302770","text":"#定义一个”老师“的语言\r\nyou_need_replace_this_with_name_you_give = '''\r\n老师:年龄 职称 姓名 动作 授课门类 \r\n姓名:张三 | 李四 | 王二\r\n年龄:40岁 | 50岁 | 60岁 \r\n职称:教授 | 副教授 | 讲师\r\n动作:教 | 任教 | 玩耍 | 不知道\r\n授课门类:数学 | 哲学 | 经济 | 计算机\r\n'''\r\n#定义一个\"学生\"的语言\r\nyou_need_replace_this_with_name_you_give_1 = '''\r\n学生:年龄 姓名 动作 年级 专业课程\r\n姓名:小二 | 小三 | 小四\r\n年龄:21 | 22 | 23\r\n动作: 学 | 学习 | 打架 | 赌博 \r\n专业课程:数学 | 哲学 | 经济 | 计算机\r\n'''\r\n\r\nimport random\r\ndef creat_grams(garmmar_str,split = ':',line_spline = \"\\n\"):\r\n grammer ={}\r\n for line in garmmar_str.split(line_spline):\r\n if not line.split():\r\n continue\r\n else:\r\n ext, smat = line.split(split)\r\n grammer[ext] = [s.split() for s in smat.split(\"|\")]\r\n return grammer\r\n\r\n#print(creat_grams(you_need_replace_this_with_name_you_give))\r\ndef gereate(garm,target):\r\n if target not in garm:\r\n return target\r\n else:\r\n expended = [gereate(garm,t) for t in random.choice(garm[target])]\r\n return ''.join([e for e in expended])\r\n\r\n#print(gereate(creat_grams(you_need_replace_this_with_name_you_give),target=\"老师\"))\r\n\r\ndef gereate_n(garm,target):\r\n n = 10\r\n gen = []\r\n for i in range(n):\r\n if target not in garm:\r\n return target\r\n else:\r\n expended = [gereate(garm,t) for t in random.choice(garm[target])]\r\n gen.append(expended)\r\n return (gen)\r\n#print(gereate_n(creat_grams(you_need_replace_this_with_name_you_give),target=\"老师\"))\r\nprint([''.join(f) for f in 
gereate_n(creat_grams(you_need_replace_this_with_name_you_give),target=\"老师\")])\r\n","repo_name":"SevenMpp/Deep-course-learning","sub_path":"seatwork_01.py","file_name":"seatwork_01.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71531348563","text":"\"\"\"empty message\n\nRevision ID: eeac7b2f4174\nRevises: 51fe9c5b48ed\nCreate Date: 2019-07-03 17:11:26.310557\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'eeac7b2f4174'\ndown_revision = '51fe9c5b48ed'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('service', sa.Column('artifact_name', sa.String(), nullable=True))\n op.drop_column('service', 'docker_image')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('service', sa.Column('docker_image', sa.VARCHAR(), autoincrement=False, nullable=True))\n op.drop_column('service', 'artifact_name')\n # ### end Alembic commands ###\n","repo_name":"gnydick/orch","sub_path":"migrations/versions/eeac7b2f4174_.py","file_name":"eeac7b2f4174_.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40916156966","text":"from sklearn.neighbors import KNeighborsClassifier\nimport pandas as pd\nimport numpy as np\n\nseed = 7\nnp.random.seed(seed)\ndf = pd.read_excel(r'..\\data\\data.xlsx',header = None)\nX = df.values\nx_train = X[0:153,0:-7]\ny_train = X[0:153,-7]\nx_val = X[153:170,0:-7]\ny_val = X[153:170,-7]\n\nknn = KNeighborsClassifier()\n\nknn.fit(x_train,y_train)\ny_pred = knn.predict(x_val)\ndef accuracy(y_pred,y_true):\n N = y_true.shape[0]\n count = 0\n for i in range(N):\n count = count + np.array_equal(y_pred[i],y_true[i])\n return count/N\nacc = accuracy(y_pred,y_val)\nprint(\"val accuracy:\",acc)\nprint(y_pred)","repo_name":"xiaoyuan-sudo/CDs","sub_path":"otherMLModels/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"26834227452","text":"# HigherOrLower\r\n\r\nimport random\r\n\r\n# Card constants\r\nSUIT = ('Schoppen', 'Harten', 'Klaveren', 'Ruiten')\r\nRANK = ('Aas', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Boer',\r\n 'Vrouw', 'Heer')\r\nNCARDS = 8\r\n\r\n\r\n# Pass in a deck and this function returns a random card from the deck\r\ndef get_card(deck_list_in):\r\n this_card = deck_list_in.pop() # pop one off the top of the deck\r\n return this_card\r\n\r\n\r\n# Pass in a deck and this function returns a shuffled copy of the deck\r\ndef shuffle(deck_list_in):\r\n deck_list_out = deck_list_in.copy() # make a copy of the starting deck\r\n random.shuffle(deck_list_out)\r\n return deck_list_out\r\n\r\n\r\n# Main code\r\nprint(\"=\" * 30)\r\nprint(' HOGER OF LAGER '.center(30, \"=\"))\r\nprint(\"=\" * 30)\r\nprint('\\nWelkom bij Hoger of Lager.')\r\nprint('Je moet kiezen of de volgende kaart hoger of lager is dan de \\\r\nhuidige kaart.')\r\nprint('Als je het goed hebt krijg je 20 punten, heb je het fout dan verlies \\\r\nje 15 punten.')\r\nprint('Je begint met 50 punten. 
Succes!')\r\nprint()\r\n\r\nstarting_deck_list = []\r\n\r\ni = 1\r\nfor suit in SUIT:\r\n for this_value, rank in enumerate(RANK):\r\n card_dict = {'rank':rank, 'suit':suit, 'value':this_value + 1}\r\n starting_deck_list.append(card_dict)\r\n # Show cards added to dict\r\n # print(f\"{i:0>2}. {card_dict['suit']} {card_dict['rank']}\")\r\n i += 1\r\n\r\nscore = 50\r\n\r\nwhile True:\r\n print()\r\n game_deck_list = shuffle(starting_deck_list)\r\n current_card_dict = get_card(game_deck_list)\r\n current_card_rank = current_card_dict['rank']\r\n current_card_value = current_card_dict['value']\r\n current_card_suit = current_card_dict['suit']\r\n\r\n print(\"We openen met een\", current_card_suit, current_card_rank)\r\n\r\n for card_number in range(0, NCARDS): # play one game of this many cards\r\n\r\n print(f\"\\nKaart {card_number + 1}:\")\r\n\r\n answer = input(\"Is de volgende kaart hoger of lager (h of l): \")\r\n answer = answer.casefold()\r\n next_card_dict = get_card(game_deck_list)\r\n next_card_rank = next_card_dict['rank']\r\n next_card_value = next_card_dict['value']\r\n next_card_suit = next_card_dict['suit']\r\n\r\n print(\"Volgende kaart is een\", next_card_suit, next_card_rank)\r\n\r\n if answer == \"h\":\r\n if next_card_value > current_card_value:\r\n print(\"Netjes, inderdaad hoger\")\r\n score += 20\r\n else:\r\n print(\"Helaas..\")\r\n score -= 15\r\n elif answer == \"l\":\r\n if next_card_value < current_card_value:\r\n print(\"Netjes, inderdaad lager\")\r\n score += 20\r\n else:\r\n print(\"Helaas..\")\r\n score -= 15\r\n\r\n if card_number + 1 == 8:\r\n print(\"Je eindscore is\", score)\r\n else:\r\n print(\"Je score is\", score)\r\n\r\n current_card_rank = next_card_rank\r\n current_card_value = next_card_value\r\n\r\n go_again = input(\"Nog een keer? 
(enter) of stoppen (s): \")\r\n if go_again == \"s\":\r\n break\r\n\r\nprint(\"Bedankt voor het spelen en tot de volgende keer!\")\r\n","repo_name":"DJdeGoede/MyRepo","sub_path":"python/object_oriented_python/HigherOrLowerProcedural.py","file_name":"HigherOrLowerProcedural.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"70446371601","text":"import json\nfrom twilio.rest import Client\nfrom kivymd.app import MDApp\nfrom kivy.lang import Builder\nfrom kivymd.uix.list import TwoLineListItem\nfrom kivymd.uix.dialog import MDDialog\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivymd.uix.button import MDFlatButton\nfrom twilio.base.exceptions import TwilioRestException\n\nString_Builder = \"\"\"\nScreen:\n ScreenManager: \n id: screen_manager\n MainScreen:\n AddNumber:\n\n\n name: \"main\"\n MDNavigationLayout:\n ScreenManager:\n Screen:\n BoxLayout:\n orientation: \"vertical\"\n size: self.size\n pos: self.pos\n FitImage:\n source: \"TheBadjie.png\"\n MDTextFieldRound:\n id: message\n hint_text: \"Enter Message\"\n size_hint: 0.75, 0.06\n pos_hint: {\"center_x\": 0.5,\"center_y\": 0.3}\n normal_color: 1,1,1,0.1\n line_color: 1,1,1,0.1\n color_active: 1,1,1,1\n line_color_focus: 1,1,1,1\n theme_text_color: \"Custom\"\n text_color: 1,1,1,1\n MDRoundFlatIconButton:\n text: \"Send\"\n size_hint: 0.85, None\n pos_hint: {\"center_x\": 0.5,\"center_y\": 0.2}\n line_color: 1,1,1,1\n text_color: 1,1,1,1\n icon_color: 1,1,1,1\n icon: \"send\"\n on_release: app.sendMessage()\n MDFloatingActionButton:\n icon: \"menu\"\n pos_hint: {\"center_x\": 0.1,\"center_y\": 0.95}\n md_bg_color: 229/255.0,150/255.0,38/255.0,1\n on_press: nav_drawer.set_state(\"open\")\n \n MDNavigationDrawer:\n id: nav_drawer\n BoxLayout:\n orientation: \"vertical\"\n FitImage:\n source: \"MyImage.jpg\" \n MDCard:\n orientation: \"vertical\"\n MDToolbar:\n title: \"PHONEBOOK\" \n md_bg_color: 0,0,0,0.75\n font_style: \"Caption\"\n ScrollView:\n MDList:\n id: phoneNums\n MDFloatingActionButton:\n icon: \"plus\"\n pos_hint: {\"center_x\": 0.8,\"center_y\": 0.3}\n md_bg_color: 229/255.0,150/255.0,38/255.0,1\n on_release: root.manager.current=\"number\"\n MDLabel:\n text: \" Your Name\"\n size_hint_y:None \n height: self.texture_size[1]\n MDLabel:\n text: \" yourEmail@gmail.com\\\\n\"\n size_hint_y:None \n height: self.texture_size[1] \n font_style: \"Caption\"\n\n name: \"number\"\n MDCard:\n orientation: \"vertical\"\n size_hint: 1, 0.7\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.7}\n FitImage:\n source: \"addContact.png\"\n MDFloatingActionButton:\n icon: \"home\"\n md_bg_color: 229/255.0,150/255.0,38/255.0,1\n pos_hint: {\"center_x\": 0.1, \"center_y\": 0.95}\n on_release: root.manager.current=\"main\" \n MDCard:\n orientation: \"vertical\"\n size_hint: 1, 0.5\n padding: \"20dp\"\n ScrollView:\n MDList:\n MDTextField:\n id: FirstName\n hint_text: \"First name\"\n pos_hint: {\"center_x\": 0.5}\n MDTextField:\n id: LastName\n hint_text: \"Last name\"\n pos_hint: {\"center_x\": 0.5}\n MDTextField:\n id: Phone\n hint_text: \"Phone\"\n pos_hint: {\"center_x\": 0.5}\n MDTextField:\n id: Email\n hint_text: \"Email\"\n pos_hint: {\"center_x\": 0.5}\n MDFloatingActionButton:\n icon: \"content-save\"\n pos_hint: {\"center_x\": 0.5,\"center_y\": 0.2}\n md_bg_color: 0,0,0,0.75\n on_release: app.saveContact() \n\"\"\"\n\n\nclass MainScreen(Screen):\n pass\n\n\nclass AddNumber(Screen):\n pass\n\n\nsm = 
ScreenManager()\nsm.add_widget(MainScreen(name=\"main\"))\nsm.add_widget(AddNumber(name=\"number\"))\n\n\nclass TheBadjie(MDApp):\n def build(self):\n AppBuilder = Builder.load_string(String_Builder)\n return AppBuilder\n\n def saveContact(self):\n with open(\"contact.json\", \"r\") as fr:\n reader = json.load(fr)\n\n FirstName = self.root.ids.screen_manager.get_screen(\"number\").ids.FirstName.text\n LastName = self.root.ids.screen_manager.get_screen(\"number\").ids.LastName.text\n Phone = self.root.ids.screen_manager.get_screen(\"number\").ids.Phone.text\n Email = self.root.ids.screen_manager.get_screen(\"number\").ids.Email.text\n with open(\"contact.json\", \"w\") as f:\n reader[FirstName] = Phone\n json.dump(reader, f, indent=2)\n fr.close()\n self.root.ids.screen_manager.get_screen(\"number\").ids.FirstName.text = \"\"\n self.root.ids.screen_manager.get_screen(\"number\").ids.LastName.text = \"\"\n self.root.ids.screen_manager.get_screen(\"number\").ids.Phone.text = \"\"\n self.root.ids.screen_manager.get_screen(\"number\").ids.Email.text = \"\"\n self.on_start()\n\n def on_start(self):\n with open(\"Message_Sender\\contact.json\", \"r\") as fr:\n self.phone_numbers = json.load(fr)\n for key, value in self.phone_numbers.items():\n self.root.ids.screen_manager.get_screen(\"main\").ids.phoneNums.add_widget(TwoLineListItem(text=key,\n secondary_text=value))\n\n def sendMessage(self):\n pass\n try:\n account_sid = 'AC6e55cf0ae5a8caa8ab3774098f1de4ab' \n auth_token = \"e5fd5a772413a4b33643304bd405522c\"\n text_message = self.root.ids.screen_manager.get_screen(\"main\").ids.message.text\n client = Client(account_sid, auth_token)\n self.messsageSid = \"\"\n message_splitter = str(text_message).split()\n if len(message_splitter) > 1:\n single_Num = self.phone_numbers[str(message_splitter[0])] # single phone number to send to\n space_index = str(text_message).index(\" \")\n\n if message_splitter[0] in self.phone_numbers: # single phone number to send to condition\n\n message = client.messages.create(\n from_=\"+yourTwilioNumber\",\n body=f'Message=>\\n{text_message[int(space_index + 1):]}',\n to=f'{single_Num}'\n )\n self.messsageSid += str(message) + '\\n'\n\n else:\n dismisser = MDFlatButton(text=\"Dismiss\",\n on_release=self.dismiss)\n for key, value in self.phone_numbers.items():\n print(value)\n message = client.messages.create(\n from_=\"+yourTwilioNumber\",\n body=f'Password=>\\n{text_message}',\n to=f'{value}'\n )\n self.messsageSid += str(message) + '\\n'\n self.dialog = MDDialog(title=\"Message Sent!\",\n text=self.messsageSid,\n buttons=[dismisser],\n size_hint=(0.75, 0.5)\n )\n self.dialog.open()\n except TwilioRestException as e:\n dismisser = MDFlatButton(text=\"Dismiss\",\n on_release=self.dismiss)\n self.dialog = MDDialog(title=\"Error Message!\",\n text=str(e),\n buttons=[dismisser],\n size_hint=(0.75, 0.5)\n )\n self.dialog.open()\n\n def dismiss(self, obj):\n pass\n self.dialog.dismiss()\n\n\nTheBadjie().run()","repo_name":"babucarr32/TwilioMessageSender","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41965152055","text":"from keras.layers.convolutional import Conv2D\nfrom keras.layers.core import Activation\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers.merge import Add\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom keras.regularizers import l2\n\nfrom 
..utils.net_utils import BilinearUpSampling, bn_act_convtranspose, bn_act_conv_block\nfrom ..encoder import scope_table, build_encoder\n\n\ndef residual_conv_unit(inputs,\n n_filters=256,\n kernel_size=3,\n weight_decay=1e-4,\n kernel_initializer=\"he_normal\",\n bn_epsilon=1e-3,\n bn_momentum=0.99):\n \"\"\" residual convolutional unit.\n :param inputs: 4-D tensor, shape of (batch_size, height, width, channel).\n :param n_filters: int, number of filters, default 256.\n :param kernel_size: int, default 3.\n :param weight_decay: float, default 1e-4.\n :param kernel_initializer: string, default \"he_normal\".\n :param bn_epsilon: float, default 1e-3.\n :param bn_momentum: float, default 0.99.\n\n :return: 4-D tensor, shape of (batch_size, height, width, channel).\n \"\"\"\n x = Activation(\"relu\")(inputs)\n x = Conv2D(n_filters, (kernel_size, kernel_size), padding=\"same\", activation=None, use_bias=False,\n kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)\n x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(x)\n x = Activation(\"relu\")(x)\n x = Conv2D(n_filters, (kernel_size, kernel_size), padding=\"same\", activation=None, use_bias=False,\n kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)\n x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(x)\n x = Add()([x, inputs])\n\n return x\n\n\ndef chained_residual_pooling(inputs,\n pool_size=(5, 5),\n n_filters=256,\n weight_decay=1e-4,\n kernel_initializer=\"he_normal\",\n bn_epsilon=1e-3,\n bn_momentum=0.99):\n \"\"\" chained residual pooling.\n :param inputs: 4-D tensor, shape of (batch_size, height, width, channel).\n :param pool_size: tuple, default (5, 5).\n :param n_filters: int, number of filters, default 256.\n :param weight_decay: float, default 1e-4.\n :param kernel_initializer: string, default \"he_normal\".\n :param bn_epsilon: float, default 1e-3.\n :param bn_momentum: float, default 0.99.\n\n :return: 4-D tensor, shape of (batch_size, height, width, channel).\n \"\"\"\n x_relu = Activation(\"relu\")(inputs)\n\n x = MaxPooling2D(pool_size=pool_size, strides=(1, 1), padding=\"same\")(x_relu)\n x = Conv2D(n_filters, (3, 3), padding=\"same\", activation=None, use_bias=False,\n kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)\n x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(x)\n x_sum1 = Add()([x_relu, x])\n\n x = MaxPooling2D(pool_size=pool_size, strides=(1, 1), padding=\"same\")(x)\n x = Conv2D(n_filters, (3, 3), padding=\"same\", activation=None, use_bias=False,\n kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)\n x_sum2 = Add()([x, x_sum1])\n\n return x_sum2\n\n\ndef multi_resolution_fusion(high_inputs=None,\n low_inputs=None,\n n_filters=256,\n weight_decay=1e-4,\n kernel_initializer=\"he_normal\",\n bn_epsilon=1e-3,\n bn_momentum=0.99):\n \"\"\" fuse multi resolution features.\n :param high_inputs: 4-D tensor, shape of (batch_size, height, width, channel),\n features with high spatial resolutions.\n :param low_inputs: 4-D tensor, shape of (batch_size, height, width, channel),\n features with low spatial resolutions.\n :param n_filters: int, number of filters, default 256.\n :param weight_decay: float, default 1e-4.\n :param kernel_initializer: string, default \"he_normal\".\n :param bn_epsilon: float, default 1e-3.\n :param bn_momentum: float, default 0.99.\n\n :return: 4-D tensor, shape of (batch_size, height, width, channel).\n \"\"\"\n if high_inputs is None:\n 
fuse = Conv2D(n_filters, (3, 3), padding=\"same\", activation=None, use_bias=False,\n kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(low_inputs)\n fuse = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(fuse)\n else:\n conv_low = Conv2D(n_filters, (3, 3), padding=\"same\", activation=None, use_bias=False,\n kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(low_inputs)\n conv_low = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(conv_low)\n conv_high = Conv2D(n_filters, (3, 3), padding=\"same\", activation=None, use_bias=False,\n kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(high_inputs)\n conv_high = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(conv_high)\n conv_low = BilinearUpSampling(target_size=(int(conv_high.shape[1]), int(conv_high.shape[2])))(conv_low)\n fuse = Add()([conv_high, conv_low])\n\n return fuse\n\n\ndef refine_block(high_inputs=None,\n low_inputs=None,\n base_filters=256):\n \"\"\" a complete refine block.\n :param high_inputs: 4-D tensor, shape of (batch_size, height, width, channel),\n features with high spatial resolutions.\n :param low_inputs: 4-D tensor, shape of (batch_size, height, width, channel),\n features with low spatial resolutions.\n :param base_filters: int, initial number of filters, default 256.\n :return:\n \"\"\"\n if low_inputs is None: # Block 4\n # 2 RCUs\n rcu_new_low = residual_conv_unit(high_inputs, n_filters=base_filters * 2)\n rcu_new_low = residual_conv_unit(rcu_new_low, n_filters=base_filters * 2)\n\n # feature fusion\n fuse = multi_resolution_fusion(high_inputs=None, low_inputs=rcu_new_low, n_filters=base_filters * 2)\n fuse_pooling = chained_residual_pooling(fuse, n_filters=base_filters * 2)\n output = residual_conv_unit(fuse_pooling, n_filters=base_filters * 2)\n return output\n else:\n rcu_high = residual_conv_unit(high_inputs, n_filters=base_filters)\n rcu_high = residual_conv_unit(rcu_high, n_filters=base_filters)\n\n fuse = multi_resolution_fusion(rcu_high, low_inputs, n_filters=base_filters)\n fuse_pooling = chained_residual_pooling(fuse, n_filters=base_filters)\n output = residual_conv_unit(fuse_pooling, n_filters=base_filters)\n return output\n\n\ndef RefineNet(input_shape,\n n_class,\n encoder_name,\n encoder_weights=None,\n weight_decay=1e-4,\n kernel_initializer=\"he_normal\",\n bn_epsilon=1e-3,\n bn_momentum=0.99,\n init_filters=256,\n upscaling_method=\"bilinear\"):\n \"\"\" 4 cascaded RefineNet implementation using keras\n ref: Lin G, Milan A, Shen C, et al. RefineNet: Multi-Path Refinement Networks for High-Resolution\n Semantic Segmentation[J]. 
arXiv preprint arXiv:1611.06612, 2016.\n    :param input_shape: tuple, i.e., (height, width, channel).\n    :param n_class: int, number of classes, must be >= 2.\n    :param encoder_name: string, name of encoder.\n    :param encoder_weights: string, path of weights, default None.\n    :param weight_decay: float, default 1e-4.\n    :param kernel_initializer: string, default \"he_normal\".\n    :param bn_epsilon: float, default 1e-3.\n    :param bn_momentum: float, default 0.99.\n    :param init_filters: int, number of filters used when refining.\n    :param upscaling_method: string, \"bilinear\" or \"conv\", default \"bilinear\".\n\n    :return: a Keras Model instance.\n    \"\"\"\n    encoder = build_encoder(input_shape, encoder_name, encoder_weights=encoder_weights,\n                            weight_decay=weight_decay, kernel_initializer=kernel_initializer,\n                            bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)\n\n    # these are actually pool5, pool4, pool3 and pool2\n    high_1 = encoder.get_layer(scope_table[encoder_name][\"pool4\"]).output\n    high_2 = encoder.get_layer(scope_table[encoder_name][\"pool3\"]).output\n    high_3 = encoder.get_layer(scope_table[encoder_name][\"pool2\"]).output\n    high_4 = encoder.get_layer(scope_table[encoder_name][\"pool1\"]).output\n\n    high_1 = Conv2D(init_filters * 2, (1, 1), padding=\"same\", activation=\"relu\",\n                    kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(high_1)\n    high_2 = Conv2D(init_filters, (1, 1), padding=\"same\", activation=\"relu\",\n                    kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(high_2)\n    high_3 = Conv2D(init_filters, (1, 1), padding=\"same\", activation=\"relu\",\n                    kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(high_3)\n    high_4 = Conv2D(init_filters, (1, 1), padding=\"same\", activation=\"relu\",\n                    kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(high_4)\n\n    low_1 = refine_block(high_1, low_inputs=None, base_filters=init_filters)\n    low_2 = refine_block(high_2, low_1, base_filters=init_filters)\n    low_3 = refine_block(high_3, low_2, base_filters=init_filters)\n    low_4 = refine_block(high_4, low_3, base_filters=init_filters)\n    x = low_4\n\n    x = residual_conv_unit(x, init_filters)\n    x = residual_conv_unit(x, init_filters)\n\n    if upscaling_method == \"conv\":\n        x = bn_act_convtranspose(x, 128, kernel_size=[3, 3], scale=2,\n                                 weight_decay=weight_decay, kernel_initializer=kernel_initializer,\n                                 bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)\n        x = bn_act_conv_block(x, 128, weight_decay=weight_decay, kernel_initializer=kernel_initializer,\n                              bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)\n    else:\n        x = BilinearUpSampling(target_size=(input_shape[0], input_shape[1]))(x)\n\n    output = Conv2D(n_class, (1, 1), activation=None,\n                    kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)\n    output = Activation(\"softmax\")(output)\n\n    return Model(encoder.input, output)\n","repo_name":"liuph0119/Semantic_Segmentation_Keras","sub_path":"core/nets/refinenets.py","file_name":"refinenets.py","file_ext":"py","file_size_in_byte":10607,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"}
{"seq_id":"16907578781","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport glob\nimport PIL.Image\nimport os\n\nif __name__ == '__main__':\n    data_root = '../dump/'\n    particle_files = glob.glob(os.path.join(data_root, '*.txt'))\n    particle_files.sort()\n    image_files = glob.glob(os.path.join(data_root, '*.png'))\n    image_files.sort()\n    plt.ion()\n    fig = plt.figure(1, figsize=(24, 8))
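# Editor's sketch (assumption-labeled; not from the records): a minimal, hypothetical
# invocation of the RefineNet builder defined above. The input shape, class count and
# encoder name are placeholders -- valid encoder names come from the repo's own
# scope_table, which this record does not show.
from core.nets.refinenets import RefineNet  # hypothetical import path, after "core/nets/refinenets.py"

model = RefineNet(input_shape=(512, 512, 3), n_class=21, encoder_name="resnet_v2_50")
model.compile(optimizer="adam", loss="categorical_crossentropy")  # standard Keras compile step
model.summary()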
\n    ax_x = fig.add_subplot(231)\n    ax_y = fig.add_subplot(232)\n    ax_z = fig.add_subplot(233)\n    ax_t = fig.add_subplot(234)\n    ax_w = fig.add_subplot(235)\n    ax_i = fig.add_subplot(236)\n    axes = [ax_x, ax_y, ax_z, ax_t, ax_w, ax_i]\n    for frame_id, (particle_file, image_file) in enumerate(zip(particle_files, image_files)):\n        particles = np.loadtxt(particle_file)\n        image = np.array(PIL.Image.open(image_file))\n        for i in range(5):\n            ax = axes[i]\n            ax.cla()\n            # ax.plot(particles[:, i], '.')\n            if i == 3:\n                ax.hist(particles[:, i] / 3.14, bins=50)\n            else:\n                ax.hist(particles[:, i], bins=50)\n\n        ax_i.cla()\n        ax_i.imshow(image)\n\n        plt.pause(0.01)\n        plt.savefig(os.path.join(data_root, 'viz_%06d.png' % frame_id))\n\n\n\n\n","repo_name":"feixh/VISMA-tracker","sub_path":"scripts/visualize_particles.py","file_name":"visualize_particles.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"3"}
{"seq_id":"4768488254","text":"import tkinter as tk\nfrom tkinter import ttk\n\nfrom logic import CellState, ConnectFour, Point\n\nCELL_SIZE = Point(40, 40)\nCELL_PADDING_SIZE = Point(5, 5)\nHEADER_HEIGHT = 20\nFOOTER_HEIGHT = 40\n\n\nclass Application(tk.Tk):\n    def __init__(self) -> None:\n        super().__init__()\n        self.create_widgets()\n\n    def create_widgets(self) -> None:\n        game = ConnectFour()\n        window_size = Point(\n            CELL_SIZE.x * game.size.x,\n            CELL_SIZE.y * game.size.y + HEADER_HEIGHT + FOOTER_HEIGHT,\n        )\n        self.minsize(width=window_size.x, height=window_size.y)\n        self.maxsize(width=window_size.x, height=window_size.y)\n\n        self.title(\"Connect four\")\n        self.columnconfigure(0, weight=1)\n        self.rowconfigure(0, weight=1)\n\n        self.container = ContainerFrame(self, window_size, game)\n\n\nclass ContainerFrame(ttk.Frame):\n    PLAYER_NAME = {True: \"Red\", False: \"Yellow\"}\n\n    def __init__(self, master: tk.Tk, window_size: Point, game: ConnectFour) -> None:\n        super().__init__(master)\n        self.window_size = window_size\n        self.game = game\n        self.init_game()\n        self.create_widgets()\n\n    def init_game(self) -> None:\n        self.game_end = False\n        self.is_player_turn = True\n        self.turn = 1\n\n    def reset_game(self) -> None:\n        self.init_game()\n        self.reset_game_board()\n        self.redraw_header()\n        self.redraw_board()\n        self.master.update()\n\n    def reset_game_board(self) -> None:\n        self.game.init_board()\n\n    def create_widgets(self) -> None:\n        self.grid(column=0, row=0, sticky=tk.N + tk.S + tk.W + tk.E)\n        self.create_header()\n        self.create_board()\n        self.create_reset_button()\n\n    def create_header(self) -> None:\n        size = Point(self.window_size.x, HEADER_HEIGHT)\n        self.header = HeaderFrame(\n            self, size, self.turn, self.PLAYER_NAME[self.is_player_turn]\n        )\n        self.header.grid(row=0)\n        self.columnconfigure(0, weight=1)\n        self.rowconfigure(0, weight=1)\n\n    def create_board(self) -> None:\n        self.board = BoardFrame(self, self.game)\n        self.board.canvas.bind(\"<Button-1>\", self.handle_click_event)\n        self.board.grid(row=1)\n\n    def create_reset_button(self) -> None:\n        self.reset_button = ttk.Button(self, text=\"Reset Game\", command=self.reset_game)\n        self.reset_button.grid(row=2)\n        self.rowconfigure(2, weight=1)\n\n    def redraw_header(self) -> None:\n        self.header.update_widgets(self.turn, self.PLAYER_NAME[self.is_player_turn])\n\n    def redraw_board(self) -> None:\n        self.board.redraw_widgets(self.game)\n\n    def handle_click_event(self, event: tk.Event) -> None:\n        if self.game_end:\n            return\n        x = event.x // CELL_SIZE.x\n        if not self.game.can_put_cell(x):\n            
return\n self.put_cell(x)\n\n def put_cell(self, x: int) -> None:\n if self.is_player_turn:\n pos = self.game.put_player_cell(x)\n else:\n pos = self.game.put_opponent_cell(x)\n self.board.draw_cell(pos, self.game, False)\n self.judge_win_or_draw(x)\n if not self.game_end:\n self.proceed_next_turn()\n\n def judge_win_or_draw(self, x: int) -> None:\n if self.game.judge_win(x):\n self.board.display_win_message(self.PLAYER_NAME[self.is_player_turn])\n self.game_end = True\n elif self.game.judge_draw():\n self.board.display_draw_message()\n self.game_end = True\n\n def proceed_next_turn(self) -> None:\n self.is_player_turn = not self.is_player_turn\n self.turn += 1\n self.redraw_header()\n\n\nclass HeaderFrame(ttk.Frame):\n def __init__(\n self, master: ContainerFrame, size: Point, turn: int, player: str\n ) -> None:\n super().__init__(master, width=size.x, height=size.y)\n self._create_widgets(turn, player)\n\n def _create_widgets(self, turn: int, player: str) -> None:\n self.turn_label = ttk.Label(self)\n self.turn_label.place(relx=0.2, rely=0)\n\n self.player_label = ttk.Label(self)\n self.player_label.place(relx=0.6, rely=0)\n\n self.update_widgets(turn, player)\n\n def update_widgets(self, turn: int, player: str) -> None:\n self.turn_label.config(text=f\"Turn: {turn}\")\n self.player_label.config(text=f\"Player: {player}\")\n\n\nclass BoardFrame(ttk.Frame):\n TAG_CELL = \"cell\"\n TAG_MESSAGE = \"message\"\n\n COLOR_BACKGROUND = \"#286FCD\"\n COLOR_CELL = {\n CellState.EMPTY: \"#1A48A2\",\n CellState.PLAYER: \"#DB3548\",\n CellState.OPPONENT: \"#FAD138\",\n }\n COLOR_MESSAGE = \"#FFFFFF\"\n FONT_MESSAGE = (\"\", 42, \"bold\")\n\n def __init__(self, master: ContainerFrame, game: ConnectFour) -> None:\n super().__init__(master)\n self._create_widgets(game)\n\n def _create_widgets(self, game: ConnectFour) -> None:\n self.canvas = tk.Canvas(\n self, background=self.COLOR_BACKGROUND, highlightthickness=0\n )\n self.draw_cells(game)\n self.canvas.config(\n width=CELL_SIZE.x * game.size.x,\n height=CELL_SIZE.y * game.size.y,\n )\n self.canvas.grid(row=1)\n\n def redraw_widgets(self, game: ConnectFour) -> None:\n self.canvas.delete(self.TAG_CELL, self.TAG_MESSAGE)\n self.draw_cells(game)\n\n def draw_cells(self, game: ConnectFour) -> None:\n need_create = len(self.canvas.find_withtag(self.TAG_CELL)) == 0\n for y in range(game.size.y):\n for x in range(game.size.x):\n self.draw_cell(Point(x, y), game, need_create)\n\n def draw_cell(self, pos: Point, game: ConnectFour, need_create: bool) -> None:\n start_x = CELL_SIZE.x * pos.x + CELL_PADDING_SIZE.x\n start_y = CELL_SIZE.y * pos.y + CELL_PADDING_SIZE.y\n end_x = CELL_SIZE.x * (pos.x + 1) - CELL_PADDING_SIZE.x\n end_y = CELL_SIZE.y * (pos.y + 1) - CELL_PADDING_SIZE.y\n cell = game.get_cell(pos.x, pos.y)\n if need_create:\n self.canvas.create_oval(\n start_x,\n start_y,\n end_x,\n end_y,\n fill=self.COLOR_CELL[cell],\n tags=(self.TAG_CELL, f\"{pos.x}-{pos.y}\"),\n )\n else:\n oval = self.canvas.find_withtag(f\"{pos.x}-{pos.y}\")\n self.canvas.itemconfig(oval, fill=self.COLOR_CELL[cell])\n\n def display_win_message(self, winner: str) -> None:\n self._display_message(f\"{winner} wins!\")\n\n def display_draw_message(self) -> None:\n self._display_message(\"Draw!\")\n\n def _display_message(self, message: str) -> None:\n pos_x = self.canvas.winfo_width() // 2\n pos_y = self.canvas.winfo_height() // 2\n self.canvas.create_text(\n pos_x,\n pos_y,\n text=message,\n font=self.FONT_MESSAGE,\n fill=self.COLOR_MESSAGE,\n tags=self.TAG_MESSAGE,\n 
)\n","repo_name":"nafuka11/dena-review","sub_path":"gui/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20588886820","text":"from django import forms\nfrom django.contrib import admin\nfrom django.utils.html import format_html_join\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\n\nfrom geocity.apps.accounts.admin import IntegratorFilterMixin, filter_for_user\nfrom geocity.apps.forms.admin import get_forms_field\nfrom geocity.apps.forms.models import Form\n\nfrom . import models\n\n\nclass SubmissionAmendFieldForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n user = kwargs.pop(\"user\")\n super().__init__(*args, **kwargs)\n self.fields[\"forms\"] = get_forms_field(user)\n\n class Meta:\n model = models.SubmissionAmendField\n fields = [\n \"name\",\n \"api_name\",\n \"is_mandatory\",\n \"is_visible_by_author\",\n \"is_visible_by_validators\",\n \"can_always_update\",\n \"placeholder\",\n \"help_text\",\n \"regex_pattern\",\n \"forms\",\n \"integrator\",\n ]\n\n\n@admin.register(models.SubmissionAmendField)\nclass SubmissionAmendFieldAdmin(IntegratorFilterMixin, admin.ModelAdmin):\n list_display = [\n \"sortable_str\",\n \"is_mandatory\",\n \"is_visible_by_author\",\n \"is_visible_by_validators\",\n \"can_always_update\",\n ]\n search_fields = [\n \"name\",\n ]\n form = SubmissionAmendFieldForm\n\n def sortable_str(self, obj):\n return str(obj)\n\n sortable_str.short_description = \"Champ de traitement des demandes\"\n sortable_str.admin_order_field = \"name\"\n\n # Pass the user from ModelAdmin to ModelForm\n def get_form(self, request, obj=None, **kwargs):\n Form = super().get_form(request, obj, **kwargs)\n\n class RequestForm(Form):\n def __new__(cls, *args, **kwargs):\n kwargs[\"user\"] = request.user\n return Form(*args, **kwargs)\n\n return RequestForm\n\n\n@admin.register(models.ContactTypeForAdminSite)\nclass ContactTypeAdmin(IntegratorFilterMixin, admin.ModelAdmin):\n list_display = [\n \"name\",\n ]\n search_fields = [\n \"name\",\n ]\n\n def sortable_str(self, obj):\n return obj.__str__()\n\n sortable_str.admin_order_field = \"name\"\n sortable_str.short_description = _(\"Types\")\n\n\n@admin.register(models.ContactFormForAdminSite)\nclass ContactFormAdmin(IntegratorFilterMixin, admin.ModelAdmin):\n list_display = [\n \"sortable_str\",\n \"type\",\n \"form_category\",\n \"is_mandatory\",\n \"is_dynamic\",\n ]\n list_filter = [\n \"form_category\",\n \"is_mandatory\",\n ]\n search_fields = [\n \"name\",\n ]\n\n def sortable_str(self, obj):\n return obj.__str__()\n\n sortable_str.admin_order_field = \"type\"\n sortable_str.short_description = _(\"Contact\")\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == \"form_category\":\n kwargs[\"queryset\"] = filter_for_user(\n request.user, models.FormCategory.objects.all()\n )\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n\n@admin.register(models.Submission)\nclass SubmissionAdmin(admin.ModelAdmin):\n list_display = [\n \"id\",\n \"created_at\",\n \"sent_date\",\n \"status\",\n \"author\",\n \"get_forms\",\n \"administrative_entity\",\n ]\n search_fields = [\n \"id\",\n \"author__first_name\",\n \"author__last_name\",\n ]\n list_filter = (\"status\", \"author\", \"forms\", \"administrative_entity\")\n\n def get_queryset(self, request):\n return (\n super()\n 
.get_queryset(request)\n .prefetch_related(\"forms\")\n .select_related(\"author\")\n )\n\n def has_add_permission(self, request):\n return False\n\n def get_forms(self, obj):\n return \", \".join(sorted([form.name for form in obj.forms.all()]))\n\n get_forms.admin_order_field = \"forms\"\n get_forms.short_description = \"Formulaires\"\n\n\nclass ComplementaryDocumentTypeAdminForm(forms.ModelForm):\n model = models.ComplementaryDocumentTypeForAdminSite\n\n def clean_form(self):\n form = self.cleaned_data[\"form\"]\n if not self.instance.pk:\n return form\n payment_settings_confirmation_reports = self.instance.children.exclude(\n reports__confirmation_payment_settings_objects=None\n )\n payment_settings_refund_reports = self.instance.children.exclude(\n reports__refund_payment_settings_objects=None\n )\n error_msg = \"\"\n if (\n payment_settings_confirmation_reports.exists()\n and not payment_settings_confirmation_reports.filter(\n reports__confirmation_payment_settings_objects__form__in=[form]\n )\n ):\n error_msg = _(\n \"Ce type de document est utilisé comme confirmation de paiement dans une configuration de paiement, via un modèle d'impression. Vous devez dé-lier le modèle d'impression de la configuration de paiement afin de pouvoir modifier ce champ.\"\n )\n if (\n payment_settings_refund_reports.exists()\n and not payment_settings_refund_reports.filter(\n reports__refund_payment_settings_objects__form__in=[form]\n )\n ):\n error_msg = _(\n \"Ce type de document est utilisé comme remboursement dans une configuration de paiement, via un modèle d'impression. Vous devez dé-lier le modèle d'impression de la configuration de paiement afin de pouvoir modifier ce champ.\"\n )\n if error_msg:\n raise forms.ValidationError(error_msg)\n return form\n\n\nclass ComplementaryDocumentTypeInline(admin.TabularInline):\n model = models.ComplementaryDocumentTypeForAdminSite\n form = ComplementaryDocumentTypeAdminForm\n\n fields = [\"name\"]\n\n verbose_name = _(\"Type de document\")\n verbose_name_plural = _(\"Type de documents\")\n\n # Defines the number of extra forms to by default. 
Default is set to 3\n    # https://docs.djangoproject.com/en/4.1/ref/contrib/admin/#django.contrib.admin.InlineModelAdmin.extra\n    extra = 1\n\n    def get_queryset(self, request):\n        qs = super().get_queryset(request)\n        return qs.filter(parent__isnull=False)\n\n\n@admin.register(models.ComplementaryDocumentTypeForAdminSite)\nclass ComplementaryDocumentTypeAdmin(IntegratorFilterMixin, admin.ModelAdmin):\n    inlines = [\n        ComplementaryDocumentTypeInline,\n    ]\n    form = ComplementaryDocumentTypeAdminForm\n    fields = [\"name\", \"form\", \"integrator\"]\n\n    def get_list_display(self, request):\n        if request.user.is_superuser:\n            list_display = [\n                \"name\",\n                \"form\",\n                \"integrator\",\n                \"types_\",\n            ]\n        else:\n            list_display = [\n                \"name\",\n                \"form\",\n                \"types_\",\n            ]\n        return list_display\n\n    # Fields used in search_fields and list_filter\n    integrator_fields = [\n        \"name\",\n        \"form\",\n        \"integrator\",\n        \"form__administrative_entities\",\n    ]\n    user_fields = [\n        \"name\",\n        \"form\",\n    ]\n\n    def get_search_fields(self, request):\n        if request.user.is_superuser:\n            search_fields = self.integrator_fields\n        else:\n            search_fields = self.user_fields\n        return search_fields\n\n    def get_list_filter(self, request):\n        if request.user.is_superuser:\n            list_filter = self.integrator_fields\n        else:\n            list_filter = self.user_fields\n        return list_filter\n\n    # List types of documents\n    def types_(self, obj):\n        list_content = format_html_join(\n            \"\",\n            \"&nbsp;&nbsp;• {}<br>\",\n            [\n                [d]\n                for d in models.ComplementaryDocumentType.children_objects.associated_to_parent(\n                    obj\n                ).values_list(\n                    \"name\", flat=True\n                )\n            ],\n        )\n        return mark_safe(f\"<div>{list_content}</div>\")\n\n    types_.short_description = _(\"Type de document\")\n\n    def formfield_for_foreignkey(self, db_field, request, **kwargs):\n        if db_field.name == \"form\":\n            if request.user.is_superuser:\n                kwargs[\"queryset\"] = Form.objects.all()\n            else:\n                kwargs[\"queryset\"] = Form.objects.filter(\n                    integrator=request.user.groups.get(\n                        permit_department__is_integrator_admin=True\n                    )\n                )\n        if db_field.name == \"parent\":\n            if request.user.is_superuser:\n                kwargs[\"queryset\"] = models.ComplementaryDocumentType.objects.all()\n            else:\n                kwargs[\"queryset\"] = models.ComplementaryDocumentType.objects.filter(\n                    integrator=request.user.groups.get(\n                        permit_department__is_integrator_admin=True\n                    )\n                )\n\n        return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n    def get_queryset(self, request):\n        qs = super().get_queryset(request)\n        return qs.filter(parent__isnull=True)\n\n\n@admin.register(models.SubmissionInquiry)\nclass SubmissionInquiryAdmin(admin.ModelAdmin):\n    list_display = (\"id\", \"start_date\", \"end_date\", \"submitter\", \"submission\")\n\n    def sortable_str(self, obj):\n        return obj.__str__()\n\n    sortable_str.admin_order_field = \"name\"\n    sortable_str.short_description = _(\"2.3 Enquêtes public\")\n","repo_name":"yverdon/geocity","sub_path":"geocity/apps/submissions/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":9627,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"3"}
{"seq_id":"22179241556","text":"from datetime import datetime\nimport traceback\n\nfrom pockets import groupify, listify\nfrom sqlalchemy import func, or_\n\nfrom uber.automated_emails import AutomatedEmailFixture\nfrom uber.config import c\nfrom uber.decorators import ajax, all_renderable, csrf_protected, csv_file, public\nfrom uber.errors import HTTPRedirect\nfrom uber.models import AdminAccount, Attendee, AutomatedEmail, Email\nfrom uber.tasks.email import send_email\nfrom uber.utils import get_page\n\n\n@all_renderable()\nclass Root:\n    def index(self, session, page='1', search_text=''):\n        emails = session.query(Email).order_by(Email.when.desc())\n        search_text = search_text.strip()\n        if search_text:\n            emails = emails.icontains(Email.to, search_text)\n        return {\n            'page': page,\n            'emails': get_page(page, emails),\n            'count': emails.count(),\n            'search_text': search_text\n        }\n\n    def sent(self, session, **params):\n        return {'emails': session.query(Email).filter_by(**params).order_by(Email.when).all()}\n\n    def pending(self, session, message=''):\n        AutomatedEmail.reconcile_fixtures()\n        emails_with_count = session.query(AutomatedEmail, AutomatedEmail.email_count).filter(\n            AutomatedEmail.subject != '', AutomatedEmail.sender != '',).all()\n        emails = []\n        for email, email_count in sorted(emails_with_count, key=lambda e: e[0].ordinal):\n            email.sent_email_count = email_count\n            emails.append(email)\n\n        emails_by_sender = groupify(emails, 'sender')\n\n        return {\n            'message': message,\n            'automated_emails': emails_by_sender,\n        }\n\n    def pending_examples(self, session, ident, message=''):\n        email = session.query(AutomatedEmail).filter_by(ident=ident).first()\n        examples = []\n        model = email.model_class\n        query = AutomatedEmailFixture.queries.get(model)(session).order_by(model.id)\n        limit = 1000\n        for model_instance in query.order_by(func.random()).limit(limit):\n            if email.would_send_if_approved(model_instance):\n                # These examples are never added to the session or saved to the database.\n                # They are only used to render an example of the automated email.\n                
example = Email(\n subject=email.render_subject(model_instance),\n body=email.render_body(model_instance),\n sender=email.sender,\n to=model_instance.email,\n cc=email.cc,\n bcc=email.bcc,\n ident=email.ident,\n fk_id=model_instance.id,\n automated_email_id=email.id,\n automated_email=email,\n )\n examples.append((model_instance, example))\n example_count = len(examples)\n if example_count > 10:\n break\n return {\n 'email': email,\n 'examples': examples,\n 'message': message,\n }\n \n def update_dates(self, session, ident, **params):\n email = session.query(AutomatedEmail).filter_by(ident=ident).first()\n email.apply(params, restricted=False)\n session.add(email)\n session.commit()\n raise HTTPRedirect('pending_examples?ident={}&message={}', ident, 'Email send dates updated')\n\n def test_email(self, session, subject=None, body=None, from_address=None, to_address=None, **params):\n \"\"\"\n Testing only: send a test email as a system user\n \"\"\"\n\n output_msg = \"\"\n\n if subject and body and from_address and to_address:\n send_email.delay(from_address, to_address, subject, body)\n output_msg = \"RAMS has attempted to send your email.\"\n\n right_now = str(datetime.now())\n\n return {\n 'from_address': from_address or c.STAFF_EMAIL,\n 'to_address': (\n to_address or\n (\"goldenaxe75t6489@mailinator.com\" if c.DEV_BOX else AdminAccount.admin_email())\n or \"\"),\n 'subject': c.EVENT_NAME_AND_YEAR + \" test email \" + right_now,\n 'body': body or \"ignore this email, it is a test of the RAMS email system \" + right_now,\n 'message': output_msg,\n }\n\n @public\n @ajax\n def resend_email(self, session, id):\n \"\"\"\n Resend a particular email to the model's current email address.\n\n This is useful for if someone had an invalid email address and did not receive an automated email.\n \"\"\"\n email = session.email(id)\n if email:\n try:\n # If this was an automated email, we can send out an updated copy\n if email.automated_email and email.fk:\n email.automated_email.send_to(email.fk, delay=False, raise_errors=True)\n else:\n send_email.delay(\n c.ADMIN_EMAIL,\n email.fk_email,\n email.subject,\n email.body,\n format=email.format,\n model=email.fk.to_dict('id') if email.fk_id else None,\n ident=email.ident)\n session.commit()\n except Exception:\n traceback.print_exc()\n return {'success': False, 'message': 'Email not sent: unknown error.'}\n else:\n return {'success': True, 'message': 'Email resent.'}\n return {'success': False, 'message': 'Email not sent: no email found with that ID.'}\n\n @csrf_protected\n def approve(self, session, ident):\n automated_email = session.query(AutomatedEmail).filter_by(ident=ident).first()\n if automated_email:\n automated_email.approved = True\n raise HTTPRedirect(\n 'pending?message={}',\n '\"{}\" approved and will be sent out {}'.format(automated_email.subject, \n \"shortly\" if not automated_email.active_when_label\n else automated_email.active_when_label))\n raise HTTPRedirect('pending?message={}{}', 'Unknown automated email: ', ident)\n\n @csrf_protected\n def unapprove(self, session, ident):\n automated_email = session.query(AutomatedEmail).filter_by(ident=ident).first()\n if automated_email:\n automated_email.approved = False\n raise HTTPRedirect(\n 'pending?message={}',\n 'Approval to send \"{}\" rescinded, '\n 'and it will not be sent until approved again'.format(automated_email.subject))\n raise HTTPRedirect('pending?message={}{}', 'Unknown automated email: ', ident)\n\n def emails_by_interest(self, message=''):\n return {\n 'message': 
message\n        }\n\n    @csv_file\n    def emails_by_interest_csv(self, out, session, **params):\n        \"\"\"\n        Generate a list of emails of attendees who match one of c.INTEREST_OPTS\n        (interests are like \"LAN\", \"music\", \"gameroom\", etc)\n\n        This is intended for use to export emails to a third-party email system, like MadMimi or Mailchimp\n        \"\"\"\n        if 'interests' not in params:\n            raise HTTPRedirect('emails_by_interest?message={}', 'You must select at least one interest')\n\n        interests = [int(i) for i in listify(params['interests'])]\n        assert all(k in c.INTERESTS for k in interests)\n\n        attendees = session.query(Attendee).filter_by(can_spam=True).order_by('email').all()\n\n        out.writerow([\"fullname\", \"email\", \"zipcode\"])\n\n        for a in attendees:\n            if set(interests).intersection(a.interests_ints):\n                out.writerow([a.full_name, a.email, a.zip_code])\n\n    def emails_by_kickin(self, message=''):\n        return {\n            'message': message\n        }\n\n    @csv_file\n    def emails_by_kickin_csv(self, out, session, **params):\n        \"\"\"\n        Generate a list of attendee emails by what kick-in level they've donated at.\n        We also select attendees with kick-in levels above the selected level.\n        \"\"\"\n        if 'amount_extra' not in params:\n            raise HTTPRedirect('emails_by_kickin?message={}', 'You must select a kick-in level')\n\n        amount_extra = params['amount_extra']\n\n        base_filter = Attendee.badge_status.in_([c.NEW_STATUS, c.COMPLETED_STATUS])\n        email_filter = [Attendee.can_spam == True] if 'only_can_spam' in params else [] # noqa: E712\n        attendee_filter = Attendee.amount_extra >= amount_extra\n        if 'include_staff' in params:\n            attendee_filter = or_(attendee_filter, Attendee.badge_type == c.STAFF_BADGE)\n\n        attendees = session.query(Attendee).filter(\n            base_filter, attendee_filter, *email_filter).all()\n\n        out.writerow([\"fullname\", \"email\", \"zipcode\"])\n        for a in attendees:\n            out.writerow([a.full_name, a.email, a.zip_code])\n","repo_name":"magfest/ubersystem","sub_path":"uber/site_sections/email_admin.py","file_name":"email_admin.py","file_ext":"py","file_size_in_byte":9100,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"3"}
{"seq_id":"5512609496","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 22 09:24:45 2022\n\n@author: junho\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\n\npd.set_option('display.max_columns', None)\n\ndata = pd.read_csv('C:/Users/junho/Desktop/study/py/data/diabetes.csv')\nf_data = np.array(data.drop('Outcome', axis=1))\nt_data = np.array(data['Outcome'])\ndata.head()\n\n\n# Standardize the data. Note that the train and test data are standardized together here.\n# For classification the target itself does not need to be standardized.\n# Is that okay? If it is a problem, what exactly is the problem?
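# Editor's sketch answering the question above (added commentary, not the original
# author's code): to avoid leaking test-set statistics, fit the scaler on the
# training split only and reuse its mean/variance for the test split.
_x_tr, _x_te, _y_tr, _y_te = train_test_split(f_data, t_data, test_size=0.2, random_state=0)
_scaler = StandardScaler().fit(_x_tr)    # statistics computed from training data only
_x_tr_scaled = _scaler.transform(_x_tr)  # scale train with train statistics
_x_te_scaled = _scaler.transform(_x_te)  # the test set never influences the scaler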
f_scale = StandardScaler()\nf_scaled = f_scale.fit_transform(f_data)\n\n# Build the train and test data sets\nx_train, x_test, y_train, y_test = train_test_split(f_scaled, t_data, test_size = 0.2)\n\n# Fit a logistic regression model on the training set\nmodel = LogisticRegression()\nmodel.fit(x_train, y_train)\n\n# Predict the classes of the test-set features and compute the accuracy\nprint(f'* Accuracy measured on the training data = {model.score(x_train, y_train):0.2f}')\nprint(f'* Accuracy measured on the test data = {model.score(x_test, y_test):0.2f}')\n\n# Inspect the learned w and b.\nprint('\\nw :')\nprint(model.coef_)\nprint('\\nb :')\nprint(model.intercept_)\nprint('\\nclass :')\nprint(model.classes_)\n\n\n# Predict the class of x_test[n].\nn = 1\ny_pred = model.predict(x_test[n].reshape(1, -1))[0]\nprint(f'y_test[{n}] = {y_test[n]}, y_pred = {y_pred}')\n# Show the probability of class 0 and of class 1\nprint('probability = ', model.predict_proba(x_test[n].reshape(1, -1))[0])\n\n\n# Compute the probability manually\n# Estimate the class of x_test[n] by hand to check what each parameter does.\ntheta = np.dot(model.coef_[0], x_test[n]) + model.intercept_\nprob = 1.0 / (1.0 + np.exp(-theta))\nprint('probability = ', prob)\n","repo_name":"jjuunnoo/TIL","sub_path":"python/machine learning/220222_diabetes_logistic.py","file_name":"220222_diabetes_logistic.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"18910003857","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest\nfrom context import create_test_api\nfrom chatbot.bot.subsystem import command\nfrom chatbot.bot.subsystem.command import CommandFlag\nfrom chatbot.api.test import TestAPI\n\n\nclass Test(unittest.IsolatedAsyncioTestCase):\n    def setUp(self):\n        self._echostring = \"\"\n        self._api = create_test_api({})\n        self._chat = TestAPI.TestChat(self._api)\n        self.cmd = command.CommandHandler(prefix=[ \"!\", \"@bot\" ], admins=[ \"root\" ])\n\n    async def test_fail(self):\n        async def cmd_fail(_msg, _argv):\n            return 2 / 0\n\n        self.cmd.register(\"fail\", cmd_fail, argc=0)\n\n        with self.assertRaises(ZeroDivisionError):\n            await self._run_command(\"!fail\")\n\n    async def test_permission(self):\n        async def cmd_admin(_msg, _argv):\n            pass\n\n        self.cmd.register(\"admin\", cmd_admin, argc=0, flags=CommandFlag.Admin)\n\n        with self.assertRaises(command.CommandPermError):\n            await self._run_command(\"@bot admin\")\n\n        try:\n            await self._run_command(\"@bot admin\", \"root\")\n        except command.CommandPermError:\n            self.fail(\"There should not be a permission error\")\n\n    async def test_invoke(self):\n        async def cmd_echo(_msg, argv):\n            self._echostring = \" \".join(argv[1:])\n\n        self.cmd.register(\"echo\", cmd_echo)\n        await self._run_command(\"!echo foo bar asdf\")\n        self.assertEqual(self._echostring, \"foo bar asdf\")\n\n    async def test_invoke_expand(self):\n        async def cmd_expand(_msg, _arg1, arg2):\n            self._echostring = arg2\n\n        self.cmd.register(\"expand\", cmd_expand, argc=2, flags=CommandFlag.Expand)\n        await self._run_command(\"!
expand foo bar bla blub\")\n self.assertEqual(self._echostring, \"bar\")\n\n async def test_not_invoke(self):\n async def test_cmd(_msg, _argv):\n self._echostring = \"foo\"\n\n self.cmd.register(\"test\", test_cmd, argc=0)\n\n # These should not be executed\n await self._run_command(\"test\")\n self.assertEqual(self._echostring, \"\")\n\n with self.assertRaises(command.CommandNotFoundError):\n await self._run_command(\"!missing_command\")\n\n async def test_repeat(self):\n async def test_cmd(_msg, _argv):\n self._echostring += \"A\"\n\n self.cmd.register(\"test\", test_cmd, argc=0)\n\n await self._run_command(\"!test\")\n self.assertEqual(self._echostring, \"A\")\n await self._run_command(\"!!\")\n self.assertEqual(self._echostring, \"AA\")\n\n with self.assertRaises(command.CommandNotFoundError):\n await self._run_command(\"!missing_command\")\n\n async def test_missing_handlers(self):\n async def cmd_missing(_msg, _a, _b, _c):\n self._echostring += \"A\"\n\n async def cmd_missing2(_msg, _argv):\n self._echostring += \"B\"\n\n async def cmd_missing_continue(_msg, _argv):\n self._echostring += \"C\"\n raise command.CommandNotFoundError()\n\n self.cmd.register(\"missing_handler1\", cmd_missing, argc=3, flags=CommandFlag.Expand | CommandFlag.Missing)\n self.cmd.register(\"missing_handler2\", cmd_missing_continue, argc=2, flags=CommandFlag.Missing)\n self.cmd.register(\"missing_handler3\", cmd_missing2, argc=0, flags=CommandFlag.Missing)\n\n # Should execute cmd_missing2 because cmd_missing and cmd_missing_continue expect 3 and 2 arguments\n await self._run_command(\"!missing foo\")\n self.assertEqual(self._echostring, \"B\")\n\n # Should execute cmd_missing\n self._echostring = \"\"\n await self._run_command(\"!missing foo bar foobar\")\n self.assertEqual(self._echostring, \"A\")\n\n # Should execute cmd_missing_continue and then cmd_missing2\n self._echostring = \"\"\n await self._run_command(\"!missing foo bar\")\n self.assertEqual(self._echostring, \"CB\")\n\n async def _run_command(self, cmdstring, author=\"user\"):\n msg = TestAPI.TestingMessage(TestAPI.User(author, \"\", None), cmdstring, self._chat)\n await self.cmd.execute(msg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"mphe/pychatbot","sub_path":"tests/test_command.py","file_name":"test_command.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"15717891227","text":"import logging\n\nimport plotly.graph_objects as go\nimport yfinance as yf\nfrom plotly.subplots import make_subplots\n\nfrom bots import imps\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.stocks.dark_pool_shorts import stockgrid_model\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef spos_command(ticker: str = \"\"):\n \"\"\"Net short vs position [Stockgrid]\"\"\"\n\n # Debug user input\n if imps.DEBUG:\n logger.debug(\"dps-spos %s\", ticker)\n\n # Check for argument\n if ticker == \"\":\n raise Exception(\"Stock ticker is required\")\n\n ticker = ticker.upper()\n\n stock = yf.download(ticker, progress=False)\n if stock.empty:\n raise Exception(\"Stock ticker is invalid\")\n\n # Retrieve data\n df = stockgrid_model.get_net_short_position(ticker)\n\n # Debug user output\n if imps.DEBUG:\n logger.debug(df.to_string())\n\n # Output data\n title = f\"Stocks: [Stockgrid] Net Short vs Position {ticker}\"\n\n fig = make_subplots(shared_xaxes=True, specs=[[{\"secondary_y\": True}]])\n\n 
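# Editor's sketch (added commentary, not part of the surrounding record): the
# dual-axis pattern used below in miniature -- one trace per axis, then per-axis
# styling. The data values are made up for illustration.
demo = make_subplots(specs=[[{"secondary_y": True}]])
demo.add_trace(go.Bar(x=[1, 2, 3], y=[10, 20, 15], name="net short vol."), secondary_y=False)
demo.add_trace(go.Scatter(x=[1, 2, 3], y=[1e6, 2e6, 1.5e6], name="position"), secondary_y=True)
demo.update_yaxes(title_text="Net Short Vol. ($)", secondary_y=False)
demo.update_yaxes(title_text="Position ($)", secondary_y=True)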
fig.add_trace(\n        go.Scatter(\n            name=\"Position ($)\",\n            x=df[\"dates\"].values,\n            y=df[\"dollar_dp_position\"] * 1_000,\n            line=dict(color=\"#fdc708\", width=2),\n            opacity=1,\n            showlegend=False,\n        ),\n        secondary_y=True,\n    )\n    fig.add_trace(\n        go.Bar(\n            name=\"Net Short Vol. ($)\",\n            x=df[\"dates\"],\n            y=df[\"dollar_net_volume\"],\n            opacity=1,\n            showlegend=False,\n        ),\n        secondary_y=False,\n    )\n    if imps.PLT_WATERMARK:\n        fig.add_layout_image(imps.PLT_WATERMARK)\n\n    # Set y-axes titles\n    fig.update_xaxes(dtick=\"M1\", tickformat=\"%b %d\\n%Y\")\n    fig.update_yaxes(title_text=\"Position ($)\", secondary_y=True)\n    fig.update_traces(hovertemplate=\"%{y:.2s}\")\n    fig.update_layout(\n        margin=dict(l=0, r=10, t=40, b=20),\n        template=imps.PLT_TA_STYLE_TEMPLATE,\n        colorway=imps.PLT_TA_COLORWAY,\n        title=f\"Net Short Vol. vs Position for {ticker}\",\n        title_x=0.5,\n        yaxis_title=\"Net Short Vol. ($)\",\n        font=imps.PLT_FONT,\n        yaxis=dict(\n            side=\"left\",\n            showgrid=False,\n            fixedrange=False,\n            layer=\"above traces\",\n            titlefont=dict(color=\"#d81aea\"),\n            tickfont=dict(color=\"#d81aea\"),\n            nticks=10,\n        ),\n        xaxis=dict(\n            rangeslider=dict(visible=False),\n            type=\"date\",\n            fixedrange=False,\n        ),\n        xaxis2=dict(\n            rangeslider=dict(visible=False),\n            type=\"date\",\n            fixedrange=False,\n        ),\n        dragmode=\"pan\",\n        legend=dict(orientation=\"h\", yanchor=\"bottom\", y=1.02, xanchor=\"right\", x=1),\n        yaxis2=dict(\n            side=\"right\",\n            position=0.15,\n            fixedrange=False,\n            titlefont=dict(color=\"#fdc708\"),\n            tickfont=dict(color=\"#fdc708\"),\n            nticks=10,\n        ),\n        hovermode=\"x unified\",\n    )\n\n    imagefile = \"dps_spos.png\"\n\n    # Check if interactive settings are enabled\n    plt_link = \"\"\n    if imps.INTERACTIVE:\n        plt_link = imps.inter_chart(fig, imagefile, callback=False)\n\n    fig.update_layout(\n        width=800,\n        height=500,\n    )\n\n    imagefile = imps.image_border(imagefile, fig=fig)\n\n    return {\n        \"title\": title,\n        \"description\": plt_link,\n        \"imagefile\": imagefile,\n    }\n","repo_name":"rohankumardubey/OpenBBTerminal","sub_path":"bots/stocks/dark_pool_shorts/spos.py","file_name":"spos.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
{"seq_id":"42044235276","text":"import time\nimport numpy as np\nimport json\nfrom transformer import Constants\n\nfrom math import radians, cos, sin, asin, sqrt\nimport math\n\n# \"latitude\":35.4627242,\"longitude\":-80.8526119\n\nusers = np.load('./dataset/dataset/yelp_user_level_4_user_id.npy')\nbusinesses = np.load('./dataset/dataset/yelp_business_level_4_business_id.npy')\n# print([ businesses[i] for i in range(10)])\n\ncount = 0\nf = open('./dataset/dataset/final_business.json', 'r') # 3433618\nline = f.readline()\n\nt1 = time.time()\nw = open('./dataset/dataset/Yelp_poi_coos.txt', 'w')\nwhile line:\n    j = json.loads(line)\n\n    w.write(str(count+1)+'\\t'+str(j['latitude'])+'\\t'+str(j['longitude'])+'\\n')\n\n    if count % 100000 == 0:\n        # print(whole_data)\n        print(time.time()-t1)\n        # break\n\n    count += 1\n    line = f.readline()\n\nprint(count)\nw.close()\n\ndef geodistance(lat1, lng1,lat2, lng2):\n    #lng1,lat1,lng2,lat2 = (125.12802999999997,30.28708,115.86572500000001,28.7427)\n    lng1, lat1, lng2, lat2 = map(radians, [float(lng1), float(lat1), float(lng2), float(lat2)]) # convert longitude/latitude from degrees to radians\n    dlon=lng2-lng1\n    dlat=lat2-lat1\n    a=sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n    distance=2*asin(sqrt(a))*6371*1000 # mean Earth radius is 6371 km\n    distance=round(distance/1000,3)\n    return distance\n\ncount = 0
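# Editor's sketch (added commentary, not the original author's code): the O(n^2)
# Python loop below can be replaced with a vectorized NumPy haversine that builds
# the whole distance matrix in a few array operations.
def haversine_matrix(lat_arr, lng_arr):
    lat = np.radians(np.asarray(lat_arr, dtype=float))[:, None]  # shape (n, 1)
    lng = np.radians(np.asarray(lng_arr, dtype=float))[:, None]
    dlat = lat - lat.T                                           # pairwise deltas, shape (n, n)
    dlng = lng - lng.T
    a = np.sin(dlat / 2) ** 2 + np.cos(lat) * np.cos(lat.T) * np.sin(dlng / 2) ** 2
    return 2 * np.arcsin(np.sqrt(a)) * 6371.0                    # distances in km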
lats = []\nlngs = []\nf = open('./dataset/dataset/Yelp_poi_coos.txt', 'r')\n# f = open('../Gowalla/Gowalla_poi_coos.txt', 'r')\nline = f.readline()\nwhile line:\n    count += 1\n    j = line.split(\"\\t\")\n    lats.append(float(j[1]))\n    lngs.append(float(j[2]))\n    line = f.readline()\n\nf.close()\n# Gowalla 18737\t32510\n# Yelp 28038\t15745\n\npoi_number = Constants.TYPE_NUMBER\n# Old Yelp 30887 119876 28038\nuser_number = Constants.USER_NUMBER\n\nt1 = time.time()\ndisc = np.zeros((poi_number, poi_number))\nfor i in range(poi_number):\n    for j in range(poi_number):\n        if i < j:\n            if abs(lats[i]-lats[j]) > 1 or abs(lngs[i]-lngs[j]) > 1:\n                disc[i][j] = disc[j][i] = 999\n            else:\n                disc[i][j] = disc[j][i] = geodistance(lats[i],lngs[i], lats[j],lngs[j])\n    if i % 100 == 0:\n        print(i, time.time()-t1)\n\nnp.save('./dataset/dataset/disc.npy', disc)","repo_name":"WangXFng/STaTRL","sub_path":"albert/step5_distance_matrix.py","file_name":"step5_distance_matrix.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
{"seq_id":"28229705393","text":"# -*- coding: utf-8 -*-\n\n# This file is part of the DBFV site.\n#\n# the DBFV site is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# the DBFV site is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with the DBFV site. If not, see <http://www.gnu.org/licenses/>.\n\n# Standard Library\nimport logging\nfrom email.mime.application import MIMEApplication\nfrom functools import wraps\n\n# Django\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core import mail\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.http import (\n    HttpRequest,\n    HttpResponse,\n)\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\n# dbfv\nfrom submission.helpers import build_starter_pdf, build_judge_pdf\n\nlogger = logging.getLogger(__name__)\n\n\nclass ManagerEmail(models.Model):\n    \"\"\"\n    Emails to be notified when a new submission is entered\n    \"\"\"\n\n    email = models.EmailField(_(u'Email'), max_length=30)\n\n    def __str__(self):\n        \"\"\"\n        Return a more human-readable representation\n        \"\"\"\n        return self.email\n\n\nclass BankAccount(models.Model):\n    \"\"\"\n    Model for a bank account\n    \"\"\"\n\n    owner_name = models.CharField(\n        verbose_name='Begünstigter',\n        max_length=100,\n    )\n    iban = models.CharField(\n        verbose_name='IBAN',\n        max_length=34,\n    )\n    bic = models.CharField(\n        verbose_name='BIC', max_length=11, help_text=u'Nur bei Auslandsüberweisung nötig'\n    )\n    bank_name = models.CharField(\n        verbose_name='Bankname',\n        max_length=30,\n    )\n\n    def __str__(self):\n        \"\"\"\n        Return a more human-readable representation\n        \"\"\"\n        return \"%s, BIC: %s\" % (self.iban, self.bic)\n\n\nclass State(models.Model):\n    \"\"\"\n    Model for a state\n    \"\"\"\n\n    name = models.CharField(\n        verbose_name='Name',\n        max_length=100,\n    )\n    short_name = models.CharField(\n        verbose_name='Kürzel',\n        max_length=3,\n    )\n    email 
= models.EmailField(verbose_name='Email', max_length=120, blank=True)\n bank_account = models.ForeignKey(\n BankAccount, verbose_name='Bankkonto', on_delete=models.CASCADE\n )\n\n def __str__(self):\n \"\"\"\n Return a more human-readable representation\n \"\"\"\n return self.name\n\n\nclass Gym(models.Model):\n \"\"\"\n Model for a gym\n \"\"\"\n\n class Meta:\n \"\"\"\n Order first by state name, then by gym name\n \"\"\"\n ordering = [\"state__name\", \"name\"]\n\n # Properties\n name = models.CharField(\n verbose_name='Name',\n max_length=100,\n )\n email = models.EmailField(verbose_name='Email', blank=True, null=True)\n state = models.ForeignKey(State, verbose_name='Bundesland', on_delete=models.CASCADE)\n\n owner = models.CharField(verbose_name='Inhaber', max_length=100, blank=True, null=True)\n zip_code = models.IntegerField(_(u'PLZ'), blank=True, null=True)\n city = models.CharField(_(u'Ort'), max_length=30, blank=True, null=True)\n street = models.CharField(_(u'Straße'), max_length=30, blank=True, null=True)\n is_active = models.BooleanField(_('Ist aktiv'), default=True)\n\n def __str__(self):\n \"\"\"\n Return a more human-readable representation\n \"\"\"\n return u\"{0} ({1}, {2})\".format(self.name, self.city, self.state)\n\n def get_absolute_url(self):\n return reverse('gym-view', kwargs={'pk': self.id})\n\n\nclass Country(models.Model):\n \"\"\"\n Model for a country\n \"\"\"\n\n # This field is required.\n name = models.CharField(max_length=40)\n\n def __str__(self):\n \"\"\"\n Return a more human-readable representation\n \"\"\"\n return self.name\n\n\ndef attachment_submission_dir(instance, filename):\n return \"anlagen/antrag/%s/%s/%s\" % (instance.gym.state.short_name, instance.gym_id, filename)\n\n\n#\n#\n# Submissions\n#\n#\nclass AbstractSubmission(models.Model):\n \"\"\"\n Abstract class with fields and logic common to all submissions types\n (starter, gym and judge)\n \"\"\"\n\n class Meta:\n \"\"\"\n This is an abstract class\n \"\"\"\n abstract = True\n\n MAILMERGE_HEADER = []\n\n SUBMISSION_STATUS_EINGEGANGEN = '1'\n SUBMISSION_STATUS_BEWILLIGT = '2'\n SUBMISSION_STATUS_ABGELEHNT = '3'\n\n SUBMISSION_STATUS = (\n (SUBMISSION_STATUS_EINGEGANGEN, 'Eingegangen'),\n (SUBMISSION_STATUS_BEWILLIGT, 'Bewilligt'),\n (SUBMISSION_STATUS_ABGELEHNT, 'Abgelehnt'),\n )\n\n user = models.ForeignKey(User, verbose_name=_('User'), editable=False, on_delete=models.CASCADE)\n creation_date = models.DateField(_('Creation date'), auto_now_add=True)\n submission_status = models.CharField(\n max_length=2, choices=SUBMISSION_STATUS, default=SUBMISSION_STATUS_EINGEGANGEN\n )\n mail_merge = models.BooleanField(default=False, editable=False)\n \"\"\"Deprecated\"\"\"\n\n pdf_sent = models.BooleanField(default=False)\n \"\"\"Flag indicating whether the athlete has received the confirmation PDF\"\"\"\n\n def get_bank_designated_use(self):\n \"\"\"\n Returns the designated use to be used when doing the bank transfer\n \"\"\"\n return u'{0} {1}
    \\n{2}'.format(self.get_license_type(), self.pk, self.get_name)\n\n def get_email_list(self):\n \"\"\"\n Collects and returns a list with the recipients of notification emails\n \"\"\"\n raise NotImplementedError('You must implement this method in derived classes')\n\n def get_email_subject(self):\n \"\"\"\n Returns the subject for the notification email\n \"\"\"\n return u'Neue {0} beantragt von {1}'.format(self.get_license_type(), self.get_name)\n\n def get_email_template(self):\n \"\"\"\n Returns the template used for the notification email\n \"\"\"\n raise NotImplementedError('You must implement this method in derived classes')\n\n @staticmethod\n def get_license_type():\n \"\"\"\n Returns the name of the license, this is used e.g. in the email subject\n \"\"\"\n raise NotImplementedError('You must implement this method in derived classes')\n\n def get_search_json(self):\n \"\"\"\n Returns the necessary JSON to be used in the search\n \"\"\"\n return {\n 'id': self.id,\n 'name': self.get_name,\n 'status': self.get_submission_status_display(),\n 'date': self.creation_date.strftime(\"%d.%m.%Y\")\n }\n\n def notification_email_hook(self):\n \"\"\"\n Hook to perform custom logic after sending the notification emails\n \"\"\"\n pass\n\n def send_emails(self, extra_data=None):\n \"\"\"\n Email the managers\n \"\"\"\n if extra_data is None:\n extra_data = []\n context = {'submission': self,\n 'fee': self.FEE,\n 'bankaccount': BankAccount.objects.get(pk=self.get_bank_account()),\n 'extra_data': extra_data}\n for email in self.get_email_list():\n\n if email == self.email:\n context['is_user'] = True\n else:\n context['is_user'] = False\n\n message = render_to_string(self.get_email_template(), context)\n mail.send_mail(\n self.get_email_subject(),\n message,\n settings.DEFAULT_FROM_EMAIL, [email],\n fail_silently=True\n )\n\n # Perform custom logic\n self.notification_email_hook()\n\n def get_mailmerge_row(self):\n \"\"\"\n Returns a row for the mailmerge CSV export\n \"\"\"\n raise NotImplementedError('You must implement this method in derived classes')\n\n\nclass SubmissionStarter(AbstractSubmission):\n \"\"\"\n Model for a submission\n \"\"\"\n\n class Meta:\n \"\"\"\n Order first by state name, then by gym name\n \"\"\"\n ordering = [\"creation_date\", \"gym\"]\n\n MAILMERGE_HEADER = [\n 'ID', 'Vorname', 'Nachname', 'Geburtsdatum', 'Aktiv Seit', 'Straße', 'Hausnummer', 'PLZ',\n 'Stadt', 'Telefon', 'Email', 'Nationalität', 'Größe', 'Gewicht', 'Kategorie', 'Studio',\n 'Bundesverband', 'Datum', 'Jahr'\n ]\n\n SUBMISSION_CATEGORY = (\n ('1', u'Bikini-Fitness Klasse I'),\n ('19', u'Bikini-Fitness Klasse II'),\n ('20', u'Bikini-Fitness Klasse III'),\n ('21', u'Frauen Wellness Klasse'),\n ('2', u'Frauen Fitness-Figur Klasse I'),\n ('22', u'Frauen Fitness-Figur Klasse II'),\n # ('3', u'Frauen Bodyklasse'),\n ('4', u'Frauen Physique'),\n # ('5', u'Juniorenklasse'),\n ('6', u'Classic-Bodybuilding Klasse I'),\n ('23', u'Classic-Bodybuilding Klasse II'),\n ('30', u'Classic Physique'),\n ('7', u'Paare'),\n ('31', u'Fit Pairs'),\n ('8', u'Männer Physique Klassse I'),\n ('24', u'Männer Physique Klasse II'),\n ('25', u'Männer Physique Klasse III'),\n # ('9', u'Männer Bodyklasse'),\n # ('10', u'Wellness-Fitness'),\n ('11', u'Muscular-Physique'),\n # ('12', u'Masters-Männer BB'),\n ('26', u'Männer Klasse I'),\n ('27', u'Männer Klasse II'),\n ('28', u'Männer Klasse III'),\n ('29', u'Männer Klasse IV'),\n ('30', u'Männer Klasse V'),\n # ('13', u'Masters-Männer Classic BB'),\n # ('14', u'Masters-Männer 
Physique'),\n # ('15', u'Masters-Frauen Physique'),\n # ('16', u'Masters-Frauen Bikini Fitness'),\n # ('17', u'Masters-Frauen Figur'),\n )\n # Commented out: out = [3, 5, 9, 10, 12, 13, 14, 15, 16, 17]\n\n FEE = 90\n\n # Personal information\n date_of_birth = models.DateField(_('Geburtsdatum'))\n active_since = models.CharField(_('Aktiv seit'), max_length=20)\n last_name = models.CharField(_('Familienname'), max_length=30)\n first_name = models.CharField(_('Vorname'), max_length=30)\n street = models.CharField(_(u'Straße'), max_length=30)\n house_nr = models.CharField(_(u'Hausnummer'), max_length=30)\n zip_code = models.IntegerField(_(u'PLZ'))\n city = models.CharField(_(u'Ort'), max_length=30)\n tel_number = models.CharField(_(u'Tel. Nr.'), max_length=20)\n email = models.EmailField(_(u'Email'), max_length=120)\n nationality = models.ForeignKey(\n Country,\n verbose_name=u'Staatsangehörigkeit',\n default=37, # Germany\n on_delete=models.CASCADE\n )\n height = models.IntegerField(_(u'Größe (cm)'))\n weight = models.DecimalField(_(u'Wettkampfgewicht (kg)'), max_digits=5, decimal_places=2)\n category = models.CharField(\n _(u'Kategorie'),\n max_length=2,\n choices=SUBMISSION_CATEGORY,\n )\n terms_and_conditions = models.BooleanField(\n 'Hiermit erkläre ich mich mit den Regeln des DBFV e.V.', blank=False\n )\n\n # Other fields\n submission_last_year = models.BooleanField(\n u\"Im Vorjahr wurde bereits eine Lizenz beantragt\", default=False\n )\n\n gym = models.ForeignKey(Gym, verbose_name='Studio', on_delete=models.CASCADE)\n\n def __str__(self):\n \"\"\"\n Return a more human-readable representation\n \"\"\"\n return \"%s - %s\" % (self.creation_date, self.user)\n\n def get_absolute_url(self):\n return reverse('submission-view', kwargs={'pk': self.pk})\n\n @property\n def get_name(self):\n \"\"\"\n Returns the name of the participant\n \"\"\"\n return u\"{0}, {1}\".format(self.last_name, self.first_name)\n\n def get_bank_account(self):\n \"\"\"\n Returns the correct bank account for this submission\n \"\"\"\n bank_account = 1\n if self.gym.state_id == 10:\n bank_account = 2\n\n return bank_account\n\n @staticmethod\n def get_license_type():\n \"\"\"\n Returns the name of the license, this is used e.g. in the email subject\n \"\"\"\n return 'Starterlizenz'\n\n @staticmethod\n def get_email_template():\n \"\"\"\n Returns the template used for the notification email\n \"\"\"\n return 'submission/starter/email_new_submission.html'\n\n def send_pdf_email(self):\n \"\"\"\n Sends the confirmation PDF to the user\n \"\"\"\n email_subject = f'Starterlizenz {self.creation_date.year}'\n email_text = f\"\"\"Sehr geeehrte Dame, sehr geehrter Herr {self.last_name},\n \nmit dieser E-Mail erhalten Sie ihre beantragte Starterlizenz des DBFV e. V. für\ndas Kalenderjahr {self.creation_date.year}.\n\nWir wünschen Ihnen eine gute Vorbereitung und viel Spaß und Erfolg bei der\nTeilnahme an unseren Meisterschaften.\n\nIhr DBFV e. 
V.\n \"\"\"\n logger.info(f'Sending PDF for submission {self.id} - ({self.user.email})')\n msg = EmailMultiAlternatives(\n email_subject,\n email_text,\n settings.DEFAULT_FROM_EMAIL,\n [self.user.email],\n )\n msg.mixed_subtype = 'related'\n\n # Build the PDF and attach it to the email\n response = HttpResponse(content_type='application/pdf')\n build_starter_pdf(HttpRequest(), self, response)\n msg_part = MIMEApplication(response.content)\n msg_part['Content-Disposition'] = f'attachment; filename=\"Starterlizenz-{self.id}.pdf\"'\n msg.attach(msg_part)\n\n # Send the email\n msg.send()\n\n def get_email_list(self):\n \"\"\"\n Collects and returns a list with the recipients of notification emails\n \"\"\"\n email_list = []\n for email in ManagerEmail.objects.all():\n email_list.append(email.email)\n\n if self.gym.email:\n email_list.append(self.gym.email)\n\n if self.gym.state.email:\n email_list.append(self.gym.state.email)\n\n # Hamburg\n if self.gym.state.pk == 6:\n email_list.append('clausmaibaum@web.de')\n\n # Hessen\n if self.gym.state.pk == 7:\n email_list.append('info@hbbkv.com')\n\n # Bayern\n if self.gym.state.pk == 2:\n email_list.append('lambert.boehm@blv-bfk.de')\n\n email_list.append(self.email)\n return email_list\n\n def notification_email_hook(self):\n \"\"\"\n Notify the managers if the selected gym has no email\n \"\"\"\n if not self.gym.email:\n for email in ManagerEmail.objects.all():\n mail.send_mail(\n 'Studio hat keine Emailadresse',\n u\"Eine Starterlizenz wurde für ein Studio beantragt, dass\\n\"\n u\"keine Emailadresse im System hinterlegt hat.\\n\\n\"\n u\"* Nr.: {studio.pk}\\n\"\n u\"* Name: {studio.name}\\n\"\n u\"* Bundesland: {studio.state.name}\\n\".format(studio=self.gym),\n settings.DEFAULT_FROM_EMAIL, [email.email],\n fail_silently=True\n )\n\n def get_search_json(self):\n \"\"\"\n Returns the necessary JSON to be used in the search\n \"\"\"\n data = super(SubmissionStarter, self).get_search_json()\n data['state'] = self.gym.state.name\n data['category'] = self.get_category_display()\n data['gym'] = self.gym.name\n return data\n\n def get_mailmerge_row(self):\n \"\"\"\n Returns a row for the mailmerge CSV export\n \"\"\"\n return [\n self.pk,\n self.first_name,\n self.last_name,\n self.date_of_birth,\n self.active_since,\n self.street,\n self.house_nr,\n self.zip_code,\n self.city,\n self.tel_number,\n self.email,\n self.nationality.name,\n self.height,\n self.weight,\n self.get_category_display(),\n self.gym.name,\n self.gym.state,\n self.creation_date,\n self.creation_date.year,\n ]\n\n def save(self, *args, **kwargs):\n \"\"\"\n For existing submissions, if we change the status to approved, email the user\n \"\"\"\n if self.pk and self.submission_status == self.SUBMISSION_STATUS_BEWILLIGT:\n self.send_pdf_email()\n self.pdf_sent = True\n\n super().save(*args, **kwargs)\n\n\nclass SubmissionInternational(AbstractSubmission):\n \"\"\"\n Model for a submission\n \"\"\"\n\n class Meta:\n \"\"\"\n Order first by state name, then by gym name\n \"\"\"\n ordering = [\"creation_date\", \"gym\"]\n\n MAILMERGE_HEADER = [\n 'ID', 'Vorname', 'Nachname', 'Geburtsdatum', 'Aktiv Seit', 'Straße', 'PLZ', 'Stadt',\n 'Telefon', 'Email', 'Nationalität', 'Größe', 'Gewicht', 'Kategorie', 'Studio',\n 'Bundesverband', 'Datum', 'Jahr', 'Meisterschaft', 'Datum der Meisterschaft'\n ]\n\n SUBMISSION_CATEGORY = (\n ('1', u'Jugend-Bikini-Fitness'),\n ('2', u'Jugend-Mens Physique'),\n ('3', u'Jugend-Bodybuilding'),\n ('4', u'Junioren-Bikini-Fitness'),\n ('5', u'Junioren-Mens 
Physique'),\n ('6', u'Junioren-Bodybuilding'),\n ('23', u'Junioren-Classic Bodybuilding'),\n ('21', u'Junioren-Frauen Fitness Figur'),\n ('22', u'Junioren-Frauen Physique'),\n ('7', u'Frauen-Bikini-Fitness'),\n ('25', u'Frauen-Wellness'),\n ('8', u'Frauen-Fitness-Figur'),\n ('9', u'Frauen-Physique'),\n ('10', u'Paare'),\n ('11', u'Handicappt/Wheelchair'),\n ('12', u'Classic Bodybuilding'),\n ('24', u'Classic Physique'),\n ('13', u'Männer Physique'),\n ('24', u'Männer Muscular Physique'),\n ('14', u'Männer Bodybuilding'),\n ('15', u'Masters-Männer BB'),\n ('16', u'Masters-Männer Classic BB'),\n ('17', u'Masters-Männer Physique'),\n ('18', u'Masters-Frauen Physique'),\n ('19', u'Masters-Frauen Bikini Fitness'),\n ('20', u'Masters-Frauen Figur'),\n )\n\n FEE = 0\n\n # Personal information\n date_of_birth = models.DateField(_('Geburtsdatum'))\n last_name = models.CharField(_('Familienname'), max_length=30)\n first_name = models.CharField(_('Vorname'), max_length=30)\n street = models.CharField(_(u'Straße'), max_length=30)\n zip_code = models.IntegerField(_(u'PLZ'))\n city = models.CharField(_(u'Ort'), max_length=30)\n tel_number = models.CharField(_(u'Tel. Nr.'), max_length=20)\n email = models.EmailField(_(u'Email'), max_length=120)\n nationality = models.ForeignKey(\n Country,\n verbose_name=u'Staatsangehörigkeit',\n default=37, # Germany\n on_delete=models.CASCADE\n )\n height = models.IntegerField(_(u'Größe (cm)'))\n weight = models.DecimalField(_(u'Wettkampfgewicht in kg (ca.)'), max_digits=5, decimal_places=2)\n category = models.CharField(_(u'Kategorie'), max_length=100)\n championship = models.CharField(\n _(u'Meisterschaft'), help_text=u'Meisterschaft in der Du starten möchtest', max_length=150\n )\n championship_date = models.DateField(_(u'Datum der Meisterschaft'))\n\n best_placement = models.CharField(\n u'Beste Platzierung',\n max_length=150,\n help_text='Beste Platzierung auf einer deutschen '\n 'DBFV/IFBB-Meisterschaft, mit Datum und Kategorie'\n )\n\n gym = models.ForeignKey(Gym, verbose_name='Studio', on_delete=models.CASCADE)\n\n def __str__(self):\n \"\"\"\n Return a more human-readable representation\n \"\"\"\n return \"%s - %s\" % (self.creation_date, self.user)\n\n def get_absolute_url(self):\n return reverse('submission-international-view', kwargs={'pk': self.pk})\n\n @property\n def get_name(self):\n \"\"\"\n Returns the name of the participant\n \"\"\"\n return u\"{0}, {1}\".format(self.last_name, self.first_name)\n\n def get_bank_account(self):\n \"\"\"\n Returns the correct bank account for this submission\n \"\"\"\n bank_account = 1\n if self.gym.state_id == 10:\n bank_account = 2\n\n return bank_account\n\n @staticmethod\n def get_license_type():\n \"\"\"\n Returns the name of the license, this is used e.g. 
in the email subject\n \"\"\"\n return 'Internationaler Start'\n\n def get_email_template(self):\n \"\"\"\n Returns the template used for the notification email\n \"\"\"\n return 'submission/international/email_new_submission.html'\n\n def get_email_list(self):\n \"\"\"\n Collects and returns a list with the recipients of notification emails\n \"\"\"\n email_list = [\n 'info@dbfv.de', 'dbfv.falk@gmail.com', \"Margret.Netack@t-online.de\", self.email\n ]\n if self.gym.state.email:\n email_list.append(self.gym.state.email)\n return email_list\n\n def notification_email_hook(self):\n \"\"\"\n Notify the managers if the selected gym has no email\n \"\"\"\n if not self.gym.email:\n for email in ManagerEmail.objects.all():\n mail.send_mail(\n 'Studio hat keine Emailadresse',\n u\"Eine internationale Lizenz wurde für ein Studio beantragt, dass\\n\"\n u\"keine Emailadresse im System hinterlegt hat.\\n\\n\"\n u\"* Nr.: {studio.pk}\\n\"\n u\"* Name: {studio.name}\\n\"\n u\"* Bundesland: {studio.state.name}\\n\".format(studio=self.gym),\n settings.DEFAULT_FROM_EMAIL, [email.email],\n fail_silently=True\n )\n\n def get_search_json(self):\n \"\"\"\n Returns the necessary JSON to be used in the search\n \"\"\"\n data = super().get_search_json()\n data['state'] = self.gym.state.name\n data['category'] = self.category\n data['gym'] = self.gym.name\n return data\n\n def get_mailmerge_row(self):\n \"\"\"\n Returns a row for the mailmerge CSV export\n \"\"\"\n return [\n self.pk, self.first_name, self.last_name, self.date_of_birth,\n self.street, self.zip_code, self.city, self.tel_number, self.email,\n self.nationality.name, self.height, self.weight,\n self.category, self.gym.name, self.gym.state, self.creation_date,\n self.creation_date.year, self.championship, self.championship_date\n ]\n\n\nclass SubmissionGym(AbstractSubmission):\n \"\"\"\n Model for a gym submission\n \"\"\"\n class Meta:\n \"\"\"\n Order first by state name, then by gym name\n \"\"\"\n ordering = [\"creation_date\", \"state\"]\n\n FEE = 30\n\n # Personal information\n state = models.ForeignKey(State, verbose_name=_(u'Landesverband'), on_delete=models.CASCADE)\n name = models.CharField(\n verbose_name=_('Name'), max_length=30, help_text=_('Name des Studios oder Verein')\n )\n owner = models.CharField(\n verbose_name='Inhaber', max_length=30,\n )\n founded = models.DateField(_(u'Gegründet am'))\n street = models.CharField(_(u'Straße'), max_length=30)\n zip_code = models.IntegerField(_(u'PLZ'))\n city = models.CharField(_(u'Ort'), max_length=30)\n tel_number = models.CharField(_(u'Tel. Nr.'), max_length=20)\n email = models.EmailField(_(u'Email'), max_length=120)\n\n # Other fields\n gym = models.OneToOneField(\n Gym, verbose_name='Studio', editable=False, blank=True, null=True, on_delete=models.CASCADE\n )\n\n def __str__(self):\n \"\"\"\n Return a more human-readable representation\n \"\"\"\n return u\"Studiolizenz {0}\".format(self.get_name)\n\n def get_absolute_url(self):\n return reverse('submission-studio-view', kwargs={'pk': self.pk})\n\n @property\n def get_name(self):\n \"\"\"\n Returns the name of the participant\n \"\"\"\n return self.name\n\n def get_bank_account(self):\n \"\"\"\n Returns the correct bank account for this submission\n \"\"\"\n return self.state.bank_account.pk\n\n @staticmethod\n def get_license_type():\n \"\"\"\n Returns the name of the license, this is used e.g. 
in the email subject\n        \"\"\"\n        return 'Studiolizenz'\n\n    def get_email_template(self):\n        \"\"\"\n        Returns the template used for the notification email\n        \"\"\"\n        return 'submission/gym/email_new_submission.html'\n\n    def get_email_list(self):\n        \"\"\"\n        Collects and returns a list with the recipients of notification emails\n        \"\"\"\n        email_list = []\n        for email in ManagerEmail.objects.all():\n            email_list.append(email.email)\n\n        if self.state.email:\n            email_list.append(self.state.email)\n\n        email_list.append(self.email)\n        return email_list\n\n    def get_absolute_url(self):\n        return reverse('submission-gym-view', kwargs={'pk': self.pk})\n\n\nclass SubmissionJudge(AbstractSubmission):\n    \"\"\"\n    Model for a judge submission\n    \"\"\"\n    class Meta:\n        \"\"\"\n        Order first by creation date, then by state\n        \"\"\"\n        ordering = [\"creation_date\", \"state\"]\n\n    MAILMERGE_HEADER = [\n        'ID', 'Vorname', 'Nachname', 'Straße', 'PLZ', 'Stadt', 'Telefon', 'Email', 'Bundesverband',\n        'Datum', 'Jahr'\n    ]\n\n    FEE = 20\n\n    last_name = models.CharField('Familienname', max_length=30)\n    first_name = models.CharField('Vorname', max_length=30)\n    street = models.CharField(u'Straße', max_length=30)\n    zip_code = models.IntegerField(u'PLZ')\n    city = models.CharField(u'Ort', max_length=30)\n    state = models.ForeignKey(State, verbose_name=u'Landesverband', on_delete=models.CASCADE)\n    tel_number = models.CharField(u'Tel. Nr.', max_length=20)\n    email = models.EmailField(u'Email', max_length=120, null=True, blank=True)\n\n    def __str__(self):\n        \"\"\"\n        Return a more human-readable representation\n        \"\"\"\n        return u\"Kampfrichterlizenz {0}\".format(self.get_name)\n\n    def get_absolute_url(self):\n        return reverse('submission-judge-view', kwargs={'pk': self.pk})\n\n    @property\n    def get_name(self):\n        \"\"\"\n        Returns the name of the participant\n        \"\"\"\n        return u\"{0}, {1}\".format(self.last_name, self.first_name)\n\n    def get_bank_account(self):\n        \"\"\"\n        Returns the correct bank account for this submission\n        \"\"\"\n        bank_account = 1\n        if self.state.pk == 10:\n            bank_account = 2\n\n        return bank_account\n\n    @staticmethod\n    def get_license_type():\n        \"\"\"\n        Returns the name of the license, this is used e.g. in the email subject\n        \"\"\"\n        return 'Kampfrichterlizenz'\n\n    def get_email_template(self):\n        \"\"\"\n        Returns the template used for the notification email\n        \"\"\"\n        return 'submission/judge/email_new_submission.html'\n\n    def get_email_list(self):\n        \"\"\"\n        Collects and returns a list with the recipients of notification emails\n        \"\"\"\n        email_list = []\n        for email in ManagerEmail.objects.all():\n            email_list.append(email.email)\n\n        if self.state.email:\n            email_list.append(self.state.email)\n\n        email_list.append(self.email)\n        email_list.append('kampfrichter@dbfv.de')\n\n        # Hamburg\n        if self.state.pk == 6:\n            email_list.append('clausmaibaum@web.de')\n\n        return email_list\n\n    def get_mailmerge_row(self):\n        \"\"\"\n        Returns a row for the mailmerge CSV export\n        \"\"\"\n        return [\n            self.pk, self.first_name, self.last_name, self.street, self.zip_code, self.city,\n            self.tel_number, self.email, self.state.name, self.creation_date,\n            self.creation_date.year\n        ]\n\n    def send_pdf_email(self):\n        \"\"\"\n        Sends the confirmation PDF to the user\n        \"\"\"\n        email_subject = f'Kampfrichterlizenz {self.creation_date.year}'\n        email_text = f\"\"\"Sehr geehrte Dame, sehr geehrter Herr {self.last_name},\n\nmit dieser E-Mail erhalten Sie Ihre beantragte Kampfrichterlizenz des DBFV e. V. 
für\ndas Kalenderjahr {self.creation_date.year}.\n\nWir wünschen Ihnen eine gute Vorbereitung und viel Spaß und Erfolg bei der\nTeilnahme an unseren Meisterschaften.\n\nIhr DBFV e. V.\n        \"\"\"\n        logger.info(f'Sending PDF for judge submission {self.id} - ({self.user.email})')\n        msg = EmailMultiAlternatives(\n            email_subject,\n            email_text,\n            settings.DEFAULT_FROM_EMAIL,\n            [self.user.email],\n        )\n        msg.mixed_subtype = 'related'\n\n        # Build the PDF and attach it to the email\n        response = HttpResponse(content_type='application/pdf')\n        build_judge_pdf(HttpRequest(), self, response)\n        msg_part = MIMEApplication(response.content)\n        msg_part['Content-Disposition'] = f'attachment; filename=\"Kampfrichterlizenz-{self.id}.pdf\"'\n        msg.attach(msg_part)\n\n        # Send the email\n        msg.send()\n\n    def save(self, *args, **kwargs):\n        \"\"\"\n        For existing submissions, if we change the status to approved, email the user\n        \"\"\"\n        if self.pk and self.submission_status == self.SUBMISSION_STATUS_BEWILLIGT:\n            self.send_pdf_email()\n            self.pdf_sent = True\n\n        super().save(*args, **kwargs)\n\n\nUSER_TYPE_UNKNOWN = -1\nUSER_TYPE_BUNDESVERBAND = 2\nUSER_TYPE_USER = 3\nUSER_TYPES = (\n    (USER_TYPE_BUNDESVERBAND, u'Bundesverband'),\n    (USER_TYPE_USER, u'User'),\n    (USER_TYPE_UNKNOWN, u'Unbekannt'),\n)\n\n\nclass UserProfile(models.Model):\n    \"\"\"\n    Model for a user's profile\n    \"\"\"\n    user = models.OneToOneField(User, on_delete=models.CASCADE)\n\n    email_verified = models.BooleanField(default=False)\n    \"\"\"Flag indicating whether the user's email has been verified\"\"\"\n\n    # User type\n    type = models.IntegerField(choices=USER_TYPES, default=USER_TYPE_USER)\n\n    # Personal information\n    state = models.ForeignKey(State, blank=True, null=True, on_delete=models.CASCADE)\n\n    terms_and_conditions = models.BooleanField(\n        'Hiermit erkläre ich mich mit den Regeln des DBFV e.V.', blank=False, default=False\n    )\n\n    def __str__(self):\n        \"\"\"\n        Return a more human-readable representation\n        \"\"\"\n        return 'Profile for %s' % self.user.username\n\n\ndef disable_for_loaddata(signal_handler):\n    \"\"\"\n    Decorator to prevent clashes when loading data with loaddata and\n    post_save signals. 
See also:\n http://stackoverflow.com/questions/3499791/how-do-i-prevent-fixtures-from-conflicting\n \"\"\"\n\n @wraps(signal_handler)\n def wrapper(*args, **kwargs):\n if kwargs['raw']:\n # print \"Skipping signal for {0} {1}\".format(args, kwargs)\n return\n signal_handler(*args, **kwargs)\n\n return wrapper\n\n\n# Every new user gets a profile\n@disable_for_loaddata\ndef create_user_profile(sender, instance, created, **kwargs):\n if created:\n UserProfile.objects.get_or_create(user=instance)\n\n\npost_save.connect(create_user_profile, sender=User)\n\n\ndef user_profile(user):\n \"\"\"\n Return the profile of user or None if the user is not authenticated.\n \"\"\"\n if not user.is_authenticated:\n return None\n\n # for authenticated users, look into the profile.\n return user.userprofile\n\n\ndef user_type(user):\n \"\"\"\n Return the type of user or None if the user is not authenticated.\n \"\"\"\n profile = user_profile(user)\n if profile is None:\n return None\n\n return profile.type\n","repo_name":"rolandgeider/dbfv","sub_path":"submission/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":31921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33608198906","text":"\"\"\"\nnew importer to use with MultiChannelEEGDataset\n\"\"\"\n__author__ = 'sstober'\n\nimport os\nimport glob\nimport csv\nimport math\n\nimport logging\nlog = logging.getLogger(__name__)\n\nimport numpy as np\nimport theano\n\nfrom pylearn2.utils.timing import log_timing\nfrom deepthought.util.fs_util import save, load\nfrom deepthought.datasets.rwanda2013rhythms import LabelConverter\n\n\ndef load_data_file(filename):\n\n #data = np.loadtxt(filename, dtype=float, delimiter=' ', skiprows=1) #, autostrip=True, names=False)\n with log_timing(log, 'loading data from {}'.format(filename)):\n data = np.genfromtxt(filename, dtype=theano.config.floatX, delimiter=' ', skip_header=1, autostrip=True)\n log.info('loaded {}'.format(data.shape))\n\n# print data.shape\n# print data[0]\n# print data[-1]\n\n return data\n\ndef load_xlsx_meta_file(filename):\n import xlrd\n\n book = xlrd.open_workbook(filename, encoding_override=\"cp1252\")\n sheet = book.sheet_by_index(0)\n\n onsets = []\n for i in range(1, sheet.nrows):\n onsets.append([sheet.cell(i,2).value, sheet.cell(i,0).value.encode('ascii')])\n log.debug(onsets[-1])\n\n return onsets\n\ndef generate_filepath_from_metadata(metadata):\n return '{}/{}.pklz'.format(\n metadata['subject'],\n metadata['label'],\n )\n\ndef split_session(sourcefile_path, trial_len):\n\n log.info('processing {}'.format(sourcefile_path))\n\n datafile = glob.glob(os.path.join(sourcefile_path,'*.txt'))[0]\n metafile = glob.glob(os.path.join(sourcefile_path,'*_Trials_Onsets.xlsx'))[0]\n\n log.debug('data file: {}'.format(datafile))\n log.debug('meta file: {}'.format(metafile))\n\n onsets = load_xlsx_meta_file(metafile)\n data = load_data_file(datafile)\n log.debug(onsets)\n\n onsets.append([len(data), 'end']) # artificial last marker\n\n trials = {}\n for i in xrange(len(onsets) - 1):\n onset, label = onsets[i]\n next_onset = onsets[i+1][0]\n\n # rounding to integers\n onset = int(math.floor(float(onset)))\n next_onset = int(math.floor(float(next_onset)))\n\n next_onset = min(onset+trial_len, next_onset)\n\n log.debug('[{}..{}) -> {}'.format(onset, next_onset, label))\n trial_data = np.vstack(data[onset:next_onset])\n log.debug('{} samples extracted'.format(trial_data.shape))\n\n trials[label] = trial_data\n\n # filename = 
os.path.join(path, 'trials.pklz')\n # with log_timing(log, 'saving to {}'.format(filename)):\n # save(filename, trials)\n\n return trials\n\ndef import_dataset(source_path, target_path):\n\n# config = load_config(default_config='../train_sda.cfg');\n\n # DATA_ROOT = source_path\n\n # DATA_ROOT = config.eeg.get('dataset_root', './')\n SAMPLE_RATE = 400 # in Hz\n TRIAL_LENGTH = 32 # in sec\n\n TRIAL_LENGTH += 4 # add 4s after end of presentation\n\n TRIAL_SAMPLE_LENGTH = SAMPLE_RATE * TRIAL_LENGTH\n\n log.info('using dataset at {}'.format(source_path))\n\n '''\n Note from Dan:\n All subjects should have channels 15, 16, 17 and 18 removed [...]\n If you want to make them truly identical, you could remove channel 19 from\n the subjects with more channels, although this should be 'good' data.\n '''\n bad_channels = {}\n bad_channels[1] = [5, 6, 15, 16, 17, 18, 20, 21]\n bad_channels[2] = [ 7, 8, 15, 16, 17, 18, 20, 21]\n bad_channels[3] = [5, 6, 15, 16, 17, 18, 20, 21]\n bad_channels[4] = [ 7, 8, 15, 16, 17, 18, 20, 21]\n bad_channels[5] = [ 7, 8, 15, 16, 17, 18, 20, 21]\n bad_channels[6] = [ 7, 8, 9, 12, 15, 16, 17, 18 ]\n bad_channels[7] = [5, 6, 12, 15, 16, 17, 18, 20 ]\n bad_channels[8] = [ 7, 8, 15, 16, 17, 18, 20, 21]\n bad_channels[9] = [5, 6, 12, 15, 16, 17, 18, 20 ]\n bad_channels[10] = [5, 6, 15, 16, 17, 18, 20, 21]\n bad_channels[11] = [5, 6, 15, 16, 17, 18, 20, 21]\n bad_channels[12] = [5, 6, 15, 16, 17, 18, 20, 21]\n bad_channels[13] = [5, 6, 12, 15, 16, 17, 18, 20 ]\n\n label_converter = LabelConverter()\n\n metadb_file = os.path.join(target_path, 'metadata_db.pklz')\n metadb = {} # empty DB\n\n with log_timing(log, 'generating datasets'):\n for subject_id in xrange(1,14):\n search_path = os.path.join(source_path, 'Sub{0:03d}*'.format(subject_id))\n sourcefile_path = glob.glob(search_path)\n\n if sourcefile_path is None or len(sourcefile_path) == 0:\n log.warn('nothing found at {}'.format(search_path))\n continue\n else:\n sourcefile_path = sourcefile_path[0]\n\n trials = split_session(sourcefile_path, TRIAL_SAMPLE_LENGTH)\n\n for stimulus, trial_data in trials.iteritems():\n stimulus_id = label_converter.get_stimulus_id(stimulus)\n log.debug('processing {} with {} samples and stimulus_id {}'.\n format(stimulus,trial_data.shape,stimulus_id))\n\n channels = trial_data.transpose()\n trial_data = []\n channel_ids = []\n for i, channel in enumerate(channels):\n channel_id = i+1\n # filter bad channels\n if channel_id in bad_channels[subject_id]:\n log.debug('skipping bad channel {}'.format(channel_id))\n continue\n\n # convert to float32\n channel = np.asfarray(channel, dtype='float32')\n\n trial_data.append(channel)\n channel_ids.append(channel_id)\n\n trial_data = np.vstack(trial_data).transpose() # fromat: (samples, channels)\n log.debug('extracted {} from channels: {}'.format(trial_data.shape, channel_ids))\n\n label = label_converter.get_label(stimulus_id, 'rhythm') # raw label, unsorted\n label = label_converter.shuffle_classes[label] # sorted label id\n metadata = {\n 'subject' : subject_id,\n 'label' : label,\n 'meta_label' : label_converter.get_label(stimulus_id, 'rhythm_meta'),\n 'stimulus' : stimulus,\n 'stimulus_id' : stimulus_id,\n 'rhythm_type' : label_converter.get_label(stimulus_id, 'rhythm'),\n 'tempo' : label_converter.get_label(stimulus_id, 'tempo'),\n 'audio_file' : label_converter.get_label(stimulus_id, 'audio_file'),\n 'trial_no' : 1,\n 'trial_type' : 'perception',\n 'condition' : 'n/a',\n 'channels' : channel_ids,\n }\n\n # save data\n savepath = 
generate_filepath_from_metadata(metadata)\n save(os.path.join(target_path, savepath), (trial_data, metadata), mkdirs=True)\n\n # save metadata\n metadb[savepath] = metadata\n\n log.debug('imported {}={} as {}'.format(label, metadata['meta_label'], savepath))\n\n save(metadb_file, metadb, mkdirs=True)\n log.info('import finished')\n\n\nif __name__ == '__main__':\n import deepthought\n from deepthought.util.config_util import init_logging\n init_logging()\n source_path = os.path.join(deepthought.DATA_PATH, 'rwanda2013rhythms', 'eeg')\n target_path = os.path.join(deepthought.DATA_PATH, 'rwanda2013rhythms', 'multichannel')\n import_dataset(source_path, target_path)","repo_name":"royyannick/deepthought","sub_path":"deepthought/datasets/rwanda2013rhythms/Importer.py","file_name":"Importer.py","file_ext":"py","file_size_in_byte":7779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"70517596882","text":"import cx_Oracle\nimport os\n\nos.environ[\"NLS_LANG\"] = \".AL32UTF8\"\n'''\n 10.29.128.24:1521/orcl '10.29.182.15:1521/orcl'\n '''\n\ncon = cx_Oracle.connect('hg_jgal', 'hg_jgal!123', '10.29.128.24:1521/qlzcgldb')\ncursor = con.cursor()\n\n# cursor.execute('')\n\n# sql = \"update HG_JGAL_CJDZGL set WZ='http://www.csrc.gov.cn/pub/ningbo/nbcxxx/' where FWJG=38 and BZ='市场禁入'\"\nsql = \"update HG_JGAL_CJDZGL set CJLX=3 where FWJG=10 and BZ='监管措施'\"\n# sql = \"update HG_JGAL_CJDZGL set CJLX=11 where FWJG=41\"\n# sql = \"update HG_JGAL_CJDZGL set QTZT=0 where FWJG=19 and BZ='证券机构监管-其它'\"\n# sql = \"insert into HG_JGAL_CJDZGL (ID, FWJG,WZ,ZJZXSJ,ZJZXJG,CJLX,QTZT,BZ) VALUES (128, 35, 'http://www.csrc.gov.cn/pub/zjhpublicofxj/index.htm?channel=2280','','','3',1,'监管措施')\"\n\n# cursor.execute(sql)\n\ncon.commit()\ncursor.close()\ncon.close()\n","repo_name":"zbh123/rpa_chi","sub_path":"数据处理/hegui/update_hegui.py","file_name":"update_hegui.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28088544865","text":"import logging\nimport os\n\nimport pytest\nfrom pytest_embedded import Dut\n\n\n@pytest.mark.esp32\n@pytest.mark.esp32c2\n@pytest.mark.esp32s2\n@pytest.mark.esp32s3\n@pytest.mark.esp32c3\n@pytest.mark.esp32c6\n@pytest.mark.esp32h2\n@pytest.mark.generic\ndef test_eventfd(dut: Dut) -> None:\n\n dut.expect_exact('main_task: Calling app_main()')\n\n exp_list_5seconds = [\n 'eventfd_example: Select timeouted for 1 times',\n 'eventfd_example: Timer triggerred for 2 times',\n 'eventfd_example: Progress triggerred for 1 times',\n ]\n\n exp_list_10seconds = [\n 'eventfd_example: Select timeouted for 2 times',\n 'eventfd_example: Timer triggerred for 4 times',\n 'eventfd_example: Progress triggerred for 2 times',\n ]\n\n logging.info('Expecting:{}{}'.format(os.linesep, os.linesep.join(exp_list_5seconds)))\n for exp in exp_list_5seconds:\n dut.expect_exact(exp)\n\n logging.info('Expecting:{}{}'.format(os.linesep, os.linesep.join(exp_list_10seconds)))\n for exp in exp_list_10seconds:\n dut.expect_exact(exp)\n","repo_name":"espressif/esp-idf","sub_path":"examples/system/eventfd/pytest_eventfd.py","file_name":"pytest_eventfd.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":11541,"dataset":"github-code","pt":"3"} +{"seq_id":"10107952804","text":"import vista\nfrom tkinter import Tk\nimport observador\n\n\nclass Controller:\n def __init__(self, root):\n self.root_controller = root\n self.objeto_vista = 
vista.Mivista(self.root_controller)\n self.el_observador = observador.ConcreteObserverA(self.objeto_vista.mi_abmc)\n\n\nif __name__ == \"__main__\":\n root_tk = Tk()\n Controller(root_tk)\n root_tk.mainloop()\n","repo_name":"tsantino/Rental_Car","sub_path":"controlador.py","file_name":"controlador.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21461033157","text":"# installed beautifulsoup4 & requests\nimport requests\nfrom bs4 import BeautifulSoup\nimport pprint #pretty print\n\nres = requests.get('https://news.ycombinator.com')\nsoup = BeautifulSoup(res.text, 'html.parser')\n# print(soup.find_all('div')) # div, a are elements\nlinks = soup.select('.storylink')\nsubtext = (soup.select('.subtext')) # select the class subtext (dot is a class)\n# print(votes[0].get('id')) #can use get to get the next level down eg id\n\n\ndef sort_by_votes(hn):\n return sorted(hn, key=lambda k: k['points'], reverse=1)\n\n\ndef sort_by_age(hn):\n return sorted(hn, key=lambda k: k['hours'])\n\n\ndef create_custom_hackernews(links, subtext):\n hn = []\n for idx, item in enumerate(links):\n\n title = links[idx].getText() # could replace links[idx] with item\n href = links[idx].get('href', None) # None is default in case no link\n vote = subtext[idx].select('.score')\n hours = subtext[idx].select('.age')[0].getText().replace(' hours ago', '')\n if len(vote):\n points = int(vote[0].getText().replace(' points', '')) # remember vote is a [] with 1 element\n if points > 100:\n hn.append({'idx': idx, 'title': title, 'link': href, 'points': points, 'hours': hours}) # use dict\n # print({'idx': idx, 'title': title, 'link': href, 'points': points})\n return hn\n\n\nhn = create_custom_hackernews(links, subtext)\nprint(f'{len(hn)} stories found with votes >100. Printing in order of upvotes first:')\npprint.pprint(sort_by_votes(hn))\n\n","repo_name":"bobomatic/scraper","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18750895904","text":"from xml.etree import ElementTree as ET\nimport os\n\n\ndef anonymize(filename):\n ''' Anonymize the file specified by the filename by reading its contents,\n replacing the contents with X's and saving the anonymized contents to \n a copy of the file. '''\n \n def anonymize_string(string):\n ''' Return an anonymized version of the string. '''\n return u'X' * len(string)\n \n def anonymize_text(text):\n ''' Return an anonymized version of the text, keeping the line \n breaks. '''\n return '\\n'.join([anonymize_string(line) for line in text.split('\\n')])\n\n def anonymize_node(node):\n ''' Recursively anonymize the node and all of its child nodes. 
'''\n for child in node:\n anonymize_node(child)\n\n if 'subject' in node.attrib:\n node.attrib['subject'] = anonymize_string(node.attrib['subject'])\n\n if node.tag in ('description', 'data') and node.text:\n node.text = anonymize_text(node.text)\n if node.tag == 'data':\n node.attrib['extension'] = \\\n anonymize_string(node.attrib['extension'])\n\n if node.tag == 'property' and 'name' in node.attrib and \\\n node.attrib['name'] == 'username':\n node.text = 'XXX' # pylint: disable=W0511\n\n if node.tag == 'attachment' and 'location' in node.attrib:\n node.attrib['location'] = anonymize_string(node.attrib['location'])\n \n tree = ET.parse(file(filename, 'rb'))\n anonymize_node(tree.getroot())\n name, ext = os.path.splitext(filename)\n anonymized_filename = name + '.anonymized' + ext\n tree.write(anonymized_filename)\n return anonymized_filename\n","repo_name":"TaskEvolution/Task-Coach-Evolution","sub_path":"taskcoach/taskcoachlib/tools/anonymize.py","file_name":"anonymize.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"7468524717","text":"# -*- coding: utf-8 -*-\n__author__ = 'bk'\n\nfrom experiment import utils\nfrom input import extractor as md\nfrom model import reg\nfrom model.raiman import raiman_lstm\n\nlstm_models = {'bk': None, 'cho': None, 'raiman': raiman_lstm}\n\nfile_attribute = ['normal', 'outlier', 'complex', 'chaotic', 'fluctuation']\nfile_format = '.png'\n\n# input parameter\nnormalize = False\ndelay_step = 10\nhistory_size = 10\nslice_step = 1\n\n# lstm parameter\n# 1,2 stack, non parallel, 100 seconds train, 0.93\n# 1_1 stack, 150 seconds, 10 history, 50 epochs 0.6\n\n# 2 stack, 100 seconds train, 0.52\nstack = 1\nparallel = True\ntraining_method = 'adadelta'\nrho = 0.62\n\nepochs = 50\n\n\ndef do_experiment(f_attr):\n md.delay_step = delay_step\n md.history_size = history_size\n\n for data_file_path in md.get_file_list(f_attr):\n\n raw_data_parallel, raw_data_non_parallel = md.load_raw_data(data_file_path, normalize)\n train_init_point = md.create_test_init_point(raw_data_parallel)\n\n lstm_raw_data = raw_data_parallel if parallel else raw_data_non_parallel\n model = train_lstm('raiman')\n model.train_lstm(stack, parallel,\n training_method, rho, epochs, slice_step,\n lstm_raw_data, train_init_point, data_file_path, f_attr)\n\n reg.train_regression(raw_data_non_parallel, train_init_point, data_file_path, f_attr)\n\n\ndef train_lstm(model):\n if model == \"bk\":\n model = None\n elif model == 'cho':\n model = None\n elif model == 'raiman':\n model = lstm_models[model]\n return model\n\n\nif __name__ == '__main__':\n do_experiment('small_test')\n utils.show_final_error_result()\n utils.init_error()\n","repo_name":"BK-Yoo/ee532-BrainIT-LSTM","sub_path":"experiment/execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"2968496266","text":"import pandas as pd\nimport numpy as np\nfrom scipy import stats\n\n\nclass DataCleaner:\n def __init__(self, df: pd.DataFrame, deep=False) -> None:\n \"\"\"\n Returns a DataCleaner Object with the passed DataFrame Data set as its own DataFrame\n Parameters\n ----------\n df:\n Type: pd.DataFrame\n\n Returns\n -------\n None\n \"\"\"\n if(deep):\n self.df = df.copy(deep=True)\n else:\n self.df = df\n\n def remove_unwanted_columns(self, columns: list) -> pd.DataFrame:\n \"\"\"\n Returns a DataFrame where 
the specified columns in the list are removed\n Parameters\n ----------\n columns:\n Type: list\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n self.df.drop(columns, axis=1, inplace=True)\n return self.df\n\n def remove_nulls(self) -> pd.DataFrame:\n return self.df.dropna()\n\n def change_columns_type_to(self, cols: list, data_type: str) -> pd.DataFrame:\n \"\"\"\n Returns a DataFrame where the specified columns data types are changed to the specified data type\n Parameters\n ----------\n cols:\n Type: list\n data_type:\n Type: str\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n try:\n for col in cols:\n self.df[col] = self.df[col].astype(data_type)\n except:\n print('Failed to change columns type')\n\n return self.df\n\n def remove_single_value_columns(self, unique_value_counts: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Returns a DataFrame where columns with a single value are removed\n Parameters\n ----------\n unique_value_counts:\n Type: pd.DataFrame\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n drop_cols = list(\n unique_value_counts.loc[unique_value_counts['Unique Value Count'] == 1].index)\n return self.df.drop(drop_cols, axis=1, inplace=True)\n\n def remove_duplicates(self) -> pd.DataFrame:\n \"\"\"\n Returns a DataFrame where duplicate rows are removed\n Parameters\n ----------\n None\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n removables = self.df[self.df.duplicated()].index\n return self.df.drop(index=removables, inplace=True)\n\n def fill_numeric_values(self, missing_cols: list, acceptable_skewness: float = 5.0) -> pd.DataFrame:\n \"\"\"\n Returns a DataFrame where numeric columns are filled with either median or mean based on their skewness\n Parameters\n ----------\n missing_cols:\n Type: list\n acceptable_skewness:\n Type: float\n Default value = 5.0\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n df_skew_data = self.df[missing_cols]\n df_skew = df_skew_data.skew(axis=0, skipna=True)\n for i in df_skew.index:\n if(df_skew[i] < acceptable_skewness and df_skew[i] > (acceptable_skewness * -1)):\n value = self.df[i].mean()\n self.df[i].fillna(value, inplace=True)\n else:\n value = self.df[i].median()\n self.df[i].fillna(value, inplace=True)\n\n return self.df\n\n def add_columns_from_another_df_using_column(self, from_df: pd.DataFrame, base_col: str, add_columns: list) -> pd.DataFrame:\n try:\n new_df = self.df.copy(deep=True)\n from_df.sort_values(base_col, ascending=True, inplace=True)\n for col in add_columns:\n col_index = from_df.columns.tolist().index(col)\n new_df[col] = new_df[base_col].apply(\n lambda x: from_df.iloc[x-1, col_index])\n\n return new_df\n\n except:\n print('Failed to add columns from other dataframe')\n\n def fill_non_numeric_values(self, missing_cols: list, ffill: bool = True, bfill: bool = False) -> pd.DataFrame:\n \"\"\"\n Returns a DataFrame where non-numeric columns are filled with forward or backward fill\n Parameters\n ----------\n missing_cols:\n Type: list\n ffill:\n Type: bool\n Default value = True\n bfill:\n Type: bool\n Default value = False\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n for col in missing_cols:\n if(ffill == True and bfill == True):\n self.df[col].fillna(method='ffill', inplace=True)\n self.df[col].fillna(method='bfill', inplace=True)\n\n elif(ffill == True and bfill == False):\n self.df[col].fillna(method='ffill', inplace=True)\n\n elif(ffill == False and bfill == True):\n self.df[col].fillna(method='bfill', inplace=True)\n\n else:\n self.df[col].fillna(method='bfill', inplace=True)\n 
self.df[col].fillna(method='ffill', inplace=True)\n\n return self.df\n\n def remove_outliers(self) -> pd.DataFrame:\n\n # calculate z-scores of each column in the dataframe\n z_scores = stats.zscore(self.df)\n abs_z_scores = np.abs(z_scores)\n\n # Identify Outliers\n filtered_entries = (abs_z_scores < 3).all(axis=1)\n self.df = self.df[filtered_entries]\n\n return self.df\n\n def fix_outliers(self, column: str) -> pd.DataFrame:\n \"\"\"\n Returns a DataFrame where outlier of the specified column is fixed\n Parameters\n ----------\n column:\n Type: str\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n self.df[column] = np.where(self.df[column] > self.df[column].quantile(\n 0.95), self.df[column].median(), self.df[column])\n\n return self.df\n\n def fix_outlier_columns(self, columns: list) -> pd.DataFrame:\n \"\"\"\n Returns a DataFrame where outlier of the specified columns is fixed\n Parameters\n ----------\n columns:\n Type: list\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n try:\n for column in columns:\n self.df[column] = np.where(self.df[column] > self.df[column].quantile(\n 0.95), self.df[column].median(), self.df[column])\n except:\n print(\"Cant fix outliers for each column\")\n\n return self.df\n\n def standardized_column(self, columns: list, new_name: list, func) -> pd.DataFrame:\n \"\"\"\n Returns a DataFrame where specified columns are standardized based on a given function and given new names after\n Parameters\n ----------\n columns:\n Type: list\n new_name:\n Type: list\n func:\n Type: function\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n try:\n assert(len(columns) == len(new_name))\n for index, col in enumerate(columns):\n self.df[col] = func(self.df[col])\n self.df.rename(columns={col: new_name[index]}, inplace=True)\n\n except AssertionError:\n print('size of columns and names provided is not equal')\n\n except:\n print('standardization failed')\n\n return self.df\n\n def optimize_df(self) -> pd.DataFrame:\n \"\"\"\n Returns the DataFrames information after all column data types are optimized (to a lower data type)\n Parameters\n ----------\n None\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n data_types = self.df.dtypes\n optimizable = ['float64', 'int64']\n try:\n for col in data_types.index:\n if(data_types[col] in optimizable):\n if(data_types[col] == 'float64'):\n # downcasting a float column\n self.df[col] = pd.to_numeric(\n self.df[col], downcast='float')\n elif(data_types[col] == 'int64'):\n # downcasting an integer column\n self.df[col] = pd.to_numeric(\n self.df[col], downcast='unsigned')\n\n return self.df\n\n except:\n print('Failed to optimize')\n\n def save_clean_data(self, name: str):\n \"\"\"\n The objects dataframe gets saved with the specified name \n Parameters\n ----------\n name:\n Type: str\n\n Returns\n -------\n None\n \"\"\"\n try:\n self.df.to_csv(name, index=False)\n\n except:\n print(\"Failed to save data\")\n","repo_name":"CasualModel/CancerCausality","sub_path":"scripts/data_cleaner.py","file_name":"data_cleaner.py","file_ext":"py","file_size_in_byte":8700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"21665193395","text":"import pytest\nfrom brownie import network, exceptions\nfrom scripts.utils import (\n LOCAL_BLOCKCHAIN_ENVIRONMENTS,\n STARTING_PRICE,\n get_account,\n get_contract,\n)\nfrom scripts.deploy import deploy_token_farm_and_dapp_token\n\n\ndef test_set_price_feed_contract():\n if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n pytest.skip(\"Only for 
local testing.\")\n\n account = get_account()\n non_owner_account = get_account(index=1)\n token_farm, dapp_token = deploy_token_farm_and_dapp_token()\n\n token_farm.setPriceFeedContract(\n dapp_token.address,\n # if you set a contract as a param for address brownie automatically knows you only want the address (see below)\n get_contract(\"eth_usd_price_feed\"),\n {\"from\": account},\n )\n\n assert token_farm.tokenPriceFeedMap(dapp_token.address) == get_contract(\n \"eth_usd_price_feed\"\n )\n with pytest.raises(exceptions.VirtualMachineError):\n assert token_farm.setPriceFeedContract(\n dapp_token.address,\n get_contract(\"eth_usd_price_feed\"),\n {\"from\": non_owner_account},\n )\n\n\ndef test_stake_tokens(amount_staked):\n if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n pytest.skip(\"Only for local testing.\")\n account = get_account()\n token_farm, dapp_token = deploy_token_farm_and_dapp_token()\n\n dapp_token.approve(token_farm, amount_staked, {\"from\": account})\n token_farm.stakeTokens(amount_staked, dapp_token)\n\n assert (\n token_farm.stakingBalance(dapp_token.address, account.address) == amount_staked\n )\n assert token_farm.uniqueTokensStaked(account.address) == 1\n assert token_farm.stakers(0) == account.address\n\n return token_farm, dapp_token\n\n\ndef test_issue_tokens(amount_staked):\n if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n pytest.skip(\"Only for local testing.\")\n account = get_account()\n token_farm, dapp_token = test_stake_tokens(amount_staked)\n starting_balance = dapp_token.balanceOf(account.address)\n\n token_farm.issueTokenReward({\"from\": account})\n\n ## we are staking 1 ETH worth of Dapp_Tokens\n ## and the price we instantiated with is $2000,\n ## so we should expect to be issued 2000 dap tokens\n assert dapp_token.balanceOf(account.address) == starting_balance + STARTING_PRICE\n\n\ndef test_allowed_token():\n if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n pytest.skip(\"Only for local testing.\")\n account = get_account()\n\n token_farm, dapp_token = deploy_token_farm_and_dapp_token()\n\n assert token_farm.tokenAllowed(dapp_token)\n\n\ndef test_unstake_tokens(amount_staked):\n if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n pytest.skip(\"Only for local testing.\")\n account = get_account()\n token_farm, dapp_token = test_stake_tokens(amount_staked)\n\n token_farm.unstakeTokens(dapp_token, amount_staked, {\"from\": account})\n\n assert token_farm.uniqueTokensStaked(account.address) == 0\n assert token_farm.stakingBalance(dapp_token.address, account.address) == 0\n","repo_name":"06fsantos/Stake_and_Lobster_DApp","sub_path":"tests/unit/test_token_farm.py","file_name":"test_token_farm.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38042690893","text":"from rbac.common.role import Role\nfrom rbac.common import protobuf\nfrom rbac.common.logs import get_default_logger\nfrom tests.rbac.common.user.create_user_helper import CreateUserTestHelper\nfrom tests.rbac.testdata.role import RoleTestData\n\nLOGGER = get_default_logger(__name__)\n\n\nclass StubTestHelper:\n \"\"\"A minimal test helper required by this test helper\"\"\"\n\n def __init__(self):\n self.user = CreateUserTestHelper()\n\n\n# pylint: disable=invalid-name\nhelper = StubTestHelper()\n\n\nclass CreateRoleTestHelper(RoleTestData):\n \"\"\"Create Role test helper\"\"\"\n\n def message(self):\n \"\"\"Get a test data 
CreateRole message\"\"\"\n role_id = self.id()\n name = self.name()\n next_id = helper.user.id()\n message = Role().make(\n role_id=role_id, name=name, owners=[next_id], admins=[next_id]\n )\n assert isinstance(message, protobuf.role_transaction_pb2.CreateRole)\n assert message.role_id == role_id\n assert message.name == name\n return message\n\n def create(self):\n \"\"\"Create a test role\"\"\"\n role_id = self.id()\n name = self.name()\n user, key_pair = helper.user.create()\n message = Role().make(\n role_id=role_id, name=name, owners=[user.next_id], admins=[user.next_id]\n )\n\n status = Role().new(\n signer_keypair=key_pair, signer_user_id=user.next_id, message=message\n )\n\n assert len(status) == 1\n assert status[0][\"status\"] == \"COMMITTED\"\n\n role = Role().get(object_id=message.role_id)\n\n assert role.role_id == message.role_id\n assert role.name == message.name\n assert Role().owner.exists(object_id=role.role_id, related_id=user.next_id)\n assert Role().admin.exists(object_id=role.role_id, related_id=user.next_id)\n return role, user, key_pair\n","repo_name":"hyperledger-archives/sawtooth-next-directory","sub_path":"tests/rbac/common/role/create_role_helper.py","file_name":"create_role_helper.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"3"} +{"seq_id":"71063615121","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom django import forms\nfrom .models import Reply\nfrom blocks.models import Block\nfrom posts.models import Post\nfrom users.models import User\nfrom message.views import new_message\nfrom django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie\nimport json\nfrom users.views import login_required\n\n# Create your views here.\n\n@csrf_exempt\ndef create_reply(request):\n # reply_obj = json.loads(params)\n # reply_obj = request.POST['params']\n post_id = int(request.POST.get('post_id'))\n content = request.POST.get('content')\n to_comment_id = int(request.POST.get(\"to_comment_id\", 0))\n email = request.session.get('email')\n\n post = Post.objects.get(id=post_id)\n user = User.objects.get(email=email)\n if to_comment_id != 0:\n to_comment = Reply.objects.get(id=to_comment_id)\n else:\n to_comment = None\n\n if content != '':\n reply = Reply(content=content)\n reply.post = post\n reply.author = user\n reply.author_name = user.nickname\n reply.status = 1\n reply.to_reply = to_comment\n reply.save()\n\n new_message(post.block_id, post_id, content, user.nickname)\n\n # msg_num = int(request.session.get('msg_num')) + 1\n # request.session['msg_num'] = msg_num\n\n status = 'ok'\n error = ''\n\n else:\n status = 'fail'\n error = 'Please input content.'\n\n reply_obj = {\n 'status': status, 'error': error\n }\n # print (reply_obj)\n # return json.dumps(reply_obj)\n # return request, reply_obj\n return HttpResponse(json.dumps(reply_obj), content_type='application/json')\n\n\ndef reply_detail(post_id):\n post_id = int(post_id)\n reply_objs = Reply.objects.filter(post=post_id).order_by('update_timestamp')\n return reply_objs","repo_name":"h128jj/-Life_helper","sub_path":"reply/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"42156682579","text":"import sys\nimport time\nimport pprint\n\npp = pprint.PrettyPrinter(indent=4)\n\nfrom GeneralScriptSupport import *\n\n\n#\n# Class for defining default options\n#\n\nclass 
DefaultOptions:\n\n  def __init__(self):\n    self.origDir = \"\"\n    self.destDir = \"\"\n\n  def setDefaultDefaults(self):\n    self.origDir = os.path.abspath(os.path.dirname(sys.argv[0])) + \"/\"\n    self.destDir = os.getcwd() + \"/\"\n\n  def setDefaultOrigDir(self, origDirIn):\n    self.origDir = origDirIn\n\n  def getDefaultOrigDir(self):\n    return self.origDir\n\n  def setDefaultDestDir(self, destDirIn):\n    self.destDir = destDirIn\n\n  def getDefaultDestDir(self):\n    return self.destDir\n\n\n#\n# Main help string\n#\n\nusageHelp = r\"\"\"snapshot-dir.py [other options] [--orig-dir=<orig-dir>/] [--dest-dir=<dest-dir>/]\n\nThis tool snapshots the contents of an origin directory ('orig-dir') to a\ndestination directory ('dest-dir') and creates linkages between the two git\nrepos in the commit message in the 'dest-dir' git branch. The command 'git'\nmust be in the path for this script to be used.\n\nTo sync between any two arbitrary directories, invoking this script from any\ndirectory location, one can do:\n\n  $ <some-base-dir>/snapshot-dir.py \\\n    --orig-dir=<some-base-dir>/<orig-dir>/ \\\n    --dest-dir=<some-base-dir>/<dest-dir>/\n\nTo describe how this script is used, consider the desire to snapshot the\ndirectory tree:\n\n  <some-base-dir>/orig-dir/\n\nand duplicate it in the directory tree\n\n  <some-base-dir>/dest-dir/\n\nHere, the directories can be any two directories from local git repos with any\nnames as long as they are given a final '/' at the end. Otherwise, if you are\nmissing the final '/', then rsync will copy the contents from 'orig-dir' into\na subdir of 'dest-dir' which is usually not what you want.\n\nA typical case is to have snapshot-dir.py soft linked into orig-dir/ to allow\na simple sync process. This is the case, for example, with the 'tribits'\nsource tree. The linked-in location of snapshot-dir.py gives the default\n'orig-dir' directory automatically (but can be overridden with --orig-dir\noption).\n\nWhen snapshot-dir.py is soft-linked into the 'orig-dir' directory base, the\nway to run this script would be:\n\n  $ cd <some-base-dir>/dest-dir/\n  $ <some-base-dir>/orig-dir/snapshot-dir.py\n\nBy default, this assumes that git repos are used for both the 'orig-dir' and\n'dest-dir' locations. The info about the origin of the snapshot from\n'orig-dir' is recorded in the commit message of the 'dest-dir' git repo to\nprovide traceability for the versions (see below).\n\nNote the trailing '/' is critical for the correct functioning of rsync.\n\nBy default, this script does the following:\n\n1) Assert that the git repo for 'orig-dir' is clean (i.e. no uncommitted\n   files, no unknown files, etc.). (Can be disabled by passing in\n   --allow-dirty-orig-dir.)\n\n2) Assert that the git repo for <dest-dir>/ is clean (see above). (Can\nbe disabled by passing in --allow-dirty-dest-dir.)\n\n3) Clean out the ignored files from <some-base-dir>/orig-dir using 'git\n   clean -xdf' run in that directory. (Only if --clean-ignored-files-orig-dir\n   is specified.)\n\n4) Run 'rsync -cav --delete' to copy the contents from 'orig-dir' to\n   'dest-dir', excluding the '.git/' directory if it exists in either git repo\n   dir. After this runs, <dest-dir>/ should be an exact duplicate of\n   <orig-dir>/ (except for otherwise noted excluded files). This rsync\n   will delete any files in 'dest-dir' that are not in 'orig-dir'. Note that\n   if there are ignored untracked files, then the copied .gitignore files\n   should avoid showing them as tracked or unknown files in the 'dest-dir' git\n   repo as well.\n\n5) Run 'git add .' in <dest-dir>/ to stage any new files. 
Note that git\n   will automatically stage deletes for any files removed by the 'rsync -cav\n   --delete' command!\n\n6) Get the git remote URL from the orig-dir git repo, and the git log for the\n   last commit for the directory from orig-dir. This information is used to\n   define perfect tracing of the version info when doing the snapshot.\n\n7) Commit the updated dest-dir directory using a commit message with the\n   orig-dir repo URL and log info. This will only commit files in 'dest-dir'\n   and not in other directories in the destination git repo!\n\nNOTES:\n\n* This script allows the syncing between base git repos or subdirs within git\n  repos. This is allowed because the rsync command is told to ignore the\n  .git/ directory when syncing.\n\n* The cleaning of the orig-dir/ using 'git clean -xdf' may be somewhat\n  dangerous but it is recommended that it be performed by passing in\n  --clean-ignored-files-orig-dir to avoid copying locally-ignored files in\n  orig-dir/ (e.g. ignored in .git/info/excludes but not in a committed\n  .gitignore file) that get copied to and then committed in the dest-dir/\n  repo. Therefore, be sure you don't have any ignored files in orig-dir/ that\n  you want to keep before you run this script!\n\n* Snapshotting with this script will create an exact duplicate of 'orig-dir'\n  in 'dest-dir' and therefore if there are any local changes to the files or\n  changes after the last snapshot, they will get wiped out. To avoid this,\n  one can do the snapshot on a branch in the 'dest-dir' git repo, then merge that\n  branch into the main branch (e.g. 'master') in 'dest-dir' repo. As long as\n  there are no merge conflicts, this will preserve local changes for the\n  mirrored directories and files. This strategy can work well as a way to\n  allow for local modifications but still do the snapshotting.\n\"\"\"\n\n\n#\n# Direct script driver (taking in command-line arguments)\n#\n\ndef snapshotDirMainDriver(cmndLineArgs, defaultOptionsIn = None, stdout = None):\n\n  oldstdout = sys.stdout\n\n  try:\n    \n    if stdout:\n      sys.stdout = stdout\n\n    if defaultOptionsIn:\n      defaultOptions = defaultOptionsIn\n    else:\n      defaultOptions = DefaultOptions()\n      defaultOptions.setDefaultDefaults()\n\n    #print(\"cmndLineArgs = \" + str(cmndLineArgs))\n    \n    #\n    # A) Get the command-line options\n    #\n    \n    from optparse import OptionParser\n    \n    clp = OptionParser(usage=usageHelp)\n    \n    clp.add_option(\n      \"--show-defaults\", dest=\"showDefaults\", action=\"store_true\",\n      help=\"Show the default option values and do nothing at all.\",\n      default=False )\n\n    clp.add_option(\n      \"--orig-dir\", dest=\"origDir\", type=\"string\",\n      default=defaultOptions.getDefaultOrigDir(),\n      help=\"Original directory that is the source for the snapshotted directory.\" \\\n      +\" Note that it is important to add a final '/' to the directory name.\" \\\n      +\" The default is the directory where this script lives (or is soft-linked).\" \\\n      +\" [default: '\"+defaultOptions.getDefaultOrigDir()+\"']\")\n\n    clp.add_option(\n      \"--dest-dir\", dest=\"destDir\", type=\"string\",\n      default=defaultOptions.getDefaultDestDir(),\n      help=\"Destination directory that is the target for the snapshotted directory.\" \\\n      +\" Note that a final '/' must be added or the origin will be added as subdir.\" \\\n      +\" The default dest-dir is current working directory.\" \\\n      +\" [default: '\"+defaultOptions.getDefaultDestDir()+\"']\" \\\n      )\n\n    clp.add_option(\n      \"--assert-clean-orig-dir\", dest=\"assertCleanOrigDir\", action=\"store_true\",\n      help=\"Check that orig-dir is 
committed and clean. [default]\" )\n    clp.add_option(\n      \"--allow-dirty-orig-dir\", dest=\"assertCleanOrigDir\", action=\"store_false\",\n      default=True,\n      help=\"Skip clean check of orig-dir.\" )\n\n    clp.add_option(\n      \"--assert-clean-dest-dir\", dest=\"assertCleanDestDir\", action=\"store_true\",\n      help=\"Check that dest-dir is committed and clean. [default]\" )\n    clp.add_option(\n      \"--allow-dirty-dest-dir\", dest=\"assertCleanDestDir\", action=\"store_false\",\n      default=True,\n      help=\"Skip clean check of dest-dir.\" )\n\n    clp.add_option(\n      \"--clean-ignored-files-orig-dir\", dest=\"cleanIgnoredFilesOrigDir\", action=\"store_true\",\n      help=\"Clean out the ignored files from orig-dir/ before snapshotting.\" )\n    clp.add_option(\n      \"--no-clean-ignored-files-orig-dir\", dest=\"cleanIgnoredFilesOrigDir\", action=\"store_false\",\n      default=False,\n      help=\"Do not clean out orig-dir/ ignored files before snapshotting. [default]\" )\n\n    clp.add_option(\n      \"--do-rsync\", dest=\"doRsync\", action=\"store_true\",\n      help=\"Actually do the rsync. [default]\" )\n    clp.add_option(\n      \"--skip-rsync\", dest=\"doRsync\", action=\"store_false\",\n      default=True,\n      help=\"Skip the rsync (testing only?).\" )\n\n    clp.add_option(\n      \"--do-commit\", dest=\"doCommit\", action=\"store_true\",\n      help=\"Actually do the commit. [default]\" )\n    clp.add_option(\n      \"--skip-commit\", dest=\"doCommit\", action=\"store_false\",\n      default=True,\n      help=\"Skip the commit.\" )\n    \n    (options, args) = clp.parse_args(cmndLineArgs)\n    \n    #\n    # B) Echo the command-line\n    #\n    \n    print(\"\")\n    print(\"**************************************************************************\")\n    print(\"Script: snapshot-dir.py \\\\\")\n\n    print(\"  --orig-dir='\" + options.origDir + \"' \\\\\")\n    print(\"  --dest-dir='\" + options.destDir + \"' \\\\\")\n    if options.assertCleanOrigDir:\n      print(\"  --assert-clean-orig-dir \\\\\")\n    else:\n      print(\"  --allow-dirty-orig-dir \\\\\")\n    if options.assertCleanDestDir:\n      print(\"  --assert-clean-dest-dir \\\\\")\n    else:\n      print(\"  --allow-dirty-dest-dir \\\\\")\n    if options.cleanIgnoredFilesOrigDir:\n      print(\"  --clean-ignored-files-orig-dir \\\\\")\n    else:\n      print(\"  --no-clean-ignored-files-orig-dir \\\\\")\n    if options.doRsync:\n      print(\"  --do-rsync \\\\\")\n    else:\n      print(\"  --skip-rsync \\\\\")\n    if options.doCommit:\n      print(\"  --do-commit \\\\\")\n    else:\n      print(\"  --skip-commit \\\\\")\n    \n    if options.showDefaults:\n      return  # All done!\n    \n    #\n    # C) Execute the snapshot\n    #\n    \n    snapshotDir(options)\n    \n  finally:\n    sys.stdout = oldstdout\n\n\n\n#\n# Implement the guts of snapshotting after reading in options\n#\n\ndef snapshotDir(inOptions):\n\n  #\n  print(\"\\nA) Assert that orig-dir is 100% clean with all changes committed\\n\")\n  #\n\n  if inOptions.assertCleanOrigDir:\n    assertCleanGitDir(inOptions.origDir, \"origin\",\n      \"The created snapshot commit would not have the correct origin commit info!\" )\n  else:\n    print(\"Skipping on request!\")\n\n  #\n  print(\"\\nB) Assert that dest-dir is 100% clean with all changes committed\\n\")\n  #\n\n  if inOptions.assertCleanDestDir:\n    assertCleanGitDir(inOptions.destDir, \"destination\",\n      \"Local changes in the destination directory would be overwritten and lost!\")\n  else:\n    print(\"Skipping on request!\")\n\n  #\n  print(\"\\nC) Cleaning out ignored files in orig-dir\\n\")\n  #\n\n  if inOptions.cleanIgnoredFilesOrigDir:\n    cleanIgnoredFilesFromGitDir(inOptions.origDir, \"origin\")\n  else:\n    print(\"Skipping on request!\")\n\n  #\n  print(\"\\nD) Get info from git commit from origDir 
[optional]\\n\")\n  #\n\n  # Get the repo for origin\n  (remoteRepoName, remoteBranch, remoteRepoUrl) = \\\n    getGitRepoUrl(inOptions.origDir)\n  print(\"origin remote name = '\" + remoteRepoName + \"'\")\n  print(\"origin remote branch = '\" + remoteBranch + \"'\")\n  print(\"origin remote URL = '\" + remoteRepoUrl + \"'\")\n\n  # Get the last commit message\n  originLastCommitMsg = getLastCommitMsg(inOptions.origDir)\n  print(\"\\norigin commit message:\\n\\n\" + originLastCommitMsg + \"\\n\")\n\n  #\n  print(\"\\nE) Run rsync to add and remove files and dirs between two directories\\n\")\n  #\n\n  if inOptions.doRsync:\n\n    excludes = r\"\"\"--exclude=\\.git\"\"\"\n    # Note that when syncing one git repo to another, we want to sync the\n    # .gitignore and other hidden files as well.\n    \n    # When we support syncing from hg repos, add these excludes as well:\n    #    --exclude=\\.hg --exclude=.hgignore --exclude=.hgtags\n    \n    rtn = echoRunSysCmnd(\n      r\"rsync -cav --delete \"+excludes+\" \"+inOptions.origDir+\" \"+inOptions.destDir,\n      throwExcept=False,\n      timeCmnd=True\n      )\n    \n    if rtn != 0:\n      print(\"Rsync failed, aborting!\")\n      return False\n\n  else:\n\n    print(\"\\nSkipping rsync on request!\")\n\n  #\n  print(\"\\nF) Create a new commit in destination directory [optional]\")\n  #\n\n  origDirLast = inOptions.origDir.split(\"/\")[-2]\n  origSha1 = getCommitSha1(inOptions.origDir)\n\n  commitMessage = \\\n    \"Automatic snapshot commit from \"+origDirLast+\" at \"+origSha1+\"\\n\"+\\\n    \"\\n\"+\\\n    \"Origin repo remote tracking branch: '\"+remoteRepoName+\"/\"+remoteBranch+\"'\\n\"+\\\n    \"Origin repo remote repo URL: '\"+remoteRepoName+\" = \"+remoteRepoUrl+\"'\\n\"+\\\n    \"\\n\"+\\\n    \"At commit:\\n\"+\\\n    \"\\n\"+\\\n    originLastCommitMsg\n\n  print(\"\\nGenerating commit with commit message:\\n\")\n  print(\"---------------------------------------\" )\n  print(commitMessage )\n  print(\"---------------------------------------\" )\n\n  if inOptions.doCommit:\n\n    echoRunSysCmnd(\n      \"git add .\",\n      workingDir=inOptions.destDir\n      )\n    \n    echoRunSysCmnd(\n      \"git commit -m \\\"\"+commitMessage+\"\\\" -- .\",\n      workingDir=inOptions.destDir\n      )\n\n  else:\n\n    print(\"\\nSkipping commit on request!\\n\")\n\n  #\n  # G) Success! 
(if you get this far)\n #\n\n return True\n\n\n#\n# Helper functions\n#\n\n\ndef assertCleanGitDir(dirPath, dirName, explanation):\n\n changedFiles = getCmndOutput(\n \"git diff --name-status HEAD -- .\",\n stripTrailingSpaces = True,\n workingDir = dirPath )\n\n if changedFiles:\n raise Exception(\n \"Error, aborting snapshot!\\n\" \\\n \"The \"+dirName+\" git directory '\"+dirPath+\"' is not clean and\" \\\n +\" has the changed files:\\n\"+changedFiles+\"\\n\" \\\n +explanation\n )\n else:\n print(\"The \" + dirName + \" git directory '\" + dirPath + \"' is clean!\")\n\n # NOTE: The above git diff command will not catch unknown files but that is\n # not a huge risk for the use cases that I am concerned with.\n\n\ndef cleanIgnoredFilesFromGitDir(dirPath, dirName):\n\n rtn = echoRunSysCmnd(\n r\"git clean -xdf\",\n workingDir=dirPath,\n throwExcept=False,\n timeCmnd=True\n )\n \n if rtn != 0:\n raise Exception(\n \"Error, cleaning of origin `\"+dirPath+\"` failed!\"\n )\n\n\ndef getCommitSha1(gitDir):\n return getCmndOutput(\"git log -1 --pretty=format:'%h' -- .\", workingDir=gitDir).strip()\n\n\ndef getGitRepoUrl(gitDir):\n\n remoteRepoName = \"\"\n remoteBranch = \"\"\n remoteRepoUrl = \"\"\n\n # Get the remote tracking branch\n trackingBranchStr = getCmndOutput(\n \"git rev-parse --abbrev-ref --symbolic-full-name @{u}\", workingDir=gitDir)\n\n (remoteRepoName, remoteBranch) = trackingBranchStr.strip().split(\"/\")\n\n # Get the list of remote repos\n remoteReposListStr = getCmndOutput(\"git remote -v\", workingDir=gitDir)\n #print(\"remoteReposListStr = \" + remoteReposListStr)\n\n # Loop through looking for remoteRepoName\n for remoteRepo in remoteReposListStr.splitlines():\n\n #print(\"remoteRepo = '\" + remoteRepo + \"'\")\n if remoteRepo == \"\":\n continue\n \n remoteRepoList = remoteRepo.split(\" \")\n #print(\"remoteRepoList = \" + str(remoteRepoList))\n\n # Remove empty items\n k = 0\n while k < len(remoteRepoList):\n if remoteRepoList[k] == \"\":\n del remoteRepoList[k]\n k += 1\n #print(\"remoteRepoList = \" + str(remoteRepoList))\n\n # Get the remote name and URL\n (repoName, repoUrl) = remoteRepoList[0].split(\"\\t\")\n #print(\"repoName = '\" + repoName + \"'\")\n #print(\"repoUrl = '\" + repoUrl + \"'\")\n\n # Grab the URL if the remote name matches\n if repoName == remoteRepoName:\n remoteRepoUrl = repoUrl\n break\n\n # end for\n\n return (remoteRepoName, remoteBranch, remoteRepoUrl)\n\n\ndef getLastCommitMsg(gitDir):\n return getCmndOutput(\n \"git log \" \\\n +\" --pretty=format:'commit %H%nAuthor: %an <%ae>%nDate: %ad%nSummary: %s%n'\" \\\n +\" -1 -- .\",\n workingDir=gitDir\n )\n","repo_name":"CASL/Futility","sub_path":"cmake/tribits/python_utils/SnapshotDir.py","file_name":"SnapshotDir.py","file_ext":"py","file_size_in_byte":15809,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"3"} +{"seq_id":"31329572619","text":"from setuptools import setup,find_packages\nimport os,sys,shutil,subprocess,glob,pip,sysconfig\nfrom distutils.sysconfig import get_python_lib\nfrom pkg_resources import resource_filename\n\nwith open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='uGMRT_beamds',\n version='1.1.1',\n packages=find_packages(),\n package_data={'uGMRT_beamutils':['*','gptool_ver4.2.1/*','sigproc_install/bin/*','readPA/*']},\n author='Devojyoti Kansabanik',\n author_email='dkansabanik@ncra.tifr.res.in',\n\tdescription=\"A python package to make and plot dynamic spectrum from uGMRT beamformer 
observations\",\n\tlong_description=long_description,\n\tlong_description_content_type='text/x-rst',\n\turl=\"https://github.com/devojyoti96/uGMRT_beamds\",\n install_requires=[\"numpy==1.19.0\", \"scipy==1.6.2\",\"astropy==4.3\",'pandas','seaborn','pysigproc'],\n\tpython_requires='>=3',\n\tentry_points={\n 'console_scripts': ['make_uGMRT_ds = uGMRT_beamtool.make_ds:main','plot_uGMRT_ds = uGMRT_beamtool.plot_uGMRT_ds:main']}\n)\n","repo_name":"devojyoti96/uGMRT_beamds","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4820652732","text":"import pandas as pd\nimport numpy as np \nfrom sklearn.preprocessing import MinMaxScaler,StandardScaler\nimport multiprocessing as mp\nimport argparse\nimport matplotlib as plt \nimport random \n\nfiles_path = \"/mnt/pan/Data14/gene_pred/tfxcan_git/\"\nweights_df = pd.read_csv(files_path + \"Chrom22_results/Chrom22_variant_tfscores.csv\")\ngeno_df = pd.read_csv(files_path + \"Data/Chrom22_genotypes.txt\", sep = \" \")\ndef get_simulated_values(gene):\n t_gene_df = weights_df.loc[weights_df.hgnc_symbol == gene][[\"hgnc_symbol\",\"SNP_ID\",\"cube_root_score_scaled\"]]\n t_geno_df = geno_df.loc[geno_df.SNP_ID.isin(list(t_gene_df.SNP_ID))]\n t_geno_df = t_geno_df.merge(t_gene_df, on = [\"SNP_ID\"], how =\"inner\")\n t_geno_df= t_geno_df.drop(\"SNP_ID\",axis = 1)\n t_geno_df_t = t_geno_df.T\n tw_vector = list(t_geno_df_t.loc[\"cube_root_score_scaled\"])\n t_geno_df_t = t_geno_df_t.iloc[:2504,:]\n t_sum = t_geno_df_t.sum(axis = 1) + np.random.normal(0,10,len(t_geno_df_t))\n return(pd.DataFrame(t_sum,columns = [gene]))\npool = mp.Pool() # worker pool so the per-gene simulation actually runs in parallel\ngene_list = sorted(set(weights_df.hgnc_symbol))\ngene_expr_df = pd.concat(pool.map(get_simulated_values, gene_list), axis = 1)\n#print(gene_expr_df.head(5))\ngene_expr_df.insert(0,\"IID\", list(gene_expr_df.index))\ngene_expr_df.to_csv(\"Data/Chrom22_simulated_expression.csv\", index =False)\n\n","repo_name":"bushlab-genomics/TFXcan","sub_path":"simulate_expression.py","file_name":"simulate_expression.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30286966195","text":"from django.urls import path\r\n\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('add', views.addTodo, name='add'),\r\n path('time',views.timekunal),\r\n path('rolltime',views.timeroll),\r\n path('Game',views.render_game),\r\n path('complete/', views.completeTodo, name='complete'),\r\n path('deletecomplete', views.deleteCompleted, name='deletecomplete'),\r\n path('deleteall', views.deleteAll, name='deleteall')\r\n]\r\n","repo_name":"tomkunal/my-django","sub_path":"todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17644622643","text":"import datetime\nfrom django.contrib.sitemaps import Sitemap\nfrom django.utils.translation import gettext as _\nfrom django.urls import reverse\n\n\nclass CalendarioLunarSitemap(Sitemap):\n priority = 0.5\n changefreq = 'daily'\n i18n = True\n\n def items(self):\n today = datetime.datetime.now().date()\n\n return [\n {\n 'url': 'current_lunar_phase',\n 'kwargs': {}\n },\n {\n 'url': 'following_lunar_phases',\n 'kwargs': {}\n },\n {\n 'url': 'monthly_calendar',\n 'kwargs': {\n 'year': today.year,\n 'month': today.month,\n }\n },\n {\n 'url': 'specific_lunar_phase',\n 'kwargs': {\n 'year': today.year,\n 'month': today.month,\n 'day': today.day,\n }\n }\n ]\n\n def location(self, item):\n return reverse(\n item['url'],\n kwargs=item['kwargs']\n )","repo_name":"agapanto/calendariolunar.cl","sub_path":"lunarphases/sitemaps.py","file_name":"sitemaps.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"14992508516","text":"from problems.Problem_Abs import *\n\n\nclass Ackley(Problem_Abs):\n def Func(self, X):\n if len(X.shape) == 1:\n X = X[:, np.newaxis]\n m = X.shape[0]\n b = 0.2\n a = 20\n c = 2 * np.pi\n X = utils.pow_exe(self.Bound, X)\n x_sum = -b * np.sqrt(np.sum(X ** 2, axis=0) / m)\n cos_sum = np.exp(np.sum(np.cos(c * X), axis=0) / m)\n Z = a + np.exp(1) - a * np.exp(x_sum) - cos_sum\n return Z\n","repo_name":"wangzb-001/nnga_jxf","sub_path":"GAs/problems/Ackley.py","file_name":"Ackley.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73348426641","text":"#!/usr/bin/env python3\n\"\"\"WordLiterals fetches words from the document at the URL and\n prints them one line at a time.\n\n \"\"\"\n\nfrom urllib.request import urlopen\n\ndef fetch_words():\n \"\"\"\n Fetch a list of words from a URL.\n\n Args: None\n\n Returns: List of words from the document.\n \"\"\"\n with urlopen('http://sixty-north.com/c/t.txt') as story:\n story_words = []\n for line in story:\n line_words = line.decode('utf-8').split() # Split the decoded line into a list of words.\n for word in line_words:\n story_words.append(word) # Append each word to the story_words list\n return story_words\n\ndef print_items(items):\n \"\"\"Print items one per line\n\n Args:\n items: the items to print, one per line\n \"\"\"\n for item in items:\n print(item) # Print each item from the list passed in (the story words from fetch_words).\n\ndef main():\n \"\"\"Main function: call fetch_words() to fetch the words from the doc and\n print them one line at a time.\n\n Args:\n None. 
\n \"\"\"\n items = fetch_words()\n print_items(items)\n\nif __name__ == '__main__':\n main()\n","repo_name":"maheshvaikri/pythonpro","sub_path":"Test/PythonTry/wordliterals.py","file_name":"wordliterals.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3510507061","text":"# compare_genesis.py --- \n# \n# Filename: compare_genesis.py\n# Description: \n# Author: \n# Maintainer: \n# Created: Fri Jun 21 15:31:01 2013 (+0530)\n# Version: \n# Last-Updated: Wed Jun 26 14:38:58 2013 (+0530)\n# By: subha\n# Update #: 30\n# URL: \n# Keywords: \n# Compatibility: \n# \n# \n\n# Commentary: \n# \n# \n# \n# \n\n# Change log:\n# \n# \n# \n# \n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; either version 3, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; see the file COPYING. If not, write to\n# the Free Software Foundation, Inc., 51 Franklin Street, Fifth\n# Floor, Boston, MA 02110-1301, USA.\n# \n# \n\n# Code:\n\nfrom pylab import *\n\nmoose_soma = loadtxt('symcompartment.txt')\nplot(moose_soma[:,0], moose_soma[:,1], color='#114477', ls='', marker='o', label='moose-soma', alpha=0.5)\nplot(moose_soma[:,0], moose_soma[:,2], color='#4477AA', ls='', marker='o', label='moose-d1', alpha=0.5)\nplot(moose_soma[:,0], moose_soma[:,3], color='#77AADD', ls='', marker='o', label='moose-d2', alpha=0.5)\n\nmoose_soma = loadtxt('symcompartment_readcell.txt')\nplot(moose_soma[:,0], moose_soma[:,1], color='#117744', ls='', marker='x', label='moose-readcell-soma', alpha=0.5)\nplot(moose_soma[:,0], moose_soma[:,2], color='#44AAAA', ls='', marker='x', label='moose-readcell-d1', alpha=0.5)\nplot(moose_soma[:,0], moose_soma[:,3], color='#77CCCC', ls='', marker='x', label='moose-readcell-d2', alpha=0.5)\n\ngen_d1 = loadtxt('genesis_d1_Vm.txt')\ngen_soma = loadtxt('genesis_soma_Vm.txt')\ngen_d2 = loadtxt('genesis_d2_Vm.txt')\nplot(gen_soma[:, 0], gen_soma[:, 1], color='#771122', ls='', marker='x', label='gen-soma', alpha=0.5)\nplot(gen_d1[:,0], gen_d1[:,1], color='#AA4455', ls='', marker='x', label='gen-d1', alpha=0.5)\nplot(gen_d2[:,0], gen_d2[:,1], color='#771155', ls='', marker='x', label='gen-d2', alpha=0.5)\n\ngen_d1 = loadtxt('genesis_readcell_d1_Vm.txt')\ngen_soma = loadtxt('genesis_readcell_soma_Vm.txt')\ngen_d2 = loadtxt('genesis_readcell_d2_Vm.txt')\nplot(gen_soma[:, 0], gen_soma[:, 1], color='#774411', ls='--', label='gen-readcell-soma', alpha=0.5)\nplot(gen_d1[:,0], gen_d1[:,1], color='#AA7744', ls='--', label='gen-readcell-d1', alpha=0.5)\nplot(gen_d2[:,0], gen_d2[:,1], color='#DDAA77', ls='--', label='gen-readcell-d2', alpha=0.5)\n\nlegend()\nshow()\n\n# \n# compare_genesis.py ends here\n","repo_name":"BhallaLab/moose","sub_path":"moose-examples/symcomp/compare_genesis.py","file_name":"compare_genesis.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"3"} +{"seq_id":"70434789202","text":"from .basemixin import BaseMixin\n\n\nclass 
CheckTestFilesMixin(BaseMixin):\n \"\"\" Mixin for checking and printing the mandatory/optional test files\n in the header. If mandatory files are absent, an error is raised. \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._test_script = None\n self._player_settings = None\n self._serial_log_setting = None\n self._precon_interpreter = None\n self._precon_script = None\n self._precon_player_settings = None\n self._standby = None\n self._standby_script = None\n self._timejump_script = None\n self._confirm_wakeup_script = None\n\n def _check_and_get_unique_script(self, path, postfix):\n \"\"\"\n Will raise RuntimeError if no test files are found or\n if more than one is found\n \"\"\"\n pattern = path.name + postfix\n\n try:\n script, *more = sorted(path.parent.glob(pattern))\n except ValueError:\n raise RuntimeError(\"Got no files with pattern '{}'.\".format(pattern))\n\n if more:\n raise RuntimeError(\n \"Got several files with same pattern '{}'.\".format(pattern)\n )\n\n return script\n\n def check_test_files(self):\n mandatory, optional = self.__get_header_files()\n\n if any(mandatory):\n self._testlog('\\nMandatory test files:')\n self._testlog('\\n'.join(map(str, mandatory)))\n\n missing = list(filter(lambda x: not x.is_file(), mandatory))\n if any(missing):\n self._testlog('\\nNext test files are not found:')\n self._testlog('\\n'.join(map(str, missing)))\n raise RuntimeError('Mandatory test files are not found')\n\n if any(optional):\n self._testlog('\\nOptional test files:')\n self._testlog('\\n'.join(map(str, optional)))\n\n def __get_header_files(self):\n optional = []\n mandatory = []\n\n if self._test_script:\n mandatory.append(self._test_script)\n if self._player_settings:\n mandatory.extend(self._player_settings)\n if self._serial_log_setting:\n optional.append(self._serial_log_setting)\n\n if self._precon_interpreter:\n mandatory.append(self._precon_script)\n mandatory.extend(self._precon_player_settings)\n\n if self._standby:\n mandatory.append(self._standby_script)\n mandatory.append(self._timejump_script)\n mandatory.append(self._confirm_wakeup_script)\n\n mandatory = list(filter(None, mandatory))\n optional = list(filter(None, optional))\n\n return mandatory, optional\n","repo_name":"papachappa/client","sub_path":"client/mixins/checktestfilesmixin.py","file_name":"checktestfilesmixin.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18356151691","text":"import binascii\n\n\ndef byte_to_binary(n):\n \"\"\" 5 -> \"00000101\" \"\"\"\n return ''.join(str((n & (1 << i)) and 1) for i in reversed(range(8)))\n\n\ndef hex_to_binary(h):\n \"\"\" \"1f\" -> \"00011111\" \"\"\"\n return ''.join(byte_to_binary(ord(b)) for b in binascii.unhexlify(h))\n\n\ndef ascidxFromBites(in_stream):\n str_len = len(in_stream) // 8\n asc_idx = 0\n for x in xrange(0, str_len):\n bytetmp = int(in_stream[x * 8:(x * 8) + 8], 2)\n if 31 < bytetmp < 127: # and bytetmp != 44 and bytetmp != 96 and bytetmp != 39 and bytetmp != 92:\n asc_idx += 1\n if str_len > 0:\n return (asc_idx * 100) // str_len\n else:\n return 0\n\n\ndef strFromBites(in_stream):\n \"\"\" \"01001001010101000100010001010011\" -> \"ITDS\" \"\"\"\n outasc = \"\"\n for x in xrange(0, len(in_stream) // 8):\n bytetmp = int(in_stream[x * 8:(x * 8) + 8], 2)\n if 31 < bytetmp < 127: # and bytetmp != 44 and bytetmp != 96 and bytetmp != 39 and bytetmp != 92:\n outasc += chr(bytetmp)\n else:\n outasc += \" 
\"\n return outasc\n\n\ndef hexFromBites(in_stream):\n \"\"\" \"01001001010101000100010001010011\" -> \"49:54:44:53:\" \"\"\"\n outhex = \"\"\n for x in xrange(0, len(in_stream) // 8):\n bytetmp = int(in_stream[x * 8:(x * 8) + 8], 2)\n outhex = outhex + format(bytetmp, '02X') + \":\"\n return outhex\n\ndef bitesFromHex(in_hex):\n \"\"\" \"0A:FF:10:AA:12\" -> \"0000101011111111000100001010101000010010\" \"\"\"\n outbin = \"\"\n if in_hex[0] == \":\":\n in_hex = in_hex[1:]\n\n if ':' in in_hex:\n outbin = ''.join([hex_to_binary(x) for x in in_hex.split(':')])\n else:\n outbin = ''.join([hex_to_binary(in_hex[i*2:i*2+2]) for i in range(len(in_hex) // 2)])\n\n return outbin\n","repo_name":"itds-consulting/tetra-listener","sub_path":"tetra-multiframe-sds/binman.py","file_name":"binman.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"3"} +{"seq_id":"33377262490","text":"from dados import produtos, pessoas, lista\nfrom functools import reduce\n\ndef filtra(produto):\n if produto['preco'] > 50:\n return True\n\n\ndef filtra_idade(pessoa):\n if pessoa['idade'] >= 18:\n return True\n return False\n\n\nnova_lista = filter(filtra, produtos)\nnova_lista1 = filter(filtra_idade, pessoas)\n\nfor produto in nova_lista:\n print(produto)\n\nprint('----------------------------------------------')\n\nfor pessoa in nova_lista1:\n print(pessoa)\n\nprint('----------------------------------------------')\n\n# acumula = 0\n#\n# for item in lista:\n# acumula+=item\n# print(acumula)\n\nsoma_lista = reduce(lambda ac, i: i+ac, lista, 0)\nprint(soma_lista)\n\nprint('----------------------------------------------')\n\nsoma_precos = reduce(lambda ac, p: p['preco'] + ac, produtos, 0)\nprint(soma_precos)\n\nprint(round(soma_precos / len(produtos), 2))\n\nprint('----------------------------------------------')\n\nsoma_idades = reduce(lambda ac, i: i['idade'] + ac, pessoas, 0)\nprint(soma_idades / len(pessoas))\n","repo_name":"luccashiroshi/Curso-Python","sub_path":"map/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"44580593322","text":"# tested with firmware 5-0.22\nimport sensor,image,lcd,time\nfrom machine import I2C\nimport KPU as kpu\nimport mlx90614\n\ntextScale = 2\nrectThickness = 10\n\nlcd.init()\nlcd.clear(0xFFFF)\nsensor.reset()\nsensor.set_pixformat(sensor.RGB565)\nsensor.set_framesize(sensor.QVGA)\nsensor.set_windowing((224, 224))\nsensor.set_brightness(2)\n# sensor.set_auto_gain(1)\n# sensor.set_saturation(2)\nsensor.set_contrast(2)\nsensor.set_vflip(1)\n# sensor.set_hmirror(1)\nsensor.run(1)\nclasses = [\"Mask\", \"No Mask\"]\ncolors = [(0,255,0), (255,0,0)]\n#task = kpu.load(0x200000) #change to \"/sd/name_of_the_model_file.kmodel\" if loading from SD card\ntask = kpu.load('/sd/mask20.kmodel')\na = kpu.set_outputs(task, 0, 7,7,35) #the actual shape needs to match the last layer shape of your model(before Reshape)\nanchor = (0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828)\na = kpu.init_yolo2(task, 0.6, 0.05, 5, anchor) #tweak the second parameter if you're getting too many false positives\nclock = time.clock()\nlcd.direction(lcd.YX_RLDU)\n\n\ni2c = I2C(I2C.I2C0, mode=I2C.MODE_MASTER, freq=100000, scl=10, sda=11, addr_size=7)\ntemp_sensor = mlx90614.MLX90614(i2c)\n\nwhile(True):\n #img = sensor.snapshot().rotation_corr(z_rotation=-90.0)\n img = 
sensor.snapshot()\n a = img.pix_to_ai()\n clock.tick()\n code = kpu.run_yolo2(task, img)\n fps=clock.fps()\n # print(fps)\n # code = None\n if code:\n # print(\"network found something\")\n for i in code:\n selectedColor = colors[i.classid()]\n label = classes[i.classid()]\n class_length = len(label)\n labelPosX = int((224-(class_length*textScale*4))/2)\n labelPosY = 10\n # print(i.rect(), labelPosX, labelPosY)\n a = img.draw_rectangle(i.rect(), thickness=rectThickness, color=selectedColor)\n a = img.draw_string(labelPosX, labelPosY, label, color=selectedColor, scale=textScale)\n else:\n # print(\"network found nothing\")\n pass\n objTemp = temp_sensor.read_object_temp()\n if temp_sensor.isHumanTemp(objTemp):\n temp_str = \"{:.2f}\".format(objTemp)\n print(temp_str)\n img.draw_string(80, 200, temp_str, colors[1], scale=textScale)\n else:\n print(\"non human temp\")\n \n a = lcd.display(img)\na = kpu.deinit(task)\n","repo_name":"pritamghanghas/sipeed_playground","sub_path":"mask_detector.py","file_name":"mask_detector.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11648058503","text":"\"\"\"Module for generating data\"\"\"\n\nimport datetime\nimport random\n\n\n# Constants for the class TableData.\nfrom typing import Tuple\n\nCAR_COUNT_LIMIT = 1000\nCAR_PRICE_LOW_BOUND = 30000.00\nCAR_PRICE_HIGH_BOUND = 135000.70\n\n\nclass TableData:\n \"\"\"Class for generating random table data.\"\"\"\n\n def __init__(self, models_names: Tuple[str], headings: Tuple[str]):\n self._models_names = models_names\n self._headings = headings\n\n self.__start_date = datetime.date(2012, 1, 1)\n self.__end_date = datetime.date.today()\n\n def __get_random_model_name(self):\n return self._models_names[random.randrange(len(self._models_names))]\n\n def __get_random_date(self):\n time_between_dates = self.__end_date - self.__start_date\n days_between_dates = time_between_dates.days\n random_number_of_days = random.randrange(days_between_dates)\n random_date = self.__start_date + datetime.timedelta(days=random_number_of_days)\n\n return random_date\n\n def __get_random_count_auto(self, limit=1000):\n return random.randrange(limit)\n\n def __get_random_price(self, low_price: float, high_price: float):\n return round(random.uniform(low_price, high_price), 2)\n\n def get_random_data(self, number_of_records):\n random_data = set()\n\n for i in range(number_of_records):\n random_data.add(\n (\n self.__get_random_model_name(),\n self.__get_random_date().strftime(\"%d/%m/%Y\"),\n self.__get_random_count_auto(CAR_COUNT_LIMIT),\n self.__get_random_price(CAR_PRICE_LOW_BOUND, CAR_PRICE_HIGH_BOUND)\n )\n )\n\n return random_data\n","repo_name":"RedExtreme12/reporting-module","sub_path":"table_data.py","file_name":"table_data.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38597974054","text":"__license__ = 'RPL-1.5'\n\n# $Id: MyTTNclient.py,v 4.1 2021/02/07 14:37:26 teus Exp teus $\n\n# Broker between TTN and some data collectors: luftdaten.info map and MySQL DB\n# if nodes info is loaded and the DB module is enabled, nodes info is exported to the DB\n# the latter makes it possible to maintain kit status and kit location/activity/exports in the DB\n\n# module mqtt: git clone https://github.com/eclipse/paho.mqtt.python.git\n# cd paho.mqtt.python ; python setup.py install\n# broker server: Debian: apt-get install mqtt\n\n\"\"\"Simple test 
script for TTN MQTT broker access\n Broker access is designed for MQTT data record downloads from multiple brokers.\n MQTT topics may be a list of topics.\n Main routine is GetData() to start and handle data record downloads.\n GetData() returns with the TTN MQTT data record in json/dict format, or\n empty dict for no record, or None for End of Records/Data.\n The module can be used as a library as well as from the CLI.\n\n command line (CLI) arguments:\n verbose=true|false or -v or --verbose. Default False.\n user=TTNuser user account name for TTN.\n password=TTNpassword eg ttn-account-v2.abscdefghijl123456789ABCD.\n keepalive=N keep-alive in seconds for the connection, defaults to 180 secs.\n node will be seen at TTN as topic ID. Multiple nodes (no wild card) are possible.\n node='comma separated nodes' ... to subscribe to. Dflt node='+' (all wild card).\n show=pattern regular expression of device IDs to display the full data record.\n\"\"\"\n\nimport paho.mqtt.client as mqttClient\nimport threading\nimport time, datetime\nimport re\nimport sys\nimport json\n\n# routines to collect messages from TTN MQTT broker (yet only subscription)\n# collect records in RecordQueue[] using QueueLock\n# broker with TTN connection details: host, user credentials, list of topics\n# broker = {\n# \"address\": \"eu.thethings.network\", # Broker address default\n# \"port\": 1883, # Broker port default\n# \"user\": \"20201126grub\", # Connection username\n# # Connection password\n# \"password\": ttn-account-v2.GW36kBmsaNZaXYCs0jx4cbPiSfaK6r0q9Zj0jx4Bmsts\"\n# \"topic\": \"+\" , # topic or list of topics to subscribe to\n# }\nclass TTN_broker:\n def __init__(self, broker, fifo, lock, verbose=False, keepalive=180, logger=None):\n self.TTNconnected = None # None=not yet, False from disconnected, True connected\n self.message_nr = 0 # number of messages received\n self.RecordQueue = fifo # list of received data records\n self.QueueLock = lock # Thread lock for queue handling\n self.TTNclient = None # TTN connection handle\n self.verbose = verbose # verbosity\n self.broker = broker # TTN access details\n if not 'lock' in broker.keys(): # make sure timestamp sema is there\n self.broker['lock'] = threading.RLock()\n self.KeepAlive = keepalive # connect keepalive in seconds, default 180\n self.logger = logger # routine to print errors\n \n def _logger(self, pri, message):\n try: self.logger('MyTTNclient',pri,message)\n except: sys.stdout.write(\"MyTTNclient %s: %s.\\n\" % (pri, message)) \n\n def _on_connect(self, client, userdata, flags, rc):\n if rc == 0:\n if self.verbose: self._logger(\"INFO\",\"Connected to broker\")\n self.TTNconnected = True # Signal connection \n with self.broker['lock']: self.broker['timestamp'] = time.time()\n else:\n self._logger(\"ERROR\",\"Connect to MQTT broker failed: %s.\" % [ \"successful\", \"internet connection broke up\", \"invalid client identifier\", \"server unavailable\", \"bad username or password\", \"not authorised\"][rc])\n raise IOError(\"TTN MQTT connection failed\")\n \n def _on_disconnect(self, client, userdata, rc):\n if self.verbose:\n self._logger(\"ERROR\",\"Disconnect rc=%d from broker %s\" % (rc, self.broker))\n self._logger(\"ERROR\",\"Broker disconnect: rc=%d.\" % rc)\n time.sleep(0.1)\n self.TTNconnected = False\n \n def _on_message(self, client, userdata, message):\n self.message_nr += 1\n try:\n record = json.loads(message.payload)\n # self._logger(\"INFO\",\"%s: Message %d received: \" % (datetime.datetime.now().strftime(\"%m-%d %Hh%Mm\"),self.message_nr) + record['dev_id'] + 
', port=%d' % record['port'] + ', raw payload=\"%s\"' % record['payload_raw'])\n if len(record) > 25: # primitive way to identify incorrect records\n self._logger(\"WARNING\",\"TTN MQTT records overload. Skipping.\")\n elif len(self.RecordQueue) > 100:\n self._logger(\"WARNING\",\"exhausting record queue. Skip record: %s.\" % record['dev_id'])\n else:\n with self.QueueLock:\n self.RecordQueue.append(record)\n # in principle next should be guarded by a semaphore\n with self.broker['lock']: self.broker['timestamp'] = time.time()\n return True\n except Exception as e:\n # raise ValueError(\"Payload record is not in json format. Skipped.\")\n self._logger(\"ERROR\",\"it is not json payload, error: %s\" % str(e))\n self._logger(\"INFO\",\"\\t%s skipped message %d received: \" % (datetime.datetime.now().strftime(\"%m-%d %Hh%Mm\"),self.message_nr) + 'topic: %s' % message.topic + ', payload: %s' % message.payload)\n return False\n\n @property\n def TTNConnected(self):\n return self.TTNconnected\n \n def TTNinit(self):\n if self.TTNclient == None:\n # may need this on reinitialise()\n self.TTNclientID = \"ThisTTNtestID\" if not 'clientID' in self.broker.keys() else self.broker['clientID']\n if self.verbose:\n self._logger(\"INFO\",\"Initialize TTN MQTT client ID %s\" % self.TTNclientID)\n # create new instance, clean session save client init info?\n self.TTNclient = mqttClient.Client(self.TTNclientID, clean_session=True)\n self.TTNclient.username_pw_set(self.broker[\"user\"], password=self.broker[\"password\"]) # set username and password\n self.TTNclient.on_connect = self._on_connect # attach function to callback\n self.TTNclient.on_message = self._on_message # attach function to callback\n self.TTNclient.on_disconnect = self._on_disconnect # attach function to callback\n for cnt in range(3):\n try:\n # TODO: set_tls setting not yet supported\n # if 'cert' in self.broker.keys() do set ssl\n self.TTNclient.connect(self.broker[\"address\"], port=self.broker[\"port\"], keepalive=self.KeepAlive) # connect to broker\n break\n except Exception as e:\n self._logger(\"INFO\",\"%s connection failure.\" % datetime.datetime.now().strftime(\"%m-%d %Hh%Mm:\"))\n self._logger(\"ERROR\",\"Try to (re)connect failed to %s:%s with error: %s\" % (self.broker[\"address\"],str(self.broker[\"topic\"]), str(e)))\n time.sleep(60)\n if cnt >= 2:\n self._logger(\"FATAL\",\"Giving up.\")\n exit(1)\n else:\n try:\n self.broker['count'] += 1\n time.sleep(self.broker['count']*60) # slow down a bit\n except: self.broker['count'] = 1\n self.TTNclient.reinitialise()\n if self.verbose:\n self._logger(\"INFO\",\"Reinitialize TTN MQTT client\")\n return True\n \n def TTNstart(self):\n if self.TTNconnected: return True\n self.TTNconnected = False\n if not self.TTNclient:\n self.TTNinit()\n else: self.TTNclient.reinitialise(client_id=self.TTNclientID)\n cnt = 0\n if self.verbose:\n self._logger(\"INFO\",\"Starting up TTN MQTT client.\")\n self.TTNclient.loop_start()\n time.sleep(0.1)\n while self.TTNconnected != True: # Wait for connection\n if cnt > 250:\n if self.verbose:\n self._logger(\"FAILURE\",\"waited for connection too long.\")\n self.TTNstop()\n return False\n if self.verbose:\n if not cnt:\n self._logger(\"INFO\",\"Wait for connection\")\n elif (cnt%10) == 0:\n if self.logger == sys.stdout.write:\n sys.stdout.write(\"\\033[F\") #back to previous line \n sys.stdout.write(\"\\033[K\") #clear line \n self._logger(\"INFO\",\"Wait for connection % 3.ds\"% (cnt/10))\n cnt += 1\n time.sleep(0.1)\n qos = 0 # MQTT dflt 0 (max 1 
delivery, at most once), 1 (at least once), or 2 (exactly once)\n try: qos = self.broker['qos']\n except: pass\n self.TTNclient.subscribe(self.broker['topic'], qos=qos)\n if self.verbose:\n self._logger(\"INFO\",\"TTN MQTT client started\")\n return True\n \n def TTNstop(self):\n if not self.TTNclient: return\n if self.verbose: self._logger(\"ERROR\",\"STOP TTN connection\")\n try:\n self.TTNclient.loop_stop()\n self.TTNclient.disconnect()\n except: pass\n self.TTNconnected = False\n self.TTNclient = None # renew MQTT object class\n time.sleep(60)\n\n# module-level log helper: falls back to stdout when no logger callable is given\ndef _logger(pri, message, logger=None):\n try: logger('MyTTNclient',pri,message)\n except: sys.stdout.write(\"MyTTNclient %s: %s.\\n\" % (pri, message)) \n\nMQTTindx = None\nMQTTFiFo = [] # first in, first out data records queue\nMQTTLock = threading.RLock() # lock for queue access\n# find brokers who need to be (re)started up\ndef MQTTstartup(MQTTbrokers,verbose=False,keepalive=180,logger=None):\n global MQTTindx, MQTTFiFo, MQTTLock\n brokers = MQTTbrokers\n if not type(brokers) is list: brokers = [brokers] # single broker\n for indx in range(len(brokers)-1,-1,-1):\n broker = brokers[indx]\n if not broker or not type(broker) is dict:\n del brokers[indx]\n continue\n if not 'fd' in broker or broker['fd'] == None: # initialize\n broker['fd'] = None # class object handle\n broker['restarts'] = 0 # nr of restarts with timing of 60 seconds\n broker['startTime'] = 0 # last time started\n broker['count'] = 0 # number of secs to delay check for data\n broker['timestamp'] = 0 # time of last record\n broker['lock'] = threading.RLock() # sema for timestamp\n if not broker['fd']:\n broker['fd'] = TTN_broker(broker, MQTTFiFo, MQTTLock, verbose=verbose, keepalive=keepalive, logger=logger)\n if not broker['fd']:\n _logger(\"ERROR\",\"Unable to initialize TTN MQTT class for %s\" % str(broker),logger=logger)\n del MQTTbrokers[indx]\n continue\n if not broker['fd'] or not broker['fd'].TTNstart():\n _logger(\"FATAL\",\"Unable to initialize TTN MQTT connection: %s.\" % str(broker),logger=logger)\n del MQTTbrokers[indx]\n elif not broker['startTime']:\n with broker['lock']:\n broker['timestamp'] = broker['startTime'] = time.time()\n MQTTindx = -1\n if not len(brokers): return False\n return True\n\n# stop a broker or a list of brokers\ndef MQTTstop(MQTTbrokers):\n brokers = MQTTbrokers\n if not type(brokers) is list: brokers = [brokers]\n for broker in brokers:\n try:\n broker['fd'].TTNstop(); broker['fd'] = None\n except: pass\n\n# default logging routine\ndef logging(string):\n sys.stdout.write(string + '\\n')\n\n# get a record from an MQTT broker eg TTN\n# verbose: verbosity, keepalive: connection keep-alive,\n# logger: function to log to, sec2pol: secs to wait between polls for a record\ndef GetData(MQTTbrokers, verbose=False,keepalive=180,logger=None, sec2pol=10):\n global MQTTindx, MQTTFiFo, MQTTLock\n timing = time.time()\n while True:\n # find brokers who are disconnected\n if not type(MQTTbrokers) is list: MQTTbrokers = [MQTTbrokers]\n for broker in MQTTbrokers:\n if not broker or len(broker) < 2: continue\n try:\n if not broker['fd'].TTNConnected:\n broker['fd'].TTNstop()\n broker['fd'] = None\n except:\n if not type(broker) is dict:\n raise ValueError(\"Undefined broker %s\" % str(broker))\n broker['fd'] = None\n if not len(MQTTbrokers) or not MQTTstartup(MQTTbrokers,verbose=verbose,keepalive=keepalive,logger=logger):\n _logger(\"INFO\",\"no MQTT broker available\",logger=logger)\n return None\n if MQTTindx == None: MQTTindx = -1\n now = time.time()\n\n # try to find a (next) queue with a data record\n if 
len(MQTTFiFo):\n with MQTTLock: record = MQTTFiFo.pop(0)\n return record\n\n # no record found, reset dying connections, delete dead connections\n ToBeRestarted = (0,None,-1) # (minimal wait time, broker, broker index)\n for nr in range(len(MQTTbrokers)):\n MQTTindx = (MQTTindx+1)%len(MQTTbrokers)\n broker = MQTTbrokers[MQTTindx]\n\n # CONNECTED broker\n if broker['fd'].TTNConnected:\n # there was no record in the record queue\n if (now - broker['timestamp'] > 20*60) and (now - broker['startTime'] > 45*60):\n _logger(\"ERROR\",\"Waited too long for data (%d secs since last record, running %d secs) from broker %s. Stop connection.\" % (now - broker['timestamp'], now - broker['startTime'], str(broker)),logger=logger)\n broker['fd'].TTNstop()\n del MQTTbrokers[MQTTindx]\n # break # break to while True loop\n if not broker['timestamp']: \n with broker['lock']: broker['timestamp'] = now\n\n # DISCONNECTED broker\n elif broker['restarts'] <= 3: # try simple restart\n _logger(\"ERROR\",\" %s: Connection died. Try again.\" % datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),logger=logger)\n broker['fd'].TTNstop()\n if now-broker['startTime'] > 15*60: # had run for at least 15 minutes\n broker['restarts'] = 0\n broker['fd'] = None\n with broker['lock']: broker['timestamp'] = now\n # break # break to while True loop\n else:\n broker['restarts'] += 1 # try again and delay on failure\n with broker['lock']: broker['timestamp'] = now\n else:\n _logger(\"ERROR\",\" %s: Too many retries on broker %s\" % (datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), str(broker)),logger=logger)\n broker['fd'].TTNstop()\n broker['fd'] = None\n broker = {}\n continue # skip the dead broker (an empty dict has no 'timestamp')\n\n if not ToBeRestarted[1]:\n ToBeRestarted = (broker['timestamp'],broker,MQTTindx)\n elif broker['timestamp'] < ToBeRestarted[0]:\n ToBeRestarted = (broker['timestamp'],broker,MQTTindx)\n \n if ToBeRestarted[1]:\n #if verbose:\n # LF = ''\n # if int(time.time()-timing) and logger == sys.stdout.write:\n # LF = \"\\033[F\\033[K\" #back to previous line and clear line \n # _logger(\"INFO\",\"%sWaiting %3d+%3d secs.\" % (LF,time.time()-timing,max(ToBeRestarted[1]['timestamp'] - now,sec2pol)), logger=logger)\n time.sleep(max(ToBeRestarted[1]['timestamp'] - now,sec2pol))\n MQTTindx = ToBeRestarted[2]-1\n else:\n #if verbose:\n # LF = ''\n # if int(time.time()-timing) and logger == sys.stdout.write:\n # LF = \"\\033[F\\033[K\" #back to previous line and clear line \n # _logger(\"INFO\",\"%sAwaiting %3d+%3d secs.\" % (LF,time.time()-timing,sec2pol),logger=logger)\n time.sleep(sec2pol)\n # and try again in the while True loop\n return None\n\nif __name__ == '__main__':\n # show full received TTN MQTT record for this pattern\n show = None # show details of data record for nodeID pattern\n node = '+' # TTN MQTT devID pattern for subscription device topic part\n # user = \"1234567890abc\" # connection user name\n user = \"201802215971az\" # Connection username\n verbose = False\n logger = None # routine to print messages to console\n # Connection password\n # password = \"ttn-account-v2.ACACADABRAacacadabraACACADABRAacacadabra\"\n password = \"ttn-account-v2.GW3msa6kBNZs0jx4aXYCcbPaK6r0q9iSfZjIOB2Ixts\"\n keepalive = 180 # play with keepalive connection settings, dflt 180 secs\n \n for arg in sys.argv[1:]: # change default settings, arg: <key>=<value>\n if arg in ['-v','--verbose']:\n verbose = True; continue\n Match = re.match(r'(?P<key>verbose|show|node|user|password|keepalive)=(?P<value>.*)', arg, re.IGNORECASE)\n if Match:\n Match = Match.groupdict()\n if Match['key'].lower() == 'verbose':\n if 
Match['value'].lower() == 'false': verbose = False\n elif Match['value'].lower() == 'true': verbose = True\n elif Match['key'].lower() == 'show': # pattern show details record\n show = re.compile(Match['value'], re.I)\n elif Match['key'].lower() == 'node': # comma separated list of devID's\n if node == '+': node = Match['value']\n else: node += ',' + Match['value']\n elif Match['key'].lower() == 'user':\n user = Match['value']\n elif Match['key'].lower() == 'password':\n password = Match['value']\n elif Match['key'].lower() == 'keepalive':\n if Match['value'].isdigit(): keepalive = int(Match['value'])\n \n # TTN MQTT broker access details\n topics = []\n for topic in node.split(','): # list of appID/devices/devID\n topics.append((\"+/devices/\" + topic + \"/up\",0))\n TTNbroker = {\n \"address\": \"eu.thethings.network\", # Broker address\n \"port\": 1883, # Broker port\n \"user\": user, # Connection username\n # Connection password\n \"password\": password,\n \"topic\": (topics[0][0] if len(topics) == 1 else topics), # topic to subscribe to\n }\n MQTTbrokers = [ TTNbroker, ] # may be a list of TTN/user brokers\n\n while True:\n try:\n timing = time.time()\n DataRecord = GetData(MQTTbrokers,verbose=verbose,keepalive=keepalive,logger=None) \n if DataRecord:\n print(\"%s:%s received data record: %s\" % (datetime.datetime.now().strftime(\"%m-%d %Hh%Mm%Ss\"), (\" delay %3d secs,\" % (time.time()-timing)) if verbose else '',str(DataRecord['dev_id'])))\n if show and show.match(DataRecord['dev_id']):\n print(\"%s\" % str(DataRecord))\n elif DataRecord is None: break\n else:\n print(\"No data record received. Try again.\")\n except Exception as e:\n print(\"End of get data record with exception: %s\" % str(e))\n break\n\n MQTTstop(MQTTbrokers)\n exit(0)\n","repo_name":"teusH/MySense","sub_path":"scripts/MyTTNclient.py","file_name":"MyTTNclient.py","file_ext":"py","file_size_in_byte":18978,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"3"} +{"seq_id":"69845009683","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\n# The function takes a URL string, e.g. \"https://google.com\", and returns\n# a dictionary with the keys h1, title, description\n# if those tags were found on the page\n\ndef data_from_html(url):\n data_html_dict = {}\n r = requests.get(url)\n html_doc = r.text\n soup = BeautifulSoup(html_doc, 'html.parser')\n\n if soup.h1:\n data_html_dict[\"h1\"] = soup.h1.string\n\n if soup.title:\n data_html_dict[\"title\"] = soup.title.string.strip()\n\n if soup.find(attrs={\"name\": \"description\"}):\n data_html_dict[\"description\"] = (\n soup.find(attrs={\"name\": \"description\"})['content'].strip()\n )\n\n return data_html_dict\n","repo_name":"impuls64s/python-project-83","sub_path":"page_analyzer/parser_url.py","file_name":"parser_url.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10601634965","text":"import all_together_list\nimport pandas as pd\n\n\ndef lists2dict(list1, list2):\n \"\"\"Return a dictionary where list1 provides\n the keys and list2 provides the values.\"\"\"\n\n # Zip lists: zipped_lists\n zipped_lists = zip(list1, list2)\n\n # Create a dictionary: rs_dict\n rs_dict = dict(zipped_lists)\n\n # Return the dictionary\n return rs_dict\n\n\nrow_lists = all_together_list.row\nfeature_names = all_together_list.header\n\n# Turn list of lists into list of dicts: list_of_dicts\nlist_of_dicts = [lists2dict(feature_names, sublist) for 
sublist in row_lists]\n\n# Turn list of dicts into a DataFrame: df\ndf = pd.DataFrame(list_of_dicts)\n\n# Print the head of the DataFrame\nprint(df.head())\n","repo_name":"tgpmoraes/curso-python","sub_path":"datacamp/python_programer/ds_toolbox/part2/all_together_v4.py","file_name":"all_together_v4.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8337760641","text":"import socket # Import socket module\r\nimport os\r\n\r\nport = 80 # Reserve a port for your service.\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a socket object\r\n#host = '192.168.1.1'\r\nhost = socket.gethostname() # Get local machine name\r\ns.bind((host, port)) # Bind to the port\r\ns.listen(5) # Now wait for client connection.\r\n\r\nprint ('Server listening....')\r\nconn, addr = s.accept() \r\nmessage = \"Hello\"\r\nwhile True:\r\n conn.send(bytes(message,'utf-8'))\r\n print(\"message sent!\")\r\n\r\n#data = bytes(s.recv(1024),'utf-8')\r\n#print(\"received message: %s\"%data)\r\n\r\n#while True:\r\n #print(\"KK\")\r\n # conn, addr = s.accept() # Establish connection with client.\r\n # print ('Got connection from', addr)\r\n #data = conn.recv(1024)\r\n #print('Server received', repr(data))\r\n\r\n #filename='mytext.txt'\r\n #f = open(filename,'rb')\r\n #l = f.read(1024)\r\n #while (l):\r\n # conn.send(l)\r\n # print('Sent ',repr(l))\r\n # l = f.read(1024)\r\n #f.close()\r\n\r\n\r\n\r\n\r\n \r\n\r\nprint('Done sending')\r\n# conn.send('Thank you for connecting')\r\nconn.close()","repo_name":"poulomic11/socket-programmimg","sub_path":"server 1.py","file_name":"server 1.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23271454192","text":"# -*- coding: utf-8 -*-\nfrom .conftest import DebugRunner, DEBUG_RUNNER_PATH\nfrom trek.migration import Migration\nimport pytest\n\n\nclass TestRunner(object):\n @pytest.mark.migrator(runner_path=DEBUG_RUNNER_PATH)\n def test_load(self, migrator):\n \"the runner should be loaded and initialized at runtime\"\n assert isinstance(migrator.runner, DebugRunner)\n\n @pytest.mark.migrator(runner_path=DEBUG_RUNNER_PATH, extra=['initialized'])\n def test_initialize(self, migrator):\n \"the runner should be initialized with passed arguments\"\n assert migrator.runner.args == ['initialized']\n\n\nclass TestMigrationsToRun(object):\n @pytest.mark.migrations(\n ('1', ('', '')),\n )\n @pytest.mark.migrator(direction='up')\n def test_gets_all_for_zero(self, migrator):\n migrator.current = ''\n\n assert migrator.migrations_to_run() == ['1']\n\n @pytest.mark.migrations(('1', ('', '')), ('2', ('', '')))\n @pytest.mark.migrator(direction='up')\n def test_gets_next_for_up(self, migrator):\n migrator.current = '1'\n assert migrator.migrations_to_run() == ['2']\n\n @pytest.mark.migrations(('1', ('', '')), ('2', ('', '')))\n @pytest.mark.migrator(direction='down')\n def test_includes_current_for_down(self, migrator):\n migrator.current = '2'\n assert migrator.migrations_to_run() == ['2', '1']\n\n @pytest.mark.migrations(('1', ('', '')), ('2', ('', '')))\n @pytest.mark.migrator(count=1)\n def test_limits_to_count(self, migrator):\n migrator.current = ''\n assert migrator.migrations_to_run() == ['1']\n\n @pytest.mark.migrations(('1', ('', '')))\n @pytest.mark.migrator(direction='sideways')\n def test_raises_error_for_bad_migration_direction(self, migrator):\n with 
pytest.raises(ValueError) as err:\n migrator.migrations_to_run()\n\n assert 'Unknown migration direction \"sideways\"' in str(err)\n\n @pytest.mark.migrator(direction='sideways')\n def test_raises_error_for_no_migrations(self, migrator):\n with pytest.raises(ValueError) as err:\n migrator.migrations_to_run()\n\n assert 'No migrations to run in' in str(err)\n\n\n@pytest.mark.migrations(('1', ('', '')))\ndef test_get_migration(migrator):\n \"only a cursory test here, as Migration is tested elsewhere\"\n migration = migrator.get_migration('1')\n assert isinstance(migration, Migration)\n\n\n@pytest.mark.migrations(('1', ('up', 'down')))\n@pytest.mark.parametrize('direction', ['up', 'down'])\ndef test_migrate(migrator, direction):\n migrator.direction = direction\n\n assert list(migrator.migrate(['1'])) == [('info', direction)]\n\n\n@pytest.mark.migrations(('1', ('up', 'down')))\n@pytest.mark.parametrize('direction', ['up', 'down'])\ndef test_run(migrator, direction):\n migrator.direction = direction\n migrator.current = '0' if direction == 'up' else '1'\n\n assert list(migrator.run()) == [\n ('info', direction),\n ('info', 'Ran 1 migration(s)')\n ]\n","repo_name":"BrianHicks/trek","sub_path":"tests/test_init.py","file_name":"test_init.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40636178985","text":"import matplotlib.pyplot as plt\nimport os\nfrom matplotlib import rc\nimport xarray\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport numpy as np\n\nimport settings\nfrom general import utils as gutils\nfrom general import settings as gsettings\n\nplt.style.use(gsettings.fpath_mplstyle)\nrc('font', size=settings.fontsize)\n\n\ndef visualize_classification_seasonality(dts, cached=False, fmt='pdf'):\n \"\"\"\n Visualize monthly classifications for irradiance, satellite and nubiscope based observations\n\n :param dts: list of datetimes to build the validation figures for\n :param bool cached: whether to load the pre-calculated cached data to speed things up\n :param str fmt: output format\n :return:\n \"\"\"\n # concatenate all claas2/nubiscope dates\n data = None\n tmp_file = './class_tmp_cached.nc'\n\n if cached and os.path.isfile(tmp_file):\n print(\"Loading data from cache\")\n data = xarray.open_dataset(tmp_file)\n else:\n print(\"Generating dataset from individual files\")\n for date in dts:\n # generate filepaths\n fpath_msg = gutils.generate_processed_fpath(date, which='class_validation')\n\n if os.path.isfile(fpath_msg):\n # load data\n d_msg = xarray.open_dataset(fpath_msg)\n data = xarray.concat([data, d_msg], dim='datetime') if data is not None else d_msg\n else:\n continue\n\n data.to_netcdf(tmp_file)\n\n # load statistics\n bsrn_stats = xarray.open_dataset(os.path.join(gsettings.fdir_bsrn_data, '1sec', 'statistics',\n 'daily_stats_bsrn_1sec.nc'))\n bsrn_stats = bsrn_stats.sel(date=slice(dts[0], dts[-1]))\n\n # clear-sky climatology\n cs_norm = xarray.where(data.clearsky.isnull(), 0, 1)\n cs_msg = xarray.where((data.clearsky == 1) | (data.clearsky == 2), 1, 0)\n cs_msg_monthly = (cs_msg / cs_norm).groupby('datetime.month').mean(dim='datetime') * 100\n\n cs_nub = xarray.where((data.clearsky == 1) | (data.clearsky == 3), 1, 0)\n cs_nub_monthly = (cs_nub / cs_norm).groupby('datetime.month').mean(dim='datetime') * 100\n\n cs_vld = xarray.where(data.clearsky == 1, 1, 0)\n cs_vld_monthly = (cs_vld / cs_norm).groupby('datetime.month').mean(dim='datetime') * 100\n\n 
cs_bsrn_monthly = (bsrn_stats['n_clearsky'] / bsrn_stats['n_possea']).groupby('date.month').mean(dim='date') * 100\n\n # overcast climatology\n oc_norm = xarray.where(data.overcast.isnull(), 0, 1)\n oc_msg = xarray.where((data.overcast == 1) | (data.overcast == 2), 1, 0)\n oc_msg_monthly = (oc_msg / oc_norm).groupby('datetime.month').mean(dim='datetime') * 100\n\n oc_nub = xarray.where((data.overcast == 1) | (data.overcast == 3), 1, 0)\n oc_nub_monthly = (oc_nub / oc_norm).groupby('datetime.month').mean(dim='datetime') * 100\n\n oc_vld = xarray.where(data.overcast == 1, 1, 0)\n oc_vld_monthly = (oc_vld / oc_norm).groupby('datetime.month').mean(dim='datetime') * 100\n\n oc_bsrn_monthly = (bsrn_stats['n_overcast'] / bsrn_stats['n_possea']).groupby('date.month').mean(dim='date') * 100\n\n # create figure\n fig, axes = plt.subplots(1, 2, figsize=gutils.get_image_size(ratio=0.5))\n\n # plot data\n x = cs_nub_monthly.month\n axes[0].plot(x, cs_msg_monthly, label='CLAAS2', marker='x')\n axes[0].plot(x, cs_nub_monthly, label='Nubiscope', marker='x')\n axes[0].plot(x, cs_vld_monthly, label='Validation', marker='x')\n axes[0].plot(x, cs_bsrn_monthly, label='BSRN', marker='x', color='tab:red')\n\n axes[1].plot(x, oc_msg_monthly, marker='x')\n axes[1].plot(x, oc_nub_monthly, marker='x')\n axes[1].plot(x, oc_vld_monthly, marker='x')\n axes[1].plot(x, oc_bsrn_monthly, marker='x', color='tab:red')\n\n # layout\n for ax in axes:\n ax.set_xlabel('Month of year')\n ax.set_xticks(range(1, 13), labels='JFMAMJJASOND')\n ax.set_xlim(1, 12)\n\n axes[0].set_ylabel('Clear-sky (% of time per day)')\n axes[1].set_ylabel('Overcast (% of time per day)')\n axes[0].set_ylim(0, 60)\n axes[1].set_ylim(0, 60)\n\n fig.legend(ncol=4, bbox_to_anchor=(0.5, 1.00), loc='center', frameon=False)\n\n for ax, label in zip(axes, 'ab'):\n ax.text(0., 1.01, '$\\\\bf{%s}$)' % label, transform=ax.transAxes, ha='left', va='bottom')\n\n # export and close\n plt.tight_layout()\n plt.savefig(os.path.join(settings.fdir_img_paper1, 'classification_climate.%s' % fmt), bbox_inches='tight')\n plt.close()\n\n\ndef visualize_cloud_fraction(dts, cached=False, fmt='pdf'):\n \"\"\"\n Compare cloud fraction distributions from satellite (CLAAS/MSG) and Nubiscope data.\n\n :param dts: list of datetimes to build the comparison for\n :param cached: whether to load the pre-calculated cached data to speed things up\n :param fmt: output format\n :return:\n \"\"\"\n # temporary file\n tmp_cf_file = './cache/cf_tmp_cached.nc'\n\n if cached and os.path.isfile(tmp_cf_file):\n cf = xarray.open_dataset(tmp_cf_file)\n else:\n # prepare output lists\n cf_nubs = None\n cf_msgs = None\n\n # prepare input data\n for date in dts:\n # load the preprocessed CLAAS and Nubiscope filepaths\n fpath_msg = gutils.generate_processed_fpath(date, which='msgcpp')\n fpath_nubi = gutils.generate_processed_fpath(date, which='nubiscope')\n\n if os.path.isfile(fpath_nubi) and os.path.isfile(fpath_msg):\n # load data\n d_msg = xarray.open_dataset(fpath_msg)\n d_nub = xarray.open_dataset(fpath_nubi)\n\n # interpolate both to a new time axis\n dtime_ax = pd.date_range(date, date + timedelta(minutes=55, hours=23), freq='5min')\n d_msg = d_msg.interp(datetime=dtime_ax, method='nearest')\n d_nub = d_nub.reindex(datetime=dtime_ax, method='nearest')\n\n # get mutual nan mask\n nan_mask = d_msg.ccover.isnull() | d_nub.cldcover_total.isnull()\n\n cf_nub = xarray.where(nan_mask, np.nan, d_nub.cldcover_total / 100.)\n cf_msg = xarray.where(nan_mask, np.nan, d_msg.ccover)\n\n cf_nubs = xarray.concat([cf_nubs, cf_nub], dim='datetime') if cf_nubs is not None else cf_nub\n cf_msgs = xarray.concat([cf_msgs, cf_msg], dim='datetime') if cf_msgs is not None else cf_msg\n\n # create one data file\n cf = 
xarray.Dataset(data_vars=dict(msg=cf_msgs, nub=cf_nubs))\n cf = cf.dropna(dim='datetime')\n\n # export to temporary file\n cf.to_netcdf(tmp_cf_file)\n\n # create figure\n fig, ax = plt.subplots(1, 1, figsize=gutils.get_image_size(text_width=0.5, ratio=0.8))\n\n # plot data\n xbins = np.linspace(0, 1, 11)\n xbinc = (xbins[1:] + xbins[:-1]) / 2\n\n hist_nub, _ = np.histogram(cf.nub.sel(radius=5), bins=xbins, density=True)\n ax.bar(xbinc, hist_nub, width=0.07, label='Nubiscope', zorder=5, color='tab:blue', alpha=0.7)\n\n colors = plt.get_cmap('inferno')([0.4, 0.7, 0.9])\n for i, r in enumerate([5, 10, 15]):\n hist_msg, _ = np.histogram(cf.msg.sel(radius=r), bins=xbins, density=True)\n ax.scatter(xbinc, hist_msg, marker='x', color=colors[i], s=r*2, label='Satellite' if i == 0 else None,\n zorder=6)\n\n # add corr label\n r2 = np.corrcoef(cf.nub.sel(radius=5), cf.msg.sel(radius=r))[0, 1] ** 2\n ax.text(0.01, 0.98 - i*0.08, 'r$_{%s}$$^2$ = %.2f' % (r, r2), ha='left', va='top', transform=ax.transAxes)\n\n # plot layout\n ax.set_xlim(0, 1)\n ax.set_ylim(0, 8)\n ax.set_xlabel('Cloud fraction (-)')\n ax.set_ylabel('Probability density (-)')\n ax.legend(ncol=2, bbox_to_anchor=(0.5, 1.05), loc='center', frameon=False)\n ax.xaxis.set_ticks(xbins)\n\n # export and close\n plt.tight_layout()\n plt.savefig(os.path.join(settings.fdir_img_paper1, 'cloud_fraction_compare.%s' % fmt), bbox_inches='tight')\n plt.close()\n\n\nif __name__ == \"__main__\":\n dts = gutils.generate_dt_range(datetime(2014, 1, 1), datetime(2016, 12, 31), delta_dt=timedelta(days=1))\n visualize_classification_seasonality(dts, cached=True, fmt='pdf')\n visualize_cloud_fraction(dts=dts, cached=True)\n","repo_name":"WBMol/bsrn-radvar","sub_path":"paper-figures/classification_validation.py","file_name":"classification_validation.py","file_ext":"py","file_size_in_byte":7980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"283585746","text":"import math\nfrom random import shuffle\nimport time\n\nL1 = [3, 2, 1]\nL2 = [5, 4, 3, 2, 1]\nL3 = [7, 6, 5, 4, 3, 2, 1]\nL4 = [9, 8, 7, 6, 5, 4, 3, 2, 1]\nL5 = [11, 10, 9, 8, 7, 6, 5, 4, 2, 1]\nL6 = [5, 5, 4, 4, 3, 3, 2, 2, 1, 1]\nL7 = [4, 4, 4, 3, 3, 3, 2, 2, 2, 1, 1, 1]\n\n\ndef check(A): # returns True while A still contains an inversion, i.e. is unsorted\n n = len(A)\n for i in range(1, n):\n if A[i-1] > A[i]:\n return True\n return False\n\n\ndef bogo_sort(A):\n k = 0\n while check(A):\n k += 1\n shuffle(A)\n return k # or return A if you want the array\n\n\ndef is_not_in(A, num):\n if num in A:\n return False\n return True\n\n\ndef avg(A): # smallest element of A at or above the midpoint of min(A) and max(A)\n avg = (min(A)+max(A))//2\n while is_not_in(A, avg):\n avg += 1\n return avg\n\n\ndef test_bogus(Attempts, A):\n Time = {}\n for i in range(Attempts):\n start = time.time()\n k = bogo_sort(A[:])\n end = time.time() - start\n Time[k] = end\n\n print(max(Time.keys()), Time[max(Time.keys())])\n print(avg(Time.keys()), Time[avg(Time.keys())])\n print(min(Time.keys()), Time[min(Time.keys())])\n\n\ndef P_k(n, k): # calculates P_k for distinct elements, requires tough floating point computations\n return pow((1-(1 / math.factorial(n))), k)\n\n\ndef calc_Pk():\n K = {3: 29, 5: 108, 7: 5002, 9: 275114, 11: 6468303}\n P = []\n for n in K.keys():\n print(P_k(n, K[n]))\n","repo_name":"urataps/ThAndExpCompOfSortAlg","sub_path":"WorstSort/bogus.py","file_name":"bogus.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11404667216","text":"import datetime\nimport os\nfrom typing 
import Optional\n\nimport numpy as np\n\nfrom sqlalchemy import insert, update\nfrom sqlalchemy.ext.asyncio import async_sessionmaker\nfrom sqlalchemy import select\n\nfrom db.models import *\nfrom db.utils import *\n\n\nasync def generate_joke(user: User, session_maker: async_sessionmaker) -> Optional[Joke]:\n used_joke_ids = await fetch_execute(\n select(JokeSession.joke_id)\n .where(JokeSession.user_id.__eq__(user.id))\n .where(JokeSession.ended_at.is_not(None)),\n session_maker=session_maker\n )\n joke_ids = await fetch_execute(\n select(Joke.id)\n .where(Joke.id.not_in(used_joke_ids))\n .where(Joke.language.__eq__(user.language)),\n session_maker=session_maker\n )\n if not joke_ids:\n return None\n\n joke_id = np.random.choice(joke_ids)\n jokes = await fetch_execute(\n select(Joke)\n .where(Joke.id.__eq__(joke_id)),\n session_maker=session_maker\n )\n if not jokes:\n return None\n\n joke = jokes[0]\n await execute(\n insert(JokeSession)\n .values(user_id=user.id, joke_id=joke.id),\n session_maker=session_maker\n )\n await execute(\n update(Joke)\n .where(Joke.id.__eq__(joke.id))\n .values(displays=Joke.displays + 1),\n session_maker=session_maker\n )\n\n return joke\n\n\nasync def get_curr_joke_session(user: User, session_maker: async_sessionmaker) -> Optional[JokeSession]:\n current_joke_sessions = await fetch_execute(\n select(JokeSession)\n .where(JokeSession.user_id.__eq__(user.id))\n .where(JokeSession.ended_at.is_(None))\n .order_by(JokeSession.created_at.desc())\n .limit(1),\n session_maker=session_maker\n )\n if not current_joke_sessions:\n return None\n\n return current_joke_sessions[0]\n\n\nasync def get_curr_joke(user: User, session_maker: async_sessionmaker) -> Optional[Joke]:\n joke_session: JokeSession = await get_curr_joke_session(user, session_maker=session_maker)\n if not joke_session:\n return None\n\n jokes = await fetch_execute(\n select(Joke)\n .where(Joke.id.__eq__(joke_session.joke_id)),\n session_maker=session_maker\n )\n if not jokes:\n return None\n\n return jokes[0]\n\n\nasync def get_hint(user: User, session_maker: async_sessionmaker) -> Optional[JokeHint]:\n joke_session: JokeSession = await get_curr_joke_session(user, session_maker=session_maker)\n\n hints = await fetch_execute(\n select(JokeHint)\n .where(JokeHint.joke_id.__eq__(joke_session.joke_id))\n .order_by(JokeHint.id.asc()),\n session_maker=session_maker\n )\n if not hints or len(hints) <= joke_session.hints_shown:\n return None\n\n hint = hints[joke_session.hints_shown]\n await execute(\n update(JokeSession)\n .where(JokeSession.id.__eq__(joke_session.id))\n .values(hints_shown=joke_session.hints_shown + 1),\n session_maker=session_maker\n )\n\n return hint\n\n\nasync def make_guess(user: User, joke: Joke, text: str, session_maker: async_sessionmaker) -> JokeGuess:\n joke_session: JokeSession = await get_curr_joke_session(user, session_maker=session_maker)\n guesses = await fetch_execute(\n insert(JokeGuess)\n .values(text=text, user_id=user.id, joke_id=joke.id, hints_shown=joke_session.hints_shown)\n .returning(JokeGuess),\n session_maker=session_maker\n )\n\n await execute(\n update(JokeSession)\n .where(JokeSession.id.__eq__(joke_session.id))\n .values(ended_at=datetime.datetime.utcnow()),\n session_maker=session_maker\n )\n\n return guesses[0]\n\n\nasync def react_to_joke(user: User, joke_id: int, reaction: JokeReactionTypes,\n session_maker: async_sessionmaker) -> Joke:\n reactions = await fetch_execute(\n select(JokeReaction)\n .where(JokeReaction.user_id.__eq__(user.id))\n 
.where(JokeReaction.joke_id.__eq__(joke_id)),\n session_maker=session_maker\n )\n\n prev_reaction = None\n if reactions:\n prev_reaction = reactions[0]\n\n if prev_reaction and prev_reaction.reaction == reaction:\n likes_delta = dislikes_delta = 0\n elif prev_reaction:\n await execute(\n update(JokeReaction)\n .where(JokeReaction.id.__eq__(prev_reaction.id))\n .values(reaction=reaction),\n session_maker=session_maker\n )\n likes_delta = 2 * int(reaction == JokeReactionTypes.LIKE) - 1\n dislikes_delta = -likes_delta\n else:\n await execute(\n insert(JokeReaction)\n .values(joke_id=joke_id, user_id=user.id, reaction=reaction),\n session_maker=session_maker\n )\n likes_delta = int(reaction == JokeReactionTypes.LIKE)\n dislikes_delta = 1 - likes_delta\n\n jokes = await fetch_execute(\n update(Joke)\n .where(Joke.id.__eq__(joke_id))\n .values(likes=Joke.likes + likes_delta, dislikes=Joke.dislikes + dislikes_delta)\n .returning(Joke),\n session_maker=session_maker\n )\n\n return jokes[0]\n\n\nasync def react_to_joke_guess(user: User, joke_guess_id: int, reaction: JokeReactionTypes,\n session_maker: async_sessionmaker) -> JokeGuess:\n reactions = await fetch_execute(\n select(JokeGuessReaction)\n .where(JokeGuessReaction.user_id.__eq__(user.id))\n .where(JokeGuessReaction.joke_guess_id.__eq__(joke_guess_id)),\n session_maker=session_maker\n )\n\n prev_reaction = None\n if reactions:\n prev_reaction = reactions[0]\n\n if prev_reaction and prev_reaction.reaction == reaction:\n likes_delta = dislikes_delta = 0\n elif prev_reaction:\n await execute(\n update(JokeGuessReaction)\n .where(JokeGuessReaction.id.__eq__(prev_reaction.id))\n .values(reaction=reaction),\n session_maker=session_maker\n )\n likes_delta = 2 * int(reaction == JokeReactionTypes.LIKE) - 1\n dislikes_delta = -likes_delta\n else:\n await execute(\n insert(JokeGuessReaction)\n .values(joke_guess_id=joke_guess_id, user_id=user.id, reaction=reaction),\n session_maker=session_maker\n )\n likes_delta = int(reaction == JokeReactionTypes.LIKE)\n dislikes_delta = 1 - likes_delta\n\n guesses = await fetch_execute(\n update(JokeGuess)\n .where(JokeGuess.id.__eq__(joke_guess_id))\n .values(likes=JokeGuess.likes + likes_delta, dislikes=JokeGuess.dislikes + dislikes_delta)\n .returning(JokeGuess),\n session_maker=session_maker\n )\n\n return guesses[0]\n\n\nasync def get_other_guess(user: User, joke_id: int, last_shown_guess_id: Optional[int],\n session_maker: async_sessionmaker) -> Optional[JokeGuess]:\n guesses = await fetch_execute(\n select(JokeGuess)\n .where(JokeGuess.joke_id.__eq__(joke_id))\n .where(JokeGuess.user_id.__ne__(user.id))\n .where(JokeGuess.id.__gt__(last_shown_guess_id or 0))\n .order_by(JokeGuess.id.asc())\n .limit(1),\n session_maker=session_maker\n )\n if not guesses:\n return None\n\n guess = guesses[0]\n await execute(\n update(JokeGuess)\n .where(JokeGuess.id.__eq__(guess.id))\n .values(displays=JokeGuess.displays + 1),\n session_maker=session_maker\n )\n return guess\n","repo_name":"MikhailKitikov/WEB-Projects","sub_path":"JokeBot/services/joke_service.py","file_name":"joke_service.py","file_ext":"py","file_size_in_byte":7452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30885991538","text":"import unittest\nfrom feature_selection import recursive_feature_elimination_cv\nfrom data_reading import *\nimport pandas as pd\nimport os\n\n\nclass MyTestCase(unittest.TestCase):\n\n def test_RFECV(self):\n # experimental data fp\n\n filepath_exp = 
\"/Users/eh19686/Documents/PhD/conducting_polymer_project/HSP_project_p1/dataset/adts201800069-sup-0002-polymers_exp.csv\"\n filepath_output = \"/Users/eh19686/Documents/PhD/conducting_polymer_project/moe_output_data/new_monomers_lowest_data.txt\"\n data_df = dataset_df(filepath_exp, filepath_output)\n print(data_df.keys())\n # print(data_df[\"smiles\"])\n # insert new column into dataframe\n # list of column names\n # os.chdir(\"/Users/eh19686/Documents/PhD/conducting_polymer_project/moe_output_data/dataset2\")\n # data_df.to_excel(\"data_HSP_paper.xlsx\")\n column_names = [\"p_1001\", \"p_1004\", \"p_1007\", \"p_1010\", 'p_1013', 'p_1016', 'p_1019', 'p_1022',\n 'p_1025', 'p_1028', 'p_1031', 'p_1002', 'p_1005', 'p_1008', 'p_1011', 'p_1014', 'p_1017',\n 'p_1020', 'p_1023', 'p_1026', 'p_1029', 'p_1003', 'p_1006', 'p_1009', 'p_1012', 'p_1015',\n 'p_1018', 'p_1021', 'p_1024', 'p_1027', 'p_1030']\n\n idx = 0\n data_df.insert(loc=idx, column='filenames', value=column_names)\n data_df.drop(['label', 'smiles', 'n_electrons', 'n_atoms', 'charge', 'MolWt'], axis=1, inplace=True)\n os.chdir(\"/Users/eh19686/Documents/PhD/conducting_polymer_project/moe_output_data/dataset2\")\n data_df.to_excel(\"data_HSP_paper.xlsx\")\n # reading in other data\n additional_data_fp = \"/Users/eh19686/Documents/PhD/conducting_polymer_project/moe_output_data/moe_output_data.xlsx\"\n additional_data_df = pd.read_excel(additional_data_fp)\n # rename columns\n additional_data_df.rename(columns={'file name ': \"filenames\",\"D\": \"DeltaD\", \"P\": \"DeltaP\", \"H\": \"DeltaH\"}, inplace=True)\n additional_data_df.drop(['Unnamed: 0', 'mol', 'structures '], axis=1, inplace=True)\n print(\"here is additional df\")\n print(additional_data_df)\n print(additional_data_df.keys())\n additional_data_df.to_excel(\"og_data.xlsx\")\n # rename\n # concatenating dfs\n frames = [data_df, additional_data_df]\n full_df = pd.concat(frames, axis=1)\n print(full_df[\"filenames\"].values.tolist())\n\n # read in data\n #\n self.assertTrue(type(filepath_exp), \"str\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jilxlisathompson/DeltahspPredict","sub_path":"test_feature_selection.py","file_name":"test_feature_selection.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"33457884486","text":"import random\n\nNB_MIN = 1\nNB_MAX = 10\nNB_MAGIQUE = random.randint(NB_MIN,NB_MAX)\nNB_VIE = 3\n\n#dire bonjour\n# demander valeur à l'utilisateur\ndef ask_nb():\n number = 0\n while number == 0:\n nb = input(f\"Choisissez un chiffre entre {NB_MIN} et {NB_MAX}: \")\n try :\n number = int(nb)\n except :\n print(\"ERREUR : Vous devez demander un nombre\")\n else:\n if NB_MIN > number or number > NB_MAX:\n print(f\"Le nombre n'est pas compris entre {NB_MIN} et {NB_MAX} : réessayez\")\n number = 0\n return number\n\n# tester si valeur correspond nb Magique\ndef test_if_ok(nb):\n ok = False\n if nb == NB_MAGIQUE:\n print(\"******************** Bravo vous avez gagné ********************\")\n ok = True\n elif nb < NB_MAGIQUE : print(\"Plus grand !\")\n elif nb > NB_MAGIQUE : print(\"Plus petit\")\n return ok\n\n# caller en nombre de vie\ndef execute(life):\n for i in range(0,life):\n value_test = False\n print(f\"vous avez {life} vie\")\n nombre = int(ask_nb())\n value_test = test_if_ok(nombre)\n life -= 1\n if value_test == True : break\n elif life == 0 : print(f\"******************** vous n'avez plus de vie perdu ! 
: le nombre magique était {NB_MAGIQUE} ********************\")\n\n\nexecute(NB_VIE)","repo_name":"OPRTNL/cours_python","sub_path":"niveau_1/mon_premier_programme/nombre_magique/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71146075922","text":"# Library\nimport pandas as pd\n\n# Helper functions\nfrom dataset.twitterdata import read_data, read_label, merge_data_label\nfrom dataset import PROJECT_DATA\n\n\ndef save_data_to_csv():\n \"\"\"\n Read and extract datasets from files.\n \"\"\"\n # read data (jsonl files)\n train_data = read_data(PROJECT_DATA + 'train.data.jsonl')\n dev_data = read_data(PROJECT_DATA + 'dev.data.jsonl')\n test_data = read_data(PROJECT_DATA + 'test.data.jsonl')\n covid_data = read_data(PROJECT_DATA + 'covid.data.jsonl')\n\n # read labels (json files)\n train_label = read_label(PROJECT_DATA + 'train.label.json')\n dev_label = read_label(PROJECT_DATA + 'dev.label.json')\n\n # merge data with class labels\n train_data = merge_data_label(train_data, train_label)\n dev_data = merge_data_label(dev_data, dev_label)\n\n # write filetered data to csv\n open(PROJECT_DATA + 'train.csv','w', newline='').write(train_data.to_csv(index=False))\n open(PROJECT_DATA + 'dev.csv','w', newline='').write(dev_data.to_csv(index=False))\n open(PROJECT_DATA + 'test.csv','w', newline='').write(test_data.to_csv(index=False))\n open(PROJECT_DATA + 'covid.csv','w', newline='').write(covid_data.to_csv(index=False))\n\n\ndef check_input_files(filename):\n \"\"\"\n Check input files if they exist.\n \"\"\"\n try:\n f = open(filename,'r')\n f.close()\n except:\n # read and process all input datasets\n save_data_to_csv()\n\n\ndef read_csv_datasets(filename):\n # check if input files exist\n check_input_files(filename)\n\n # read datasets\n df = pd.read_csv(filename)\n\n return df","repo_name":"jeanabanto/NLP-Project","sub_path":"dataset/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"75093867600","text":"# -*- coding: utf-8 -*-\n\nimport textwrap\nimport matplotlib.pyplot as plt\n\nfrom biosim.simulation import BioSim\n\n\"\"\"\nCompatibility check for BioSim simulations.\n\nThis script shall function with biosim packages written for\nthe INF200 project June 2020.\n\"\"\"\n\n__author__ = \"Hans Ekkehard Plesser, NMBU\"\n__email__ = \"hans.ekkehard.plesser@nmbu.no\"\n\n\nif __name__ == '__main__':\n plt.ion()\n\n geogr = \"\"\"\\\n WWWWWWWWWWWWWWWWWWWWW\n WWWWWWWWHWWWWLLLLLLLW\n WHHHHHLLLLWWLLLLLLLWW\n WHHHHHHHHHWWLLLLLLWWW\n WHHHHHLLLLLLLLLLLLWWW\n WHHHHHLLLDDLLLHLLLWWW\n WHHLLLLLDDDLLLHHHHWWW\n WWHHHHLLLDDLLLHWWWWWW\n WHHHLLLLLDDLLLLLLLWWW\n WHHHHLLLLDDLLLLWWWWWW\n WWHHHHLLLLLLLLWWWWWWW\n WWWHHHHLLLLLLLWWWWWWW\n WWWWWWWWWWWWWWWWWWWWW\"\"\"\n geogr = textwrap.dedent(geogr)\n\n ini_herbs = [{'loc': (10, 10),\n 'pop': [{'species': 'Herbivore',\n 'age': 5,\n 'weight': 20}\n for _ in range(150)]}]\n ini_carns = [{'loc': (10, 10),\n 'pop': [{'species': 'Carnivore',\n 'age': 5,\n 'weight': 20}\n for _ in range(40)]}]\n\n sim = BioSim(island_map=geogr, ini_pop=ini_herbs,\n seed=123456,\n hist_specs = {'fitness': {'max': 1.0, 'delta': 0.05},\n 'age': {'max': 60.0, 'delta': 2},\n 'weight': {'max': 60, 'delta': 2}},\n )\n\n sim.set_animal_parameters('Herbivore', {'zeta': 3.2, 'xi': 1.8})\n sim.set_animal_parameters('Carnivore', {'a_half': 
70, 'phi_age': 0.5,\n                                            'omega': 0.3, 'F': 65,\n                                            'DeltaPhiMax': 9.})\n    sim.set_landscape_parameters('L', {'f_max': 700})\n\n    sim.simulate(num_years=100, vis_years=1, img_years=2000)\n\n    sim.add_population(population=ini_carns)\n    sim.simulate(num_years=100, vis_years=1, img_years=2000)\n\n    plt.savefig('check_sim.pdf')\n\n    input('Press ENTER')\n","repo_name":"heplesser/nmbu_inf200_june2020","sub_path":"project_description/check_sim.py","file_name":"check_sim.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"}
{"seq_id":"73075845522","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/4/4 18:55\n# @Author : Monster\n# @Email : 945534456@qq.com\n# @File : test_demo_api.py\n\nimport requests\n\nr = requests.get(\"http://127.0.0.1:5000/\")\nresult = r.json()\nprint(result)","repo_name":"monsterone/lemotest","sub_path":"class03_excel/test_demo/test_demo_api.py","file_name":"test_demo_api.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"17348378321","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 21 11:00:01 2019\r\n\r\n@author: INRA\r\n\"\"\"\r\n'''\r\nExtracts the metadata from each image in the database and stores them\r\nin a file, in either .txt or .csv format.\r\n'''\r\nimport tifffile\r\nimport os\r\nfrom geo.sphere import distance, destination, bearing\r\nfrom math import *\r\nexf_tag = {}\r\ngps_tag = {}\r\nhome = \"/home/inra-cirad/Bureau/MonDossier/\"\r\nenrg = \"/home/inra-cirad/Bureau/MonDossier/sortie_image3/output\"\r\n#dataset = \"/home/inra-cirad/Bureau/MonDossier/out_vol1\"\r\ndataset = \"/home/inra-cirad/Bureau/MonDossier/datav3/\"\r\n#angles = [45, 135, 225, 315]\r\nfiln = open(os.path.join(os.path.join(home, enrg), \"coord_file3.csv\"),\"w\")\r\nfiln.write(\"image\"+\"\\t\"+\"latitude\"+\"\\t\"+\"longititude\"+\"\\t\"+\"heightFootprint\"+\"\\t\"+\"widthFootprint\"+\"\\t\"+\"bearing\"+\"\\t\"+\"point2\"+\"\\t\"+\"point3\"+\"\\t\"+\"point4\"+\"\\t\"+\"point1\")\r\nfiln.write(\"\\n\")\r\ndatasetFolder = os.path.join(home,dataset)\r\n#xSensor = 4.8\r\n#sySensor = 3.6\r\nfileList = os.listdir(datasetFolder)\r\nfileList.sort()\r\n\r\ndef getFileInformation(filename):\r\n    xSensor = 4.8 if \"nm\" in filename else 10.9\r\n    ySensor = 3.6 if \"nm\" in filename else 8.7\r\n    with tifffile.TiffFile(os.path.join(datasetFolder, filename)) as tif:\r\n        tif_tags = {}\r\n        exf_tag = tif.pages[0].tags[\"ExifTag\"]\r\n        gps_tag = tif.pages[0].tags[\"GPSTag\"]\r\n        \r\n        for tag in tif.pages[0].tags.values():\r\n            name, value = tag.name, tag.value\r\n            tif_tags[name] = value\r\n        image = tif.pages[0].asarray()\r\n    myInfo = {}\r\n    myFV = {}\r\n    for key, value in exf_tag.value.items():\r\n        if str(key) in \"FocalLength\":\r\n            focalLenght = value[0]\r\n            fVW = 2*atan(xSensor/(2*focalLenght))\r\n            fVT = 2*atan(ySensor/(2*focalLenght))\r\n            myFV[\"fVW\"] = fVW\r\n            myFV[\"fVT\"] = fVT\r\n        else:\r\n            print(str(key)+\":\"+str(value)+\"\\n\")\r\n\r\n    print(\"******GPS tags******\")\r\n    myInfo[\"image\"] = filename\r\n    count = 0\r\n    for key, value in gps_tag.value.items():\r\n        if str(key) == \"GPSLatitude\":\r\n            myInfo[\"latitude\"] = (value[0]/value[1])+((value[2]/value[3])/60)+((value[4]/value[5])/3600)\r\n        elif key in \"GPSLongitude\": \r\n            myInfo[\"longitude\"] = (value[0]/value[1])+((value[2]/value[3])/60)+((value[4]/value[5])/3600)\r\n        elif key in \"GPSAltitude\":\r\n                altitude = value[0]\r\n                
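# ground footprint from the pinhole-camera model: each half-extent = altitude * tan(half field of view)\r\n                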
bottom = altitude*tan(-0.5*myFV[\"fVW\"])\r\n top = altitude*tan(0.5*myFV[\"fVW\"])\r\n left = altitude*tan(-0.5*myFV[\"fVT\"])\r\n right = altitude*tan(0.5*myFV[\"fVT\"])\r\n myInfo[\"heightFootprint\"] = right - left\r\n myInfo[\"widthFootprint\"] = top - bottom\r\n myInfo[\"distance\"] = sqrt((right - left)**2 + (top - bottom)**2)/2\r\n else:\r\n print(key, value)\r\n if len(myInfo) == 6:\r\n print(myInfo)\r\n return myInfo\r\n\r\n\r\ndef getAllInformation():\r\n for fileIndex in range(len(fileList)):\r\n # get bearing between two files\r\n nextFile = fileIndex + 1\r\n myDestination = []\r\n if nextFile < len(fileList):\r\n currentFileInformation = getFileInformation(fileList[fileIndex])\r\n nextFileInformation = getFileInformation(fileList[nextFile])\r\n currentFileInformation['bearing'] = bearing((currentFileInformation['latitude'],currentFileInformation['longitude']),(nextFileInformation['latitude'],nextFileInformation['longitude']))\r\n currentFileInformation['betha2'] = 90 - degrees(asin((currentFileInformation[\"heightFootprint\"]/2)/(currentFileInformation[\"widthFootprint\"]/2)))\r\n currentFileInformation[\"betha3\"] = 90 + currentFileInformation[\"betha2\"]\r\n currentFileInformation[\"betha4\"] = 180+ currentFileInformation[\"betha2\"]\r\n currentFileInformation[\"betha1\"] = 180+ currentFileInformation[\"betha3\"]\r\n myDestination.append( destination((currentFileInformation[\"latitude\"], currentFileInformation[\"longitude\"]),currentFileInformation[\"distance\"], currentFileInformation[\"betha2\"]+currentFileInformation[\"bearing\"]))\r\n myDestination.append( destination((currentFileInformation[\"latitude\"], currentFileInformation[\"longitude\"]),currentFileInformation[\"distance\"], currentFileInformation[\"betha3\"]+currentFileInformation[\"bearing\"]))\r\n myDestination.append( destination((currentFileInformation[\"latitude\"], currentFileInformation[\"longitude\"]),currentFileInformation[\"distance\"], currentFileInformation[\"betha4\"]+currentFileInformation[\"bearing\"]))\r\n myDestination.append( destination((currentFileInformation[\"latitude\"], currentFileInformation[\"longitude\"]),currentFileInformation[\"distance\"], currentFileInformation[\"betha1\"]+currentFileInformation[\"bearing\"]))\r\n\r\n\r\n \r\n filn.write(currentFileInformation[\"image\"]+\"\\t\"+ str(currentFileInformation[\"latitude\"])+\"\\t\"+ str(currentFileInformation[\"longitude\"])+\"\\t\"+ str(currentFileInformation[\"heightFootprint\"])+\"\\t\"+ str(currentFileInformation[\"widthFootprint\"])+\"\\t\"+ str(currentFileInformation[\"bearing\"])+\"\\t\"+str(myDestination[0][0])+\" \"+str(myDestination[0][1])+\"\\t\"+str(myDestination[1][0])+\" \"+str(myDestination[1][1])+\"\\t\"+str(myDestination[2][0])+\" \"+str(myDestination[2][1])+\"\\t\"+str(myDestination[3][0])+\" \"+str(myDestination[3][1]))\r\n filn.write(\"\\n\")\r\n filn.close()\r\n \r\n\r\n\r\ndef main():\r\n getAllInformation()\r\n \r\n \r\nif __name__==\"__main__\":\r\n main()\r\n","repo_name":"lamsking/Intership","sub_path":"script/script_coordonnees.py","file_name":"script_coordonnees.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70824932881","text":"import pytest\nimport _pytest\n\nFIXTURES_SEEN = set()\nFIXTURE_MANAGER_PATCHED = False\n\n\ndef pytest_plugin_registered(plugin):\n global FIXTURE_MANAGER_PATCHED\n if not FIXTURE_MANAGER_PATCHED:\n orig = _pytest.fixtures.FixtureDef.__init__\n\n def init_wrapper(*args, 
argname, func, **kwargs):\n if argname in FIXTURES_SEEN:\n if not any(\n mark.name == \"dupe\" for mark in getattr(func, \"pytestmark\", [])\n ):\n pytest.fail(f\"\"\"Duplicate fixture {argname!r} found!\n If this is intended, mark the overriding fixture with pytest.mark.dupe.\"\"\")\n FIXTURES_SEEN.add(argname)\n return orig(*args, argname=argname, func=func, **kwargs)\n\n _pytest.fixtures.FixtureDef.__init__ = init_wrapper\n FIXTURE_MANAGER_PATCHED = True\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\n \"markers\",\n \"dupe: allow fixture to be able to override an existing fixture of the same name\",\n )\n","repo_name":"L3viathan/pytest-fixture-rtttg","sub_path":"pytest_fixture_rtttg.py","file_name":"pytest_fixture_rtttg.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"74974748241","text":"import sys\nsys.path.append('.')\nfrom collections import Counter\nfrom pprint import pprint\nfrom time import time\nfrom typing import List, Union\n\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model.stochastic_gradient import BaseSGDClassifier\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom minimal_example.data_reader import get_20newsgroups_data\n\n\ndef encode_targets(data_train, data_test):\n train_labels: List[str] = [label for _, label in data_train]\n test_labels: List[str] = [label for _, label in data_test]\n label_encoder = LabelEncoder()\n label_encoder.fit(train_labels)\n targets_train_encoded = label_encoder.transform(train_labels)\n targets_test_encoded = label_encoder.transform(test_labels)\n return label_encoder, targets_train_encoded, targets_test_encoded\n\n\ndef benchmark(\n clf: Union[BaseSGDClassifier, MultinomialNB],\n matrix_train,\n matrix_test,\n y_train,\n y_test,\n):\n print(\"_\" * 80)\n print(str(clf).split(\"(\")[0])\n\n t0 = time()\n clf.fit(matrix_train, y_train)\n train_time = time() - t0\n print(\"train time: %0.3fs\" % train_time)\n\n t0 = time()\n pred_train = clf.predict(matrix_train)\n pred_test = clf.predict(matrix_test)\n test_time = time() - t0\n print(\"test time: %0.3fs\" % test_time)\n\n score = metrics.accuracy_score(y_train, pred_train)\n print(\"train-f1-micro: %0.3f\" % score)\n score = metrics.accuracy_score(y_test, pred_test)\n print(\"test-f1-micro: %0.3f\" % score)\n\n\nif __name__ == \"__main__\":\n data_train = get_20newsgroups_data(\"train\")\n data_test = get_20newsgroups_data(\"test\")\n\n vectorizer = TfidfVectorizer(\n sublinear_tf=True,\n max_df=0.75,\n min_df=2,\n max_features=30000,\n stop_words=\"english\",\n )\n matrix_train = vectorizer.fit_transform([text for text, _ in data_train])\n print(\"n_samples: %d, n_features: %d\" % matrix_train.shape)\n\n matrix_test = vectorizer.transform([text for text, _ in data_test])\n print(\"n_samples: %d, n_features: %d\" % matrix_test.shape)\n\n pprint(Counter([label for _, label in data_train]))\n\n label_encoder, targets_train, targets_test = encode_targets(data_train, data_test)\n\n def benchmark_fun(clf):\n return benchmark(clf, matrix_train, matrix_test, targets_train, targets_test)\n\n benchmark_fun(\n SGDClassifier(alpha=0.00001, loss=\"log\", penalty=\"elasticnet\", l1_ratio=0.2)\n )\n\n 
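# Multinomial naive Bayes: the classic bag-of-words baseline; alpha=0.01 applies light additive (Lidstone) smoothing\n    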
benchmark_fun(MultinomialNB(alpha=0.01))\n","repo_name":"dertilo/text-classification","sub_path":"minimal_example/minimal_document_classification.py","file_name":"minimal_document_classification.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"43563184209","text":"# Prepare a format string: time_format\ntime_format = '%Y-%m-%d %H:%M'\n\n# Convert date_list into a datetime object: my_datetimes\nmy_datetimes = pd.to_datetime(date_list, format=time_format) \n\n# Construct a pandas Series using temperature_list and my_datetimes: time_series\ntime_series = pd.Series(temperature_list, index=my_datetimes)\n\n#--EXTRACTING DATES-------------\n\n\n# Extract the hour from 9pm to 10pm on '2010-10-11': ts1\nts1 = ts0.loc['2010-10-11 21:00:00':'2010-10-11 22:00:00']\n\n# Extract '2010-07-04' from ts0: ts2\nts2 = ts0.loc['2010-07-04']\n\n# Extract data from '2010-12-15' to '2010-12-31': ts3\nts3 = ts0.loc['2010-12-15':'2010-12-31']\n\n\n#----COMBINING THE TIME SERIES-------------\n\n# Reindex without fill method: ts3\nts3 = ts2.reindex(ts1.index)\n\n# Reindex with fill method, using forward fill: ts4\nts4 = ts2.reindex(ts1.index,method='ffill')\n\n# Combine ts1 + ts2: sum12\nsum12 = ts1+ts2\n\n# Combine ts1 + ts3: sum13\nsum13 = ts1+ts3\n\n# Combine ts1 + ts4: sum14\nsum14 = ts1+ts4\n","repo_name":"JitenKumar/Data-Science-Practice-With-Python","sub_path":"Pandas Foundation/Pandas Time Series/time_series.py","file_name":"time_series.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
{"seq_id":"11148904706","text":"#Ture Farwell\n#oc-cs240 Advanced Computer Science\n#Pygame part 1 - Create a screen with a pale blue background and an Olympic flag\n#pygame part 2 - Add a background image and have it move around the screen\n\nimport pygame\n\npygame.init()\n\n# Screen size\nwidth, height = 640, 480\nscreen = pygame.display.set_mode((width, height))\n\npie = pygame.image.load(\"pie.jpg\").convert_alpha()\npie_rect = pie.get_rect()\n\nhorizontal = 1\nvertical = 3\n\nrunning = True\nwhile running:\n\n    screen.fill((186,203,240))\n    screen.blit(pie, pie_rect)\n    pygame.draw.rect(screen,(255, 255, 255),(160,100,300,250))\n    pygame.draw.circle(screen, (0,0,255),(220,180), 50, 6)#blue\n    pygame.draw.circle(screen,(0,0,0),(310,180),50,6) #black ring\n    pygame.draw.circle(screen,(255,0,0),(400,180),50,6) #red ring\n    pygame.draw.circle(screen,(255,255,0),(255,250),50,6) #yellow ring\n    pygame.draw.circle(screen,(0,255,0),(360,250),50,6) #green ring\n    pygame.display.flip() #\n\n    pie_rect[0] += horizontal\n    pie_rect[1] += vertical\n\n    if pie_rect.right >= width:\n        horizontal = -1\n    elif pie_rect.left <= 0:\n        horizontal = 1\n    if pie_rect.bottom >= height:\n        vertical = -3\n    elif pie_rect.top <= 0:\n        vertical = 3\n    \n    for event in pygame.event.get(): #code to exit program\n        if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_q):#if you press the red x button or press q it will close the program.\n            running = False\n    \n","repo_name":"farwelltj/cs240-OC","sub_path":"pygame 1.py","file_name":"pygame 1.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"30513388802","text":"#!/usr/bin/python\n\"\"\"\nmap syntax: map(function, list) -- maps the elements of a collection to new values\nPython 2.x: returns a list\nPython 3.x: returns an iterator\n\"\"\"\n\nimport os\nimport sys\n\nsys.path.append(os.getcwd())\n\nfrom src.utils.applogger import AppLogger\n\nLOG = AppLogger(\"map\")\n\ndef main():\n    num = [1, 2, 3]\n    vals = map(lambda n: n * 2, num)\n    # list() works on both the Python 2 and Python 3 return types\n    double_num = list(vals)\n    LOG.info(\"vals: %s, type: %s\" % (vals, type(vals)))\n    LOG.info(\"double_num: %s\" % double_num)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"bascker/py-note","sub_path":"src/base/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"33402271262","text":"\"\"\"\nHere is my solution to the Validate Binary Search Tree Problem\nhttps://leetcode.com/explore/interview/card/top-interview-questions-easy/94/trees/625/\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def isValidBST(self, root: Optional[TreeNode]) -> bool:\n        global treeList\n        treeList = []\n        if not root.left and not root.right:\n            return True\n        rootCompare(root)\n        sortList = sorted(treeList)\n        if treeList != sortList or float('-inf') in treeList:\n            return False\n\n        if len(treeList) != len(set(treeList)):\n            return False\n        return True\n\ndef rootCompare(root):\n    global treeList\n    if root:\n        if root.left and root.left.val >= root.val:\n            treeList.append(float('-inf'))\n        elif root.right and root.right.val <= root.val:\n            treeList.append(float('-inf'))\n        rootCompare(root.left)\n        treeList.append(root.val)\n        rootCompare(root.right)\n    pass\n","repo_name":"DerekAThompson/Fundamentals-Review","sub_path":"Leet Code Interview Questions/Trees/Validate Binary Search Tree.py","file_name":"Validate Binary Search Tree.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"19267889531","text":"from botapi import BotAPI\n\nurl = \"https://s35-en.ikariam.gameforge.com\"\nworld = \"s35-en.ikariam.gameforge.com\"\nemail = \"bottest0@centrum.sk\"\npwd = \"matejko123\"\n\nbot = BotAPI(url, world, email, pwd)\nprint(bot.get_state())\n# print(bot.research('military'))\n# print(get_state())\n# print(build(city_id=42663, position=12, building=4))\n# print(set_workers(1))\n# print(buy_merchant_ship(1))\n# print(upgrade(42663, 1, 1))\n# print(transport(42139, 1, 2, 3, 4, 5))\n","repo_name":"matejsladek/ika-bot","sub_path":"testapi.py","file_name":"testapi.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"1207113939","text":"import json\nimport pickle\nimport re \n\ndef process_rank_result():\n    ################################## Baseline ####################################\n    # attribute = 'color' # [itemform, color]\n    # threshold = 0.12\n    # INPUT_PREDICT = f'./output/VQA/result/{attribute}_result_generation_baseline.json'\n    # PROCESSED_PREDICT = f'./output/VQA/result/processed_{attribute}_result_generation_{threshold}.json'\n    ################################################################################\n\n    ############################### New Architecture ############################### \n    attribute = 'pattern' # [itemform, color, pattern, itemshape]\n    inference = 'rank' # [rank, generate]\n    match_head = 'itm' # [itm, itc]\n    threshold = 0.9 # [0.3, 0.5, 0.7, 0.8, 0.9, 0.95]\n    i = 'final' # [0, 2, 4, 6, 8, 'final']\n    INPUT_PREDICT = 
f'./output/{attribute}/result/{attribute}_{inference}_{match_head}_epoch_{i}.json'\n PROCESSED_PREDICT = f'./output/{attribute}/result/processed_{threshold}_{attribute}_{inference}_{match_head}_epoch_{i}.json'\n ################################################################################ \n\n f_preds= open(INPUT_PREDICT, 'rb')\n preds = json.load(f_preds)\n print(f\"number of preds before preprocessing: {len(preds)}\")\n\n processed_preds = []\n for pred in preds:\n # remove [unknown] from predictions and use threshold to filter\n if pred[\"value\"] != \"unknown\" and pred[\"confidence\"] > threshold:\n # if pred[\"confidence\"] > threshold:\n # if pred[\"value\"] != \"unknown\":\n processed_preds.append(pred)\n print(f\"number of preds after preprocessing: {len(processed_preds)}\")\n\n with open(PROCESSED_PREDICT, 'w') as f_out:\n json.dump(processed_preds, f_out)\n print(f'saved processed_preds to {PROCESSED_PREDICT}!!')\n\n\ndef process_generate_result():\n ################################## Baseline ####################################\n # attribute = 'itemform'\n # attribute = 'color'\n # threshold = 0.12\n # INPUT_PREDICT = f'./output/VQA/result/{attribute}_result_generation_baseline.json'\n # PROCESSED_PREDICT = f'./output/VQA/result/processed_{attribute}_result_generation_{threshold}.json'\n ################################################################################\n\n ############################### New Architecture ############################### \n attribute = 'color' # [itemform, color, pattern, itemshape]\n result_folder = 'color_vis_attn'\n inference = 'generate' # [rank, generate]\n match_head = 'itc' # [itm, itc]\n threshold = 0.9 # [0.3, 0.5, 0.7, 0.8, 0.9]\n i = 'final' # [0, 2, 4, 6, 8, 'final']\n INPUT_PREDICT = f'./output/{result_folder}/result/{attribute}_{inference}_{match_head}_epoch_{i}.json'\n gt_path = f'/pg_intern_ebs/hejiecui/data/evaluation/{attribute}/gt_test.pkl'\n PROCESSED_PREDICT = f'./output/{result_folder}/result/processed_{threshold}_{attribute}_{inference}_{match_head}_epoch_{i}.json'\n answer_list = f'/pg_intern_ebs/hejiecui/data/{attribute}/{attribute}_answer_list_known.json'\n ################################################################################ \n\n f_preds= open(INPUT_PREDICT, 'rb')\n preds = json.load(f_preds)\n\n f_gt = open(gt_path, 'rb')\n gt = list(pickle.load(f_gt))\n ASIN = \"asin\"\n gt_by_asin = {x[ASIN]: x for x in gt}\n\n # if attribute == 'color':\n # possible_words = ['blue','brown','multi','green','black','color','red','yellow','silver','gray','clear','white','gold','necklace','orange','stainless steel','steel','other','chrome','pink','purple','wood','natural','beige']\n # elif attribute == 'itemform':\n with open(answer_list) as f:\n possible_words = json.load(f)\n # print(possible_words)\n\n processed_preds = []\n for pred in preds:\n # predict_values = pred[\"value\"].split(\" or \")\n predict_values = re.findall(r\"\\b({})\\b\".format('|'.join(possible_words)), pred[\"value\"], flags=re.IGNORECASE)\n # print(f'pred[\"asin\"]: {pred[\"asin\"]}, pred[\"value\"]: {pred[\"value\"]}, predict_values: {predict_values}')\n for v in predict_values:\n asin = pred['asin']\n # remove [unknown] from predictions and use threshold to filter\n # if v != \"unknown\" and pred[\"confidence\"] > threshold and gt_by_asin[asin]['gold value'] != '':\n if pred[\"confidence\"] > threshold and gt_by_asin[asin]['gold value'] != '':\n sub_predict = {\"asin\": pred[\"asin\"], \"value\": v, \"confidence\": pred[\"confidence\"]}\n 
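# one output row per extracted value, so each candidate value is scored independently downstream\n                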
processed_preds.append(sub_predict)\n\n print(f\"number of preds before preprocessing: {len(preds)}\")\n print(f\"number of preds after preprocessing: {len(processed_preds)}\")\n with open(PROCESSED_PREDICT, 'w') as f_out:\n json.dump(processed_preds, f_out)\n print(f'saved processed_preds to {PROCESSED_PREDICT}!!')\n\n\n\nif __name__ == '__main__':\n # process_rank_result()\n process_generate_result()","repo_name":"HennyJie/PV2TEA_Multimodal_Attribute_Extraction","sub_path":"evaluate/processings.py","file_name":"processings.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1019408425","text":"import math\nimport time\n\nimport cv2\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nfrom ultralytics import YOLO\n\nfrom .models import CameraModel\n\ncred = credentials.Certificate('./googleServices/firedetection.json')\napp = firebase_admin.initialize_app(cred)\ndb = firestore.client()\n\n\ndef find_camera(ids):\n cameras_list = CameraModel.objects.get(id=ids)\n cameras = [cameras_list.web_address]\n return cameras[int(0)]\n\n\ndef generate(pk):\n cam = find_camera(pk)\n yolo_output = video_detection(cam, pk)\n for detection_ in yolo_output:\n ref, buffer = cv2.imencode('.jpg', detection_)\n\n frame = buffer.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\n\ndef video_detection(path_x, pk):\n camera_pk = CameraModel.objects.get(pk=pk)\n print(camera_pk.web_address)\n alert_camera = db.collection(\"AlertCamera\").document(str(camera_pk.pk) + camera_pk.camera_name)\n\n video_capture = path_x\n detected_camera = CameraModel.objects.filter(web_address=video_capture)[0]\n # Create a Webcam Object\n cap = cv2.VideoCapture(video_capture)\n frame_width = int(cap.get(3))\n frame_height = int(cap.get(4))\n # out=cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M', 'J', 'P','G'), 10, (frame_width, frame_height))\n\n model = YOLO(\"YOLO-Weights/best.pt\")\n classNames = [\"Fire\", \"Smoke\"]\n iterator = 0\n while True:\n iterator += 1\n print(iterator)\n is_detected = False\n success, img = cap.read()\n results = model(img, stream=True, imgsz=320, conf=0.5)\n for r in results:\n boxes = r.boxes\n for box in boxes:\n is_detected = True\n x1, y1, x2, y2 = box.xyxy[0]\n x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)\n\n cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 255), 3)\n conf = math.ceil((box.conf[0] * 100)) / 100\n cls = int(box.cls[0])\n class_name = classNames[cls]\n label = f'{class_name}{conf}'\n t_size = cv2.getTextSize(label, 0, fontScale=1, thickness=2)[0]\n\n c2 = x1 + t_size[0], y1 - t_size[1] - 3\n cv2.rectangle(img, (x1, y1), c2, [255, 0, 255], -1, cv2.LINE_AA) # filled\n cv2.putText(img, label, (x1, y1 - 2), 0, 1, [255, 255, 255], thickness=1, lineType=cv2.LINE_AA)\n if iterator >= 1000:\n iterator = 0\n alert_camera.set({\n \"id\": str(camera_pk.pk) + camera_pk.camera_name,\n \"web_address\": camera_pk.web_address,\n \"camera_id\": camera_pk.pk,\n \"time\": int(round(time.time() * 1000)),\n }, merge=True)\n\n yield img\n # out.write(img)\n # cv2.imshow(\"image\", img)\n # if cv2.waitKey(1) & 0xFF==ord('1'):\n # break\n # 
out.release()\n\n\ncv2.destroyAllWindows()\n","repo_name":"DilshodbekMX/EarlyFireDetecion1Version","sub_path":"director/multiCameraDetect.py","file_name":"multiCameraDetect.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30513388802","text":"_base_ = [\n 'ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.py'\n]\n\n# model settings\nmodel = dict(\n backbone=dict(\n norm_eval=True,\n bn_frozen=True,\n bottleneck_mode='ip',\n pretrained= # noqa: E251\n 'https://download.openmmlab.com/mmaction/recognition/csn/ipcsn_from_scratch_r152_sports1m_20210617-7a7cc5b9.pth' # noqa: E501\n ),\n data_preprocessor=dict(\n type='ActionDataPreprocessor',\n mean=[110.2008, 100.63983, 95.99475],\n std=[58.14765, 56.46975, 55.332195],\n format_shape='NCTHW'))\n","repo_name":"open-mmlab/mmaction2","sub_path":"configs/recognition/csn/ipcsn_sports1m-pretrained-r152-bnfrozen_32x2x1-58e_kinetics400-rgb.py","file_name":"ipcsn_sports1m-pretrained-r152-bnfrozen_32x2x1-58e_kinetics400-rgb.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":3560,"dataset":"github-code","pt":"3"} +{"seq_id":"74877042642","text":"import cv2\nimport numpy as np\nimport tensorflow as tf\nimport imutils\nfrom imutils.contours import sort_contours\nimport math\nimport os\n\ndirectory= os.path.dirname(__file__)\n\n#stores the labels for the letter ai model ie if model outputs 0 then it is A etc\nlabelsL = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',\n 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n\n#stores the labels for the number ai model ie if model outputs 0 then it is 0 etc\nlabelsN = ['0','1','2','3','4','5','6','7','8','9']\n\n#labelsM = ['div','decimal','(',')','+','-','=','alpha','beta','cos','delta','gamma','geq','gt','infty','int','lambda','log','lt','mu','neq','pi','pm','rightarrow','sigma','sqrt','tan','theta','times','[',']','nothing','!','sin']\n\n#stores the labels for the maths ai model ie if model outputs 0 then it is - etc\nlabelsM = ['-','+','.','/','(',')','!','theta']\n\n#stores the labels for the extra ai model ie if model outputs 0 then it is _alpha\nlabelsE = ['_alpha','_beta','_delta','_Delta1','_epsilon','_exists','_gamma','_infty','_int','_lambda','_mu','_omega','_Omega1','_pi','_sim','_sqrt{ }','_theta','_sigma','_Sigma1','_neq','_1','_','+']\n\n\n#stores manual probabilities meaning if the model outputs a percentage for that specific symbol above the probability in the dict then it is that symbol no matter how high the other models outputs are\nprobabilities = dict()\nprobabilities['_sqrt{ }'] = 0.85\nprobabilities['_exists'] = 0.972\nprobabilities['_theta'] = 0.85\nprobabilities['_infty'] = 0.95\nprobabilities['_pi'] = 0.90\nprobabilities['_int'] = 0.96\nprobabilities['_epsilon'] = 0.97\nprobabilities['+'] = [0.99,0.999,'T']\n\n\n\n\n\nclass Image:\n ''' class to store the extracted letters/symbols from the image'''\n # the image itself ie the 2d array of pixels with the same dimensions as the original image\n image = None\n\n # the start and end of the row and column of the image part of the image which is the actual letter\n # ie if you extract from image[rowStart:rowEnd, colStart:colEnd] you get the letter itself\n rowStart = 0\n rowEnd = 0\n colStart = 0\n colEnd = 0\n\n # the row and column the letter is in relative to the other letters\n rowNumber = None\n columnNumber = None\n\n # the 
number of pixels in the image\n pixelCount = None\n\n # the index of the image in the array of all images\n index = None\n\n # the character the image is predicted to be\n character = None\n\n\ndef checkSimilarity(image1, image2):\n '''Checks if the two images are similar based on the number of pixels that are different'''\n if image2 is None:\n return 1\n count = 0\n difference = 0\n for i in range(0, image1.shape[0]):\n for j in range(0, image1.shape[1]):\n count += 1\n if (image1[i][j] != image2[i][j]):\n difference += 1\n\n return difference/count\n\n\n\ndef sort(imgArray):\n '''imgArray is the array of each individual symbol extracted from the image \\n\n uses the ai model to work out what all the symbols are then sorts them into rows and columns'''\n\n #stores each symbol extracted from the image and its properties\n allImages = []\n for img in imgArray:\n shape = img.shape\n rowStart = shape[0]\n rowEnd = 0\n colStart = shape[1]\n colEnd = 0\n pixelCount = 0\n\n # find the start and end of the row and column of the image ie remove all the black space around the image (background is black white is the writing)\n for i in range(0, shape[0]):\n for j in range(0, shape[1]):\n pixel = img[i][j]\n if pixel != 0:\n pixelCount += 1\n if (colStart > j):\n colStart = j\n if (colEnd < j):\n colEnd = j\n if (rowStart > i):\n rowStart = i\n if (rowEnd < i):\n rowEnd = i\n \n #include some of the black space as improves accuracy in ai model\n if (rowStart == rowEnd):\n rowEnd = rowEnd + 1\n if (colStart == colEnd):\n colEnd = colEnd + 1\n if (rowStart >= 5):\n rowStart -= 5\n if (colStart >= 5):\n colStart -= 5\n if (rowEnd + 5 < shape[0]):\n rowEnd += 5\n if (colEnd + 5 < shape[1]):\n colEnd += 5\n tempImg = img[rowStart:rowEnd, colStart:colEnd]\n before = tempImg\n\n # resize img to 50x50 as that is what the ai model was trained on\n tempImg = cv2.resize(tempImg, (50, 50))\n before2 = tempImg\n\n #normalise the image\n tempImg = np.array(tempImg) / 255\n\n #reshape the image to be 4d array as that is what the ai model was trained on\n tempImg = tempImg.reshape(-1, 50, 50, 1)\n\n #pass the image into the ai model's\n predictionLetter = loadedLetter_model.predict(tempImg)\n predictionNumber = loadedNumber_model.predict(tempImg)\n predictionMaths = loadedMaths_model.predict(tempImg)\n predictionExtra = loadedExtra_model.predict(tempImg)\n highestIndex = [-1,-1,-1,-1]\n highestValue = [0,0,0,0]\n \n #find the highest value and index of the highest value for each model\n for i in range(0, len(predictionLetter[0])):\n if (predictionLetter[0][i] > highestValue[0]):\n highestValue[0] = predictionLetter[0][i]\n highestIndex[0] = i\n for i in range(0, len(predictionNumber[0])):\n if (predictionNumber[0][i] > highestValue[1]):\n highestValue[1] = predictionNumber[0][i]\n highestIndex[1] = i\n for i in range(0, len(predictionMaths[0])):\n if (predictionMaths[0][i] > highestValue[2]):\n highestValue[2] = predictionMaths[0][i]\n highestIndex[2] = i\n for i in range(0, len(predictionExtra[0])):\n if (predictionExtra[0][i] > highestValue[3]):\n highestValue[3] = predictionExtra[0][i]\n highestIndex[3] = i\n highestTotal = -1\n finalPrediction = -1\n finalLabel = -1\n print(\"Potential Letter: \"+ labelsL[highestIndex[0]] + \" with a confidence of \" + str(predictionLetter[0][highestIndex[0]]))\n print(\"Potential Number: \"+ labelsN[highestIndex[1]] + \" with a confidence of \" + str(predictionNumber[0][highestIndex[1]]))\n print(\"Potential Maths: \"+ labelsM[highestIndex[2]] + \" with a confidence of \" + 
str(predictionMaths[0][highestIndex[2]]))\n        print(\"Potential Extra: \"+ labelsE[highestIndex[3]] + \" with a confidence of \" + str(predictionExtra[0][highestIndex[3]]))\n        # if(highestValue[1] > 0.99):\n        #     highestTotal = 1\n        #     finalPrediction = predictionNumber\n        #     finalLabel = labelsN[highestIndex[highestTotal]]\n\n        #run through the probabilities dict and if the highest value is above the probability then it is that symbol no matter what the other models output\n        if(labelsM[highestIndex[2]] in probabilities):\n            print(\"Maths confidence: \"+str(highestValue[2]))\n            print(\"Maths threshold: \"+str(probabilities[labelsM[highestIndex[2]]][0]))\n            print(\"Letter threshold: \"+str(probabilities[labelsM[highestIndex[2]]][1]))\n            print(\"Required letter: \"+str(probabilities[labelsM[highestIndex[2]]][2]))\n            print(\"Letter confidence: \", highestValue[0])\n            print(\"Letter label:\", labelsL[highestIndex[0]])\n\n\n            if(highestValue[2] >= probabilities[labelsM[highestIndex[2]]][0] and highestValue[0] >= probabilities[labelsM[highestIndex[2]]][1] and labelsL[highestIndex[0]] == probabilities[labelsM[highestIndex[2]]][2] ):\n                highestValue[2] = 1\n\n        #the weighting of the maths model is significantly higher ie for nearly all cases it will be 0.99 or higher it only goes above 0.999 if it is a maths symbol\n        if(highestValue[2] < 0.999):\n            highestValue[2] = 0\n\n        #run through the probabilities dict and if the highest value is above the probability then it is that symbol no matter what the other models output\n        if(labelsE[highestIndex[3]] in probabilities):\n            threshold = probabilities[labelsE[highestIndex[3]]]\n            # '+' maps to a list of conditions (handled in the maths branch above), so only plain float thresholds apply here\n            if(not isinstance(threshold, list) and highestValue[3] >= threshold):\n                highestValue[3] = 1\n        \n        #works out which model has the highest likeliness and sets the final prediction and label to that\n        if(highestValue[0] > highestValue[1] and highestValue[0] > highestValue[2] and highestValue[0] > highestValue[3]):\n            highestTotal = 0\n            finalPrediction = predictionLetter\n            finalLabel = labelsL[highestIndex[highestTotal]]\n        elif(highestValue[1] >= highestValue[0] and highestValue[1] >= highestValue[2] and highestValue[1] >= highestValue[3]):\n            highestTotal = 1\n            finalPrediction = predictionNumber\n            finalLabel = labelsN[highestIndex[highestTotal]]\n        elif(highestValue[2] >= highestValue[0] and highestValue[2] >= highestValue[1] and highestValue[2] >= highestValue[3]):\n            highestTotal = 2\n            finalPrediction = predictionMaths\n            finalLabel = labelsM[highestIndex[highestTotal]]\n        elif(highestValue[3] >= highestValue[0] and highestValue[3] >= highestValue[1] and highestValue[3] >= highestValue[2]):\n            highestTotal = 3\n            finalPrediction = predictionExtra\n            finalLabel = labelsE[highestIndex[highestTotal]]\n        # cv2.imshow(str(finalLabel), before)\n        # cv2.waitKey(0)\n\n        #if the highest value is above 0.5 then it is that symbol otherwise ignore it\n        if(highestValue[highestTotal] > 0.5):\n            print(\"IT is a \" + finalLabel +\n                  \" with a confidence of \" + str(finalPrediction[0][highestIndex[highestTotal]]))\n            \n            #create a new image object and store all the properties of the image in it then add it to the array of all images\n            newImage = Image()\n            newImage.image = img\n            newImage.rowStart = rowStart\n            newImage.rowEnd = rowEnd\n            newImage.colStart = colStart\n            newImage.colEnd = colEnd\n            newImage.pixelCount = pixelCount\n            newImage.character = finalLabel\n            newImage.index = len(allImages)\n            allImages.append(newImage)\n        else:\n            print(\"IT is NOT a \" + str(highestIndex[highestTotal]) +\n                  \" with a confidence of \" + str(finalPrediction[0][highestIndex[highestTotal]]))\n            #cv2.imshow(str(labels[highestIndex]), before)\n            
#cv2.waitKey(0)\n \n #sort the images into rows\n class rows:\n rowStart = None\n rowEnd = None\n rowNumber = None\n allRows = []\n initialRow = rows()\n initialRow.rowStart = allImages[0].rowStart\n initialRow.rowEnd = allImages[0].rowEnd\n initialRow.rowNumber = 0\n allRows.append(initialRow)\n index = 0\n for image in allImages:\n for row in allRows:\n\n # the image's start is less than the row's start and the image's end is greater than the row's end meaning\n # the images contains the rows olds start and end meaning it is part of the row and the row's new start and end\n if (image.rowStart < row.rowStart and image.rowEnd > row.rowEnd):\n image.rowNumber = row.rowNumber\n allRows[row.rowNumber].rowStart = image.rowStart\n allRows[row.rowNumber].rowEnd = image.rowEnd\n break\n\n # the image's start is less than the row's start and the image's end is less than the row's end meaning\n # the image is inside the rows dimensions so is a part of it\n if (image.rowStart > row.rowStart and image.rowEnd < row.rowEnd):\n image.rowNumber = row.rowNumber\n break\n\n # count how many pixels are within the rows range\n count = 0\n for i in range(row.rowStart, row.rowEnd):\n for j in range(image.colStart, image.colEnd):\n if (image.image[i][j] != 0):\n count += 1\n\n # if more than half the pixels are within the row's range then the image is part of the row\n if (count/image.pixelCount > 0.5):\n image.rowNumber = row.rowNumber\n \n # if the image doesnt fit into any current row's then create a new row and add it to the array of all rows\n if (image.rowNumber is None):\n\n newRow = rows()\n newRow.rowStart = image.rowStart\n newRow.rowEnd = image.rowEnd\n newRow.rowNumber = len(allRows)\n allRows.append(newRow)\n image.rowNumber = newRow.rowNumber\n allImages[index] = image\n index += 1\n print(len(allRows))\n\n #sort the images into columns\n for row in allRows:\n allImagesInRow = []\n\n #assign each image a index value for later use\n index = 0\n for img in allImages:\n allImages[index].index = index\n index += 1\n #add all images in the row to the array\n if (img.rowNumber == row.rowNumber):\n allImagesInRow.append(img)\n\n #sort the images in the row by their start column\n allImagesInRow.sort(key=lambda x: x.colStart)\n print(\"Length Before: \"+str(len(allImagesInRow)))\n removed = []\n #remove duplicates based on similarity\n for i in range(0, len(allImagesInRow)-1):\n if (i >= len(allImagesInRow)-1):\n break\n sim = checkSimilarity(\n allImagesInRow[i].image, allImagesInRow[i+1].image)\n print(\"sim: \"+str(sim))\n if (sim < 0.00009):\n removed.append(allImagesInRow[i+1])\n allImagesInRow.pop(i+1)\n print(\"removed\")\n print(\"Length After: \"+str(len(allImagesInRow)))\n\n #assign each image a column number\n for i in range(0, len(allImagesInRow)):\n allImagesInRow[i].columnNumber = i\n allImages[allImagesInRow[i].index] = allImagesInRow[i]\n for i in range(0, len(removed)):\n allImages.remove(removed[i])\n\n return allImages\n\n#Load Letter Model\njson_file = open(\n directory+'/Model/Model/LetterModel.json', 'r')\nletterModel = json_file.read()\njson_file.close()\nloadedLetter_model = tf.keras.models.model_from_json(letterModel)\nloadedLetter_model.load_weights(\n directory+\"/Model/Model/LetterModel.h5\")\n\n#Load Number Model\njson_file = open(\n directory+'/Model/Model/NumberModel.json', 'r')\nnumberModel = json_file.read()\njson_file.close()\nloadedNumber_model = tf.keras.models.model_from_json(numberModel)\nloadedNumber_model.load_weights(\n 
directory+\"/Model/Model/NumberModel.h5\")\n\n#Load Maths symbol Model\njson_file = open(\n directory+'/Model/Model/MathsModel.json', 'r')\nmathsModel = json_file.read()\njson_file.close()\nloadedMaths_model = tf.keras.models.model_from_json(mathsModel)\nloadedMaths_model.load_weights(\n directory+\"/Model/Model/MathsModel.h5\")\n\n\n#Load Extra Model\njson_file = open(\n directory+'/Model/Model/ExtraModel.json', 'r')\nextraModel = json_file.read()\njson_file.close()\nloadedExtra_model = tf.keras.models.model_from_json(extraModel)\nloadedExtra_model.load_weights(\n directory+\"/Model/Model/ExtraModel.h5\")\n\n#read in the image as greyscale\nimg = cv2.imread(directory+'/Testing_Images/Formula6.jpg', 0)\n\n\n#invert the image as the writing is white and the background is black\nimg = (255-img)\n#img = cv2.GaussianBlur(img, (5, 5), 0)\n#img = cv2.bitwise_not(img)\nvis = img.copy()\nmser = cv2.MSER_create()\nregions, _ = mser.detectRegions(img)\n# print(regions)\nhulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]\ncv2.polylines(vis, hulls, 1, (0, 255, 0))\ncv2.imshow('img', vis)\ncv2.waitKey(0)\nmask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)\n\n\nimgArray = []\nhigh_thresh, thresh_im = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\nlowThresh = 0.5*high_thresh\n\n#detect edges in the image\nedged = cv2.Canny(img, lowThresh, high_thresh)\n\n#find the contours in the image from the edges\ncnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\ncnts = imutils.grab_contours(cnts)\n\n#sort the contours from left to right\ncnts = sort_contours(cnts, method=\"left-to-right\")[0]\n\n#loop through each contour and draw it onto the mask then add the image with the mask applied to the array of images\nfor contour in cnts:\n tempImg = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)\n cv2.drawContours(tempImg, [contour], -1, (255, 255, 255), -1)\n imgArray.append(cv2.bitwise_and(img, img, mask=tempImg))\n # cv2.imshow('img', cv2.bitwise_and(img, img, mask=tempImg))\n # cv2.waitKey(0)\n\nprevious = None\ntempImgArray = []\n# does removes some duplicates based on similarity if they are next to each other in the array (not all) are removed\n# as some are duplicates but not next to each other\nfor img2 in imgArray:\n if (checkSimilarity(img2, previous) < 0.0009):\n previous = img2\n continue\n tempImgArray.append(img2)\n previous = img2\nimgArray = tempImgArray\n\n\nfinalImgArray = sort(imgArray)\n\n#create a dict of all the rows and their images\narrayRows = dict()\nfor img2 in finalImgArray:\n if (not (img2.rowNumber in arrayRows)):\n arrayRows[img2.rowNumber] = [img2]\n else:\n arrayRows[img2.rowNumber].append(img2)\n\n#sort the rows into order and add spaces between symbols where they should be ie how they are written\narrayRows2 = []\nfor row in arrayRows:\n arrayRows[row].sort(key=lambda x: x.colStart)\n total = 0\n previous = arrayRows[row][0].colEnd\n first = True\n\n #calculate all the spaces between the symbols in the row and add each space value to an array\n values = []\n for img2 in arrayRows[row]:\n if (first):\n first = False\n continue\n\n space = img2.colStart - previous\n total += space\n previous = img2.colEnd\n values.append(space)\n \n #if no spaces then continue\n if(len(values) == 0):\n continue\n\n #calculate the average space between the symbols\n average = total/(len(values))\n originalValues = values.copy()\n\n #calculate the standard deviation of the spaces\n for i in range(0, len(values)):\n 
values[i] = values[i] - average\n final = 0\n for i in range(0, len(values)):\n final += values[i]*values[i]\n\n standardDeviation = math.sqrt(final/len(values))\n\n #the threshold for a space to be considered a space ie anything above this value is a space\n value = average + (1*standardDeviation)\n print(\"mean: \"+str(average))\n print(\"standard deviation: \"+str(standardDeviation))\n print(\"value: \"+str(value))\n\n #add spaces between the symbols where they should be\n for i in range(0, len(arrayRows[row])):\n\n if (i == 0):\n continue\n print(\"value[\"+str(i-1)+\"]: \"+str(originalValues[i-1]))\n if (originalValues[i-1] > value):\n print(\"space\")\n spaceImage = Image()\n spaceImage.image = None\n spaceImage.character = ' '\n arrayRows[row].insert(i, spaceImage)\n\n arrayRows2.append(arrayRows[row])\narrayRows2.sort(key=lambda x: x[0].rowStart)\ncharacters = []\n\n#this turns symbols into what they should be ie as = has space between them the edge detection will see them as two symbols so this turns them into one symbol ie -- will become = only if it should be\nfor row in arrayRows2:\n characters.append([])\n previous = None\n for img2 in row:\n #cv2.imshow(\"img\", img2.image)\n # cv2.waitKey(0)\n current = img2.character\n if(previous != None):\n if(current == '-' and previous.character == '-'):\n count = 0\n #Do a check to see if both - are in same row\n for i in range(previous.rowStart,previous.rowEnd):\n for j in range(img2.colStart,img2.colEnd):\n if(previous.image[i][j] != 0):\n count += 1\n print(\"count: \"+str(count))\n print(\"previous pixel count: \"+str(previous.pixelCount*0.5))\n if(count > 0.5*previous.pixelCount):\n characters[len(characters)-1].pop()\n characters[len(characters)-1].append('=')\n previous = None\n continue\n previous = img2\n characters[len(characters)-1].append(img2.character)\nprint(characters)\n\n#turn the 2d array of symbols into a string\nfinalString = \"\"\nfor row in characters:\n for character in row:\n finalString += character\n finalString += '\\n'\nprint(finalString)\n\n#replace some of the symbols with their latex equivalent\nfinalString = finalString.replace('div', '\\div')\nfinalString = finalString.replace('_alpha', \"\\\\alpha\")\nfinalString = finalString.replace('_beta', '\\\\beta')\nfinalString = finalString.replace('_gamma', '\\gamma')\nfinalString = finalString.replace('_delta', '\\delta')\nfinalString = finalString.replace('_theta', '\\\\theta')\nfinalString = finalString.replace('_lambda', '\\lambda')\nfinalString = finalString.replace('_mu', '\\mu')\nfinalString = finalString.replace('_pi', '\\pi')\nfinalString = finalString.replace('_sigma', '\\sigma')\nfinalString = finalString.replace('_sqrt', '\\sqrt')\nfinalString = finalString.replace('_tan', '\\\\tan')\nfinalString = finalString.replace('_times', '\\\\times')\nfinalString = finalString.replace('_infty', '\\infty')\nfinalString = finalString.replace('_int', '\\int')\nfinalString = finalString.replace('_log', '\\log')\nfinalString = finalString.replace('pm', '\\pm')\nfinalString = finalString.replace('rightarrow', '\\\\rightarrow')\nfinalString = finalString.replace('geq', '\\geq')\nfinalString = finalString.replace('gt', '\\gt')\nfinalString = finalString.replace('lt', '\\lt')\nfinalString = finalString.replace('neq', '\\\\neq')\nfinalString = finalString.replace('nothing', '')\nfinalString = finalString.replace('sin', '\\sin')\nfinalString = finalString.replace('cos', 
'\\cos')\n\nprint(finalString)\n\n\ncv2.destroyAllWindows()\n","repo_name":"Cufe12345/Luxfe","sub_path":"Backend/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":22258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"12974973545","text":"#The program receives an unknown number of values for the prices of the goods. A value of zero must be entered by the operator to indicate the end of the purchase. The program must then show the purchase total and ask how much cash the customer handed over, then compute and show the change. After this operation, the program must return to the starting point to register the next purchase. The output must follow the example below:\n#Mercadinho BigBom\n#Produto 1: R$ 2.20\n#Produto 2: R$ 5.80\n#Produto 3: R$ 0\n\nqtd_compra = [] #number of purchases made\n\nwhile(True):\n    \n    qtd_compra.append(1)\n    print(f\"Compra {len(qtd_compra)}\")\n\n    preco_produto = 1 #price of each individual product\n    total_compra = 0 #running total of the purchase\n    np = 1 #product number\n    pagamento = 0 #amount paid by the customer\n    troco = 0 #customer's change\n\n    while(preco_produto != 0):\n        preco_produto = float(input(f\"Informe o preço do produto {np}: R$\"))\n        total_compra = total_compra + preco_produto\n        np = np+1\n    \n    print(f\"Total da compra: R${total_compra:.2f}\")\n    pagamento = float(input(\"Pagamento: R$\"))\n\n    while(pagamento < total_compra):\n        print(\"Pagamento inválido, faça novamente!\")\n        pagamento = float(input(\"Pagamento: R$\"))\n\n    troco = pagamento - total_compra\n\n    print(f\"Troco: R${troco:.2f}\")\n\n    print(\"Luis Felipe Adriani\")\n    print(\"RA: 1922432\")","repo_name":"LuisFelipeA/Python-projects","sub_path":"mercado_lista.py","file_name":"mercado_lista.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"4023336727","text":"# Plot the surface temperature and precipitation through the year for the monsoon region\n\nfrom data_handling_updates import month_dic\nimport numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\nimport sh\nfrom climatology import peak_mse, precip_centroid\n\n\ng = 9.8\ncp = 287.04/2*7\nL = 2.500e6\n\ndef pick_lons(data, lonin):\n    #Find index range covering specified longitudes\n    if lonin[1]>lonin[0]:\n        lons = [data.lon[i] for i in range(len(data.lon)) if data.lon[i] >= lonin[0] and data.lon[i] < lonin[1]]\n    else:\n        lons = [data.lon[i] for i in range(len(data.lon)) if data.lon[i] >= lonin[0] or data.lon[i] < lonin[1]]\n    return lons\n    \ndef precip_mse_plot(data, ax_in, lonin=[-1.,361.], do_xlabels=False, plot_type=None, precip_contour=8., p_cent=True, mse_max=True, month_labels=True):\n    \n    lons = pick_lons(data, lonin)\n    \n    try:\n        precip_plot = (data.precipitation*86400.).sel(lon=lons).mean('lon')\n        \n    except:\n        precip_plot = ((data.convection_rain + data.condensation_rain)*86400.).sel(lon=lons).mean('lon')\n        \n    \n    mse_plot = (data.temp*cp + data.sphum*L + data.height*g).mean('lon')/1000.\n    \n    if plot_type is None:\n        # Default case, plot precip with mse overlaid. 
Choice of whether or not to highlight a specific precip contour\n f1 = precip_plot.plot.contourf(ax=ax_in, x='xofyear', y='lat', levels = np.arange(2.,15.,2.), add_colorbar=False, add_labels=False, extend='max', cmap='Blues')\n if not precip_contour == None:\n precip_plot.plot.contour(ax=ax_in, x='xofyear', y='lat',levels=np.arange(precip_contour-100.,200.,100.), add_labels = False, add_colorbar=False, colors='k', linewidth=2)\n cs = mse_plot.sel(pfull=850.).plot.contour(ax=ax_in, x='xofyear', y='lat', levels=np.arange(200.,401.,10.), add_labels = False, colors='0.7', add_colorbar=False, linewidths=2)\n plt.clabel(cs, fontsize=15, inline_spacing=-1, fmt= '%1.0f')\n if p_cent:\n data = precip_centroid(data,lonin=lonin)\n data.p_cent.plot.line(color='w', ax=ax_in)\n ax_in.set_xlabel('')\n \n elif plot_type == 'precip':\n # No mse, plot precip and precip centroid\n f1 = precip_plot.plot.contourf(ax=ax_in, x='xofyear', y='lat', levels = np.arange(2.,15.,2.), add_colorbar=False, add_labels=False, extend='max', cmap='Blues', linewidth=2)\n if p_cent:\n data = precip_centroid(data,lonin=lonin)\n data.p_cent.plot.line(color='k', ax=ax_in, linewidth=2)\n ax_in.set_xlabel('')\n \n elif plot_type == 'mse':\n # Plot mse in colour, overplot max mse\n f1 = mse_plot.sel(pfull=850.).plot.contourf(ax=ax_in, x='xofyear', y='lat', levels=np.arange(200.,401.,10.), add_labels = False, extend='both', add_colorbar=False)\n if mse_max:\n data_mse = peak_mse(data, lonin=lonin)\n data_mse.mse_max_loc.plot.line('k',ax=ax_in)\n ax_in.set_xlabel('')\n\n \n \n ax_in.set_ylabel('Latitude')\n ax_in.set_ylim(-60,60)\n ax_in.set_yticks(np.arange(-60.,61.,30.))\n ax_in.grid(True,linestyle=':')\n \n if month_labels:\n mn_dic = month_dic(1)\n tickspace = range(13,72,18)\n labels = [mn_dic[(k+5)/6 ] for k in tickspace]\n \n ax_in.set_xlim((1,72))\n ax_in.set_xticks(tickspace)\n \n if do_xlabels:\n ax_in.set_xlabel('')\n ax_in.set_xticklabels(labels,rotation=25)\n \n return f1\n\n\nif __name__ == \"__main__\":\n \n data = xr.open_dataset('/scratch/rg419/Data_moist/climatologies/full_qflux.nc')\n \n plot_dir = '/scratch/rg419/plots/other_monsoons/'\n mkdir = sh.mkdir.bake('-p')\n mkdir(plot_dir)\n \n #rcParams['figure.figsize'] = 6, 10\n #rcParams['font.size'] = 20\n \n fig, (ax1, ax2) = plt.subplots(2, sharex=True)\n f1 = precip_mse_plot(data, ax1, lonin=[345.,45.])\n ax1.set_title('Africa')\n precip_mse_plot(data, ax2, do_xlabels=True, lonin=[240.,270.])\n ax2.set_title('Central America')\n plt.subplots_adjust(left=0.2, right=0.95, top=0.95, bottom=0., hspace=0.2)\n #Colorbar\n cb1=fig.colorbar(f1, ax=(ax1, ax2), use_gridspec=True, orientation = 'horizontal',fraction=0.15, pad=0.07, aspect=30)\n cb1.set_label('Precipitation, mm/day')\n \n plt.savefig(plot_dir+'precip_mse_hm.pdf', format='pdf')\n plt.close() \n\n","repo_name":"subond/python_scripts","sub_path":"python_bin_updates/physics_updates/climatology/precip_and_mse_hm.py","file_name":"precip_and_mse_hm.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"36285668888","text":"import re\nfrom urlparse import urljoin\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.contrib.auth.models import SiteProfileNotAvailable, ImproperlyConfigured\nfrom django.core.urlresolvers import reverse, NoReverseMatch\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.cache import cache\n\nimport remoteobjects\nimport typepad\nimport 
typepadapp.models\n\n\nclass User(typepad.User):\n '''\n Mock Django User model using the TypePad API.\n The following methods override django.contrib.auth User.\n '''\n\n @property\n def username(self):\n return self.preferred_username\n\n @property\n def first_name(self):\n return self.display_name\n\n @property\n def last_name(self):\n return self.display_name\n\n @property\n def password(self):\n # not aware of password\n raise NotImplementedError\n\n @property\n def is_staff(self):\n return self.is_superuser or self.is_featured_member\n\n @property\n def is_active(self):\n return True\n\n @property\n def is_superuser(self):\n for admin in typepadapp.models.GROUP.admins():\n if self.id == admin.target.id:\n return True\n return False\n\n @property\n def is_featured_member(self):\n if settings.FEATURED_MEMBER is None: return False\n return settings.FEATURED_MEMBER in (self.xid,\n self.preferred_username)\n\n @property\n def date_joined(self):\n return None # does this need to be a datetime?\n\n @property\n def groups(self):\n #return self._groups\n raise NotImplementedError\n\n @property\n def can_post(self):\n if self.is_active and self.is_staff:\n return True\n return settings.ALLOW_COMMUNITY_POSTS\n\n @property\n def user_permissions(self):\n #return self._user_permissions\n raise NotImplementedError\n\n class Meta:\n verbose_name = _('user')\n verbose_name_plural = _('users')\n\n def __unicode__(self):\n return self.username\n\n def get_absolute_url(self):\n \"\"\"Relative url to the user's member profile page.\"\"\"\n try:\n return reverse('member', args=[self.preferred_username or self.url_id])\n except NoReverseMatch:\n return None\n\n def is_anonymous(self):\n return False\n\n def is_authenticated(self):\n return True\n\n def get_full_name(self):\n return self.name\n\n def set_password(self, raw_password):\n raise NotImplementedError\n\n def check_password(self, raw_password):\n raise NotImplementedError\n\n def set_unusable_password(self):\n raise NotImplementedError\n\n def has_usable_password(self):\n raise NotImplementedError\n\n def get_group_permissions(self):\n raise NotImplementedError\n\n def get_all_permissions(self):\n raise NotImplementedError\n\n def has_perm(self, perm):\n raise NotImplementedError\n\n def has_perms(self, perm_list):\n raise NotImplementedError\n\n def has_module_perms(self, module):\n raise NotImplementedError\n\n def get_and_delete_messages(self):\n # required by django.core.context_processors\n return []\n\n def email_user(self, subject, message, from_email=None):\n \"Sends an e-mail to this User.\"\n from django.core.mail import send_mail\n send_mail(subject, message, from_email, [self.email])\n\n # @cached_function\n def get_profile(self):\n \"\"\"\n Returns site-specific profile for this user. Raises\n SiteProfileNotAvailable if this site does not allow profiles.\n \"\"\"\n # user profile class from settings\n if not getattr(settings, 'AUTH_PROFILE_MODULE', False):\n raise SiteProfileNotAvailable\n try:\n app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')\n model = models.get_model(app_label, model_name)\n except (ImportError, ImproperlyConfigured):\n raise SiteProfileNotAvailable\n if model is None:\n error = 'Could not load configured profile model %s.models.%s' % (app_label, model_name)\n if app_label not in settings.INSTALLED_APPS:\n error = '%s. Is %r in INSTALLED_APPS?' 
% (error, app_label)\n raise ImproperlyConfigured(error)\n\n try:\n # get the model by user_id field (instead of a user foreign key field)\n profile = model._default_manager.get(user_id__exact=self.id)\n except model.DoesNotExist:\n # UserProfile for this site doesn't exist yet for this TypePad user\n profile = model()\n # user_id should be populated in case this profile is ever saved\n profile.user_id = self.id\n return profile\n\n def save(self):\n # does nothing yet\n # required by django.contrib.auth login\n pass\n\n def delete(self):\n # does nothing yet\n pass\n\n '''\n End Django User model properties and methods\n '''\n\n def group_events(self, group, start_index=1, max_results=None, **kwargs):\n if max_results is None:\n max_results = settings.EVENTS_PER_PAGE\n return self.events.filter(by_group=group, start_index=start_index,\n max_results=max_results, **kwargs)\n\n def group_memberships(self, group, **kwargs):\n return self.memberships.filter(by_group=group, **kwargs)\n\n def group_notifications(self, group, start_index=1, max_results=None, **kwargs):\n if max_results is None:\n max_results = settings.EVENTS_PER_PAGE\n return self.notifications.filter(by_group=group,\n start_index=start_index, max_results=max_results, **kwargs)\n\n def following(self, group=None, start_index=1, max_results=None, **kwargs):\n if max_results is None:\n max_results = settings.MEMBERS_PER_WIDGET\n if group is not None:\n return self.relationships.filter(following=True, by_group=group,\n start_index=start_index, max_results=max_results, **kwargs)\n return self.relationships.filter(following=True,\n start_index=start_index, max_results=max_results, **kwargs)\n\n def followers(self, group=None, start_index=1, max_results=None, **kwargs):\n if max_results is None:\n max_results = settings.MEMBERS_PER_WIDGET\n if group is not None:\n return self.relationships.filter(follower=True, by_group=group,\n start_index=start_index, max_results=max_results, **kwargs)\n return self.relationships.filter(follower=True,\n start_index=start_index, max_results=max_results, **kwargs)\n\n @property\n def edit_url(self):\n try:\n return reverse('edit_profile_url')\n except NoReverseMatch:\n return None\n\n @property\n def feed_url(self):\n \"\"\"URL for atom feed of user's activity.\"\"\"\n try:\n url = self.get_absolute_url().lstrip('/') # remove starting /\n return reverse('feeds', kwargs={'url': url})\n except NoReverseMatch:\n return None\n\n @property\n def typepad_url(self):\n import logging\n logging.getLogger(\"typepadapp.models.users\").warn(\n 'User.typepad_url is deprecated; use User.profile_page_url instead')\n return self.profile_page_url\n\n @property\n def typepad_edit_url(self):\n import logging\n logging.getLogger(\"typepadapp.models.users\").warn(\n 'User.typepad_edit_url is deprecated; use User.profile_edit_page_url instead')\n return self.profile_edit_page_url\n\n @property\n def typepad_frame_url(self):\n import logging\n logging.getLogger(\"typepadapp.models.users\").warn(\n 'User.typepad_frame_url is deprecated; use User.follow_frame_content_url instead')\n return self.follow_frame_content_url\n\n @property\n def userpic(self):\n \"\"\"Returns a URL for a userpic for the User.\n\n The returned URL should be sized for a 50x50 square, but this\n cannot be guaranteed. 
The img tag should be styled in a way\n that bounds the presentation to 50 pixels square.\n\n \"\"\"\n try:\n return self.avatar_link.square(50).url\n except AttributeError:\n pass\n try:\n return reverse('static-serve', kwargs={'path': settings.DEFAULT_USERPIC_PATH})\n except NoReverseMatch:\n pass\n return None\n\n\nclass UserProfile(typepad.UserProfile):\n\n @property\n def is_superuser(self):\n for admin in typepadapp.models.GROUP.admins():\n if self.id == admin.target.id:\n return True\n return False\n\n @property\n def is_featured_member(self):\n if settings.FEATURED_MEMBER is None: return False\n return settings.FEATURED_MEMBER in (self.id,\n self.preferred_username)\n\n @property\n def typepad_frame_url(self):\n import logging\n logging.getLogger(\"typepadapp.models.users\").warn(\n 'UserProfile.typepad_frame_url is deprecated; use UserProfile.follow_frame_content_url instead')\n return self.follow_frame_content_url\n\n def get_profile(self):\n return User.get_by_id(self.id).get_profile()\n\n\n### Caching support\n\nif settings.FRONTEND_CACHING:\n from typepadapp.caching import cache_link, cache_object, invalidate_rule\n from typepadapp import signals\n\n def make_user_alias_cache_key(self):\n \"\"\"Attempts to use a caching key of the user's username, if available.\"\"\"\n return \"objectcache:%s:%s\" % (self.cache_namespace, self.preferred_username or self.url_id)\n User.cache_key = property(make_user_alias_cache_key)\n UserProfile.cache_key = property(make_user_alias_cache_key)\n\n User.get_by_url_id = cache_object(User.get_by_url_id)\n user_invalidator = invalidate_rule(\n key=lambda sender, instance=None, group=None, **kwargs: instance,\n signals=[signals.member_banned, signals.member_unbanned],\n name=\"user cache invalidation for member_banned, member_unbanned signals\")\n\n UserProfile.get_by_url_id = cache_object(UserProfile.get_by_url_id)\n user_profile_invalidator = invalidate_rule(\n key=lambda sender, instance=None, group=None, **kwargs: UserProfile.get_by_url_id(instance.preferred_username or instance.url_id),\n signals=[signals.member_banned, signals.member_unbanned],\n name=\"user profile cache invalidation for member_banned, member_unbanned signals\")\n\n User.events = cache_link(User.events)\n user_events_invalidator = invalidate_rule(\n key=lambda sender, group=None, instance=None, **kwargs:\n instance and instance.author and group and [instance.author.notifications.filter(by_group=group),\n instance.author.preferred_username and User.get_by_url_id(instance.author.preferred_username).notifications.filter(by_group=group)],\n signals=[signals.asset_created, signals.asset_deleted],\n name=\"user notifications for group cache invalidation for asset_created, asset_deleted signals\")\n\n User.notifications = cache_link(User.notifications)\n # signals.asset_created, signals.asset_deleted\n\n # We can't effectively signal to invalidate these lists because\n # follow/unfollow actions happen on typepad\n User.memberships = cache_link(User.memberships)\n user_memberships_invalidator = invalidate_rule(\n key=lambda sender, instance=None, group=None, **kwargs:\n instance and group and [User.get_by_url_id(instance.url_id).group_memberships(group),\n instance.preferred_username and User.get_by_url_id(instance.preferred_username).group_memberships(group)],\n signals=[signals.member_banned, signals.member_unbanned, signals.member_joined, signals.member_left],\n name=\"user membership invalidation for member_banned, member_unbanned, member_joined, member_left signals\")\n\n 
User.elsewhere_accounts = cache_link(User.elsewhere_accounts)\n    # signals.profile_webhook\n\n    User.relationships = cache_link(User.relationships)\n    # signals.following_webhook, signals.member_left, signals.member_joined\n\n    # do these endpoints really work??\n    # User.comments = cache_link(User.comments)\n    # User.assets = cache_link(User.assets)\n\n    User.favorites = cache_link(User.favorites)\n    user_favorites_invalidator = invalidate_rule(\n        key=lambda sender, instance=None, **kwargs: instance and [instance.author.favorites,\n            instance.author.preferred_username and User.get_by_url_id(instance.author.preferred_username).favorites],\n        signals=[signals.favorite_created, signals.favorite_deleted],\n        name=\"user favorites stream for favorite created/deleted signals\")\n","repo_name":"evanelias/typepadapp","sub_path":"typepadapp/models/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":12776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"27644100376","text":"def superDigit(n, k):\n    # Write your code here\n    # The full input is the digit string n repeated k times, so its digit\n    # sum is k times the digit sum of n; then keep summing digits until a\n    # single digit (the super digit) remains.\n    s = 0\n    for digit in n:\n        s += int(digit)\n    s = s * k\n    while s >= 10:\n        s = sum(int(digit) for digit in str(s))\n    return s\n\n","repo_name":"glaucorampone/HackerRank-Python-Solutions","sub_path":"superDigit.py","file_name":"superDigit.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41027019768","text":"import orjson\nfrom helpers.providers.faker import faker\n\nfrom inspirehep.records.api import JobsRecord\nfrom inspirehep.records.marshmallow.jobs.base import JobsPublicListSchema, JobsRawSchema\n\n\ndef test_jobs_serializer_should_serialize_whole_basic_record():\n    schema = JobsRawSchema()\n    data = {\n        \"$schema\": \"http://localhost:5000/schemas/records/jobs.json\",\n        \"_collections\": [\"Jobs\"],\n        \"deadline_date\": \"1996-11-15\",\n        \"description\": \"Join us!\",\n        \"position\": \"staff\",\n        \"regions\": [\"Europe\"],\n        \"status\": \"closed\",\n    }\n\n    job = JobsRecord(data)\n    result = schema.dump(job).data\n\n    assert result == data\n\n\ndef test_jobs_api_serializer_doesent_return_reference_letters():\n    data = {\"emails\": [\"test.test.test@cern.ch\", \"test@cern.ch\"]}\n    job = faker.record(\"job\", data={\"reference_letters\": data})\n    result = JobsPublicListSchema().dumps(job).data\n    result_data = orjson.loads(result)\n\n    assert \"reference_letters\" not in result_data\n\n\ndef test_jobs_api_serializer_doesent_return_email_in_contact_details():\n    data = [{\"email\": \"test.test.test@cern.ch\", \"name\": \"Test, Contact\"}]\n\n    job = faker.record(\"job\", data={\"contact_details\": data})\n    result = JobsPublicListSchema().dumps(job).data\n    result_data = orjson.loads(result)\n\n    assert \"emails\" not in result_data[\"contact_details\"]\n\n\ndef test_jobs_api_serializer_doesent_return_email_in_acquisition_source():\n    acquisition_source = {\n        \"email\": \"test.test.test@cern.ch\",\n        \"internal_uid\": 60000,\n        \"method\": \"submitter\",\n        \"orcid\": \"0000-0000-0000-0000\",\n        \"source\": \"submitter\",\n        \"submission_number\": \"None\",\n    }\n\n    job = faker.record(\"job\", data={\"acquisition_source\": acquisition_source})\n    result = JobsPublicListSchema().dumps(job).data\n    result_data = orjson.loads(result)\n\n    assert \"email\" not in 
result_data[\"acquisition_source\"]\n","repo_name":"inspirehep/inspirehep","sub_path":"backend/tests/unit/records/marshmallow/jobs/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"} +{"seq_id":"328188642","text":"import discord\nfrom discord import app_commands\nfrom discord.ext import commands\nfrom discord.app_commands import Choice\n\nimport os\nimport json\nimport time\nimport aiohttp\n\nimport logging\nfrom logging.handlers import TimedRotatingFileHandler\n\nimport dotenv\nimport python_aternos\n\n\ndotenv.load_dotenv()\n\nfP = os.path.dirname(os.path.realpath(__file__))\nsP = os.path.dirname(os.path.realpath(__file__)) + \"/sessions/{username}.aternos\"\nif not 'logs' in os.listdir(fP): os.mkdir(f\"{fP}/logs\")\nif not 'sessions' in os.listdir(fP): os.mkdir(f\"{fP}/sessions\")\nif not 'uconfig.json' in os.listdir(fP):\n\twith open('uconfig.json', 'w') as f:\n\t\tjson.dump({\"guilds\": {}, \"users\": {}}, f, indent=2)\n\n# Silence other loggers\n#for log_name, log_obj in logging.Logger.manager.loggerDict.items():\n#\tif log_name == \"discord.client\":\n#\t\tlog_obj.setLevel(logging.ERROR)\n#\t\n#\telif log_name not in [__name__, \"discord\"]:\n#\t\tlog_obj.disabled = True\n\nlogging.basicConfig(\n\tformat='%(asctime)s %(levelname)-8s %(message)s',\n\tlevel=logging.DEBUG,\n\tdatefmt='%Y-%m-%d %H:%M:%S'\n)\nhandler = TimedRotatingFileHandler(f\"{fP}/logs/acs.log\", when=\"midnight\", interval=1)\nhandler.suffix = \"%Y%m%d\"\nformatter = logging.Formatter(\"%(asctime)s %(levelname)-8s %(message)s\")\nhandler.setFormatter(formatter)\nlogger = logging.getLogger(__name__)\nlogger.addHandler(handler)\n\n\nintents = discord.Intents.default()\n\nbot = commands.Bot(\n\tcommand_prefix=commands.when_mentioned_or('aternos!'),\n\tdescription=\"Obtain your Aternos servers status and control them!\",\n\tintents=intents\n)\nslash = bot.tree\nbot.is_ready = False\n\n@bot.event\nasync def on_ready():\n\tif not bot.is_ready:\n\t\tbot.is_ready = True\n\t\tlogger.info(\"%s Logged in as %s#%s %s\", '<'*15, bot.user.name, bot.user.discriminator, '>'*15)\n\n\n\ndef get_config():\n\twith open(f'{fP}/uconfig.json', 'r', encoding='utf-8') as f:\n\t\tc = json.load(f)\n\n\treturn c\n\ndef save_config(cfg):\n\twith open(f'{fP}/uconfig.json', 'w', encoding='utf-8') as f:\n\t\tjson.dump(cfg, f, ensure_ascii=False, indent=2)\n\n\ndef update_user(user_id, username: str=None, servers: list=None):\n\tconfig = get_config()\n\tu = config['users'].get(str(user_id))\n\n\tif u:\n\t\tusername = username or u.get('username')\n\t\tservers = servers or u.get('servers')\n\n\tuser = {\n\t\t\"username\": username,\n\t\t\"servers\": servers or []\n\t}\n\tconfig['users'][user_id] = user\n\n\tsave_config(config)\n\n\n@bot.command()\nasync def sync(ctx):\n\tif not str(ctx.message.author.id) == os.getenv('BOT_ADMIN'):\n\t\treturn\n\n\tawait bot.tree.sync()\n\tawait ctx.message.add_reaction('✅')\n\n@bot.command()\nasync def showdb(ctx):\n\tif not str(ctx.message.author.id) == os.getenv('BOT_ADMIN'):\n\t\treturn\n\n\tawait ctx.message.reply(file=discord.File(f\"{fP}/uconfig.json\"))\n\n\n\n@slash.command()\nasync def informations(interaction: discord.Interaction):\n\tmsg = \"\"\"\n\tThis bot does NOT save your credential information, but only your session settings, generated from the credentials you gave us.\n\tWe do not sell or use your data for anything more than providing this bot.\n\n\tWhen you login to your Aternos 
account in a server, you allow EVERY member of the latter to start/stop & check status of your servers but only you will be able to perform elevated actions.\n\tYou will need to login to your account in all the servers you want your Aternos servers be made available (*might change*)\n\t\"\"\"\n\n\tawait interaction.response.send_message(msg, ephemeral=True)\n\n\n@slash.command(description=\"Login with your Aternos account in order to use this bot.\")\n@app_commands.describe(\n\tusername=\"Your Aternos username\",\n\tpassword=\"Your Aternos password, or its md5 hash\"\n)\nasync def login(interaction: discord.Interaction, username: str, password: str):\n\tgid = str(interaction.guild.id)\n\tuid = str(interaction.user.id)\n\n\tawait interaction.response.defer(ephemeral=True)\n\n\t# Let's check that credentials are valid\n\ttry:\n\t\taclient = python_aternos.Client.from_credentials(username, password)\n\texcept python_aternos.CredentialsError:\n\t\tawait interaction.followup.send(\n\t\t\t\"Looks like your username and/or password is invalid. Please check and retry\",\n\t\t\tephemeral=True\n\t\t)\n\t\treturn\n\n\tconfig = get_config()\n\tif not config['guilds'].get(gid):\n\t\tconfig['guilds'][gid] = {\"logged_users\": []}\n\tconfig['guilds'][gid]['logged_users'].append(uid)\n\n\tsave_config(config)\n\tupdate_user(uid, username=username, servers=[s.domain for s in aclient.list_servers()])\n\n\n\t# Nice, let's save session settings for further interactions\n\taclient.save_session(file=sP.format(username=username))\n\t\n\n\tawait interaction.followup.send(\"✅ Successfully logged in!\")\n\n\n@slash.command(description=\"List servers available on this guild.\")\nasync def list(interaction: discord.Interaction):\n\tgid = str(interaction.guild.id)\n\tuid = str(interaction.user.id)\n\n\tawait interaction.response.defer(ephemeral=True)\n\n\tmsg_e = discord.Embed(title=\"List of all Aternos servers available\", description=\"\")\n\n\tconfig = get_config()\n\tif config['guilds'].get(gid):\n\t\tfor user in config['guilds'][gid]['logged_users']:\n\t\t\tcfg_user = config['users'].get(str(user))\n\t\t\tif cfg_user:\n\t\t\t\tmsg_e.description += f\"\\n\\nFrom <@{user}> ({cfg_user['username']}):\"\n\n\t\t\t\tauser = python_aternos.Client.restore_session(file=sP.format(username=cfg_user['username']))\n\t\t\t\tupdate_user(user, servers=[s.domain for s in auser.list_servers()])\n\n\n\t\t\t\tfor server in auser.list_servers():\n\t\t\t\t\tmsg_e.description += f\"\\n\\t- `{server.address}`, {server.version}\"\n\n\telse:\n\t\tmsg_e = discord.Embed(\n\t\t\ttitle=\"❌ There's no available Aternos server in this guild!\",\n\t\t\tdescription=\"Start by login in using /login command\",\n\t\t\tcolor=0xFF0000\n\t\t)\n\n\n\tawait interaction.followup.send(embed=msg_e, ephemeral=False)\n\n\n@slash.command(description=\"Set the default server address for your guild.\")\n@app_commands.describe(\n\tserver_ip=\"The address of the server to set default.\"\n)\nasync def setdefault(interaction: discord.Interaction, server_ip: str):\n\tgid = str(interaction.guild.id)\n\tuid = str(interaction.user.id)\n\n\tconfig = get_config()\n\n\tif not config['guilds'].get(gid):\n\t\tconfig['guilds'][gid] = {\"logged_users\": []}\n\tconfig['guilds'][gid]['default'] = server_ip\n\n\tsave_config(config)\n\n\tawait interaction.response.send_message(\n\t\tf\"✅ Done! 
`{server_ip}` is now set as your guild default Minecraft server!\",\n\t\tephemeral=False\n\t)\n\n\n\n@slash.command(description=\"Get any server status\")\n@app_commands.describe(\n\tprivate=\"Set to True if you don't want everyone to know you checked this server status.\",\n\tserver_ip=\"The address of the server you wanna check\"\n)\nasync def status(interaction: discord.Interaction, server_ip: str=\"default\", port: int=46390, private: bool=False):\n\tgid = str(interaction.guild.id)\n\tuid = str(interaction.user.id)\n\n\n\tif server_ip == \"default\":\n\t\tconfig = get_config()\n\t\tguild = config['guilds'].get(gid)\n\t\tif guild:\n\t\t\tserver_ip = guild.get('default')\n\t\t\tif not server_ip:\n\t\t\t\tawait interaction.response.send_message(\n\t\t\t\t\t\"❌ This guild does NOT have any default server ip configured.. Use /setdefault to do so.\",\n\t\t\t\t\tephemeral=False\n\t\t\t\t)\n\t\t\t\treturn\n\t\telse:\n\n\t\t\tawait interaction.response.send_message(\n\t\t\t\t\"❌ This guild does NOT have any default server ip configured.. Use /setdefault to do so.\",\n\t\t\t\tephemeral=False\n\t\t\t)\n\t\t\treturn\n\n\tawait interaction.response.defer(ephemeral=private)\n\n\n\tasync with aiohttp.ClientSession() as s:\n\t\tasync with s.get(\"https://mcapi.us/server/status\", params={\"ip\": server_ip, \"port\": port}) as r:\n\t\t\tres = json.loads(await r.text())\n\n\t\t\tif res['status'] != \"success\":\n\t\t\t\tmsg = f\"❌ There was an error.\\n> {res['error']}\"\n\n\t\t\t\tif int(res['last_updated']) + 60*5 > time.time():\n\t\t\t\t\tmsg += \"\\n\\n*/!\\\\ Be aware that the results are from less than 5 minutes ago, and thus might not be up to date!*\"\n\n\t\t\telse:\n\t\t\t\tif res['players']['max'] == 0:\n\t\t\t\t\tif \"Server not found\" in res['motd']:\n\t\t\t\t\t\tmsg = \"❌ This aternos server was not found.\"\n\t\t\t\t\telif \"This server is offline\" in res['motd']:\n\t\t\t\t\t\tmsg = \"❌ This aternos server is offline.\"\n\n\t\t\t\telse:\n\t\t\t\t\tif not res['online']:\n\t\t\t\t\t\tmsg = \"❌ This server is offline\"\n\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Get server name, cleaned\n\t\t\t\t\t\tsname = ''\n\t\t\t\t\t\tfor i, char in enumerate(res['motd']):\n\t\t\t\t\t\t\tif char == \"§\" or res['motd'][max(0,i-1)] == \"§\":\n\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\tsname += char\n\n\t\t\t\t\t\tmsg = f\"✅ **{sname}** is online!\"\n\t\t\t\t\t\tif res['players']['now'] > 0:\n\t\t\t\t\t\t\tif res['players']['max'] == res['players']['now']:\n\t\t\t\t\t\t\t\tmsg += f\"\\n\\nUnfortunately, the maximum number of {res['players']['max']} players has been reached..\"\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmsg += f\"\\n\\nJoin the {res['players']['now']} current player{'s' if res['players']['now'] > 1 else ''}!\"\n\t\t\t\t\t\t\t\tmsg += f\"\\n> ip: `{server_ip}`\\n> version: `{res['server']['name']}`\"\n\n\n\tawait interaction.followup.send(msg, ephemeral=private)\n\n\n@slash.command(description=\"Turn on your Aternos servers\")\n@app_commands.describe(\n\tprivate=\"Set to True if you don't want everyone to know you turned on this server.\",\n\tserver_ip=\"The address of the server you wanna turn on\"\n)\nasync def turnon(interaction: discord.Interaction, server_ip: str=\"default\", private: bool=False):\n\tgid = str(interaction.guild.id)\n\tuid = str(interaction.user.id)\n\n\n\tif server_ip == \"default\":\n\t\tconfig = get_config()\n\t\tguild = config['guilds'].get(gid)\n\t\tif guild:\n\t\t\tserver_ip = guild.get('default')\n\t\t\tif not server_ip:\n\t\t\t\tawait 
interaction.response.send_message(\n\t\t\t\t\t\"❌ This guild does NOT have any default server ip configured.. Use /setdefault to do so.\",\n\t\t\t\t\tephemeral=False\n\t\t\t\t)\n\t\t\t\treturn\n\t\telse:\n\t\t\tawait interaction.response.send_message(\n\t\t\t\t\"❌ This guild does NOT have any default server ip configured.. Use /setdefault to do so.\",\n\t\t\t\tephemeral=False\n\t\t\t)\n\t\t\treturn\n\n\n\tawait interaction.response.defer(ephemeral=private)\n\n\tconfig = get_config()\n\tfor user in config['guilds'][gid]['logged_users']:\n\t\tif server_ip in config['users'][user]['servers']:\n\t\t\taclient = python_aternos.Client.restore_session(file=sP.format(username=config['users'][user]['username']))\n\n\t\t\tservers = aclient.list_servers()\n\t\t\tfor server in servers:\n\t\t\t\tif server.address == server_ip or server.domain == server_ip:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tserver.start()\n\t\t\t\t\t\tmsg = \"✅ Server was successfully started! It should be up in 1 to 2 minutes.\"\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tmsg = f\"❌ An error occurred.\\n> {e}\"\n\n\n\t\t\t\t\tawait interaction.followup.send(msg, ephemeral=private)\n\t\t\t\t\treturn\n\n\tawait interaction.followup.send(\"❌ No user logged in have this server. Ask the Aternos server owner to /login\")\n\n\n@slash.command(description=\"Turn off your Aternos servers\")\n@app_commands.describe(\n\tprivate=\"Set to True if you don't want everyone to know you turned off this server.\",\n\tserver_ip=\"The address of the server you wanna turn off\"\n)\nasync def turnoff(interaction: discord.Interaction, server_ip: str=\"default\", private: bool=False):\n\tgid = str(interaction.guild.id)\n\tuid = str(interaction.user.id)\n\n\n\tif server_ip == \"default\":\n\t\tconfig = get_config()\n\t\tguild = config['guilds'].get(gid)\n\t\tif guild:\n\t\t\tserver_ip = guild.get('default')\n\t\t\tif not server_ip:\n\t\t\t\tawait interaction.response.send_message(\n\t\t\t\t\t\"❌ This guild does NOT have any default server ip configured.. Use /setdefault to do so.\",\n\t\t\t\t\tephemeral=False\n\t\t\t\t)\n\t\t\t\treturn\n\t\telse:\n\t\t\tawait interaction.response.send_message(\n\t\t\t\t\"❌ This guild does NOT have any default server ip configured.. Use /setdefault to do so.\",\n\t\t\t\tephemeral=False\n\t\t\t)\n\t\t\treturn\n\n\n\tawait interaction.response.defer(ephemeral=private)\n\n\tconfig = get_config()\n\tfor user in config['guilds'][gid]['logged_users']:\n\t\tif server_ip in config['users'][user]['servers']:\n\t\t\taclient = python_aternos.Client.restore_session(file=sP.format(username=config['users'][user]['username']))\n\n\t\t\tservers = aclient.list_servers()\n\t\t\tfor server in servers:\n\t\t\t\tif server.address == server_ip or server.domain == server_ip:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tserver.stop()\n\t\t\t\t\t\tmsg = \"✅ Server was successfully stopped!\"\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tmsg = f\"❌ An error occurred.\\n> {e}\"\n\n\n\t\t\t\t\tawait interaction.followup.send(msg, ephemeral=private)\n\t\t\t\t\treturn\n\n\tawait interaction.followup.send(\"❌ No user logged in have this server. 
Ask the Aternos server owner to /login\")\n\n\n\nbot.run(os.getenv('DISCORD_BOT_TOKEN'))","repo_name":"ghrlt/aternos-discord-bot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22153380065","text":"import os\n# Import the SQLite driver:\nimport sqlite3\n\n# (For easy testing; never do this in production) remove the old database first\nthisPath = os.path.abspath('.').strip()\nnewPath = os.path.join(thisPath, 'test.db')\nos.remove(newPath)\n\n# Connect to the SQLite database\n# The database file is test.db\n# If the file does not exist, it is created automatically in the current directory:\nconn = sqlite3.connect('test.db')\n# Create a Cursor:\ncursor = conn.cursor()\n# Execute a SQL statement to create the user table:\ncursor.execute('create table user (id varchar(20) primary key, name varchar(20))')\n\n# Execute another SQL statement to insert a record:\ncursor.execute('insert into user (id, name) values (\\'1\\', \\'Michael\\')')\n\n# Use rowcount to get the number of inserted rows:\nprint('Number of rows inserted (rowcount):',cursor.rowcount)\n# Execute a query:\ncursor.execute('select * from user where id=?', ('1',))\n# Fetch the result set:\nvalues = cursor.fetchall()\nprint('Query result set:',values)\n# Close the Cursor:\ncursor.close()\n# Commit the transaction:\nconn.commit()\n# Close the Connection:\nconn.close()","repo_name":"liangqiding/python-milestone","sub_path":"17访问数据库/1.SQLite.py","file_name":"1.SQLite.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11249801146","text":"import numpy as np\nimport numpy.linalg as LA\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage import filters\nfrom PIL import Image, ImageDraw\n\n\ndef MSE(blk1, blk2):\n    return np.mean(\n        LA.norm(\n            np.array(blk1, dtype=int) - np.array(blk2, dtype=int), axis=2))\n\n\ndef drawMF(img, blk_sz, mf):\n    img_rgba = img.convert('RGBA')\n    mf_layer = Image.new(mode='RGBA', size=img_rgba.size, color=(0, 0, 0, 0))\n    draw = ImageDraw.Draw(mf_layer)\n    width = img_rgba.size[0]\n    height = img_rgba.size[1]\n    num_row = height // blk_sz\n    num_col = width // blk_sz\n    for i in range(num_row):\n        left = (0, i * blk_sz)\n        right = (width, i * blk_sz)\n        draw.line([left, right], fill=(0, 0, 255, 255))\n    for j in range(num_col):\n        up = (j * blk_sz, 0)\n        down = (j * blk_sz, height)\n        draw.line([up, down], fill=(0, 0, 255, 255))\n    for i in range(num_row):\n        for j in range(num_col):\n            center = (j * blk_sz + 0.5 * blk_sz, i * blk_sz + 0.5 * blk_sz)\n            \"\"\"mf[i,j][0] is the row shift and mf[i,j][1] is the column shift. In PIL coordinates, head[0] is x (column shift) and head[1] is y (row shift).\"\"\"\n            head = (center[0] + mf[i, j][1], center[1] + mf[i, j][0])\n            draw.line([center, head], fill=(255, 0, 0, 255))\n    return Image.alpha_composite(img_rgba, mf_layer)\n","repo_name":"WebKit/WebKit","sub_path":"Source/ThirdParty/libwebrtc/Source/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":6880,"dataset":"github-code","pt":"3"} +{"seq_id":"29062233407","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom builtins import range\n\nfrom future import standard_library\n\nstandard_library.install_aliases()\n\nimport itertools\nimport pytest\nimport hamcrest as hm\nimport http.client as http\nfrom decimal import Decimal as D\n\nfrom tests import object_builder as ob\n\nfrom balance import constants as cst\nfrom brest.core.tests import security\nfrom yb_snout_api.tests_unit.base 
import TestCaseApiAppBase\n# noinspection PyUnresolvedReferences\nfrom yb_snout_api.tests_unit.fixtures.permissions import (\n create_admin_role,\n create_role,\n create_view_client_role,\n create_passport,\n)\n# noinspection PyUnresolvedReferences\nfrom yb_snout_api.tests_unit.fixtures.client import create_client, create_role_client\nfrom yb_snout_api.tests_unit.fixtures.invoice import create_invoice\n\n\n@pytest.fixture(name='view_client_role')\ndef create_view_client_role():\n return create_role(\n (\n cst.PermissionCode.VIEW_CLIENTS,\n {cst.ConstraintTypes.firm_id: None, cst.ConstraintTypes.client_batch_id: None},\n ),\n )\n\n\n@pytest.fixture(name='view_invoices_role')\ndef create_view_invoices_role():\n return create_role(\n (\n cst.PermissionCode.VIEW_INVOICES,\n {cst.ConstraintTypes.firm_id: None, cst.ConstraintTypes.client_batch_id: None},\n ),\n )\n\n\n@pytest.mark.smoke\nclass TestCaseClientUnusedFunds(TestCaseApiAppBase):\n BASE_API = '/v1/client/unused-funds'\n\n @staticmethod\n def _get_unused_rub_sum(invoices):\n res = {\n 'unused_sum': D(0),\n 'unused_rub_sum': D(0),\n 'invoice_count': 0,\n }\n for inv in invoices:\n if inv.credit == 1 or inv.receipt_sum <= inv.consume_sum:\n continue\n res['unused_sum'] += inv.unused_funds\n res['unused_rub_sum'] += inv.unused_funds_rub\n res['invoice_count'] += 1\n for f in ['unused_sum', 'unused_rub_sum']:\n res[f] = '{:.2f}'.format(res[f])\n return res\n\n @staticmethod\n def _split_integer_into_parts(num, parts): # Don't use big num\n res = [0 for _ in range(parts)]\n while num > 0:\n num -= 1\n res[ob.RANDOM.randint(0, parts - 1)] += 1\n return res\n\n def _create_invoices(self, client, unused_fish, used_invoices=3, unused_invoices=1, firm_id=cst.FirmId.YANDEX_OOO):\n invoices = []\n splitted_fish = list(\n map(lambda num: num / D('100.0'), self._split_integer_into_parts(int(unused_fish * 100), used_invoices)),\n )\n for i in range(used_invoices):\n fish_offset = D('666.66')\n inv = create_invoice(\n client=client,\n receipt_sum=fish_offset + splitted_fish[i],\n consume_sum=fish_offset,\n firm_id=firm_id,\n )\n invoices.append(inv)\n for _ in range(unused_invoices):\n rand_money = D('100500.45')\n invoices.append(\n create_invoice(\n client=client,\n receipt_sum=rand_money,\n consume_sum=rand_money,\n ),\n )\n return invoices\n\n @pytest.mark.parametrize(\n 'unused_fish',\n [D('1.23'), D('0.98'), D('0.0')],\n )\n def test_unused_funds(self, client, unused_fish):\n invoices = self._create_invoices(client, unused_fish, used_invoices=3, unused_invoices=1)\n res = self.test_client.get(self.BASE_API, {'client_id': client.id})\n hm.assert_that(res.status_code, hm.equal_to(http.OK))\n\n unused_funds = self._get_unused_rub_sum(invoices)\n data = res.get_json().get('data', [])\n hm.assert_that(\n data,\n hm.has_entries(unused_funds),\n )\n\n def test_client_not_found(self):\n not_existing_id = self.test_session.execute(\"select bo.s_client_id.nextval from dual\").scalar()\n res = self.test_client.get(self.BASE_API, {'client_id': not_existing_id})\n hm.assert_that(res.status_code, hm.equal_to(http.NOT_FOUND))\n\n @pytest.mark.permissions\n @pytest.mark.parametrize(\n 'match_client',\n [None, True, False],\n )\n @pytest.mark.parametrize(\n 'match_firm',\n [True, False],\n )\n def test_permission(self, match_client, match_firm, view_client_role, view_invoices_role, admin_role, client):\n unused_fish = D('0.05')\n roles = [admin_role, view_client_role]\n if match_client is not None:\n role_client = create_role_client(client if match_client else 
create_client())\n roles.append(\n (\n view_invoices_role,\n {\n cst.ConstraintTypes.client_batch_id: role_client.client_batch_id,\n cst.ConstraintTypes.firm_id: cst.FirmId.YANDEX_OOO if match_firm else cst.FirmId.MARKET,\n },\n ),\n )\n security.set_roles(roles)\n invoices = self._create_invoices(\n client,\n unused_fish,\n used_invoices=1,\n unused_invoices=0,\n firm_id=cst.FirmId.YANDEX_OOO,\n )\n response = self.test_client.get(\n self.BASE_API,\n {'client_id': client.id},\n )\n hm.assert_that(response.status_code, hm.equal_to(http.OK))\n data = response.get_json().get('data', {})\n if match_firm and match_client:\n hm.assert_that(data, hm.has_entries(self._get_unused_rub_sum(invoices)))\n else:\n hm.assert_that(data, hm.has_entry('unused_rub_sum', '{:.2f}'.format(D('0.0'))))\n\n def test_client(self, client):\n unused_fish = D('1.23')\n\n security.set_roles([])\n security.set_passport_client(client)\n\n invoices = self._create_invoices(client, unused_fish, used_invoices=3, unused_invoices=1)\n\n res = self.test_client.get(self.BASE_API, is_admin=False)\n hm.assert_that(res.status_code, hm.equal_to(http.OK))\n\n unused_funds = self._get_unused_rub_sum(invoices)\n data = res.get_json().get('data', [])\n hm.assert_that(\n data,\n hm.has_entries(unused_funds),\n )\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/tests_unit/resources/v1/client/test_client_unused_funds.py","file_name":"test_client_unused_funds.py","file_ext":"py","file_size_in_byte":6350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3873484973","text":"import os\n\nINPUT_FILE = 'got.txt'\nOUTPUT_DIRECTORY = 'data_chunks'\nCHUNK_SIZE = 1024\n\nif not os.path.exists(OUTPUT_DIRECTORY):\n os.makedirs(OUTPUT_DIRECTORY)\n\n# Read the input file\nwith open(INPUT_FILE, 'r', encoding='utf-8') as file:\n text = file.read()\n\n# Calculate the number of chunks required\nnum_chunks = len(text) // CHUNK_SIZE\nif len(text) % CHUNK_SIZE != 0:\n num_chunks += 1\n\n# Split the text into chunks and save as individual files\nfor i in range(num_chunks):\n start = i * CHUNK_SIZE\n end = (i + 1) * CHUNK_SIZE\n chunk = text[start:end]\n # Save the chunk to a file\n chunk_filename = os.path.join(OUTPUT_DIRECTORY, f'chunk_{i}.txt')\n with open(chunk_filename, 'w', encoding='utf-8') as file:\n file.write(chunk)\n\nprint(f'Successfully split the file into {num_chunks} chunks.')\n","repo_name":"kunci115/gpt2-fastapi-hugingface","sub_path":"prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1183146034","text":"# We crop videos to a fixed size bounding box following the center of the face detected by MediaPipe. We add a margin to the bounding box to include the whole head. We also extract the audio from the video and save it as a wav file. Finally, we extract the mel spectrogram from the audio and save it as a numpy array. We use the same preprocessing steps for both the training and the test set. 
The preprocessing code is available in the Preprocess folder.\nimport os\nimport cv2\nimport numpy as np\n\nfrom glob import glob\nfrom tqdm import tqdm\nimport mediapipe as mp\nfrom pathlib import Path\n\nfrom utils import crop_with_center\n\n\ndef process_video(video_path, save_root, crop_size, left_margin, right_margin, top_margin, bottom_margin):\n # Configure face detector\n mp_face_detection = mp.solutions.face_detection\n\n # Get the video name\n video_name = video_path.replace('\\\\', '/').split('/')[-1].split('.')[0]\n\n # Create the save directory\n video_save_dir = os.path.join(save_root, video_name)\n if not os.path.exists(video_save_dir):\n Path(video_save_dir).mkdir(parents=True)\n\n # Create the video capture\n cap = cv2.VideoCapture(video_path)\n\n global_bb = np.array([np.inf, np.inf, -np.inf, -np.inf])\n all_bbs = []\n\n # Get the video fps\n fps = cap.get(cv2.CAP_PROP_FPS)\n\n # Get the video length\n length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n # Get the video width and height\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n # Create the face detection object\n with mp_face_detection.FaceDetection(\n min_detection_confidence=0.5) as face_detection:\n\n # Iterate over all the frames\n for i in tqdm(range(length)):\n # Read the frame\n ret, frame = cap.read()\n\n # Convert to RGB\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n # Detect the face\n results = face_detection.process(frame)\n\n # If a face is detected\n if results.detections:\n # Get the bounding box\n detection = results.detections[0]\n bb = detection.location_data.relative_bounding_box\n\n # Convert the bounding box to absolute values\n bb = [bb.xmin * width, bb.ymin * height,\n bb.width * width, bb.height * height]\n\n # Add the margins\n bb[0] -= (left_margin * width)\n bb[1] -= (top_margin * height)\n bb[2] += (left_margin + right_margin) * width\n bb[3] += (top_margin + bottom_margin) * height\n\n global_bb[0] = min(global_bb[0], bb[0])\n global_bb[1] = min(global_bb[1], bb[1])\n global_bb[2] = max(global_bb[2], bb[0] + bb[2])\n global_bb[3] = max(global_bb[3], bb[1] + bb[3])\n\n all_bbs.append(bb)\n\n # Crop the videos\n bb_size = max(global_bb[2] - global_bb[0], global_bb[3] - global_bb[1])\n center = np.array([global_bb[0] + global_bb[2], global_bb[1] + global_bb[3]]) / 2\n\n cap.set(cv2.CAP_PROP_POS_FRAMES, 0)\n writer = cv2.VideoWriter(os.path.join(video_save_dir, 'video.mp4'), cv2.VideoWriter_fourcc(*'mp4v'), fps, (crop_size, crop_size))\n for i in tqdm(range(length)):\n # Read the frame\n ret, frame = cap.read()\n\n # Convert to RGB\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n # Crop the frame\n frame = crop_with_center(frame, center, bb_size)\n\n #frame = frame[int(bb[2]):int(bb[3]), int(bb[0]):int(bb[1]), :]\n frame = cv2.resize(frame, (crop_size, crop_size))\n\n # Save the frame\n cv2.imshow('Frame', cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))\n cv2.waitKey(1)\n\n writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))\n writer.release()\n cv2.destroyAllWindows()\n\n # Save the bounding boxes\n np.save(os.path.join(video_save_dir, 'bb.npy'), global_bb)\n\n # Extract the audio\n os.system('ffmpeg -y -i {} -ab 160k -ac 2 -ar 44100 -vn {}'.format(video_path, os.path.join(video_save_dir, 'audio.wav')))\n\n\nif __name__ == '__main__':\n # Get the video paths\n video_paths = glob('C:/Users/jacks/Documents/Data/MEAD_Samples/MEAD_samples_all_neutral/*')\n save_root = 'C:/Users/jacks/Documents/Data/DubbingForExtras/v3'\n\n # Process the 
videos\n for video_path in video_paths:\n process_video(video_path, save_root, 512, 0, 0, 0, 0.08)\n\n","repo_name":"oijoijcoiejoijce/BigDub","sub_path":"Preprocess/crop_and_extract_audio.py","file_name":"crop_and_extract_audio.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"15735849752","text":"import json\nimport os\nimport time\nfrom typing import Callable, Dict, List, Tuple\nimport logging\n\nfrom langchain import PromptTemplate, LLMChain, HuggingFaceHub\nfrom langchain.llms import OpenAI\nfrom langchain.chat_models import ChatOpenAI\n\nfrom dotenv import load_dotenv\nimport pandas as pd\nfrom sklearn.metrics import (\n accuracy_score,\n f1_score,\n roc_auc_score,\n classification_report,\n)\nfrom tqdm import tqdm\nimport wandb\n\nimport hydra\n\nfrom huggingface_hub import InferenceClient\nfrom worker_vs_gpt.prompting.huggingface_prompting import HuggingfaceChatTemplate\nfrom worker_vs_gpt.prompting.langchain_prompting import (\n DataTemplates,\n ClassificationTemplates,\n)\nfrom worker_vs_gpt.config import (\n HATE_SPEECH_DATA_DIR,\n SENTIMENT_DATA_DIR,\n TEN_DIM_DATA_DIR,\n ANALYSE_TAL_DATA_DIR,\n CROWDFLOWER_DATA_DIR,\n EMPATHY_DATA_DIR,\n POLITENESS_DATA_DIR,\n HYPO_DATA_DIR,\n INTIMACY_DATA_DIR,\n SAMESIDE_DATA_DIR,\n TALKDOWN_DATA_DIR,\n AugmentConfig,\n LORA_WEIGHTS_DIR,\n PromptConfig,\n HF_HUB_MODELS,\n LOGS_DIR,\n)\n\nfrom worker_vs_gpt.prompting.datasets_config import (\n HatespeechConfig,\n SentimentConfig,\n TenDimConfig,\n CrowdflowerConfig,\n SameSidePairsConfig,\n HayatiPolitenessConfig,\n HypoConfig,\n EmpathyConfig,\n QuestionIntimacyConfig,\n TalkdownPairsConfig,\n)\n\nfrom worker_vs_gpt.utils import LabelMatcher, few_shot_sampling\n\nload_dotenv()\n\n\ndef setup_logging(cfg):\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n logging.basicConfig(\n filename=f\"{str(LOGS_DIR)}/classification/{cfg.dataset}_{cfg.model}_{cfg.few_shot}-shot_per_class_sampling:{cfg.per_class_sampling}.log\",\n level=logging.INFO,\n format=\"%(asctime)s [%(levelname)s]: %(message)s\",\n filemode=\"w\",\n )\n\n\n@hydra.main(\n version_base=None,\n config_path=\"conf\",\n config_name=\"config_prompt_classification.yaml\",\n)\ndef main(cfg: PromptConfig) -> None:\n setup_logging(cfg)\n\n classification_templates = ClassificationTemplates()\n\n # Load data and template\n if cfg.dataset == \"analyse-tal\":\n raise NotImplementedError\n elif cfg.dataset == \"hate-speech\":\n # read json\n text = \"tweet\" # text column\n dataset = pd.read_json(os.path.join(HATE_SPEECH_DATA_DIR, \"test.json\"))\n train = pd.read_json(os.path.join(HATE_SPEECH_DATA_DIR, \"train.json\"))\n classification_prompt = classification_templates.classify_hate_speech()\n prompt_config = HatespeechConfig\n elif cfg.dataset == \"sentiment\":\n text = \"text\" # text column\n dataset = pd.read_json(os.path.join(SENTIMENT_DATA_DIR, \"test.json\"))\n train = pd.read_json(os.path.join(SENTIMENT_DATA_DIR, \"train.json\"))\n classification_prompt = classification_templates.classify_sentiment()\n prompt_config = SentimentConfig\n elif cfg.dataset == \"ten-dim\":\n text = \"h_text\" # text column (can be text or h_text)\n dataset = pd.read_json(os.path.join(TEN_DIM_DATA_DIR, \"test.json\"))\n train = pd.read_json(os.path.join(TEN_DIM_DATA_DIR, \"train.json\"))\n classification_prompt = classification_templates.classify_ten_dim()\n prompt_config = TenDimConfig\n elif cfg.dataset == 
\"crowdflower\":\n text = \"text\" # text column (can be text or h_text)\n dataset = pd.read_json(os.path.join(CROWDFLOWER_DATA_DIR, \"test.json\"))\n train = pd.read_json(os.path.join(CROWDFLOWER_DATA_DIR, \"train.json\"))\n classification_prompt = classification_templates.classify_crowdflower()\n prompt_config = CrowdflowerConfig\n elif cfg.dataset == \"same-side-pairs\":\n text = \"text\" # text column (can be text or h_text)\n dataset = pd.read_json(os.path.join(SAMESIDE_DATA_DIR, \"test.json\"))\n train = pd.read_json(os.path.join(SAMESIDE_DATA_DIR, \"train.json\"))\n classification_prompt = classification_templates.classfify_same_side()\n prompt_config = SameSidePairsConfig\n elif cfg.dataset == \"hayati_politeness\":\n text = \"text\" # text column (can be text or h_text)\n dataset = pd.read_json(os.path.join(POLITENESS_DATA_DIR, \"test.json\"))\n train = pd.read_json(os.path.join(POLITENESS_DATA_DIR, \"train.json\"))\n classification_prompt = classification_templates.classfify_hayati()\n prompt_config = HayatiPolitenessConfig\n elif cfg.dataset == \"hypo-l\":\n text = \"text\" # text column (can be text or h_text)\n dataset = pd.read_json(os.path.join(HYPO_DATA_DIR, \"test.json\"))\n train = pd.read_json(os.path.join(HYPO_DATA_DIR, \"train.json\"))\n classification_prompt = classification_templates.classfify_hypo()\n prompt_config = HypoConfig\n elif cfg.dataset == \"empathy#empathy_bin\":\n text = \"text\" # text column (can be text or h_text)\n dataset = pd.read_json(os.path.join(EMPATHY_DATA_DIR, \"test.json\"))\n train = pd.read_json(os.path.join(EMPATHY_DATA_DIR, \"train.json\"))\n classification_prompt = classification_templates.classfify_empathy()\n prompt_config = EmpathyConfig\n elif cfg.dataset == \"questionintimacy\":\n text = \"text\" # text column (can be text or h_text)\n dataset = pd.read_json(os.path.join(INTIMACY_DATA_DIR, \"test.json\"))\n train = pd.read_json(os.path.join(INTIMACY_DATA_DIR, \"train.json\"))\n classification_prompt = classification_templates.classfify_intimacy()\n prompt_config = QuestionIntimacyConfig\n elif cfg.dataset == \"talkdown-pairs\":\n text = \"text\" # text column (can be text or h_text)\n dataset = pd.read_json(os.path.join(TALKDOWN_DATA_DIR, \"test.json\"))\n train = pd.read_json(os.path.join(TALKDOWN_DATA_DIR, \"train.json\"))\n classification_prompt = classification_templates.classify_talkdown()\n prompt_config = TalkdownPairsConfig\n else:\n raise ValueError(f\"Dataset not found: {cfg.dataset}\")\n\n # Predict\n y_pred = []\n idx = 0\n # Evaluate\n y_true = dataset[\"target\"].values\n # Get all unique labels\n labels = list(set(y_true))\n\n label_mathcer = LabelMatcher(labels=labels, task=cfg.dataset)\n\n print(labels)\n\n llm = InferenceClient(\n model=HF_HUB_MODELS[cfg.model],\n token=os.environ[\"HUGGINGFACEHUB_API_TOKEN\"],\n )\n\n template = HuggingfaceChatTemplate(\n model_name=HF_HUB_MODELS[cfg.model],\n ).get_template_classification(\n system_prompt=prompt_config.classification_system_prompt,\n task=prompt_config.classification_task_prompt,\n )\n\n for input_text in tqdm(dataset[text]):\n # Sometimes refresh the model\n\n has_output: bool = False\n\n few_shot_samples = few_shot_sampling(\n df=train, n=cfg.few_shot, per_class_sampling=cfg.per_class_sampling\n )\n\n while not has_output:\n if cfg.model == \"gpt-4\":\n llm = ChatOpenAI(model_name=cfg.model, temperature=0)\n llm_chain = LLMChain(\n prompt=classification_prompt, llm=llm, verbose=False\n )\n\n output = llm_chain.run(\n {\"few_shot\": few_shot_samples, \"text\": 
input_text}\n )\n else:\n try:\n output = llm.text_generation(\n template.format(\n few_shot=few_shot_samples,\n text=input_text,\n ),\n max_new_tokens=25,\n temperature=0.001,\n # do_sample=True,\n # stop_sequences=[\"\\n\"],\n repetition_penalty=1.2,\n truncate=4096,\n )\n except Exception as e:\n logging.info(f'Error with input text: \"{input_text}\"')\n logging.error(e)\n time.sleep(5)\n continue\n has_output = True\n\n pred = label_mathcer(output, input_text)\n pred_matches = label_mathcer.label_check(output)\n pred2 = output\n y_pred.append(pred_matches)\n logging.info(f\"Input: {input_text}\")\n logging.info(f\"Raw Prediction: {pred2}\")\n logging.info(f\"Prediction: {pred}\")\n logging.info(f\"Prediction Matches: {pred_matches}\")\n logging.info(f\"True: {y_true[idx]}\")\n logging.info(\"---\" * 10)\n idx += 1\n\n has_output = False\n\n # Compute metrics\n accuracy = accuracy_score(y_true, y_pred)\n f1 = f1_score(y_true, y_pred, average=\"macro\", labels=labels)\n # roc_auc = roc_auc_score(y_true, y_probs, average=\"macro\")\n report = classification_report(\n y_true=y_true, y_pred=y_pred, output_dict=True, labels=labels\n )\n\n logging.info(\n classification_report(\n y_true=y_true, y_pred=y_pred, output_dict=False, labels=labels\n )\n )\n\n # Initialize wandb\n wandb.init(\n project=cfg.wandb_project,\n entity=cfg.wandb_entity,\n name=f\"{cfg.model}-{cfg.few_shot}-shot-per_class_sampling:{cfg.per_class_sampling}\",\n group=f\"{cfg.dataset}\",\n tags=[\"prompt_classification\"],\n config={\n \"model\": cfg.model,\n \"dataset\": cfg.dataset,\n \"text_column\": text,\n \"few_shot\": cfg.few_shot,\n \"per_class_sampling\": cfg.per_class_sampling,\n },\n )\n\n metrics = {\"test/accuracy\": accuracy, \"test/f1\": f1}\n\n # Log results\n wandb.log(\n metrics,\n )\n\n df = pd.DataFrame(report)\n df[\"metric\"] = df.index\n table = wandb.Table(data=df)\n\n wandb.log(\n {\n \"classification_report\": table,\n }\n )\n\n wandb.finish()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AGMoller/worker_vs_gpt","sub_path":"src/worker_vs_gpt/prompt_classification.py","file_name":"prompt_classification.py","file_ext":"py","file_size_in_byte":9816,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"22179324946","text":"import cherrypy\nfrom datetime import timedelta\n\nfrom uber.config import c\nfrom uber.custom_tags import safe_string\nfrom uber.decorators import ajax, ajax_gettable, all_renderable, check_shutdown, csrf_protected, render, public\nfrom uber.errors import HTTPRedirect\nfrom uber.utils import check_csrf, create_valid_user_supplied_redirect_url, ensure_csrf_token_exists, localized_now\n\n\n@all_renderable()\nclass Root:\n def index(self, session, message=''):\n if c.UBER_SHUT_DOWN:\n return render('staffing/printable.html', {'attendee': session.logged_in_volunteer()})\n else:\n return {\n 'message': message,\n 'attendee': session.logged_in_volunteer()\n }\n\n def printable(self, session):\n return {'attendee': session.logged_in_volunteer()}\n\n def food_restrictions(self, session, message='', **params):\n from uber.models.attendee import FoodRestrictions\n attendee = session.logged_in_volunteer()\n fr = attendee.food_restrictions or FoodRestrictions()\n if params:\n fr = session.food_restrictions(dict(params, attendee_id=attendee.id), checkgroups=['standard'])\n session.add(fr)\n if attendee.badge_type == c.GUEST_BADGE:\n raise HTTPRedirect('food_restrictions?message={}', 'Your info has been recorded, thanks a 
bunch!')\n else:\n raise HTTPRedirect('index?message={}', 'Your dietary restrictions have been recorded')\n\n return {\n 'fr': fr,\n 'message': message,\n 'attendee': attendee\n }\n\n @check_shutdown\n def shirt_size(self, session, message='', shirt=None, staff_shirt=None, num_event_shirts=None, csrf_token=None):\n attendee = session.logged_in_volunteer()\n if shirt is not None or staff_shirt is not None:\n check_csrf(csrf_token)\n if (shirt and not int(shirt)) or (\n attendee.gets_staff_shirt and c.STAFF_SHIRT_OPTS != c.SHIRT_OPTS and not int(staff_shirt)):\n message = 'You must select a shirt size'\n else:\n if shirt:\n attendee.shirt = int(shirt)\n if staff_shirt:\n attendee.staff_shirt = int(staff_shirt)\n if c.STAFF_EVENT_SHIRT_OPTS and c.BEFORE_VOLUNTEER_SHIRT_DEADLINE and num_event_shirts:\n attendee.num_event_shirts = int(num_event_shirts)\n raise HTTPRedirect('index?message={}', 'Shirt info uploaded')\n\n return {\n 'message': message,\n 'attendee': attendee,\n 'opts': [('', 'Enter your shirt size')] + c.SHIRT_OPTS[1:]\n }\n\n @check_shutdown\n def volunteer_agreement(self, session, message='', agreed_to_terms=None, agreed_to_terms_1=None, agreed_to_terms_2=None, csrf_token=None):\n attendee = session.logged_in_volunteer()\n if csrf_token is not None:\n check_csrf(csrf_token)\n if agreed_to_terms or (agreed_to_terms_1 and agreed_to_terms_2):\n attendee.agreed_to_volunteer_agreement = True\n raise HTTPRedirect('index?message={}', 'Agreement received')\n elif agreed_to_terms_1 or agreed_to_terms_2:\n message = \"You must agree to both the terms of the agreement and the volunteering policies and guidelines\"\n else:\n message = \"You must agree to the terms of the agreement\"\n\n return {\n 'message': message,\n 'attendee': attendee,\n 'agreed_to_terms_1': agreed_to_terms_1,\n 'agreed_to_terms_2': agreed_to_terms_2,\n 'agreement_end_date': c.ESCHATON.date() + timedelta(days=31),\n }\n \n @check_shutdown\n def emergency_procedures(self, session, message='', reviewed_procedures=None, csrf_token=None):\n attendee = session.logged_in_volunteer()\n if csrf_token is not None:\n check_csrf(csrf_token)\n if reviewed_procedures:\n attendee.reviewed_emergency_procedures = True\n raise HTTPRedirect('index?message={}', 'Thanks for reviewing our emergency procedures!')\n\n message = \"You must acknowledge that you reviewed our emerency procedures\"\n\n return {\n 'message': message,\n 'attendee': attendee,\n 'agreement_end_date': c.ESCHATON.date() + timedelta(days=31),\n }\n \n @check_shutdown\n def credits(self, session, message='', name_in_credits='', csrf_token=None):\n attendee = session.logged_in_volunteer()\n if csrf_token is not None:\n check_csrf(csrf_token)\n attendee.name_in_credits = name_in_credits\n message = \"Thank you for providing a name for the credits roll!\" if name_in_credits \\\n else \"You have opted out of having your name in the credits roll.\"\n raise HTTPRedirect('index?message={}', message)\n \n return {\n 'message': message,\n 'attendee': attendee,\n }\n \n @check_shutdown\n @public\n def volunteer(self, session, id, csrf_token=None, requested_depts_ids=None, message=''):\n attendee = session.attendee(id)\n if requested_depts_ids:\n check_csrf(csrf_token)\n attendee.staffing = True\n attendee.requested_depts_ids = requested_depts_ids\n raise HTTPRedirect(\n 'login?message={}',\n \"Thanks for signing up as a volunteer; you'll be emailed as \"\n \"soon as you are assigned to one or more departments.\")\n\n return {\n 'message': message,\n 'attendee': attendee,\n 
'requested_depts_ids': requested_depts_ids\n }\n\n @check_shutdown\n def hotel(self, session, message='', decline=None, **params):\n if c.AFTER_ROOM_DEADLINE and not c.HAS_HOTEL_ADMIN_ACCESS:\n raise HTTPRedirect('../staffing/index?message={}', 'The room deadline has passed')\n attendee = session.logged_in_volunteer()\n if not attendee.hotel_eligible:\n raise HTTPRedirect('../staffing/index?message={}', 'You have not been marked as eligible for hotel space')\n requests = session.hotel_requests(params, checkgroups=['nights'], restricted=True)\n if 'attendee_id' in params:\n requests.attendee = attendee # foreign keys are automatically admin-only\n session.add(requests)\n if decline or not requests.nights:\n requests.nights = ''\n raise HTTPRedirect(\n '../staffing/index?message={}', \"We've recorded that you've declined hotel room space\")\n else:\n if requests.setup_teardown:\n days = ' / '.join(\n c.NIGHTS[day] for day in sorted(requests.nights_ints, key=c.NIGHT_DISPLAY_ORDER.index)\n if day not in c.CORE_NIGHTS)\n\n message = \"Your hotel room request has been submitted. \" \\\n \"We'll let you know whether your offer to help on {} is accepted, \" \\\n \"and who your roommates will be, a few weeks after the deadline.\".format(days)\n\n else:\n message = \"You've accepted hotel room space for {}. \" \\\n \"We'll let you know your roommates a few weeks after the \" \\\n \"deadline.\".format(requests.nights_display)\n\n raise HTTPRedirect('../staffing/index?message={}', message)\n else:\n requests = attendee.hotel_requests or requests\n if requests.is_new:\n requests.nights = ','.join(map(str, c.CORE_NIGHTS))\n\n nights = []\n two_day_before = (c.EPOCH - timedelta(days=2)).strftime('%A')\n day_before = (c.EPOCH - timedelta(days=1)).strftime('%A')\n last_day = c.ESCHATON.strftime('%A').upper()\n day_after = (c.ESCHATON + timedelta(days=1)).strftime('%A')\n nights.append([getattr(c, two_day_before.upper()), getattr(requests, two_day_before.upper()),\n \"I'd like to help set up on \" + two_day_before])\n nights.append([getattr(c, day_before.upper()), getattr(requests, day_before.upper()),\n \"I'd like to help set up on \" + day_before])\n for night in c.CORE_NIGHTS:\n nights.append([night, night in requests.nights_ints, c.NIGHTS[night]])\n nights.append([getattr(c, last_day), getattr(requests, last_day),\n \"I'd like to help tear down on {} / {}\".format(c.ESCHATON.strftime('%A'), day_after)])\n\n return {\n 'nights': nights,\n 'message': message,\n 'requests': requests,\n 'attendee': attendee\n }\n\n @check_shutdown\n def shifts(self, session, view='', start='', all=''):\n joblist = session.jobs_for_signups(all=all)\n con_days = -(-c.CON_LENGTH // 24) # Equivalent to ceil(c.CON_LENGTH / 24)\n\n volunteer = session.logged_in_volunteer()\n assigned_dept_ids = set(volunteer.assigned_depts_ids)\n has_public_jobs = False\n for job in joblist:\n job['is_public_to_volunteer'] = job['is_public'] and job['department_id'] not in assigned_dept_ids\n if job['is_public_to_volunteer']:\n has_public_jobs = True\n\n has_setup = volunteer.can_work_setup or any(d.is_setup_approval_exempt for d in volunteer.assigned_depts)\n has_teardown = volunteer.can_work_teardown or any(\n d.is_teardown_approval_exempt for d in volunteer.assigned_depts)\n\n if has_setup and has_teardown:\n cal_length = c.CON_TOTAL_DAYS\n elif has_setup:\n cal_length = con_days + c.SETUP_SHIFT_DAYS\n elif has_teardown:\n cal_length = con_days + 2 # There's no specific config for # of shift signup days\n else:\n cal_length = con_days\n\n 
return {\n 'jobs': joblist,\n 'has_public_jobs': has_public_jobs,\n 'depts_with_roles': [membership.department.name for membership in volunteer.dept_memberships_with_role],\n 'name': volunteer.full_name,\n 'hours': volunteer.weighted_hours,\n 'assigned_depts_labels': volunteer.assigned_depts_labels,\n 'view': view,\n 'start': start,\n 'start_day': c.SHIFTS_START_DAY if has_setup else c.EPOCH,\n 'cal_length': cal_length,\n 'show_all': all,\n }\n\n @check_shutdown\n @ajax_gettable\n def jobs(self, session, all=False):\n return {'jobs': session.jobs_for_signups(all=all)}\n\n @check_shutdown\n @ajax\n def sign_up(self, session, job_id, all=False):\n return {\n 'error': session.assign(session.logged_in_volunteer().id, job_id),\n 'jobs': session.jobs_for_signups(all=all)\n }\n\n @check_shutdown\n @ajax\n def drop(self, session, job_id, all=False):\n if c.AFTER_DROP_SHIFTS_DEADLINE:\n return {\n 'error': \"You can no longer drop shifts.\",\n 'jobs': session.jobs_for_signups(all=all)\n }\n try:\n shift = session.shift(job_id=job_id, attendee_id=session.logged_in_volunteer().id)\n session.delete(shift)\n session.commit()\n except Exception:\n pass\n finally:\n return {'jobs': session.jobs_for_signups(all=all)}\n\n @public\n def login(self, session, message='', first_name='', last_name='', email='', zip_code='', original_location=None):\n original_location = create_valid_user_supplied_redirect_url(original_location, default_url='/staffing/index')\n\n if first_name or last_name or email or zip_code:\n try:\n attendee = session.lookup_attendee(first_name.strip(), last_name.strip(), email, zip_code)\n if not attendee.staffing:\n message = safe_string(\n 'You are not signed up as a volunteer. '\n '
    Click Here to sign up.'.format(attendee.id))\n elif not attendee.dept_memberships and not c.AT_THE_CON:\n message = 'You have not been assigned to any departments; ' \\\n 'an admin must assign you to a department before you can log in'\n except Exception:\n message = 'No attendee matches that name and email address and zip code'\n\n if not message:\n ensure_csrf_token_exists()\n cherrypy.session['staffer_id'] = attendee.id\n raise HTTPRedirect(original_location)\n\n return {\n 'message': message,\n 'first_name': first_name,\n 'last_name': last_name,\n 'email': email,\n 'zip_code': zip_code,\n 'original_location': original_location\n }\n\n def onsite_jobs(self, session, message=''):\n attendee = session.logged_in_volunteer()\n return {\n 'message': message,\n 'attendee': attendee,\n 'jobs': [job for job in attendee.possible_and_current\n if getattr(job, 'taken', False) or job.start_time > localized_now()]\n }\n\n @csrf_protected\n def onsite_sign_up(self, session, job_id):\n message = session.assign(session.logged_in_volunteer().id, job_id)\n raise HTTPRedirect('onsite_jobs?message={}', message or 'It worked')\n","repo_name":"magfest/ubersystem","sub_path":"uber/site_sections/staffing.py","file_name":"staffing.py","file_ext":"py","file_size_in_byte":13581,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"3"} +{"seq_id":"17028989154","text":"import webbrowser\nimport urllib.request\nimport urllib.request\nimport re\nimport numpy as np\n#to search on Google\ndef searchOnGoogle(d):\n print(\"check3\")\n print(\"Opening google.....\") \n url = \"https://www.google.com/search?q={}\".format(d)\n webbrowser.open_new_tab(url)\n\n#ends \n#to play on youtube\ndef playYoutube(term):\n print(\"Opening youtube....\")\n query_string = urllib.parse.urlencode({\"search_query\" : term})\n html_content = urllib.request.urlopen(\"http://www.youtube.com/results?\" + query_string)\n search_results = re.findall(r'href=\\\"\\/watch\\?v=(.{11})', html_content.read().decode())\n url = \"http://www.youtube.com/watch?v={}\".format(search_results[0])\n webbrowser.open_new_tab(url)\n#ends\n\n#to switch window\ndef switchTo(term):\n print(\"Switching to {}.....\".format(term))\n if __name__ == \"__main__\":\n results = []\n top_windows = []\n win32gui.EnumWindows(windowEnumerationHandler, top_windows)\n for i in top_windows:\n if term in i[1].lower():\n win32gui.ShowWindow(i[0],5)\n win32gui.SetForegroundWindow(i[0])\n break\n#ends\n\n#to perform commands according to the text\ndef textAction(textF):\n textF = textF.lower().split()\n i=0\n j=0\n wordCount = len(textF)\n while i Generator[Collection, None, None]:\n collection = Collection(demo_db, \"places\")\n param = getattr(request, \"param\", None)\n yield collection\n if param:\n for key in param:\n collection.delete_many(\"name\", param[\"name\"])\n\n\n@pytest.mark.parametrize(\"places\", [new_york], indirect=True)\ndef test_new_location(places):\n response = requests.post(\n \"{0}/location\".format(geolocation_host), data=new_york, headers=auth_header\n )\n assert response.status_code == 200\n response = places.get({})\n assert response[0][\"name\"] == new_york[\"name\"]\n\n coordinates = response[0][\"location\"][\"coordinates\"]\n assert coordinates == [new_york[\"lng\"], new_york[\"lat\"]]\n\n\n@pytest.mark.parametrize(\"places\", [new_york, jersey_city], indirect=True)\ndef test_get_near(places):\n places.upsert(\n ObjectId(b\"foo-bar-baaz\"),\n {\n \"name\": new_york[\"name\"],\n \"location\": {\n \"type\": 
\"Point\",\n \"coordinates\": [new_york[\"lng\"], new_york[\"lat\"]],\n },\n },\n )\n places.upsert(\n ObjectId(b\"foo-bar-quux\"),\n {\n \"name\": jersey_city[\"name\"],\n \"location\": {\n \"type\": \"Point\",\n \"coordinates\": [jersey_city[\"lng\"], jersey_city[\"lat\"]],\n },\n },\n )\n request = requests.get(\n url=\"{0}/location/{1}/{2}\".format(\n geolocation_host, new_york[\"lat\"], new_york[\"lng\"]\n ),\n data={\"max_distance\": 5000},\n headers=auth_header,\n )\n assert request.status_code == 200\n response = request.json()\n\n assert response[0][\"name\"] == new_york[\"name\"]\n assert response[1][\"name\"] == jersey_city[\"name\"]\n","repo_name":"coding-to-music/docker-flask-mongodb-k8-grafana-mqtt-fastapi","sub_path":"tests/test_geolocation_search.py","file_name":"test_geolocation_search.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"16113622597","text":"from olive.proto.zoodroom_pb2 import ResourceOwnerPasswordCredentialResponse, ResourceOwnerPasswordCredentialRequest, \\\n CreateClientRequest, CreateClientResponse, VerifyAccessTokenRequest, VerifyAccessTokenResponse, \\\n GetClientByClientIdRequest, GetClientByClientIdResponse, RefreshTokenRequest, RefreshTokenResponse\nfrom olive.exc import ClientNotFound, AccessTokenNotFound\nfrom olive.authentication import Authentication\nfrom olive.proto import zoodroom_pb2_grpc\nfrom olive.consts import UTC_DATE_FORMAT\nfrom marshmallow import ValidationError\nfrom olive.validation import Validation\nfrom olive.proto.rpc import Response\nimport traceback\nimport datetime\n\n\nclass CranberryService(zoodroom_pb2_grpc.CranberryServiceServicer):\n def __init__(self, access_token_store, refresh_token_store, client_store, expires_in, app):\n self.access_token_store = access_token_store\n self.refresh_token_store = refresh_token_store\n self.client_store = client_store\n self.app = app\n self.expires_in = expires_in\n\n def ResourceOwnerPasswordCredential(self, request: ResourceOwnerPasswordCredentialRequest, context) \\\n -> ResourceOwnerPasswordCredentialResponse:\n try:\n self.app.log.debug('validating client {}'.format(request.client_id))\n self.client_store.exists(client_id=request.client_id,\n client_secret=request.client_secret)\n\n self.app.log.debug('authenticating user {} with password: ****'.format(request.username))\n # TODO authenticate user in the future -> user.authenticate_user & user.get_user_by_username\n # TODO suppose it is Authenticated by now and fetch user data\n user_id = 'A-SAMPLE-USER-ID'\n scopes = ['list-of-user-permissions', 'fetched-from-apple']\n\n # TODO if user scope changes by admin, user's tokens should be purged by apple -> VoidUserTokens\n\n auth = Authentication()\n access_token_payload = {\n 'client_id': request.client_id,\n 'access_token': auth.generate_token(user_id),\n 'refresh_token': auth.generate_token(user_id),\n 'expires_in': self.expires_in,\n 'user_id': user_id,\n 'scope': scopes,\n 'grant_type': 'password'\n }\n\n self.app.log.debug('saving access token:\\n{}'.format(access_token_payload))\n access_token_id = self.access_token_store.save(access_token_payload)\n self.app.log.debug('access-token has been saved successfully: {}'.format(access_token_id))\n\n refresh_token_payload = {\n 'client_id': request.client_id,\n 'refresh_token': access_token_payload['refresh_token'],\n 'expires_in': self.expires_in,\n 'user_id': user_id,\n 'scope': scopes,\n 'grant_type': 'password'\n }\n\n 
self.app.log.debug('saving refresh token:\\n{}'.format(refresh_token_payload))\n refresh_token_id = self.refresh_token_store.save(refresh_token_payload)\n self.app.log.debug('refresh token has been saved successfully: {}'.format(refresh_token_id))\n\n return Response.message(\n access_token=access_token_payload['access_token'],\n refresh_token=access_token_payload['refresh_token'],\n expires_in=self.expires_in,\n scope=scopes\n )\n except ValueError as ve:\n self.app.log.error('Schema value error:\\r\\n{}'.format(traceback.format_exc()))\n return Response.message(\n error={\n 'code': 'value_error',\n 'message': str(ve),\n 'details': []\n }\n )\n except ValidationError as ve:\n self.app.log.error('Schema validation error:\\r\\n{}'.format(ve.messages))\n return Response.message(\n error={\n 'code': 'invalid_schema',\n 'message': 'Given data is not valid!',\n 'details': []\n }\n )\n except ClientNotFound:\n self.app.log.error('Client Not Found: {}'.format(traceback.format_exc()))\n return Response.message(\n error={\n 'code': 'client_not_found',\n 'message': 'Client Not found!',\n 'details': []\n }\n )\n except Exception:\n self.app.log.error('An error occurred: {}'.format(traceback.format_exc()))\n return Response.message(\n error={\n 'code': 'server_error',\n 'message': 'Server is in maintenance mode',\n 'details': []\n }\n )\n\n def CreateClient(self, request: CreateClientRequest, context) -> CreateClientResponse:\n try:\n try:\n self.client_store.is_client_id_exists(client_id=request.client_id)\n return Response.message(\n error={\n 'code': 'client_exists',\n 'message': 'Client {} is duplicate'.format(request.client_id),\n 'details': [request.client_id]\n })\n except ClientNotFound:\n self.app.log.info('client id {} is free for registration'.format(request.client_id))\n\n self.app.log.debug('verifying URL schemes')\n for url in request.redirection_uris:\n if not Validation.is_url_valid(url):\n return Response.message(\n error={\n 'code': 'invalid_redirection_uri',\n 'message': 'Redirection URI {} is not valid!'.format(url),\n 'details': [url]\n }\n )\n\n out = self.client_store.save({\n 'client_id': request.client_id,\n 'client_secret': request.client_secret,\n 'redirection_uris': request.redirection_uris,\n 'fullname': request.fullname,\n 'logo': request.logo,\n 'description': request.description,\n 'is_active': request.is_active or False,\n })\n self.app.log.info('client creation output: {}'.format(out))\n\n return Response.message(\n created=True\n )\n except ValueError as ve:\n self.app.log.error('Schema value error:\\r\\n{}'.format(traceback.format_exc()))\n return Response.message(\n error={\n 'code': 'value_error',\n 'message': str(ve),\n 'details': []\n }\n )\n except ValidationError as ve:\n self.app.log.error('Schema validation error:\\r\\n{}'.format(ve.messages))\n return Response.message(\n error={\n 'code': 'invalid_schema',\n 'message': 'Given data is not valid!',\n 'details': []\n }\n )\n except Exception:\n self.app.log.error('An error occurred: {}'.format(traceback.format_exc()))\n return Response.message(\n error={\n 'code': 'server_error',\n 'message': 'Server is in maintenance mode',\n 'details': []\n }\n )\n\n def VerifyAccessToken(self, request: VerifyAccessTokenRequest, context) -> VerifyAccessTokenResponse:\n try:\n self.app.log.debug('getting token {} for client {}...'.format(request.access_token, request.client_id))\n token = self.access_token_store.get_one(client_id=request.client_id,\n access_token=request.access_token)\n self.app.log.debug('token information: 
{}'.format(token))\n\n issue_date_obj = datetime.datetime.strptime(token['created_at'], UTC_DATE_FORMAT)\n expires_in_second_obj = datetime.timedelta(seconds=token['expires_in'])\n expires_in_obj = issue_date_obj + expires_in_second_obj\n if expires_in_obj < datetime.datetime.utcnow():\n hours, remainder = divmod((datetime.datetime.utcnow() - expires_in_obj).seconds, 3600)\n self.app.log.error('access token {} is expired on {} for {} days, {} hour(s), {} minute(s)'.format(\n request.access_token,\n expires_in_obj.strftime(UTC_DATE_FORMAT),\n (datetime.datetime.utcnow() - expires_in_obj).days,\n hours,\n remainder // 60))\n return Response.message(\n error={\n 'code': 'invalid_token',\n 'message': 'The access token provided is expired, revoked or malformed.'\n }\n )\n\n return Response.message()\n except ValueError as ve:\n self.app.log.error('Schema value error:\\r\\n{}'.format(traceback.format_exc()))\n return Response.message(\n error={\n 'code': 'value_error',\n 'message': str(ve),\n 'details': []\n }\n )\n except ValidationError as ve:\n self.app.log.error('Schema validation error:\\r\\n{}'.format(ve.messages))\n return Response.message(\n error={\n 'code': 'invalid_schema',\n 'message': 'Given data is not valid!',\n 'details': []\n }\n )\n except AccessTokenNotFound:\n self.app.log.error('token not found error:\\r\\n{}'.format(traceback.format_exc()))\n return Response.message(\n error={\n 'code': 'invalid_token',\n 'message': 'The access token provided is expired, revoked or malformed.',\n 'details': []\n }\n )\n except Exception:\n self.app.log.error('An error occurred: {}'.format(traceback.format_exc()))\n return Response.message(\n error={\n 'code': 'server_error',\n 'message': 'Server is in maintenance mode',\n 'details': []\n }\n )\n\n def GetClientByClientId(self, request: GetClientByClientIdRequest, context) -> GetClientByClientIdResponse:\n try:\n self.app.log.debug('getting client {}'.format(request.client_id))\n client = self.client_store.get_client_by_client_id(client_id=request.client_id)\n self.app.log.debug('client information: {}'.format(client))\n return Response.message(\n client_id=request.client_id,\n client_secret=client['client_secret'],\n redirection_uris=client['redirection_uris'],\n fullname=client['fullname'],\n logo=client['logo'],\n description=client['description']\n )\n except ClientNotFound:\n self.app.log.error('Client Not Found: {}'.format(traceback.format_exc()))\n return Response.message(\n error={\n 'code': 'client_not_found',\n 'message': 'Client Not found!',\n 'details': []\n }\n )\n except ValueError as ve:\n self.app.log.error('Schema value error:\\r\\n{}'.format(traceback.format_exc()))\n return Response.message(\n error={\n 'code': 'value_error',\n 'message': str(ve),\n 'details': []\n }\n )\n except ValidationError as ve:\n self.app.log.error('Schema validation error:\\r\\n{}'.format(ve.messages))\n return Response.message(\n error={\n 'code': 'invalid_schema',\n 'message': 'Given data is not valid!',\n 'details': []\n }\n )\n except Exception:\n self.app.log.error('An error occurred: {}'.format(traceback.format_exc()))\n return Response.message(\n error={\n 'code': 'server_error',\n 'message': 'Server is in maintenance mode',\n 'details': []\n }\n )\n\n def RefreshToken(self, request: RefreshTokenRequest, context) -> RefreshTokenResponse:\n try:\n raise NotImplementedError\n except ValueError as ve:\n self.app.log.error('Schema value error:\\r\\n{}'.format(traceback.format_exc()))\n return Response.message(\n error={\n 'code': 'value_error',\n 
'message': str(ve),\n 'details': []\n }\n )\n except ValidationError as ve:\n self.app.log.error('Schema validation error:\\r\\n{}'.format(ve.messages))\n return Response.message(\n error={\n 'code': 'invalid_schema',\n 'message': 'Given data is not valid!',\n 'details': []\n }\n )\n except Exception:\n self.app.log.error('An error occurred: {}'.format(traceback.format_exc()))\n return Response.message(\n error={\n 'code': 'server_error',\n 'message': 'Server is in maintenance mode',\n 'details': []\n }\n )\n","repo_name":"alirezastack/cranberry","sub_path":"cranberry/core/oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":13831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70267682642","text":"import pygame, sys\r\nfrom pygame import Color\r\nimport numpy as np\r\nimport cv2\r\nimport joblib\r\n\r\npygame.init()\r\n\r\npixels = 640\r\npixels1 = 480\r\ndrawing = False\r\nBLACK = (0,0,0)\r\nWHITE = (255,255,255)\r\nRED = (255,0,0)\r\ncord_x = []\r\ncord_y = []\r\nboundary = 5\r\n\r\nnumber_labels = {0:\"Zero\", 1:\"One\", 2:\"Two\", 3:\"Three\", 4:\"Four\",5:\"Five\", 6:\"Six\", 7:\"Seven\",8:\"Eight\", 9:\"Nine\"}\r\n\r\nload_model = joblib.load('models/rfc_model') # random forest classifier, support vector classification, tensorflow\r\n\r\n\r\nscreen = pygame.display.set_mode((pixels,pixels1))\r\n#clock = pygame.time.Clock()\r\ntxt_font = pygame.font.Font('fonts/04B_19.TTF',25) #add font \r\n\r\n# pygame.display.set_caption('Digit Canvas')\r\n# Icon = pygame.image.load('logo.png') \r\n# pygame.display.set_icon(Icon)\r\npredict = True\r\nredt = True\r\nwhile predict:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n \r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_BACKSPACE:\r\n screen.fill(BLACK)\r\n \r\n if event.type == pygame.MOUSEMOTION and drawing:\r\n x_pos,y_pos = pygame.mouse.get_pos() #event.get..\r\n pygame.draw.circle(screen, WHITE, (x_pos, y_pos), 4, 0)\r\n \r\n cord_x.append(x_pos)\r\n cord_y.append(y_pos)\r\n \r\n #if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_UP:\r\n drawing = True\r\n\r\n #if event.type == pygame.MOUSEBUTTONUP:\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_UP:\r\n drawing = False\r\n #cord_x = sorted(cord_x)\r\n #cord_y = sorted(cord_y)\r\n \r\n rect_min_x , rect_max_x = max(cord_x[0] - boundary,0), min(pixels, cord_x[-1] + boundary)\r\n rect_min_y , rect_max_y = max(cord_y[0] - boundary,0), min(cord_y[-1] + boundary,pixels)\r\n\r\n cord_x = []\r\n cord_y = []\r\n \r\n img_arr = np.array(pygame.PixelArray(screen))[rect_min_x:rect_max_x, rect_min_y:rect_max_y].T.astype(np.float32)\r\n\r\n if True:\r\n img = cv2.resize(img_arr,(1,784))\r\n img = np.pad(img,(10,10), 'constant',constant_values = 0)\r\n img = cv2.resize(img,(28,28))/255\r\n \r\n label = str(number_labels[int(load_model.predict(img.reshape(1, -1))[0])])\r\n\r\n textSurface = txt_font.render(label, True, RED, WHITE)\r\n textRecobj = textSurface.get_rect()\r\n textRecobj.left , textRecobj.bottom = rect_min_x, rect_max_y\r\n\r\n screen.blit(textSurface,textRecobj) \r\n \r\n pygame.display.update()\r\n #clock.tick(120)\r\n\r\n\r\n\r\n","repo_name":"vanshj22/Handwritten-Digit-Recognition-ml","sub_path":"main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
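A note on the digit-canvas record above: scikit-learn classifiers trained on MNIST-style data expect a flat `(n_samples, 784)` float matrix, so the captured surface must be resized, scaled to `[0, 1]`, and flattened before `predict`. A minimal sketch under that assumption (the fitted `model` is hypothetical; any estimator trained on 784 features would do):

```python
import cv2
import numpy as np

def prepare_digit(img_arr: np.ndarray) -> np.ndarray:
    # Resize the cropped drawing to 28x28, normalize, and flatten to one row.
    img = cv2.resize(img_arr.astype(np.float32), (28, 28)) / 255.0
    return img.reshape(1, -1)  # sklearn predict() expects a 2D array

# usage (model assumed fitted): label = model.predict(prepare_digit(crop))[0]
```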
+{"seq_id":"41745112576","text":"from .wordPreparer import UnevenWordPair\nimport sklearn\nimport numpy as np\n\nimport sys\nif (\"--create-df\" in sys.argv):\n\tUnevenWordPair.createDF()\nimport pandas as pd\n\n#select dataset (change if you want to use a different named dataset (probably not)):\ndf = pd.read_csv('wordsim.csv')\n\n#get pairs and y.\npairs = df.loc['Pairs']\ny = df.loc['Similarities']\n\n#turn the Pairs into a whole lot of numbers:\nX = [UnevenWordPair(pairs[i][0], pairs[i][1]).getFinalArrangement() for i in range(len(pairs))]\n\n#split half the data for the algorithm to learn from,\n#half to test the results later:\nfrom sklearn.model_selection import train_test_split \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4) #40% of the word pairs will be just for tests test\n\n\n\n\n#for the moment of truth, now for the actual learning!\nfrom sklearn.neural_network import MLPRegressor\n#create our model:\nmlp = MLPRegressor(hidden_layer_sizes=(100, 100, 500), warm_start=True) #make 3 hidden layers with \n#100, 100 and 500 nodes, respectively\n\n#do your work, complicated math!\nmlp.fit(X_train, y_train) # :D\n\n#evaluate the results:\n#get the predictions out of the testing data:\ny_predicted = mlp.predict(X_test)\n\n#get the average error:\nfrom sklearn.metrics import mean_absolute_error \nabsErr = mean_absolute_error(y_test, y_pred)\ndebug = [\"Error rate (mean): \" + str(absErr),\n\"Predicted y values: \" + \"[\" + \", \".join( str(x) for x in y_predicted) + \"]\",\n\"Real y values: \" + \"[\" + \", \".join( str(x) for x in y_test) + \"]\",\n\"Predicted y values: \" + \"[\" + \", \".join( str(x) for x in y_predicted) + \"]\"]\n\n#open file to save the scoring:\nf = open('wordSimCalc.out', 'ab') #append mode\n\n#print info about testing results:\nfor line in debug:\n\tprint(line)\n\tf.write(line)\n\nf.close() #close file\n\n#make sure to keep track of sklearn's version:\nfrom sklearn import __version__ as sk_ver\nf = open('sklearn_version', 'w')\nf.write(sk_ver)\nf.close()\n\n#now export this good stuff!\nfrom sklearn.externals import joblib\njoblib.dump(mlp, 'wordsim.pkl')\n","repo_name":"joshsomething/WordSimilarity","sub_path":"wordSim/wordSimCalc.py","file_name":"wordSimCalc.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"28010174330","text":"# run (console output): python extract.py submission-folder-name\r\n# run (file output): python extract.py submission-folder-name > result-file.txt\r\n\r\nimport os\r\nimport re\r\nimport sys\r\n\r\nif len(sys.argv)==1:\r\n print('usage: python extract.py foldername')\r\n exit(0)\r\n\r\nlist = os.listdir(sys.argv[1])\r\n\r\nfor name in list:\r\n itnum = re.findall('[iI][tT][0-9]{8}', name);\r\n if len(itnum)==1:\r\n print(itnum[0])\r\n else:\r\n print('could not find IT number in this folder = ', name)\r\n","repo_name":"shyam3001/share","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"43798556433","text":"# 스타트와 링크 https://www.acmicpc.net/problem/14889\n# 시간초과를 해결하지 못했습니다..............\nimport sys\ninput = sys.stdin.readline\n\ndef team(cnt, start):\n global res\n\n if cnt == n//2:\n start = 0\n link = 0\n\n for i in range(n):\n for j in range(i+1, n):\n if check[i] and check[j]:\n start += graph[i][j]\n start += graph[j][i]\n elif not check[i] and not 
check[j]:\n link += graph[i][j]\n link += graph[j][i]\n\n res = min(res, abs(start - link))\n return\n\n for i in range(start, n):\n if check[i]:\n continue\n\n check[i] = True\n team(cnt+1, start+1)\n check[i] = False\n\n\nn = int(input())\ngraph = [list(map(int, input().split())) for _ in range(n)]\ncheck = [False] * n\nres = int(1e9) \n\nteam(0, 0)\n\nprint(res)","repo_name":"KDT-02-Algorithm-Study/Algorithm-Study","sub_path":"week11_230323/14889_스타트와_링크/14889_최은비.py","file_name":"14889_최은비.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"22314992414","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom .random_process import RandomProcess\n\n__author__ = \"Christian Heider Nielsen\"\n\nimport random\n\nimport numpy\n\n__all__ = [\"SelfAvoiding\"]\n\n\nclass SelfAvoiding(RandomProcess):\n def __init__(self, num_of_options=4, n=10, **kwargs):\n super().__init__(**kwargs)\n self.num_of_options = num_of_options\n self.n = n\n self.reset()\n\n def sample(self, steps=1):\n while (\n (self.x > 0)\n and (self.x < self.n - 1)\n and (self.y > 0)\n and (self.y < self.n - 1)\n ):\n self.a[self.x][self.y] = 1\n if (\n self.a[self.x - 1][self.y]\n and self.a[self.x + 1][self.y]\n and self.a[self.x][self.y - 1]\n and self.a[self.x][self.y + 1]\n ):\n self.deadEnds += 1\n return self.a[self.x - 1][self.y]\n r = random.randrange(1, self.num_of_options + 1)\n if (r == 1) and (not self.a[self.x + 1][self.y]):\n self.x += 1\n elif (r == 2) and (not self.a[self.x - 1][self.y]):\n self.x -= 1\n elif (r == 3) and (not self.a[self.x][self.y + 1]):\n self.y += 1\n elif (r == 4) and (not self.a[self.x][self.y - 1]):\n self.y -= 1\n\n return self.a[self.x - 1][self.y]\n\n def reset(self):\n self.deadEnds = 0\n\n self.a = numpy.zeros((self.n, self.n))\n\n self.x = self.n // 2\n self.y = self.n // 2\n\n\nif __name__ == \"__main__\":\n\n def main(n=5, trials=3):\n r = SelfAvoiding()\n\n for t in range(trials):\n print(r.sample())\n\n main()\n","repo_name":"sintefneodroid/agent","sub_path":"neodroidagent/utilities/exploration/sampling/random_process/self_avoiding.py","file_name":"self_avoiding.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"23761639604","text":"from pymavlink import mavutil\n\n# Create the connection\nmaster = mavutil.mavlink_connection('udpin:localhost:14551')\n# Wait a heartbeat before sending commands\nmaster.wait_heartbeat()\n\nprint(f\"Heartbeat from system (system {master.target_system}, component {master.target_component})\")\n\nmaster.mav.send(mavutil.mavlink.MAVLink_set_position_target_local_ned_message(10, master.target_system,\\\n master.target_component, mavutil.mavlink.MAV_FRAME_LOCAL_NED, int(0b010111111000), 0, 0, -10, 0, 0, 0, 0, 0, 0, -5, 0.5))\n\n#master.mav.send(mavutil.mavlink.MAVLink_set_position_target_global_int_message(10, master.target_system,\\\n# master.target_component, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, \\\n# int(0b110111111000), int(-35.3629849 * 10 ** 7), int(149.1649185 * 10 ** 7), 10, 0, 0, 0, 0, 0, 0, 1.57, 0.5))\n\n\nwhile 1:\n msg = master.recv_match(\n type='LOCAL_POSITION_NED', blocking=True)\n print(msg)","repo_name":"causemx/pi_mav_control","sub_path":"test/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
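A note on the `type_mask` in the pymavlink record above: in MAVLink's POSITION_TARGET_TYPEMASK convention, a set bit tells the autopilot to ignore that field, so `0b010111111000` keeps x/y/z position and yaw rate active while masking velocity, acceleration, and absolute yaw. A small sketch reconstructing the constant (flag names here are descriptive, following the spec's bit positions):

```python
# Each flag marks a field the autopilot should IGNORE.
IGNORE_VX, IGNORE_VY, IGNORE_VZ = 1 << 3, 1 << 4, 1 << 5
IGNORE_AFX, IGNORE_AFY, IGNORE_AFZ = 1 << 6, 1 << 7, 1 << 8
IGNORE_YAW = 1 << 10  # the yaw-rate bit (1 << 11) stays clear, so yaw_rate=0.5 is honored

mask = (IGNORE_VX | IGNORE_VY | IGNORE_VZ |
        IGNORE_AFX | IGNORE_AFY | IGNORE_AFZ | IGNORE_YAW)
assert mask == 0b010111111000  # position bits (0-2) remain active
```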
+{"seq_id":"7748373576","text":"from django.urls import path\n\nfrom . import views\n\napp_name = \"courses\"\n\nurlpatterns = [\n # Course Management URLs\n path('courses/', views.course_list, name='course_list'),\n path('courses//', views.course_detail, name='course_detail'),\n\n \n]","repo_name":"Frank1963-mpoyi/school-management-project","sub_path":"school/apps/web/courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38780280664","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Encoder(nn.Module):\n def __init__(self):\n super(Encoder, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, kernel_size=5)\n self.pool1 = nn.MaxPool2d(kernel_size=2)\n # self.bn1 = nn.BatchNorm2d(20)\n\n self.conv2 = nn.Conv2d(20, 50, kernel_size=5)\n self.pool2 = nn.MaxPool2d(kernel_size=2)\n # self.bn2 = nn.BatchNorm2d(50)\n\n self.fc1 = nn.Linear(50*4*4, 500)\n\n def forward(self, x):\n # x = F.relu(self.bn1(self.pool1(self.conv1(x))))\n # x = F.relu(self.bn2(self.pool2(self.conv2(x))))\n x = F.relu(self.pool1(self.conv1(x)))\n x = F.relu(self.pool2(self.conv2(x)))\n x = x.view(x.size(0), 50*4*4)\n x = self.fc1(x)\n return x\n\n\nclass Classifier(nn.Module):\n def __init__(self, args, n_classes=10):\n super(Classifier, self).__init__()\n self.fc2 = nn.Linear(500, n_classes)\n self.use_drop = args.use_drop\n self.use_bn = args.use_bn\n self.use_gumbel = args.use_gumbel\n\n def forward(self, x):\n x = F.dropout(F.relu(x))\n x = self.fc2(x)\n return x\n\n\nclass Generator(nn.Module):\n def __init__(self, nz=100):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d(nz, 512, 4, 1, 0, bias=False),\n nn.BatchNorm2d(512),\n nn.ReLU(True),\n\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(512, 256, 3, 2, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n\n # state size. (ngf*2) x 8 x 8\n nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(True),\n\n # state size. (ngf) x 16 x 16\n nn.ConvTranspose2d(128, 1, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. 
(nc) x 32 x 32\n )\n\n def forward(self, x):\n # print(x.shape) # torch.Size([64, 100, 1, 1])\n x = self.network(x)\n # print(x.shape) # torch.Size([64, 1, 28, 28])\n\n return x\n\n","repo_name":"sobalgi/cuda","sub_path":"visual/model/dade_usps_mnist_gry.py","file_name":"dade_usps_mnist_gry.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"28137658814","text":"import configparser\nimport logging\nimport os\n\n\nclass TraderBase:\n \"\"\"\n Helper class for logging and config parsing\n \"\"\"\n\n @staticmethod\n def get_timezone():\n \"\"\"\n Return configured timezone or europe berlin as default\n :return:\n \"\"\"\n from pytz import timezone\n from os import environ\n config = TraderBase.get_config()\n try:\n return timezone(config['autotrader']['time_zone'])\n except (configparser.NoSectionError, configparser.NoOptionError, KeyError, TypeError):\n if environ.get('TZ') is not None:\n return timezone(os.environ['TZ'])\n return timezone('Europe/Berlin')\n\n @staticmethod\n def setup_logger(name: str):\n \"\"\"\n Setup the autotrader standard logger\n :param name: name of logger\n :return: instance of logger\n \"\"\"\n logger = logging.getLogger(name)\n log_formatter = logging.Formatter(\"%(asctime)s [%(filename)s:%(lineno)s - %(funcName)20s()]\"\n \" [%(levelname)-5.5s] %(message)s\")\n file_handler = logging.FileHandler(os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"..\", \"%s_broker.log\" % name), mode='w')\n file_handler.setFormatter(log_formatter)\n logger.addHandler(file_handler)\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(log_formatter)\n logger.addHandler(console_handler)\n logger.setLevel(logging.INFO)\n logger.debug(\"Logging Setup successful\")\n return logger\n\n @staticmethod\n def get_config(configfile=None):\n \"\"\"\n Returns the autotrader config file. The path to the config file can be set by environment\n variable CONFIG_FILE or a co\n :return:\n \"\"\"\n config = configparser.ConfigParser()\n if configfile is None:\n configfile = os.environ.get('CONFIG_FILE')\n\n if configfile is None:\n configfile = os.path.join(os.path.abspath(os.path.join(__file__, os.pardir)), '..',\n '..', 'config.ini')\n if not os.path.exists(configfile):\n return None\n config.read(configfile)\n return config\n","repo_name":"SlashGordon/autotrader","sub_path":"autotrader/base/trader_base.py","file_name":"trader_base.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42384286331","text":"\"\"\"\nBase class for all HID transport mechanisms.\n\"\"\"\nimport logging\nfrom . 
import toolinfo\n\n\nclass HidTool(object):\n \"\"\"\n Holds transport and DAP properties of a CMSIS-DAP debugger.\n Used to select the debugger to use if multiple debuggers are connected.\n \"\"\"\n\n # pylint: disable=too-many-instance-attributes, too-many-arguments\n # These are primary keys used to identify the debugger.\n\n def __init__(self, vendor_id, product_id, serial_number, product_string=\"\", manufacturer_string=\"\"):\n self.logger = logging.getLogger(__name__)\n self.logger.addHandler(logging.NullHandler())\n self.interface_number = -1\n self.vendor_id = vendor_id\n self.product_id = product_id\n self.serial_number = serial_number\n self.product_string = product_string\n self.manufacturer_string = manufacturer_string\n self.firmware_version = \"\"\n self.device_vendor_id = \"\"\n self.device_name = \"\"\n self.packet_size = 64\n\n def set_packet_size(self, packet_size):\n \"\"\"\n Sets the packet size\n :param packet_size: bytes per packet\n \"\"\"\n self.packet_size = packet_size\n\n def set_product_string(self, product_string):\n \"\"\"\n Sets the product string\n :param product_string: product name string\n \"\"\"\n self.product_string = product_string\n\n\nclass HidTransportBase(object):\n \"\"\"\n Base class for HID transports\n \"\"\"\n\n def __init__(self):\n self.logger = logging.getLogger(__name__)\n self.logger.addHandler(logging.NullHandler())\n self.devices = []\n self.device = None\n self.detect_devices()\n self.connected = False\n\n def __del__(self):\n # Make sure we always disconnect the HID connection\n self.disconnect()\n\n def detect_devices(self):\n \"\"\"\n Raise error as this method needs to be overridden.\n \"\"\"\n raise NotImplementedError(\"method needs to be defined by sub-class\")\n\n def connect(self, serial_number=None, product=None):\n \"\"\"\n Makes a HID connection to a debugger\n :param serial_number: instance serial number to connect to\n :param product: product type to connect to\n :return:\n \"\"\"\n if self.connected:\n return True\n\n # Support systems which use an empty-string as the standard for a unspecified USB serial\n if serial_number == '':\n serial_number = None\n\n # Support tool shortnames\n toolname_in_product_string = toolinfo.tool_shortname_to_product_string_name(product)\n\n device_count = len(self.devices)\n self.logger.debug(\"{:d} devices available\".format(device_count))\n if device_count == 0:\n raise IOError(\"No CMSIS-DAP devices found.\")\n\n matching_devices = 0\n selected_device = 0\n\n # TODO: this section should be refactored to have fewer branches and be more systematic\n\n # No product or serial number specified\n if serial_number is None and product is None:\n matching_devices = device_count\n if device_count == 1:\n selected_device = 0\n else:\n self.logger.debug(\"Looking for a match in {0:d} units\".format(device_count))\n i = 0\n for device in self.devices:\n # Match both product and serial number\n if serial_number is not None and product is not None:\n if device.serial_number == serial_number and device.product_string.upper().startswith(\n toolname_in_product_string.upper()):\n selected_device = i\n matching_devices += 1\n # Look for product type if serial is not specified\n elif serial_number is None and product is not None:\n self.logger.debug(\"> {:s}\".format(device.product_string))\n if device.product_string.upper().startswith(toolname_in_product_string.upper()):\n selected_device = i\n matching_devices += 1\n # Look for serial number if product is not specified\n elif product is None and 
serial_number is not None:\n if device.serial_number == serial_number:\n selected_device = i\n matching_devices += 1\n # Something is horribly wrong\n else:\n return False\n i += 1\n\n # Did we find exactly 1 tool?\n if matching_devices != 1:\n log_str = \"Found {:d} daps matching the filter serial = \\\"{}\\\" and product = \\\"{}\\\"\"\n self.logger.debug(log_str.format(matching_devices, serial_number, toolname_in_product_string))\n if matching_devices > 1:\n self.logger.error(\"Too many products found. Please specify one of:\")\n for device in self.devices:\n self.logger.error(\" > {:s} {:s}\".format(device.product_string,\n device.serial_number))\n return False\n\n # Everything is peachy, connect to the tool\n self.device = self.devices[selected_device]\n self.hid_connect(self.device)\n self.logger.debug(\"Connected OK\")\n self.connected = True\n packet_size = toolinfo.get_default_report_size(self.device.product_id)\n self.device.set_packet_size(packet_size)\n self.hid_info()\n return True\n\n def disconnect(self):\n \"\"\"\n Release the HID connection\n :return:\n \"\"\"\n if self.connected:\n self.hid_disconnect()\n self.connected = False\n\n def hid_connect(self, device):\n \"\"\"\n Raise error as this method needs to be overridden.\n \"\"\"\n raise NotImplementedError(\"method needs to be defined by sub-class\")\n\n def hid_info(self):\n \"\"\"\n Raise error as this method needs to be overridden.\n \"\"\"\n raise NotImplementedError(\"method needs to be defined by sub-class\")\n\n def hid_disconnect(self):\n \"\"\"\n Raise error as this method needs to be overridden.\n \"\"\"\n raise NotImplementedError(\"method needs to be defined by sub-class\")\n\n def get_report_size(self):\n \"\"\"\n Get the packet size in bytes\n :return: bytes per packet/report\n \"\"\"\n return self.device.packet_size\n","repo_name":"microchip-pic-avr-tools/pykitcommander","sub_path":"pykitcommander/picpack/pic24fj128ga705/common/pyedbglib/hidtransport/hidtransportbase.py","file_name":"hidtransportbase.py","file_ext":"py","file_size_in_byte":6511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"9603042496","text":"\"\"\"\nFIT3155 - Lab 10 - Task 2\n\"\"\"\n\n\ndef decode_single_value(codeword):\n read_len = convert_to_decimal(\"1\")\n pos = 0\n while True:\n component = codeword[pos : pos + read_len]\n if component[0] == \"1\":\n return convert_to_decimal(component[: len(component)])\n else:\n # flip first bit of binary string\n encoded_list = list(component)\n encoded_list[0] = \"1\"\n component = \"\".join(encoded_list)\n component_as_decimal = convert_to_decimal(component)\n pos = pos + read_len\n read_len = component_as_decimal + 1\n\n\ndef convert_to_decimal(binary_string: str):\n return int(binary_string, 2)\n\n\ndef decode(codewords):\n result = []\n for codeword in codewords:\n result.append(decode_single_value(codeword))\n return result\n","repo_name":"dylanpinn/FIT3155","sub_path":"labs/lab10/l_elias_decoder.py","file_name":"l_elias_decoder.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"40277238760","text":"\"\"\"\nCode based on the vae example at: https://raw.githubusercontent.com/pytorch/examples/master/vae/main.py\n\"\"\"\nimport argparse\nimport torch.utils.data\nfrom torch import optim\nfrom torchvision.utils import save_image\nfrom Q2.model import VAE\nfrom Q2.dataloader import binarized_mnist_data_loader, 
MNIST_IMAGE_SIZE\nimport os\n\n\nparser = argparse.ArgumentParser(description='VAE MNIST Example')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 128)')\nparser.add_argument('--max-batch-idx', type=int, default=99999, metavar='N',\n help='only for debugging locally')\nparser.add_argument('--hidden-features', type=int, default=100, metavar='N',\n help='latent variable size')\nparser.add_argument('--epochs', type=int, default=20, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\ntorch.manual_seed(args.seed)\n\ndevice = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\nmodel = VAE(args.hidden_features).to(device)\n\noptimizer = optim.Adam(model.parameters(), lr=3e-4)\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\nresults_dir = '{}/results'.format(current_dir)\nsaved_model = '{}/saved_model'.format(current_dir)\n\ndef train(epoch):\n model.train()\n train_loss = 0\n\n for batch_idx, data in enumerate(train_loader):\n if batch_idx > args.max_batch_idx:\n break\n data = data.to(device)\n optimizer.zero_grad()\n recon_batch, mu, logvar = model(data)\n loss = model.loss_function(data, recon_batch, mu, logvar)\n loss.backward()\n train_loss += loss.item()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader),\n loss.item()))\n\n train_loss /= (batch_idx + 1)\n print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss))\n\n\ndef validate(epoch):\n model.eval()\n valid_loss = 0\n\n with torch.no_grad():\n for i, data in enumerate(valid_loader):\n data = data.to(device)\n recon_batch, mu, logvar = model(data)\n valid_loss += model.loss_function(data, recon_batch, mu, logvar).item()\n if i == 0:\n n = min(data.size(0), 8)\n comparison = torch.cat([data[:n], recon_batch.view(args.batch_size, 1, MNIST_IMAGE_SIZE, MNIST_IMAGE_SIZE)[:n]])\n save_image(comparison.cpu(),\n '{}/reconstruction_{}.png'.format(results_dir, epoch), nrow=n)\n\n valid_loss /= (i + 1)\n print('====> Average Validation loss: {:.4f}'.format(valid_loss))\n return valid_loss\n\n\ndef test():\n model.eval()\n test_loss = 0\n with torch.no_grad():\n for i, data in enumerate(test_loader):\n data = data.to(device)\n recon_batch, mu, logvar = model(data)\n test_loss += model.loss_function(data, recon_batch, mu, logvar).item()\n\n test_loss /= (i + 1)\n print('====> Average Test loss: {:.4f}'.format(test_loss))\n\n\ndef sample(epoch):\n model.eval()\n with torch.no_grad():\n sample = torch.randn(args.batch_size, args.hidden_features).to(device)\n sample = model.decode(sample).cpu()\n save_image(sample.view(args.batch_size, 1, MNIST_IMAGE_SIZE, MNIST_IMAGE_SIZE),\n '{}/sample_{}.png'.format(results_dir, epoch))\n\n\nif __name__ == '__main__':\n train_loader, valid_loader, test_loader = binarized_mnist_data_loader('{}/binarized_mnist'.format(current_dir),\n args.batch_size)\n best_valid_loss = None\n for epoch in range(1, args.epochs + 1):\n train(epoch)\n new_valid_loss = validate(epoch)\n if best_valid_loss is None or new_valid_loss < best_valid_loss:\n best_valid_loss = new_valid_loss\n print('Saving model with avg loss {}'.format(best_valid_loss))\n torch.save(model.state_dict(),\n os.path.join(saved_model, 'params_epoch_{}_loss_{:.4f}.pt'.format(epoch, best_valid_loss)))\n\n sample(epoch)\n\n test()\n","repo_name":"mhmorta/IFT6135_Generative_models","sub_path":"Q2/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25380452010","text":"'''from random import randint\ntupla = ()\nfor c in range(0, 5):\n num = randint(0, 10)\n tupla += (num)\nprint(tupla)'''\n\nfrom random import choice\nnumeros = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\nsorteados = []\nfor c in range(0, 5):\n a = choice(numeros)\n sorteados += [a]\ntupla = tuple(sorteados)\nprint('Os valores sorteados foram:')\nfor c in range(0, 5):\n print(f'{tupla[c]}', end=' ')\nordem = sorted(tupla)\nprint(f'\\nO maior valor sorteado foi: {ordem[4]}\\n'\n f'O menor valor sorteado foi: {ordem[0]}')\n#ou\nprint(f'\\nO maior valor sorteado foi: {max(ordem)}\\n'\n f'O menor valor sorteado foi: {min(ordem)}')\n","repo_name":"joaomarquardt/python-exercicios-cev","sub_path":"ex074.py","file_name":"ex074.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13766479237","text":"import random\n\nhistory = {}\n\n\ndef try_to_guess(name, answer):\n try_num = 0\n while try_num < 10:\n guess_answer = int(input('请输入一个数字: '))\n if guess_answer < answer:\n print('你输入的数字比正确答案小。')\n elif guess_answer == answer:\n print('回答正确!')\n history[name].append('成功')\n break\n else:\n print('你输入的数字比正确答案大。')\n try_num += 1\n else:\n 
print('猜错次数太多,失败。')\n history[name].append('失败')\n\n\ndef show_history():\n for name, data in history.items():\n print('用户:{}, 记录如下:{}'.format(name, data))\n\n\ndef start():\n name = input('请输入你的名字:')\n if name == '退出':\n return\n if name not in history:\n history[name] = []\n answer = random.randint(0, 1024)\n try_to_guess(name, answer)\n\n\ndef default():\n pass\n\nif __name__ == '__main__':\n select_dict = {'1': show_history, '2': start, '3': exit}\n while True:\n select = input('1.历史记录\\n2.继续游戏\\n3.退出游戏\\n输入数字选择:')\n select_dict.get(select, default)()\n","repo_name":"kingname/SourceCodeOfBook","sub_path":"第2章/program/Guess.py","file_name":"Guess.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":315,"dataset":"github-code","pt":"3"} +{"seq_id":"7374990727","text":"from megnet.utils.models import load_model, AVAILABLE_MODELS\nimport numpy as np\nfrom keras.models import Model\nimport warnings\nimport pandas as pd\nfrom .megnet_setup_evaluate import load_model_scaler, megnet_evaluate_structures\nwarnings.filterwarnings(\"ignore\")\nfrom typing import Tuple, Any\n# print(AVAILABLE_MODELS)\ndef get_MEGNetBaseFeatures(structures):\n MEGNetFeats_structs=[]\n for model_name in ['Eform_MP_2019','Efermi_MP_2019','Bandgap_MP_2018','logK_MP_2019','logG_MP_2019']:\n model=load_model(model_name) \n intermediate_layer_model = Model(inputs=model.input,\n outputs=model.layers[-3].output) \n MEGNetModel_structs=[]\n for s in structures:\n try:\n graph = model.graph_converter.convert(s)\n inp = model.graph_converter.graph_to_input(graph)\n pred = intermediate_layer_model.predict(inp, verbose=False)\n model_struct=pd.DataFrame([pred[0][0]], \n columns=[f\"{model_name}_{idx+1}\" for idx in \n range(len(pred[0][0]))])\n MEGNetModel_structs.append(model_struct)\n except Exception as e:\n print(e)\n print(\"Probably an invalid structure was passed to the model, continuing..\")\n model_struct=pd.DataFrame([np.nan]*32, \n columns=[f\"{model_name}_{idx+1}\" for idx in \n range(len(pred[0][0]))])\n continue\n ## now append the columns with the layer of each model\n MEGNetModel_structs=pd.concat(MEGNetModel_structs,axis=0)\n MEGNetFeats_structs.append(MEGNetModel_structs)\n print(f\"Features calculated for model {model_name}.\")\n ## now every structure calculated with each model is combined in a final dataframe\n MEGNetFeats_structs=pd.concat(MEGNetFeats_structs,axis=1)\n return MEGNetFeats_structs\n\n\ndef get_MEGNetFeatures(structures,\n n_targets : int = 1,\n neuron_layers : Tuple[int] = (64,32,16), \n model=None, \n model_file=None, \n scaler=None,\n scaler_file=None,\n **kwargs):\n '''From a specified model, either passed directly or loaded from file\n scaler is optional to scale back the produced output. 
\n Reads a set of structures and filters them'''\n model_name=kwargs.get('model_name','myMEGNetModel')\n if model is None:\n model,scaler=load_model_scaler(n_targets=n_targets, \n neuron_layers=neuron_layers,\n model_file=model_file, scaler_file=scaler_file, \n **kwargs)\n\n MEGNetFeatsDF=[] \n structures_valid,ypred=megnet_evaluate_structures(model,structures)\n print(ypred)\n for s in structures:\n if s in list(structures_valid):\n s_idx = list(structures_valid).index(s)\n p = ypred[s_idx]\n if scaler is None:\n feat_data=pd.DataFrame([p],columns=[f\"MEGNet_{model_name}_{idx+1}\" for idx in range(n_targets)])\n struct=pd.DataFrame({'structure': [s]})\n modeldata_struct = pd.concat([struct,feat_data], axis=1)\n else:\n feat_data=pd.DataFrame(scaler.inverse_transform(p.reshape(1, -1)),\n columns=[f\"MEGNet_{model_name}_{idx+1}\" for idx in range(n_targets)])\n struct=pd.DataFrame({'structure': [s]})\n modeldata_struct = pd.concat([struct,feat_data], axis=1)\n else:\n feat_data=pd.DataFrame([[np.nan] * n_targets],columns=[f\"MEGNet_{model_name}_{idx+1}\" for idx in range(n_targets)])\n struct=pd.DataFrame({'structure': [s]})\n modeldata_struct = pd.concat([struct,feat_data], axis=1) \n MEGNetFeatsDF.append(modeldata_struct)\n MEGNetFeatsDF = pd.concat(MEGNetFeatsDF,axis=0) \n MEGNetFeatsDF = MEGNetFeatsDF.reset_index(drop=True)\n return MEGNetFeatsDF\n \n\n__all__ = ['get_MEGNetFeatures', 'get_MEGNetBaseFeatures']","repo_name":"rogeriog/MEGNetModelTools","sub_path":"megnettools/megnet_featurization.py","file_name":"megnet_featurization.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74491779920","text":"\nfrom time import sleep\n\nprint('\\n\\nContagem regressiva para os fogos!\\n')\nsleep(2)\ncontagem = 10\nfor c in range(0,11):\n print(contagem)\n sleep(0.5)\n print('\\n')\n contagem = contagem - 1\nprint('\\n\\033[7mBOOOOOOOOOOMMMMM!!!!!!\\033[m\\n')","repo_name":"DanBelasco/Python","sub_path":"0- 115 Exercícios Curso em video/ex046.py","file_name":"ex046.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"20092393435","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Author: Kaze Mewno (C.H. Chiu)\n# Last Modify: 2016.6.29\n#\n# This is a cron program that does the following jobs:\n# 1. check the upload list and process files in it.\n# 2. 
check the expire time of the files that have been modified.\n\n\n\nimport sys, os\nimport shutil\nimport subprocess\nimport time\nimport struct\nimport sqlite3\nimport hashlib\nimport ConfigParser\nfrom random import Random\nimport re\n\nsys.path.append(\"/home/pkriachu/androguard/\")\nsys.path.append(\"/home/pkriachu/androguard/elsim/\")\nfrom androguard.core.bytecodes.apk import *\n\n\ncodroid_conf = '../codroid.conf'\n\ncodroid_root = '/home/pkriachu/codroid'\n\nremote_lifetime = 259200\npatched_lifetime = 86400\nsubmit_url = \"\"\n\n#lock_file = '/tmp/codroid_cron_lock'\n#sleep_interval = 60\n#lock_interval = 2 * sleep_interval\n\n\n\ndef init() :\n # assign to the module-level settings instead of shadowing them with locals\n global codroid_root, patched_lifetime, remote_lifetime, submit_url\n config = ConfigParser.ConfigParser()\n config.read(codroid_conf)\n\n codroid_root = config.get('global', 'codroid_root')\n patched_lifetime = config.getint('global', 'patched_lifetime')\n\n remote_lifetime = config.getint('remote', 'remote_lifetime')\n submit_url = config.get('remote', 'submit_url')\n\n\n\ndef process_uploads() : \n codroid_db = codroid_root + '/codroid.db'\n conn = sqlite3.connect(codroid_db)\n conn.row_factory = sqlite3.Row\n query = conn.cursor()\n insert = conn.cursor()\n delete = conn.cursor()\n\n # check uploading files\n query.execute('SELECT * FROM uploads')\n for row in query :\n # common process\n # pre-processing: collect required information\n patch_file = row['file_name']\n patch_mode = row['modify_type']\n apk_path = \"%s/uploads/%s\" % (codroid_root, patch_file)\n apk = APK(apk_path)\n\n apk_id = apk.get_package()\n apk_version = \"%s/%s\" % (apk.get_androidversion_code(), apk.get_androidversion_name())\n apk_hash = row['file_hash']\n uploader = row['uploader']\n upload_time = row['upload_time']\n modify_type = patch_mode\n\n now = time.time()\n\n # append apk information to database\n insert.execute(\"INSERT INTO apks(apk_id, apk_version, apk_hash, uploader, type, upload_time, process_time) VALUES (?, ?, ?, ?, ?, ?, datetime(?, 'unixepoch', 'localtime'))\",\n (apk_id, apk_version, apk_hash, uploader, modify_type, upload_time, now ))\n\n file_id = insert.lastrowid\n insert.execute(\"INSERT INTO files VALUES(?, ?)\", (file_id, now+patched_lifetime))\n\n\n # local patching\n if patch_mode == 'local' :\n # patch apk files\n patch_script = \"%s/tools/patch.sh\" % (codroid_root)\n out = subprocess.call([patch_script, patch_file, patch_mode, str(file_id)])\n\n # remote patching\n elif patch_mode == 'remote' :\n # generating authentication code\n auth_code = \"\"\n cur = conn.cursor()\n # generate a unique auth code\n while auth_code == \"\" or cur.fetchone() != None :\n (auth_code, salt) = generate_auth_code(apk_id, uploader, now+remote_lifetime)\n cur.execute(\"SELECT fid FROM remote_auth WHERE authcode='%s'\" % (auth_code))\n # save the auth info into database\n insert.execute(\"INSERT INTO remote_auth VALUES (?, ?, ?, ?)\", (file_id, auth_code, salt, now+remote_lifetime))\n\n # pre-processing (smali): setting the transfer information\n with open(codroid_root + '/coverage/org_template/codroid/utility/NetworkWriterTask.smali', 'r') as source, open(codroid_root + '/coverage/org/codroid/utility/NetworkWriterTask.smali', 'w+') as target :\n content = source.read()\n write_url = \"%s?key=%s\" % (submit_url, auth_code)\n target.write(content.replace('@SUBMIT_URL@', write_url))\n with open(codroid_root + '/coverage/org_template/codroid/utility/Statistics.smali', 'r') as source, open(codroid_root + '/coverage/org/codroid/utility/Statistics.smali', 'w+') as target :\n content = source.read()\n 
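Mid-record aside: the smali-template handling around this point follows a read-template, substitute-placeholder, write-output pattern. A generic sketch of it; the function name and arguments are illustrative, not part of CoDroid:

```python
def render_template(src_path, dst_path, replacements):
    # Read the template once, apply every @PLACEHOLDER@ substitution, write the result.
    with open(src_path, 'r') as src:
        content = src.read()
    for placeholder, value in replacements.items():
        content = content.replace(placeholder, value)
    with open(dst_path, 'w') as dst:
        dst.write(content)

# e.g. render_template(tpl, out, {'@PACKAGE_NAME@': apk_id})
```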
target.write(content.replace('@PACKAGE_NAME@', apk_id))\n\n # patch apk files\n patch_script = \"%s/tools/patch.sh\" % (codroid_root)\n out = subprocess.call([patch_script, patch_file, \"network\", str(file_id)])\n\n # post-processing: generate the dir for records, and copy the metafile to it.\n records_dir = \"%s/records/%s\" % (codroid_root, file_id)\n metafile = \"%s/downloads/%s.meta\" % (codroid_root, file_id)\n os.mkdir(records_dir)\n if os.path.isfile(metafile) :\n shutil.copy(metafile, \"%s/metafile\" % (records_dir))\n\n\n # post_processing: remove file from uploads\n delete.execute(\"DELETE FROM uploads WHERE file_name = '%s'\" % (row['file_name']))\n os.remove(\"%s/uploads/%s\" % (codroid_root, row['file_name']))\n\n conn.commit()\n\n conn.close()\n return\n\n\ndef hash_file(file_path) :\n # read stuff in 64kb chunks\n BUF_SIZE = 65536\n hasher = hashlib.md5()\n with open(file_path, 'rb') as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data :\n break\n hasher.update(data)\n return hasher.hexdigest()\n\n\ndef generate_auth_code(apk_id, uploader, timestamp) :\n hasher = hashlib.md5()\n salt = create_salt()\n hasher.update(\"%s%s%s%s\" % (apk_id, uploader, timestamp , salt))\n return (hasher.hexdigest(), salt)\n\n\ndef create_salt(length = 4) :\n salt = ''\n chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'\n len_chars = len(chars) - 1\n random = Random()\n for i in xrange(length) :\n # choose a random character from chars\n salt += chars[random.randint(0, len_chars)] \n return salt\n\n\n\n\n\ndef is_executed() :\n status = False\n try :\n #sys.stderr.write('* Check CoDroid executeion status...\\n')\n\n # This command will check how much codroid_cron.py is running, include itself.\n out = subprocess.check_output('ps ax | grep codroid_cron.py | grep python | grep -cv grep', shell=True)\n\n #sys.stderr.write('* out=%s\\n' % (out))\n\n if int(out) == 1 :\n status = False\n else :\n status = True\n\n except :\n #sys.stderr.write('* Exception: %s\\n' % (sys.exc_info()[0]))\n status = False\n\n return status\n\n\n\n\n\ndef check_expire() :\n codroid_db = codroid_root + '/codroid.db'\n conn = sqlite3.connect(codroid_db)\n conn.row_factory = sqlite3.Row\n query = conn.cursor()\n insert = conn.cursor()\n delete = conn.cursor()\n\n # check files\n now = time.time()\n query.execute('SELECT * FROM files')\n for row in query :\n file_id = row['id']\n exp_time = row['exp_time']\n\n # if expired\n if exp_time <= now :\n pattern = \"^%d\\..+$\" % file_id\n for file in os.listdir(\"%s/downloads\"%(codroid_root)) :\n if re.match(pattern, file) :\n os.remove(\"%s/downloads/%s\"%(codroid_root, file))\n delete.execute(\"DELETE FROM files WHERE id = '%d'\" % (file_id))\n conn.commit()\n conn.close()\n return\n\n\n\n\n\nif __name__ == \"__main__\" :\n if is_executed() :\n sys.exit(\"* CoDroid is already executed.\")\n\n process_uploads()\n check_expire()\n\n","repo_name":"pkriachu/CoDroid","sub_path":"tools/codroid_cron.py","file_name":"codroid_cron.py","file_ext":"py","file_size_in_byte":7323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9757149816","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom fake_useragent import UserAgent\nimport datetime\n\n\ndef get_html(url):\n if url:\n r = requests.get(url, headers={'User-Agent': UserAgent().chrome})\n html = r.text\n return html\n\n\ndef parse(html):\n data_matches = []\n soup = BeautifulSoup(html, 'html')\n days = soup.find('div', 
class_='esport-match-future-list')\n matches = days.find_all('div', class_='esport-match-single')\n\n for i in range(len(matches)):\n match = matches[i].find('a', class_='team-vs-team')\n team_left = match.find('div', class_='team team-left')\n team_right = match.find('div', class_='team team-right')\n time = match.find('div', class_='time')\n print(time.text)\n # initialise both names so the append below cannot hit an unbound variable\n team_left_bet = None\n team_right_bet = None\n if team_right:\n team_right_bet = team_right.find('span', class_='name')\n if team_right_bet:\n print(team_right_bet.text)\n if team_left:\n team_left_bet = team_left.find('span', class_='name')\n if team_left_bet:\n print(team_left_bet.text)\n # only record the match once both team names were found\n if team_left_bet and team_right_bet:\n data_matches.append(['Дата⏰:', time.text, team_left_bet.text, '🆚', team_right_bet.text])\n print(data_matches)\n return list(data_matches)\n\ndef get_matches():\n url = 'https://dota2.ru/esport/matches/'\n data = parse(get_html(url))\n return data\n","repo_name":"nikitaEgorov19/practice_po_programming-python-","sub_path":"123456-master/123456-master/dota2rerre.py","file_name":"dota2rerre.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23753534631","text":"import re\nimport sys\nfrom classifier import Classifier\n\n'''\nAuthors:\nDaan Krol (s3142221)\nNiels Rocholl (s3501108)\nJulian Bruinsma (s3215601)\n'''\n\ndef main():\n classifier = Classifier()\n print_example_questions()\n for question in sys.stdin:\n module = classifier.classify(question)\n if module is None:\n print(\"Could not classify the question\")\n else:\n answer = module.answer()\n if answer:\n print(\"Answer:\")\n # Remove duplicates from list\n answer = list(set(answer))\n # Remove urls from answers\n answer = [a for a in answer if not a.startswith('http://')]\n for a in answer:\n is_date = re.search(\"(.*)(T00:00:00Z)\", a)\n if is_date:\n print(is_date.group(1))\n else:\n print(a)\n else:\n print(\"Could not answer the question.\")\n\n exit()\n\n\ndef print_example_questions():\n print(\"Printing example questions:\")\n example_questions = [\n 'What is the density of ice?',\n 'What is the chemical formula for dopamine?',\n 'What is the cause of Anthrax?',\n 'What is the boiling point of water?',\n 'What is the atomic number of silver?',\n 'What is the field of work of CERN?',\n 'What is the half-life of uranium-235?',\n 'Who is the inventor of the automobile?',\n 'Who is the mother of Isaac Newton?',\n 'Who are the founders of Nvidia?'\n ]\n test_questions = [\n \"At what speed does a photon move?\",\n \"How big is the Milky Way?\",\n \"How many awards has Albert Einstein received?\",\n \"How many languages did Nikola Tesla speak?\",\n \"Name all crew members of the Apollo 15 mission.\",\n \"Penicillin was discovered by whom?\",\n \"What are the effects of a tsunami?\",\n \"Is HTML a markup language?\",\n \"When was the Doppler effect discovered?\",\n \"Where did Carl Linnaeus study?\"\n ]\n\n for tq in test_questions:\n print(tq)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nielsRocholl/qa-system","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13029623589","text":"import re\nfrom tkinter import *\nfrom tkinter.constants import BOTH, BOTTOM, END, INSERT, LEFT\nfrom tkinter.scrolledtext import ScrolledText\nimport tkinter.messagebox\nimport tkinter.filedialog\nfrom tkinter.ttk import *\nimport logging\nimport sys\n\nfield_pat = re.compile(r'-(.+?)-')\nscope = {}\n\ntop = Tk()\ntop.title('NotePad+')\ncontents = ScrolledText()\ncontents.pack(side=BOTTOM, expand=True, fill=BOTH)\n\n# filenames = tkinter.Entry()\n# filenames.pack(side=LEFT, expand=True, fill=X)\n\nlogging.basicConfig(filename='log.log', level=logging.DEBUG,\n format='%(levelname)s: %(asctime)s %(message)s')\n\n\ndef _re_placement(match):\n code = match.group(1)\n try:\n return str(eval(code, scope))\n except SyntaxError:\n exec(code, scope)\n return ''\n\n\nclass Events:\n def __init__(self):\n self.filename = self.init_path = r'C:\\Users\\Public\\Documents\\un-know.txt'\n\n def load(self):\n try:\n self.filename = tkinter.filedialog.askopenfilename()\n with open(self.filename) as file:\n contents.delete('1.0', END)\n contents.insert(INSERT, file.read())\n logging.info('opened a file,call:\"{}\"'.format(self.filename))\n top.title('NotePad+ : {}'.format(self.filename))\n except FileNotFoundError:\n pass\n\n def change(self):\n try:\n with tkinter.filedialog.asksaveasfile() as f:\n # read the source file in one pass instead of character by character\n with open(self.filename, 'r') as src:\n text = src.read()\n f.write(field_pat.sub(_re_placement, text))\n logging.info('changed a file,call:\"{}\"'.format(self.filename))\n except Exception as e:\n tkinter.messagebox.showerror(title='Error', message='Error: ' + str(e))\n logging.error(e)\n\n def save(self):\n try:\n if self.filename != self.init_path:\n with open(self.filename, 'w') as f:\n f.write(contents.get('1.0', END))\n logging.info('saved a file,call:\"{}\"'.format(self.filename))\n else:\n self.save_as()\n except FileNotFoundError:\n pass\n\n def save_as(self):\n with tkinter.filedialog.asksaveasfile(title='Save As') as f:\n # file objects expose .name, not .filename\n top.title('NotePad+ : {}'.format(f.name))\n f.write(contents.get('1.0', END))\n logging.info('saved as a file,call:\"{}\"'.format(self.filename))\n\n def new(self):\n contents.delete('1.0', END)\n logging.info('create a file')\n top.title('Un know')\n \n def import_file(self):\n self.filename = tkinter.filedialog.askopenfilename()\n self.file = re.findall(r'[A-Z0-9a-z]*\\.py', self.filename)\n self.file = self.file[0]\n sys.path.append(self.filename[0:-len(self.file)])\n exec('from {} import *'.format(self.file[0:-3]), scope)\n\n\nevents = Events()\nlogging.info('open app')\nButton(text='new', command=events.new).pack(side=LEFT)\nButton(text='open', command=events.load).pack(side=LEFT)\nButton(text='change as', command=events.change).pack(side=LEFT)\nButton(text='save', command=events.save).pack(side=LEFT)\nButton(text='save as', command=events.save_as).pack(side=LEFT)\nButton(text='import', command=events.import_file).pack(side=LEFT)\ntkinter.mainloop()\nlogging.info('close app')\n","repo_name":"FleetEmpire/templates","sub_path":"templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25156033297","text":"'''\n\nCREATED: 2022\nLAST EDIT: 24-MAR-2022\nAUTHORS: ZHONGKAI AND DUANE\n\nDESCRIPTION:\nPART OF CELL EXTRACTOR\nUSES FULL RESOLUTION, ALIGNED IMAGES\n\nLOC: /Active_Atlas_Data/data_root/pipeline_data/DK55/preps/CH1/full_aligned\n\nCREATES EXAMPLE FOR SINGLE SECTION\nSTEPS:\neach section, turned into 10 tiles (info in: \"Y:\\Active_Atlas_Data\\cell_segmentation\\DK55\\tile_info.csv\")\n\nsection_id = image_id\nprep_id/{channel}/{image_id}/{tiles}.tif\n\n\"Y:\\Active_Atlas_Data\\cell_segmentation\\DK55\\CH1\\015\\000tile-0.tif\"\n\n'''\n\nfrom 
cell_extractor.ExampleFinder import ExampleFinder\nimport argparse\n\ndef calculate_one_section(animal, section, disk, segmentation_threshold):\n extractor = ExampleFinder(animal=animal, section=section, disk=disk, segmentation_threshold = segmentation_threshold)\n extractor.find_examples()\n extractor.save_examples()\n\n\ndef main():\n animal = 'DK55'\n section = 180\n disk = '/net/birdstore/Active_Atlas_Data/'\n for threshold in [2000, 3000, 4000]:\n calculate_one_section(animal, section, disk=disk, segmentation_threshold=threshold)\n\n\nif __name__ =='__main__':\n # parser = argparse.ArgumentParser()\n # parser.add_argument('--animal', type=str, help='Animal ID')\n # parser.add_argument('--section', type=int, help='Section being processed')\n # parser.add_argument('--disk', type=str, help='storage disk')\n # args = parser.parse_args()\n # animal = args.animal\n # section = args.section\n # disk = args.disk\n\n main()\n\n\n","repo_name":"ActiveBrainAtlas2/preprocessing-pipeline","sub_path":"in_development/Duane/create_example_for_one_section.py","file_name":"create_example_for_one_section.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"8218502021","text":"#!/usr/bin/python3\n\n#Write a program that accepts user input to create a list of integers. Then, compute the sum of all the integers in the list.\n\n#prompt the user to enter numbers\nlist_numbers = input('Please input numbers separated by spaces ')\n\n#out of curiosity let me print the above\nprint(list_numbers)\n\n#use split function to separate numbers using spaces\n\ninteger_list = list_numbers.split()\n\n#print the list\nprint(' You have entered the following numbers : ', integer_list)\n\n #convert the numbers into float type\n\n #first declare variable to store the list\nnumbers = []\n\nfor i in integer_list:\n\n numbers.append(float(i))\n#sum up the numbers\nnumbers.sort()\nprint(numbers)\n\nresult = sum(numbers)\nprint(f'The sum of the list {numbers} sorted is : {result}')\n","repo_name":"nguredavid/learnpython","sub_path":"python-data_structures/code_challenge.py","file_name":"code_challenge.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"75330195922","text":"def leiaDinheiro(msg):\n ok = False\n\n while not ok:\n num = str(input(msg)).replace(',', '.').strip()\n if num.isalpha() or num =='':\n print(f'\\033[0;31mERRO!{num} é um preço inválido.\\033[m')\n else:\n ok = True\n return float(num)\n\n","repo_name":"luiz-educosta/Estudando_Python","sub_path":"estudandopython/Exercicios_curso_em_video/curso_python/modularizacao/utilidadescev/dado/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71447683921","text":"import scripts.library as lib\nimport scripts.definers as define\nimport scripts.find_makefile as finder\nimport re\nimport logging\nimport os\n\n\ndef removeDirectory(folder):\n logging.debug(\n \"-- The folder {} items will be deleted\".format(folder.folder_name))\n identifier, make_path = finder.directory_finder(folder.pwd_folder)\n make_content = lib.readWrite(path=make_path + \"/Makefile\")\n make_list = make_content.splitlines()\n\n lines_rm = []\n logging.info(\"------ REMOVING ------\\n\")\n\n for line in make_list:\n logging.debug(\n \"-- FINDING Makefile Line: {}\t || 
\\tto find -> {}\".format(line, folder.folder_name + '/'))\n if re.search(folder.folder_name + '/', line):\n logging.debug(\"-- Adding {} line from makefile to erase\".format(line))\n lines_rm.append(line)\n\n for rm in lines_rm:\n logging.info(\"-- REMOVING {} from makefile\".format(rm))\n make_list.remove(rm)\n\n make_content = '\\n'.join(make_list)\n lib.readWrite(path=make_path + \"/Makefile\", writing=make_content)\n logging.info(\n \"-- All files from the directory has been removed from the Makefile\")\n\n\ndef removeFilefromMakefile(file):\n makeContent = lib.readWrite(path=file.makefile + \"/Makefile\")\n line_list = makeContent.splitlines()\n\n logging.info(\"------ REMOVING ------\\n\")\n logging.info(\"-- The file name is: {}\\n\".format(file.filename))\n\n for line in line_list:\n logging.debug(\"-- FINDING Makefile Line: {}\\t||\\tto find -> {}\".format(line,\n define.PATHS[file.identifier] + file.filename,))\n if define.PATHS[file.identifier] + file.filename in line:\n if re.match(define.MAKEFILE_VAR[file.identifier] + \"\\t\", line):\n logging.info(\"-- Adding the header because we remove it\")\n line_list.insert(line_list.index(\n line), define.MAKEFILE_VAR[file.identifier] + \"\\t\\\\\")\n line_list.remove(line)\n logging.debug(\"-- {} removed !\\n\".format(line))\n break\n\n makeContent = '\\n'.join(line_list)\n lib.readWrite(path=file.makefile + \"/Makefile\", writing=makeContent)\n logging.info(\"-- Line well removed\\n\\n\")\n\n\nif __name__ == '__main__':\n print(\"This are the functions to remove a file from a Makefile\")\n","repo_name":"ColColty/Makefile_Completer","sub_path":"scripts/remover.py","file_name":"remover.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"37368719375","text":"import datetime as dt\nimport logging\nfrom logging import Logger\n\nfrom airflow.decorators import dag, task\nfrom airflow.models.baseoperator import chain\nfrom airflow.operators.empty import EmptyOperator\nfrom airflow.providers.postgres.hooks.postgres import PostgresHook\nfrom airflow.utils.edgemodifier import Label\nfrom airflow.utils.trigger_rule import TriggerRule\n\n\nfrom tasks.metadata_tasks import (\n metadata_schema_exists,\n create_metadata_schema,\n metadata_table_exists,\n)\n\n\ntask_logger = logging.getLogger(\"airflow.task\")\n\nPOSTGRES_CONN_ID = \"dwh_db_conn\"\n\n\n@task(trigger_rule=TriggerRule.NONE_FAILED_MIN_ONE_SUCCESS)\ndef create_metadata_table(conn_id: str, task_logger: Logger):\n try:\n task_logger.info(f\"Creating table metadata.census_metadata\")\n postgres_hook = PostgresHook(postgres_conn_id=conn_id)\n conn = postgres_hook.get_conn()\n cur = conn.cursor()\n cur.execute(\n f\"\"\"CREATE TABLE IF NOT EXISTS metadata.census_metadata (\n id SERIAL PRIMARY KEY,\n metadata_url TEXT NOT NULL,\n last_modified TIMESTAMP NOT NULL,\n size TEXT,\n description TEXT,\n is_dir BOOLEAN,\n is_file BOOLEAN,\n time_of_check TIMESTAMP WITH TIME ZONE NOT NULL,\n updated_metadata_available BOOLEAN DEFAULT NULL\n );\"\"\"\n )\n conn.commit()\n return \"success\"\n except Exception as e:\n print(f\"Failed to create metadata table metadata.census_metadata. 
Error: {e}, {type(e)}\")\n\n\n@dag(\n schedule=None,\n start_date=dt.datetime(2022, 11, 1),\n catchup=False,\n tags=[\"metadata\"],\n)\ndef create_census_ftp_metadata_table():\n\n metadata_schema_exists_branch_1 = metadata_schema_exists(\n conn_id=POSTGRES_CONN_ID, task_logger=task_logger\n )\n create_metadata_schema_1 = create_metadata_schema(\n conn_id=POSTGRES_CONN_ID, task_logger=task_logger\n )\n metadata_table_exists_1 = metadata_table_exists(\n table_name=\"census_metadata\",\n conn_id=POSTGRES_CONN_ID,\n task_logger=task_logger,\n create_route=\"create_metadata_table\",\n exists_route=\"end\",\n )\n create_metadata_table_1 = create_metadata_table(\n conn_id=POSTGRES_CONN_ID, task_logger=task_logger\n )\n end_1 = EmptyOperator(task_id=\"end\", trigger_rule=TriggerRule.NONE_FAILED_MIN_ONE_SUCCESS)\n\n chain(\n metadata_schema_exists_branch_1,\n [create_metadata_schema_1, metadata_table_exists_1],\n )\n chain(\n metadata_table_exists_1,\n [create_metadata_table_1, Label(\"Census metadata table exists\")],\n end_1,\n )\n chain(create_metadata_schema_1, create_metadata_table_1, end_1)\n\n\ncreate_census_ftp_metadata_table()\n","repo_name":"MattTriano/analytics_data_where_house","sub_path":"airflow/dags/metadata/create_census_ftp_metadata_table.py","file_name":"create_census_ftp_metadata_table.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"22188589311","text":"#The wait() function would return a named tuple which contains two set –\n# one set contains the futures which completed (either got result or exception)\n# and the other set containing the ones which didn’t complete.\n\n#We can control the behavior of the wait function by defining when it should return.\n# We can pass one of these values to the return_when param of the function:\n# FIRST_COMPLETED,\n# FIRST_EXCEPTION\n# and ALL_COMPLETED.\n# By default, it’s set to ALL_COMPLETED,\n# so the wait function returns only when all futures complete\n#But using that parameter, we can choose to return when\n# the first future completes or first exception encounters.\n\nfrom concurrent.futures import ThreadPoolExecutor, wait,FIRST_COMPLETED,ALL_COMPLETED\nfrom time import sleep\nfrom random import randint\n\ndef return_after_5_secs(num):\n sleep(randint(1, 5))\n return \"Return of {}\".format(num)\n\npool = ThreadPoolExecutor(5)\nfutures = []\nfor x in range(5):\n futures.append(pool.submit(return_after_5_secs, x))\n\n\n#print(wait(futures))\nprint(wait(futures ,return_when=FIRST_COMPLETED))\nprint(wait(futures ,return_when=ALL_COMPLETED))","repo_name":"iuyt9003/pythonexamples","sub_path":"examples/threads_examples/threadpool_executer_wait.py","file_name":"threadpool_executer_wait.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"8622521263","text":"\nimport os\nimport sys\n\nclass Utils:\n def __init__(self, config):\n self.config = config\n\n def get_targets(self, targets):\n ls = list()\n if 'symlink_bam' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/inbam/{sample}/{sample}.bam')\n ls.append(f'analysis/inbam/{sample}/{sample}.bai')\n if 'run_qualimap_bamqc' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/qualimap/{sample}/qualimapReport.html')\n if 'run_qualimap_multibamqc' in targets:\n ls.append(f'analysis/qualimap/multi/input_data.txt')\n 
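# multi-sample QualiMap: the sample sheet above is paired with the aggregated HTML report below\n 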
ls.append(f'analysis/qualimap/multi/multisampleBamQcReport.html')\n if 'bedtools_coverage' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/bedtools/{sample}/coverage.txt')\n if 'bedtools_coverage_bed' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/bedtools/{sample}/coverage.bed.txt')\n if 'bedtools_genomecov' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/bedtools/{sample}/genomecov.txt')\n if 'samtools_depth_genome' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/samtools/{sample}/depth.genome.txt')\n if 'samtools_depth_target' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/samtools/{sample}/depth.target.txt')\n if 'covtobed' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/covtobed/{sample}/covered.10x.bed')\n if 'covtobed_allsamples' in targets:\n ls.append(f'analysis/covtobed/allsamples/covered.10x.bed')\n if 'bedtools_merge' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/covtobed/{sample}/covered.10x.merge.bed')\n if 'bedtools_intersect' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/covtobed/{sample}/covered.10x.target.bed')\n if 'bedtools_intersect_co' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/covtobed/{sample}/covered.10x.target.bed')\n if 'cnvkit_batch' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/cnvkit/{sample}/{sample}.cnn')\n ls.append(f'analysis/cnvkit/{sample}/{sample}.cnr')\n ls.append(f'analysis/cnvkit/{sample}/{sample}.cns')\n if 'cnvkit_segment' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/cnvkit/{sample}/{sample}.cns')\n if 'cnvkit_call' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/cnvkit/{sample}/{sample}.call.cns')\n if 'cnvkit_call_bed' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/cnvkit/{sample}/{sample}.call.bed')\n if 'cnvkit_call_anno' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/cnvkit/{sample}/{sample}.call.anno.bed')\n if 'cnvkit_gainloss' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/cnvkit/{sample}/{sample}.gene.gainloss.txt')\n if 'cnvkit_scatter' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/cnvkit/{sample}/{sample}-scatter.pdf')\n ls.append(f'analysis/cnvkit/{sample}/{sample}-scatter.png')\n if 'cnvkit_diagram' in targets:\n for sample in self.config['ordered_samples']:\n ls.append(f'analysis/cnvkit/{sample}/{sample}-diagram.pdf')\n ls.append(f'analysis/cnvkit/{sample}/{sample}-diagram.png')\n if 'parse_cnvkit' in targets:\n ls.append(f'analysis/cnvkit/compbed/ref.ipsc57LBvsRef.bed')\n ls.append(f'analysis/cnvkit/compbed/comp.ipsc57LBvsRef.bed')\n if 'cnvkit_heatmap' in targets:\n ls.append(f'analysis/cnvkit/heatmap.png')\n return ls\n\n\n\nclass MakeConfig:\n def __init__(self, id_dic, dirPath_bam, workdir):\n self.id_dic = id_dic\n self.init_config_dic()\n self.addpath_bam(dirPath_bam)\n self.workdir = workdir\n #\n self.write_config('config.yaml')\n\n\n def init_config_dic(self):\n self.config_dic = dict()\n for tbi_id, cst_id in self.id_dic.items():\n self.config_dic.setdefault('samples', {}).setdefault(cst_id, {}).setdefault('tbi_id', tbi_id)\n self.config_dic.setdefault('samples', {}).setdefault(cst_id, 
{}).setdefault('bam', None)\n self.config_dic.setdefault('samples', {}).setdefault(cst_id, {}).setdefault('bai', None)\n\n def addpath_bam(self, _path):\n for tbi_id, cst_id in self.id_dic.items():\n #bam = os.path.join(_path, tbi_id, f'{tbi_id}.dedup.bam')\n #bai = os.path.join(_path, tbi_id, f'{tbi_id}.dedup.bai')\n bam = os.path.join(_path, tbi_id, f'{tbi_id}.merge.bam')\n bai = os.path.join(_path, tbi_id, f'{tbi_id}.merge.bam.bai')\n if os.path.isfile(bam) and os.path.isfile(bai):\n self.config_dic['samples'][cst_id]['bam'] = os.path.abspath(bam)\n self.config_dic['samples'][cst_id]['bai'] = os.path.abspath(bai)\n else:\n print('Could not found : ', bam)\n print('or')\n print('Could not found : ', bai)\n sys.exit()\n #\n\n def write_config(self, outfn):\n outfh = open(outfn, 'w')\n outfh.write('\\n')\n outfh.write('workdir: {0}\\n'.format(self.workdir))\n outfh.write('target_bed: /BiO/BioResources/References/Human/hg19/targetkit/SureSelect_Human_All_Exon_V5.bed\\n')\n outfh.write('genome_fasta: /BiO/BioResources/References/Human/hg19/hg19.fa\\n')\n outfh.write('access_bed: /BiO/BioTools/cnvkit/data/access-5k-mappable.hg19.bed\\n')\n outfh.write('ref_flat: /BiO/BioPeople/baekip/BioResource/hg19/refFlat.txt\\n')\n outfh.write('ipsc57_bed: bin/ipsc_57_LBvsRef_Region.bed\\n')\n outfh.write('\\n')\n outfh.write('ordered_samples:\\n')\n for sample_id, info_dic in sorted(self.config_dic['samples'].items()):\n outfh.write(' - {0}\\n'.format(sample_id))\n outfh.write('\\n')\n outfh.write('samples:\\n')\n for sample_id, info_dic in self.config_dic['samples'].items():\n outfh.write(' {0}:\\n'.format(sample_id))\n outfh.write(' tbi_id: {0}\\n'.format(info_dic['tbi_id']))\n outfh.write(' bam: {0}\\n'.format(info_dic['bam']))\n outfh.write(' bai: {0}\\n'.format(info_dic['bai']))\n outfh.close()\n\n\n\n\ndef main():\n id_dic = {\"TN1808L0024-10\":\"H9ESP_44-10\",\n \"TN1808L0024-1\":\"H9ESP_44-1\",\n \"TN1808L0024-2\":\"H9ESP_44-2\",\n \"TN1808L0024-3\":\"H9ESP_44-3\",\n \"TN1808L0024-4\":\"H9ESP_44-4\",\n \"TN1808L0024-5\":\"H9ESP_44-5\",\n \"TN1808L0024-6\":\"H9ESP_44-6\",\n \"TN1808L0024-7\":\"H9ESP_44-7\",\n \"TN1808L0024-8\":\"H9ESP_44-8\",\n \"TN1808L0024-9\":\"H9ESP_44-9\",\n \"TN1808L0025-10\":\"H9ESP_75-10\",\n \"TN1808L0025-1\":\"H9ESP_75-1\",\n \"TN1808L0025-2\":\"H9ESP_75-2\",\n \"TN1808L0025-3\":\"H9ESP_75-3\",\n \"TN1808L0025-4\":\"H9ESP_75-4\",\n \"TN1808L0025-5\":\"H9ESP_75-5\",\n \"TN1808L0025-6\":\"H9ESP_75-6\",\n \"TN1808L0025-7\":\"H9ESP_75-7\",\n \"TN1808L0025-8\":\"H9ESP_75-8\",\n \"TN1808L0025-9\":\"H9ESP_75-9\",\n \"TN1808L0030-10\":\"HPS0076_48-10\",\n \"TN1808L0030-1\":\"HPS0076_48-1\",\n \"TN1808L0030-2\":\"HPS0076_48-2\",\n \"TN1808L0030-3\":\"HPS0076_48-3\",\n \"TN1808L0030-4\":\"HPS0076_48-4\",\n \"TN1808L0030-5\":\"HPS0076_48-5\",\n \"TN1808L0030-6\":\"HPS0076_48-6\",\n \"TN1808L0030-7\":\"HPS0076_48-7\",\n \"TN1808L0030-8\":\"HPS0076_48-8\",\n \"TN1808L0030-9\":\"HPS0076_48-9\",\n \"TN1808L0031-10\":\"HPS0076_68-10\",\n \"TN1808L0031-1\":\"HPS0076_68-1\",\n \"TN1808L0031-2\":\"HPS0076_68-2\",\n \"TN1808L0031-3\":\"HPS0076_68-3\",\n \"TN1808L0031-4\":\"HPS0076_68-4\",\n \"TN1808L0031-5\":\"HPS0076_68-5\",\n \"TN1808L0031-6\":\"HPS0076_68-6\",\n \"TN1808L0031-7\":\"HPS0076_68-7\",\n \"TN1808L0031-8\":\"HPS0076_68-8\",\n \"TN1808L0031-9\":\"HPS0076_68-9\"}\n #dirPath_bam = \"/BiO/BioPeople/baekip/BioProjects/StemCell_Project/MFDS-Human-C1-2018-08-TBD171103/result/07-1_picard_dedup\"\n dirPath_bam = 
\"/BiO/BioPeople/baekip/BioProjects/StemCell_Project/MFDS-Human-C1-2018-08-TBD171103/result/06-1_picard_merge\"\n workdir = \"/BiO/BioPeople/baekip/BioProjects/StemCell_Project/MFDS-Human-C1-2018-08-TBD171103/202012301118_cnvkit\"\n #\n config_obj = MakeConfig(id_dic, dirPath_bam, workdir)\n\n\n\n\nif __name__=='__main__':\n main()\n\n\n","repo_name":"seung1yoo/cnvkit_pipeline","sub_path":"bin/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":9334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34843713139","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAuthor - Ramiro Gutierrez Alaniz\nCompany - RestCont\nArea - IT; B-E Develpment\nDate - Sunday, January 31, 2016\n\"\"\"\n# Imports\nfrom django.conf.urls import patterns, url\nfrom .views import *\n\n# General Url patterns result_detail\nurlpatterns = patterns(\n \n 'events.views',\n # Event index\n url( r'^$', event_index, name='views.event.index' ),\n # Events\n url( r'^(?P[0-9]+)$', event_detail, name='views.event.detail' ),\n # Competitions\n url( r'^competition/(?P[0-9]+)$', competiton_detail, name=\"views.competition.detail\" ),\n # Results index\n url( r'^results/$', result_list, name=\"views.event.results\" ),\n # Get search results\n url( r'^rd/search/(?P[0-9]+)$', search_results, name=\"views.event.results.search\" ),\n # Result detail search_results\n url( r'^rd/(?P[0-9]+)$', result_detail, name=\"views.event.result.detail\" ),\n \n)# End of general sytem url patterns","repo_name":"Actime/events","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15044414754","text":"def funk ():\r\n fach = input(\"Das Fach: \")\r\n offen = open (fach, \"r\")\r\n liste1 = (offen.readlines())\r\n listenoten = []\r\n nummernoten = len(liste1)\r\n gewichtsum = 0\r\n for i in range(nummernoten):\r\n es = liste1[i]\r\n aknot = float(es[4:-1])\r\n zs = aknot*int(es[0:3])/100\r\n print(\"Deine \"+ str(i+1) +\". 
Note: \"+ es[4:-1] + \"; Gewichtung der Notein %: \" +es[0:3])\r\n listenoten.append(zs)\r\n gewichtsum = gewichtsum + int(es[0:3])\r\n i = i + 1\r\n\r\n summenoten = 0\r\n for ii in listenoten:\r\n summenoten = summenoten + float(ii)\r\n durchschnitt = summenoten/(gewichtsum/100)\r\n\r\n print(\"Dein Notendurchschnitt in \"+ fach +\": \" + str(durchschnitt))\r\n\r\n","repo_name":"Enricone27/Random-Projekte","sub_path":"Python/Notenberechner/AllesAnzeigen.py","file_name":"AllesAnzeigen.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"10046085401","text":"import numpy as np\nimport pytest\n\nfrom piecewise.environment import EnvironmentStepTypes, make_discrete_mux_env\nfrom piecewise.environment.supervised.multiplexer.multiplexer_util import \\\n calc_total_bits\nfrom piecewise.error.environment_error import OutOfDataError\n\n\nclass TestClassificationEnvironmentViaDiscreteMultiplexer:\n _DUMMY_ACTION = 0\n\n def _setup_short_epoch(self):\n num_address_bits = 1\n total_bits = calc_total_bits(num_address_bits)\n num_data_points = 2**total_bits\n mux = make_discrete_mux_env(num_address_bits=num_address_bits,\n shuffle_dataset=False)\n return mux, num_data_points\n\n def test_step_type(self):\n mux = make_discrete_mux_env()\n assert mux.step_type == EnvironmentStepTypes.single_step\n\n def test_observe_order_no_shuffle(self):\n mux = make_discrete_mux_env(num_address_bits=1, shuffle_dataset=False)\n expected_obs_seq_iter = \\\n iter([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],\n [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])\n while not mux.is_terminal():\n obs = mux.observe()\n assert np.array_equal(obs, next(expected_obs_seq_iter))\n mux.act(self._DUMMY_ACTION)\n\n def test_act_all_correct(self):\n mux = make_discrete_mux_env(num_address_bits=1, shuffle_dataset=False)\n correct_actions_iter = iter([0, 0, 1, 1, 0, 1, 0, 1])\n\n while not mux.is_terminal():\n response = mux.act(next(correct_actions_iter))\n assert response.was_correct_action\n\n def test_act_all_incorrect(self):\n mux = make_discrete_mux_env(num_address_bits=1, shuffle_dataset=False)\n incorrect_actions_iter = iter([1, 1, 0, 0, 1, 0, 1, 0])\n\n while not mux.is_terminal():\n response = mux.act(next(incorrect_actions_iter))\n assert not response.was_correct_action\n\n def test_act_changes_next_obs(self):\n mux, num_data_points = self._setup_short_epoch()\n last_obs = None\n\n for _ in range(num_data_points):\n obs = mux.observe()\n if last_obs is not None:\n assert not np.array_equal(last_obs, obs)\n mux.act(self._DUMMY_ACTION)\n last_obs = obs\n\n def test_same_obs_on_repeated_observe(self):\n mux, num_data_points = self._setup_short_epoch()\n last_obs = None\n\n for _ in range(num_data_points):\n obs = mux.observe()\n if last_obs is not None:\n assert np.array_equal(last_obs, obs)\n last_obs = obs\n\n def test_is_terminal_act_only_epoch(self):\n mux, num_data_points = self._setup_short_epoch()\n for _ in range(num_data_points):\n mux.act(self._DUMMY_ACTION)\n assert mux.is_terminal()\n\n def test_is_not_terminal_observe_only_epoch(self):\n mux, num_data_points = self._setup_short_epoch()\n for _ in range(num_data_points):\n mux.observe()\n assert not mux.is_terminal()\n\n def test_is_terminal_act_and_observe_epoch(self):\n mux, num_data_points = self._setup_short_epoch()\n for _ in range(num_data_points):\n mux.observe()\n mux.act(self._DUMMY_ACTION)\n assert mux.is_terminal()\n\n def test_out_of_data_on_extra_act(self):\n 
mux, num_data_points = self._setup_short_epoch()\n for _ in range(num_data_points):\n mux.observe()\n mux.act(self._DUMMY_ACTION)\n with pytest.raises(OutOfDataError):\n mux.act(self._DUMMY_ACTION)\n\n def test_out_of_data_on_extra_observe(self):\n mux, num_data_points = self._setup_short_epoch()\n for _ in range(num_data_points):\n mux.observe()\n mux.act(self._DUMMY_ACTION)\n with pytest.raises(OutOfDataError):\n mux.observe()\n\n def test_reset_with_two_epochs_no_shuffle(self):\n mux = make_discrete_mux_env(num_address_bits=1,\n shuffle_dataset=False)\n\n first_epoch_obs_seq = []\n first_epoch_reward_seq = []\n while not mux.is_terminal():\n first_epoch_obs_seq.append(mux.observe())\n response = mux.act(self._DUMMY_ACTION)\n first_epoch_reward_seq.append(response.reward)\n\n mux.reset()\n second_epoch_obs_seq = []\n second_epoch_reward_seq = []\n while not mux.is_terminal():\n second_epoch_obs_seq.append(mux.observe())\n response = mux.act(self._DUMMY_ACTION)\n second_epoch_reward_seq.append(response.reward)\n\n assert np.array_equal(first_epoch_obs_seq, second_epoch_obs_seq)\n assert np.array_equal(first_epoch_reward_seq, second_epoch_reward_seq)\n","repo_name":"jtbish/piecewise","sub_path":"piecewise/test/unit/test_classification_environment.py","file_name":"test_classification_environment.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"74195254482","text":"from helpers import transcribe_until_stop, get_protein_codon_dict\n\n\ndef run(rna_string=\"AUGGCCAUGGCGCCCAGAACUGAGAUCAAUAGUACCCGUAUUAACGGGUGA\"):\n proteins = transcribe_until_stop(rna_string)\n proteins_nostop = [protein for protein in proteins if protein != \"Stop\"]\n d= get_protein_codon_dict()\n\n for protein in proteins_nostop:\n print(\"Codon: {0}, Aminoacid: {1}\".format(d[protein], protein))\n result = \"\".join(proteins_nostop)\n print(result)\n return result\n\n\nrun()","repo_name":"denizcetiner/rosalindpractice","sub_path":"PROT.py","file_name":"PROT.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17324173726","text":"from asyncore import dispatcher\nfrom email.message import Message\n## Echo Bot\nimport telepot\nfrom telepot.loop import MessageLoop\n\nwith open(\"./data/token.txt\", \"r\") as f:\n TOKEN = f.read()\n\ndef handle(msg):\n content_type, chat_type, chat_id, msg_date, msg_id = telepot.glance(msg, long=True)\n print(msg)\n print('-' * 36)\n\n if content_type == 'text':\n bot.sendMessage(chat_id, '(Echo)' + msg['text'])\n\nbot = telepot.Bot(TOKEN)\n# bot.message_loop(handle, run_forever=True)\nMessageLoop(bot, handle).run_forever()\n\nprint(\"Listening...\")\n\nimport time\nwhile True:\n time.sleep(10)\n\n\n","repo_name":"jiheon788/chat-bot","sub_path":"echo_bot.py","file_name":"echo_bot.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25388624257","text":"from aiogram.types import ReplyKeyboardMarkup, KeyboardButton\n\nfrom models import Filter\nfrom keyboards.skip_button import skip_button\n\nfilter_keyboard = ReplyKeyboardMarkup(\n keyboard=[\n [\n KeyboardButton(filter.value) for filter in Filter\n ],\n [\n skip_button\n ]\n 
]\n)\n","repo_name":"KolesniCow/resizer-bot","sub_path":"keyboards/filter_keyboard.py","file_name":"filter_keyboard.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35175339049","text":"#File: hexes.py\n#Name: John J.\n#Description: This program draws 8 hexes \nfrom turtle import * \n\ndef initialize():\n\t\"\"\"\"This function intitalizes the turtle by putting the turtles pen up\n\tand turning it to the left\"\"\"\n\tup()\n\tleft(90)\n\ndef drawHexagon():\n\t\"\"\"This is the function that is responsible for drawing the hexagon\n\tit moves the turtle to the correct position to begin the hexagon and\n\tthen draws all six sides\"\"\"\n\tforward(43.3)\n\tright (90)\n\tdown()\n\tforward (25)\n\tleft(60)\n\tfor i in range(5):\n\t\tforward(50)\n\t\tleft(60)\n\tforward(25)\n\tright(90)\n\tup()\n\tforward(43.3)\n\tleft(180)\n\ndef rotate():\n\t\"\"\"This function rotates the turtle so that it is in the correct \n\tposition to begin to draw the next hexagon\"\"\"\n\tleft(60)\n\ndef main():\n\tinput(\"Hit Enter to start program\")\n\t\"\"\"The main function is responsible for putting all the individual\n\tfunctions together and also tells the computer to execute the \n\tdrawHexagon command and rotate command six times\"\"\"\n\tinitialize()\n\tfor i in range(6):\n\t\tdrawHexagon()\n\t\trotate()\n\tinput(\"Hit Enter to finish the program\")\n\t\nmain()\n","repo_name":"judgejohn17/Computer-Science-I","sub_path":"homework/Hexes.py","file_name":"Hexes.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5522648770","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_absolute_error\n\ndef predict_storage(csv_data):\n # Load CSV data into a DataFrame\n df = pd.read_csv(csv_data)\n\n # Perform data preprocessing and feature engineering here\n\n # Split the data into features (X) and target (y)\n X = df.drop(columns=['Storage']) # Replace 'Storage' with your target column name\n y = df['Storage']\n\n # Split the data into training and testing sets\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n # Initialize and train a linear regression model\n model = LinearRegression()\n model.fit(X_train, y_train)\n\n # Make predictions on the test set\n predictions = model.predict(X_test)\n\n # Calculate evaluation metric (MAE for example)\n mae = mean_absolute_error(y_test, predictions)\n\n return {'predictions': predictions.tolist(), 'mae': mae}\n\n# Example usage:\nif __name__ == '__main__':\n # Replace 'data.csv' with the path to your CSV file\n result = predict_storage('/home/youssef/Desktop/stage_ete/A2SV/A2SV_RetailAI/web_app/app/test.csv')\n print(result)\n","repo_name":"youssefboutaleb/A2SV_RetailAI","sub_path":"web_app/app/predict_storage.py","file_name":"predict_storage.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"6564092947","text":"#!/usr/bin/python2\nimport os\nimport sys\nimport time\nimport errno\nfrom dulwich.repo import Repo\nfrom dulwich.diff_tree import tree_changes\nimport shutil\nimport subprocess\n\nSOURCE_FOLDER = \"source\"\nSOURCE_REPO = \"git@github.com:BlindMindStudios/StarRuler2.git\"\nDEST_FOLDER = 
\"dest\"\nDEST_REPO = \"git@github.com:BlindMindStudios/SR2Beta.git\"\nLOG_FOLDER = \"log\"\n\nCURRENT_COMMIT = \"\"\nSource = None\nDest = None\n\nSOURCE_DIRS = [\"source/game/\", \"source/as_addons/\",\n \"source/os/\", \"source/sound/\", \"source/util/\",\n \"source/network/\"]\nSOURCE_LINUX = SOURCE_DIRS + [\"source/linux/\"]\nSOURCE_WINDOWS = SOURCE_DIRS + [\n \"source/msvc10/\", \"source/angelscript/\",\n \"source/glfw/\", \"source/libpng/\", \"source/freetype2/\",\n \"source/linux/\"]\n\nclass BuildStep(object):\n @classmethod\n def neededBy(cls, path):\n return False\n\n def execute(self):\n pass\n\n def finished(self):\n return True\n\n def finalize(self):\n pass\n\nclass BuildGame32(BuildStep):\n @classmethod\n def neededBy(cls, path):\n for d in SOURCE_LINUX:\n if path.startswith(d):\n return True\n return False\n\n def execute(self):\n #Build 32-bit version\n self.clean32 = subprocess.Popen(\n [\"ARCH=32 make -f source/linux/Makefile clean_code\"],\n shell=True, cwd=SOURCE_FOLDER)\n self.clean32.wait()\n\n self.build32 = subprocess.Popen(\n [\"ARCH=32 make -f source/linux/Makefile -j3 version compile\"],\n shell=True, cwd=SOURCE_FOLDER,\n stdout=open(os.path.join(LOG_FOLDER, \"build32_\"+CURRENT_COMMIT+\".log\"), \"w\"),\n stderr=subprocess.STDOUT)\n\n def finished(self):\n if self.build32.poll() == None:\n return False\n return True\n\n def finalize(self):\n if self.build32.returncode == 0:\n copy_stage_folder(\"bin/lin32\")\n\nclass BuildGame64(BuildStep):\n @classmethod\n def neededBy(cls, path):\n for d in SOURCE_LINUX:\n if path.startswith(d):\n return True\n return False\n\n def execute(self):\n self.clean64 = subprocess.Popen(\n [\"ARCH=64 make -f source/linux/Makefile clean_code\"],\n shell=True, cwd=SOURCE_FOLDER)\n self.clean64.wait()\n\n self.build64 = subprocess.Popen(\n [\"ARCH=64 make -f source/linux/Makefile -j3 version compile\"],\n shell=True, cwd=SOURCE_FOLDER,\n stdout=open(os.path.join(LOG_FOLDER, \"build64_\"+CURRENT_COMMIT+\".log\"), \"w\"),\n stderr=subprocess.STDOUT)\n\n def finished(self):\n if self.build64.poll() == None:\n return False\n return True\n\n def finalize(self):\n if self.build64.returncode == 0:\n copy_stage_folder(\"bin/lin64\")\n\nclass BuildAS32(BuildStep):\n @classmethod\n def neededBy(cls, path):\n if path.startswith(\"source/angelscript/\"):\n return True\n return False\n\n def execute(self):\n self.build32 = subprocess.Popen(\n [\"ARCH=32 make -f source/linux/Makefile -j3 angelscript\"],\n shell=True, cwd=SOURCE_FOLDER,\n stdout=open(os.path.join(LOG_FOLDER, \"as32_\"+CURRENT_COMMIT+\".log\"), \"w\"),\n stderr=subprocess.STDOUT)\n self.build32.wait();\n\nclass BuildAS64(BuildStep):\n @classmethod\n def neededBy(cls, path):\n if path.startswith(\"source/angelscript/\"):\n return True\n return False\n\n def execute(self):\n self.build64 = subprocess.Popen(\n [\"ARCH=64 make -f source/linux/Makefile -j3 angelscript\"],\n shell=True, cwd=SOURCE_FOLDER,\n stdout=open(os.path.join(LOG_FOLDER, \"as64_\"+CURRENT_COMMIT+\".log\"), \"w\"),\n stderr=subprocess.STDOUT)\n self.build64.wait();\n\nclass BuildWindows(BuildStep):\n def __init__(self):\n self.running = False\n\n @classmethod\n def neededBy(cls, path):\n for d in SOURCE_WINDOWS:\n if path.startswith(d):\n return True\n return False\n\n def execute(self):\n pass\n\n def finished(self):\n if not self.running:\n #Create marker file for windows build\n with open(os.path.join(SOURCE_FOLDER, \"WIN_COMPILE\"), \"w\") as f:\n f.write(\"BUILD\");\n self.running = True\n return False\n\n return 
not os.path.exists(os.path.join(SOURCE_FOLDER, \"WIN_COMPILE\"))\n\n def finalize(self):\n #Copy over symbols\n self.syncsymbols = subprocess.Popen(\n [\"rsync -tavz symbols/ bms@glacicle.org:starruler2.com/symbols/\"],\n shell=True)\n\n #Stage all the files needed\n if os.path.exists(os.path.join(DEST_FOLDER, \"Star Ruler 2.exe\")):\n Dest.stage(\"Star Ruler 2.exe\")\n\n for f in os.listdir(os.path.join(DEST_FOLDER, \"bin/win32\")):\n if f.endswith(\".exe\") or f.endswith(\".dll\"):\n Dest.stage(\"bin/win32/\"+f)\n\n for f in os.listdir(os.path.join(DEST_FOLDER, \"bin/win64\")):\n if f.endswith(\".exe\") or f.endswith(\".dll\"):\n Dest.stage(\"bin/win64/\"+f)\n\n #Rename the log to indicate the commit\n if os.path.exists(os.path.join(LOG_FOLDER, \"msvc32_build.log\")):\n os.rename(os.path.join(LOG_FOLDER, \"msvc32_build.log\"),\n os.path.join(LOG_FOLDER, \"msvc32_\"+CURRENT_COMMIT+\".log\"))\n\n if os.path.exists(os.path.join(LOG_FOLDER, \"msvc64_build.log\")):\n os.rename(os.path.join(LOG_FOLDER, \"msvc64_build.log\"),\n os.path.join(LOG_FOLDER, \"msvc64_\"+CURRENT_COMMIT+\".log\"))\n\n\nBUILD_STEPS = [BuildAS32, BuildAS64, BuildGame32, BuildGame64, BuildWindows]\n\ndef publish_file(path):\n if path.startswith(\"source/\"):\n return False\n return True\n\ndef copy(path):\n folder = os.path.dirname(path)\n\n #Make the target folder\n try:\n os.makedirs(os.path.join(DEST_FOLDER, folder))\n except OSError as exc:\n pass\n\n #Copy the file\n shutil.copy2(os.path.join(SOURCE_FOLDER, path),\n os.path.join(DEST_FOLDER, path))\n\ndef copy_stage_folder(path):\n folder = os.path.join(SOURCE_FOLDER, path)\n for root, folders, files in os.walk(folder):\n for f in files:\n fpath = os.path.join(root[len(SOURCE_FOLDER)+1:], f)\n copy(fpath)\n Dest.stage(fpath)\n\ndef setup():\n #Clone the repositories first\n #Note: This assumes source and dest are in a consistent state,\n #which is something that needs to be guaranteed manually\n if not os.path.exists(SOURCE_FOLDER):\n p = subprocess.Popen([\"git\", \"clone\", SOURCE_REPO, SOURCE_FOLDER])\n p.wait()\n if not os.path.exists(DEST_FOLDER):\n p = subprocess.Popen([\"git\", \"clone\", DEST_REPO, DEST_FOLDER])\n p.wait()\n if not os.path.exists(LOG_FOLDER):\n os.mkdir(LOG_FOLDER)\n\n #Prepare accessors\n global Source\n Source = Repo(SOURCE_FOLDER)\n global Dest\n Dest = Repo(DEST_FOLDER)\n\ndef updateRepo(folder):\n p = subprocess.Popen([\"git\", \"fetch\", \"origin\", \"master\"], cwd=folder)\n p.wait()\n\ndef pushRepo(folder):\n p = subprocess.Popen([\"git\", \"push\", \"origin\", \"master\"], cwd=folder)\n p.wait()\n\ndef listCommits(repo, front, back, l):\n #Find all parent commits\n if not isinstance(back, set):\n stack = [repo.commit(back)]\n back = set()\n\n while stack:\n c = stack.pop()\n if c.id not in back:\n back.add(c.id)\n for parent in c._get_parents():\n stack.append(repo.commit(parent))\n\n #Make sure it isn't already done\n if front in back:\n return\n\n #Check this commit\n commit = repo.commit(front)\n l.append(commit)\n\n for parent in commit._get_parents():\n listCommits(repo, parent, back, l)\n\ndef switchTo(folder, commit):\n p = subprocess.Popen([\"git\", \"reset\", \"--hard\", commit], cwd=folder)\n p.wait()\n\ndef main():\n setup()\n\n while True:\n #Try to update the repo\n updateRepo(SOURCE_FOLDER)\n HEAD = Source.head();\n FETCH_HEAD = Source.ref(\"FETCH_HEAD\")\n\n #Check if there are any updates\n if HEAD == FETCH_HEAD:\n time.sleep(60)\n continue\n\n #Build the tree of commits to build\n commits = []\n listCommits(Source, 
FETCH_HEAD, HEAD, commits)\n commits.reverse()\n base_tree = Source.commit(HEAD).tree\n\n for c in commits:\n global CURRENT_COMMIT\n CURRENT_COMMIT = c.id\n switchTo(SOURCE_FOLDER, c.id)\n print(\"Executing commit \"+c.id)\n\n #Iterate through the difference tree\n diff = tree_changes(Source, base_tree, c.tree)\n steps = [False for x in BUILD_STEPS]\n for d in diff:\n #Handle deletes\n if d.type == 'delete':\n fname = d.old.path\n if publish_file(fname):\n os.remove(os.path.join(DEST_FOLDER, fname))\n Dest.stage(fname)\n continue\n\n #Copy over all the files\n fname = d.new.path\n if not fname:\n continue\n if publish_file(fname):\n copy(fname)\n Dest.stage(fname)\n for i, st in enumerate(BUILD_STEPS):\n if st.neededBy(fname):\n steps[i] = True\n\n #Execute all build steps\n cursteps = []\n for i, needed in enumerate(steps):\n if not needed:\n continue\n step = BUILD_STEPS[i]()\n step.execute()\n while not step.finished():\n time.sleep(1)\n cursteps.append(step)\n\n #Finalize all build steps\n for step in cursteps:\n step.finalize()\n\n #Set up the commit in the new repo\n Dest.do_commit(\n message=c.message+\"\\n\\nOriginal Commit: BlindMindStudios/StarRuler2@\"+c.id,\n committer=c.committer,\n author=c.author,\n commit_timestamp=c._commit_time,\n commit_timezone=c._commit_timezone,\n author_timestamp=c._author_time,\n author_timezone=c._author_timezone)\n\n #Set up for next\n base_tree = c.tree\n\n #Push this individual commit\n pushRepo(DEST_FOLDER)\n\nif __name__ == '__main__':\n main()\n# vim: ff=unix sw=4 et:\n","repo_name":"BlindMindStudios/StarRuler2-Source","sub_path":"source/ci/sr2-ci.py","file_name":"sr2-ci.py","file_ext":"py","file_size_in_byte":10584,"program_lang":"python","lang":"en","doc_type":"code","stars":1426,"dataset":"github-code","pt":"31"} +{"seq_id":"1419153216","text":"\"\"\"\nDeclare global variables here.\n\"\"\"\n\nimport argparse\nimport yaml\nimport torch\nfrom dataset import FoodDataset, get_paths_labels\nfrom model import Classifier\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--config', default='param.yaml', type=str)\n\nargs = parser.parse_args()\n\nwith open(args.config) as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n\n\nargs = argparse.Namespace(**config) # Export this variable to be used\n # in other files\n\nmodel = Classifier().cuda() # Export this variable to be used\n # in other files\nckpt = torch.load(args.checkpoint)\nmodel.load_state_dict(ckpt['model_state_dict'])\n\ntrain_paths, train_labels = get_paths_labels(args.dataset_dir)\ntrain_set = FoodDataset(train_paths, train_labels, mode='eval')\n\nimg_indices = [i for i in range(10)]\nimages, labels = train_set.get_batch(img_indices) # Export this variable to be used\n # in other files\n\ndef initialize():\n return args, model, img_indices, images, labels\n","repo_name":"EdwardLeeMacau/ntu_ml","sub_path":"machine_learning_spring_2023/hw09_explainable/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"3356655390","text":"N = int(input())\ncards = list(map(int, input().split()))\no_sum = [0]\ne_sum = [0]\nfor i in range(0,N,2):\n o_sum.append(o_sum[-1] + cards[i])\n e_sum.append(e_sum[-1] + cards[i+1])\nres = [o_sum[-1]]\nfor i in range(N//2):\n res.append(o_sum[i]+e_sum[-1]-e_sum[i])\n res.append(o_sum[i+1]+e_sum[-2]-e_sum[i])\nprint(max(res))","repo_name":"SSAFY-algamza/ssafy-algorithm-study","sub_path":"f1rstf1y9/BOJ/BOJ_20159_동작 그만, 밑장 
빼기냐.py","file_name":"BOJ_20159_동작 그만, 밑장 빼기냐.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15579845835","text":"from __future__ import annotations\nimport utils\nimport torch\nfrom torchvision import transforms\nfrom typing import Tuple, Dict\nfrom CheckPoint import CheckPoint\nfrom Config import Config\nfrom EarlyStopping import EarlyStopping\nfrom Plotting import Plotting\nfrom Model import CustomEfficientNet\n\nTRAIN_ACC = 'train_acc'\nTRAIN_COST = 'train_cost'\nTEST_ACC = 'test_acc'\nTEST_COST = 'test_cost'\nRED = '\\033[91m'\nGREEN = '\\033[92m'\nWHITE = '\\033[97m'\nBLACK = '\\033[39m'\n\nclass Controller:\n \"\"\"`Controller` instance act like an agent which controll all the training-loop related process \n for example run the training and testing, provide the api to user for epoch mode or early stopping mode, \n provide api to user for using adaption or fine-tunning phase in transfer learning and all internal related proccess in training loop\n \n Parameters\n ----------\n \n model : torch.nn.Module\n model used in for training-loop\n train_loader : torch.utils.data.DataLoader\n dataloader of training datasets used in training loop\n test_loader : torch.utils.data.DataLoader\n dataloder of testing datasets used in training loop\n criterion : torch.nn.Module\n loss function used in training loop\n optimizer : torch.optim.Optimizer\n optimzer used in training loop\n check_point : CheckPoint\n Checkpoint instance for storing some value produce in training loop\n config : Config\n Config instance used to get some configurable values used in training loop or in initiation\n early_stopping : EarlyStopping\n EarlyStopping instance used to provide early stopping mode\n plotting : Plotting\n Plotting instance used to provide plotting proccess\n device : torch.device\n device available to locate dataloader (default: torch.device('cpu'))\n epochs : int\n max epochs used in epochs mode or early stopping mode if needed\n \n Attributes\n ----------\n model, train_loader, test_loader, criterion, optimizer, cnf (config), cp (checkpoint)\n es (early_stopping), plt (plotting), max_epochs (epochs), device\n \n epoch : int\n used as epcohs counter (default: 1)\n ckpt : dict\n used to store value like accuary and cost of training step and testing step while running training loop\n result : list\n arra used to store all accyary and cost value of training step and testing step while running training loop\n \n Example\n -------\n # importing all library needed\n from Controller import Controller\n \n # initialize all object needed\n \n ctr = Controller(model=model,\n test_loader=test_loader,\n train_loader=train_loader,\n criterion=criterion,\n optimizer=optimizer,\n writer=writer,\n epochs=3,\n check_point=cp,\n early_stopping=ea,\n plotting=plotting,\n config=cnf,\n device=torch.device('cpu'))\n\n \"\"\"\n def __init__(self,\n model: torch.nn.Module,\n train_loader: torch.utils.data.DataLoader,\n test_loader: torch.utils.data.DataLoader,\n criterion: torch.nn.Module,\n optimizer: torch.optim.Optimizer,\n check_point: CheckPoint,\n config: Config,\n early_stopping: EarlyStopping,\n plotting: Plotting,\n device: torch.device = torch.device('cpu'),\n epochs: int = 30\n ) -> None:\n\n self.model = model\n self.train_loader = train_loader\n self.test_loader = test_loader\n self.criterion = criterion\n self.optimizer = optimizer\n self.cnf = config\n self.cp = check_point\n self.es = early_stopping\n 
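# plotting helper used for the runtime and final accuracy/cost charts\n 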
self.plt = plotting\n self.epoch = 1\n self.max_epochs = epochs\n self.device = device\n self._run_fn = self._run_with_epoch\n self.ckpt = {\n TRAIN_ACC: 0,\n TRAIN_COST: 0,\n TEST_ACC: 0,\n TEST_COST: 0\n }\n self.result = {\n TRAIN_ACC: [],\n TRAIN_COST: [],\n TEST_ACC: [],\n TEST_COST: []\n }\n\n def _reset_ckpt(self) -> None:\n \"\"\"reset a value of ckpt attributes to its default value (zero value to all key)\"\"\"\n self.ckpt = {\n TRAIN_ACC: 0,\n TRAIN_COST: 0,\n TEST_ACC: 0,\n TEST_COST: 0\n }\n\n def _reset_result(self) -> None:\n \"\"\"reset a value of result attributes to its default value (empty list value to all key)\"\"\"\n self.result = {\n TRAIN_ACC: [],\n TRAIN_COST: [],\n TEST_ACC: [],\n TEST_COST: []\n }\n\n def _loop_fn(self, mode: str = 'train') -> Tuple[float, float]:\n \"\"\"run test step or training step based on mode given in argument and return a tuple of accuary, cost value of its mode step\n \n Parameters\n ----------\n \n mode : str\n value which decide what step to run per batch wheterh is test step or training step \n \"\"\"\n\n dataloader = self.train_loader\n if mode == 'test':\n dataloader = self.test_loader\n \n loss = acc = cost = y_pred_class = 0\n for _, (X, y) in enumerate(dataloader):\n # Send data to target device\n X, y = X.to(self.device), y.to(self.device)\n \n if mode == 'train':\n self.optimizer.zero_grad()\n \n # 1. Forward pass\n y_pred: torch.Tensor = self.model(X)\n \n # 2. Calculate and accumulate loss\n loss = self.criterion(y_pred, y)\n if mode == 'train':\n loss.backward()\n\n # 3 backpropogation\n if mode == 'train':\n self.optimizer.step()\n \n # accumulate the accuracy and loss item\n y_pred_class = y_pred.argmax(dim=1)\n acc += torch.sum(y_pred_class == y).item() \n cost += loss.item()\n\n return acc, cost\n\n def _add_ckpt_to_result(self):\n \"\"\"append every ckpt value in dict to every result key\"\"\"\n self.result[TRAIN_ACC].append(self.ckpt[TRAIN_ACC])\n self.result[TRAIN_COST].append(self.ckpt[TRAIN_COST])\n self.result[TEST_ACC].append(self.ckpt[TEST_ACC])\n self.result[TEST_COST].append(self.ckpt[TEST_COST])\n\n def _inc_epochs(self) -> None:\n \"\"\"increment epoch by 1 after all batch are trained (or equal 1 training loop)\"\"\"\n self.epoch += 1\n\n def _run_with_early_stopping(self, ckpt: CheckPoint, plt: Plotting, es: EarlyStopping, cnf: Config):\n \"\"\"run training loop in early stopping mode\n \n Parameters\n ----------\n \n ckpt : CheckPoint\n CheckPoint instance used to log the model and save the weights of model to .pth file,\n also used to save the best weight of model while traning loop process\n plt : Plotting\n Plotting instance used to plot the accuary and cost of the model either for every certain epoch or after the training loop end\n es : EarlyStopping\n EarlyStopping instance used to provide logic of early stopping mode related\n cnf : Config\n Config instance used to provide cofigured value in json config file\"\"\"\n\n prev_acc_cost = {}\n while True:\n self._train_step()\n self._test_step()\n ckpt.log(train_acc=self.ckpt[TRAIN_ACC],\n train_cost=self.ckpt[TRAIN_COST],\n test_acc=self.ckpt[TEST_ACC],\n test_cost=self.ckpt[TEST_COST])\n\n # DONE (checkpoint)\n plt.add_epoch_to_tick(self.epoch)\n ckpt.save_weights(cnf, self.epoch)\n # DONE (early stopping)\n # DONE (plotting)\n # plt.plot_acc_runtime(self.epoch, ckpt=ckpt)\n # plt.plot_cost_runtime(self.epoch, ckpt=ckpt)\n # self._report_per_epoch()\n self._write_report(self.epoch, prev_acc_cost)\n prev_acc_cost = self._set_acc_cost(prev_acc_cost)\n\n if 
es.is_stop(self.model, ckpt=ckpt, counter=self.epoch):\n                self._run_fn = self._run_with_epoch\n                plt.plot_acc(ckpt=ckpt)\n                plt.plot_cost(ckpt=ckpt)\n                break\n\n            \n            self._add_ckpt_to_result()\n            self._reset_ckpt()\n            self._inc_epochs()\n    \n    def _coloring_report(self, prev, key) -> str:\n        \"\"\"Color the report line: red if the value fell below the previous epoch, green if it rose, white if unchanged.\n        this function needs to be refactored\"\"\"\n        line = ''\n        if prev[key] < self.ckpt[key]:\n            line += (\" | \" + f\"{key}: {GREEN}{self.ckpt[key]:.4f} ⬆ {BLACK}\")\n            return line\n        elif prev[key] > self.ckpt[key]:\n            line += (\" | \" + f\"{key}: {RED}{self.ckpt[key]:.4f} ⬇ {BLACK}\")\n            return line\n        else:\n            line += (\" | \" + f\"{key}: {self.ckpt[key]:.4f} \")\n            return line\n    \n    def _make_report_line(self, report : Dict, epoch: int) -> str:\n        # line = f'\\rEpoch: {self.epoch}/{self.max_epochs} '\n        line = f'Epoch: {self.epoch}/{self.es.max_counter}'\n        \n        if epoch == 1 or not bool(report): # first epoch, or the report dict is still empty\n            line += (\" | \" + f\"{TEST_ACC}: {self.ckpt[TEST_ACC]:.4f} \")\n            line += (\" | \" + f\"{TEST_COST}: {self.ckpt[TEST_COST]:.4f} \")\n            line += (\" | \" + f\"{TRAIN_ACC}: {self.ckpt[TRAIN_ACC]:.4f} \")\n            line += (\" | \" + f\"{TRAIN_COST}: {self.ckpt[TRAIN_COST]:.4f} \")\n        else:\n            line += self._coloring_report(report, TEST_ACC)\n            line += self._coloring_report(report, TEST_COST)\n            line += self._coloring_report(report, TRAIN_ACC)\n            line += self._coloring_report(report, TRAIN_COST)\n        return line\n    \n    def _set_acc_cost(self, report: Dict) -> Dict:\n        report[TEST_ACC] = self.ckpt[TEST_ACC]\n        report[TEST_COST] = self.ckpt[TEST_COST]\n        report[TRAIN_ACC] = self.ckpt[TRAIN_ACC]\n        report[TRAIN_COST] = self.ckpt[TRAIN_COST]\n        return report\n    \n    \n    def _write_report(self, epoch: int, report) -> None:\n        line = self._make_report_line(report=report, epoch=epoch)\n        print(line, end='\\n')\n    \n    \n    \n    def _run_with_epoch(self, plt: Plotting, ckpt: CheckPoint, mode : str = 'train_acc'):\n        \"\"\"provide the training-loop logic for epoch mode\n        \n        Parameters\n        ----------\n        plt : Plotting\n            Plotting instance used to plot the accuracy and cost of the model either for every certain epoch or after the training loop ends\n        ckpt : CheckPoint\n            CheckPoint instance used to log the model and save the weights of the model to a .pth file,\n            also used to save the best weights of the model during the training loop\n        \n        \"\"\"\n        prev_acc_cost = {}\n        for _ in range(self.max_epochs):\n            self._train_step()\n            self._test_step()\n            ckpt.log(test_acc=self.ckpt[TEST_ACC],\n                     test_cost=self.ckpt[TEST_COST],\n                     train_acc=self.ckpt[TRAIN_ACC],\n                     train_cost=self.ckpt[TRAIN_COST])\n            # print(f\"\\rEpoch: {self.epoch}/{self.max_epochs} test_acc: {self.ckpt[TEST_ACC]:.4f} | test_cost: {self.ckpt[TEST_COST]:.4f} | train_acc: {self.ckpt[TRAIN_ACC]:.4f} | train_cost: {self.ckpt[TRAIN_COST]:.4f}\", end=' ')\n            # prev = self._report_per_epoch(prev=prev)\n            \n            # self._report_result()\n            self._write_report(self.epoch, prev_acc_cost)\n            prev_acc_cost = self._set_acc_cost(prev_acc_cost)\n            plt.add_epoch_to_tick(self.epoch)\n            self._add_ckpt_to_result()\n            \n            if self._is_improve(mode=mode, ckpt=ckpt):\n                ckpt.weights = self.model.state_dict().copy()\n                torch.save(ckpt.weights, self.cnf.MODEL_OUTPUT_DIR / \"best_weights.pth\")\n            \n            \n            self._inc_epochs()\n        \n        plt.plot_acc(ckpt=ckpt)\n        plt.plot_cost(ckpt=ckpt)\n    \n    def _is_improve(self, ckpt: CheckPoint, mode : str = 'test_acc') -> bool:\n        \"\"\"Return True if the model improved, else False.\n        Improved here means the monitored value beat the best seen so far (higher for accuracy, lower for cost)\n        \n        Parameters\n        ----------\n        \n        ckpt : CheckPoint\n            checkpoint instance which holds the reference accuracy/cost and the current max accuracy or min cost\n        mode : str\n            metric monitored to decide whether the model improved or not\n            for example with mode `test_acc` only the test accuracy is monitored when deciding if the model improved\n        \n        Example\n        -------\n        \n        self._is_improve(ckpt) # assume ckpt is valid argument\"\"\"\n        if mode == 'test_acc':\n            ref = ckpt.test_acc[-1]\n            improve = ref > ckpt.best_acc\n        elif mode == 'test_cost':\n            ref = ckpt.test_cost[-1]\n            improve = ref < ckpt.best_cost\n        elif mode == 'train_acc':\n            ref = ckpt.train_acc[-1]\n            improve = ref > ckpt.best_acc\n        elif mode == 'train_cost':\n            ref = ckpt.train_cost[-1]\n            improve = ref < ckpt.best_cost\n        else:\n            raise Exception(f\"can't support mode: {mode}, modes available are {'test_acc', 'test_cost', 'train_acc', 'train_cost'}\") \n        \n        if improve:\n            if mode.endswith('_acc'):\n                ckpt.best_acc = ref\n            elif mode.endswith('_cost'):\n                ckpt.best_cost = ref\n            return True\n        return False\n\n    def _train_step(self) -> None:\n        \"\"\"Provide logic for the training step; as a side effect it writes the accuracy and cost into the ckpt attribute\"\"\"\n        self.model.train()\n        acc, cost = self._loop_fn(mode='train')\n        self.ckpt[TRAIN_ACC] = acc / len(self.train_loader.dataset)\n        self.ckpt[TRAIN_COST] = cost / len(self.train_loader.dataset)\n\n    def _test_step(self) -> None:\n        \"\"\"Provide logic for the testing step; as a side effect it writes the accuracy and cost into the ckpt attribute\"\"\"\n        self.model.eval()\n        with torch.inference_mode():\n            acc, cost = self._loop_fn(mode='test')\n        self.ckpt[TEST_ACC] = acc / len(self.test_loader.dataset)\n        self.ckpt[TEST_COST] = cost / len(self.test_loader.dataset)\n    \n\n    def adaptation(self) -> Controller:\n        \"\"\"Provide API for the adaptation phase by freezing the extractor layers of the model\"\"\"\n        self.model.freeze()\n        return self\n\n    def fine_tunning(self) -> Controller:\n        \"\"\"Provide API for the fine-tuning phase by unfreezing the extractor layers of the model\"\"\"\n        self.model.unfreeze()\n        return self\n    \n    def with_epoch(self)-> Controller:\n        \"\"\"Provide API to set the controller to run in epoch mode\"\"\"\n        self._run_fn = lambda : self._run_with_epoch(plt=self.plt, ckpt=self.cp)\n        return self\n\n    def with_early_stopping(self) -> Controller:\n        \"\"\"Provide API to set the controller to run in early stopping mode\"\"\"\n        self._run_fn = lambda: self._run_with_early_stopping(\n            ckpt=self.cp, plt=self.plt, es=self.es, cnf=self.cnf)\n        return self\n\n    def run(self):\n        \"\"\"this function will run the whole training loop either by epochs or early stopping (another mode, like adaptation or fine-tunning, can be selected beforehand)\n        \n        Parameter\n        ---------\n        -\n        \n        Example\n        -------\n        ctrl = Controller(*args) # assume this is valid for sake of simplicity\n        \n        # chaining these calls runs the whole training loop\n        ctrl.adaptation().with_early_stopping().run()\n        \"\"\"\n        self._reset_result()\n        self._reset_ckpt()\n        self._run_fn()\n        self.epoch = 1\n    \n    @staticmethod\n    def new_from_config(cnf : Config, augmentation_func : transforms.Compose = None) -> Controller:\n        \"\"\"create a `Controller` instance using a json config file\n        \n        Parameters\n        ----------\n        \n        cnf : Config\n            Config instance which holds all configured values\n        augmentation_func : torchvision.transforms.Compose\n            transformation function used to augment the datasets\n        \n        Example\n        -------\n        from 
Controller import Controller\n        \n        Controller.new_from_config(cnf, augment_transform)  # cnf is a Config instance\"\"\"\n        device = utils.get_device()\n        transform_func = utils.get_EfficientNet_transform()\n        trainloader, testloader, _ = utils.create_data_loader(\n            train_path=cnf.TRAIN_PATH, test_path=cnf.TEST_PATH,\n            transform_func=transform_func, batch_size=cnf.BATCH_SIZE,\n            augmentation_func=augmentation_func) \n        model = CustomEfficientNet(num_of_class=cnf.NUM_OF_CLASS, drop_out=cnf.DROP_OUT, device=device)\n        es = EarlyStopping(cnf=cnf, max_counter=cnf.ES_EPOCHS, load_best_when_stop=cnf.LOAD_BEST, patience=cnf.PATIENCE)\n        cp = CheckPoint()\n        plt = Plotting(plot_every=cnf.PLOT_EVERY)\n        \n        return Controller(\n            model=model,\n            test_loader=testloader,\n            train_loader=trainloader,\n            optimizer=utils.get_optimizer(name=cnf.OPTIMIZER_NAME, lr=cnf.LEARNING_RATE, weight_decay=cnf.WEIGHT_DECAY),\n            criterion=torch.nn.CrossEntropyLoss(),\n            early_stopping=es,\n            check_point=cp,\n            plotting=plt,\n            epochs=cnf.EPOCHS,\n            device=device\n        )\n","repo_name":"mfajri11/EfficientNet-AksaraSunda","sub_path":"src/Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":17964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24052869619","text":"#!/usr/bin/python3\nimport sys\nimport sqlite3\nfrom sqlite3 import Error\nimport re\nimport urllib.request\nimport PyPDF2\n\n\n\n#fetches the data of the daily activity normanpd page, uses regex to find all the pdf links to the daily incident Summary, \n#downloads all the daily activity pdfs, and returns the list of pdf links\ndef fetchincidents():\n\t#use urllib to access the data of the daily activity of the norman pd website\n\twith urllib.request.urlopen('http://normanpd.normanok.gov/content/daily-activity') as websiteData:\n\t\t#read the website and set it to pdfData\n\t\tpdfData = websiteData.read().decode('utf-8')\n\t\t#use regex to find all strings in pdfData of the format\n\t\t#(four digits)-(two digits)-(two digits)%20Daily%20Incident%20Summary.pdf\n\t\tmatch = re.findall(r'\\d{4}\\-\\d{2}\\-\\d{2}%20Daily%20Incident%20Summary\\.pdf',str(pdfData))\n\t\t#for loop with the length of the number of strings found from the regex\n\t\tfor index in range(len(match)):\n\t\t\t#create complete string of the download link of the pdf file\n\t\t\tpdfurl = 'http://normanpd.normanok.gov/filebrowser_download/657/%s' % (match[index])\n\t\t\t#open and download the pdf\n\t\t\twith urllib.request.urlopen(pdfurl) as fetchPDF:\n\t\t\t\t#create new file with the name of the iterator\n\t\t\t\tfile = open(match[index], 'wb')\n\t\t\t\t#write to the new file with the PDF data\n\t\t\t\tfile.write(fetchPDF.read())\n\t\t\t\t#close the file\n\t\t\t\tfile.close()\n\t\t\t#end with\n\t\t#end for loop\n\t\t#returns the list of pdf names\n\t\treturn match\t\n\t#end with\n#end fetchincident\n\t\n#reads the pdf files, extracts the data, formats it, and puts it into an array of size 5 that will be appended to an array\n#that is returned\n#input matches which is an array of strings that contain the links to the pdfs downloaded\ndef extractincidents(matches):\n\t#create empty array rawIncidents\n\trawIncidents = []\n\t#for loop that iterates all the strings in matches\n\tfor match in matches:\n\t\t#open the pdf to read\n\t\tpdfPath = open(match, 'rb')\n\t\t#use the PyPDF2 PdfFileReader to read the pdf data\n\t\tpdf = PyPDF2.PdfFileReader( pdfPath )\n\t\t#find the number of pages in pdf\n\t\tpages = pdf.getNumPages()\n\t\t#for loop that runs for the number of 
pages in the pdf\n\t\tfor pageNum in range(pages):\n\t\t\t#set page to the current page\n\t\t\tpage = pdf.getPage(pageNum)\n\t\t\t#find all and extract the date_time in the pdf (2 digits)/(2 digits)/(4 digits) (2 digits):(2 digits)\n\t\t\tdates = re.findall((r'\\d{1,2}\\/\\d{1,2}\\/\\d{4} \\d{1,2}\\:\\d{2}'),page.extractText())\n\t\t\t#split by the regex date_time \n\t\t\tsans_dates = re.split((r'\\d{1,2}\\/\\d{1,2}\\/\\d{4} \\d{1,2}\\:\\d{2}'),page.extractText())\n\t\t\t#create empty array tempIncidents\n\t\t\ttempIncidents = []\n\t\t\t#for loop that runs for the number of date_times found\n\t\t\tfor i in range(len(dates)):\n\t\t\t\t#appends the two strings together\n\t\t\t\tstring = \"%s %s\" % (dates[i],sans_dates[i+1])\n\t\t\t\t#appends the new string to tempIncidents\n\t\t\t\ttempIncidents.append(string)\n\t\t\t#for loop iterates all elements in tempIncidents\n\t\t\tfor i in tempIncidents:\n\t\t\t\t#splits the iterator by \\n and appends that array to rawIncidents\n\t\t\t\trawIncidents.append(i.split('\\n'))\n\t#creates array incidents\n\tincidents = []\n\t#for loop iterates through rawIncidents\n\tfor incident in rawIncidents:\n\t\t#find length of current iterator\n\t\tincidentLength = len(incident)\n\t\t#if the length is 2 then skip to next iterator\n\t\tif incidentLength == 2:\n\t\t\tcontinue\n\t\t#if length is 4, put the first 2 elements in array details, then None and None, then the 3rd element\n\t\telif incidentLength == 4:\n\t\t\tdetails = [incident[0], incident[1], None, None, incident[2]]\n\t\t#if length is 6 or 8, then put the first 5 elements in the array details\n\t\telif incidentLength == 6 or incidentLength == 8:\n\t\t\tdetails = [incident[0],incident[1],incident[2],incident[3],incident[4]]\n\t\t#if length is 7 or 9, then append the 3rd and 4th strings, then put the 1st, 2nd, new string,\n\t\t#5th and 6th elements in the array details\n\t\telif incidentLength == 7 or incidentLength == 9:\n\t\t\tstring = \"%s %s\" % (incident[2],incident[3])\n\t\t\tdetails = [incident[0],incident[1], string, incident[4], incident[5]]\n\t\t#skip records with an unexpected number of fields so details is never stale\n\t\telse:\n\t\t\tcontinue\n\t\t#append array details to incidents\n\t\tincidents.append(details)\t\n\t#return incidents\n\treturn incidents\n#creates the normanpd.db database and the table incidents\ndef createdb():\n\t#create connection to normanpd.db\n\tconn = sqlite3.connect('normanpd.db')\n\t#creates cursor\n\tc = conn.cursor()\n\n\t#write create command in string\n\tcreate = \"CREATE TABLE incidents (id INTEGER, number TEXT, date_time TEXT, location TEXT, nature TEXT, ORI TEXT);\"\n\t\n\t#execute the create command\n\tc.execute(create)\n\t#commit and close the connection\n\tconn.commit()\n\tconn.close()\n\n#populates the table with the pdf data array\ndef populatedb(incidents):\n\t#create connection and cursor\n\tconn = sqlite3.connect('normanpd.db')\n\tc = conn.cursor()\n\t#idnum set to 0\n\tidnum = 0\n\t#for loop iterates incidents\n\tfor incident in incidents:\n\t\t#set incidentNumber to 2nd element\n\t\tincidentNumber = incident[1]\n\t\t#set date_time to 1st element\n\t\tdate_time = incident[0]\n\t\t#set location to the 3rd element\n\t\tlocation = incident[2]\n\t\t#set nature to the 4th element\n\t\tnature = incident[3]\n\t\t#set ORI to 5th element\n\t\tORI = incident[4]\n\t\t#arrange the data to the format in order of table data\n\t\tinsertdata = (idnum, incidentNumber, date_time, location, nature, ORI)\n\t\t#insert data into the table\n\t\tc.execute('INSERT INTO incidents VALUES (?,?,?,?,?,?)', insertdata)\n\t\t#increment idnum\n\t\tidnum += 1\n\t\t#commit the connection\n\t\tconn.commit()\n\t#end for\n\t#close 
connection\n\tconn.close()\n\n#stdout the rowcount of incidents\n#and stdout 5 random rows\ndef status():\n\t#establish connection and cursor\n\tconn = sqlite3.connect('normanpd.db')\n\tc = conn.cursor()\n\n\t#write string for count command\n\tcount = \"SELECT count(*) from incidents\"\n\t#use forloop to iterate the command\n\tfor row in c.execute(count):\n\t\t#set rowcount to the string of the iterate\n\t\trowcount = str(row)\n\t\n\t#stdout row count, remove the first character and last two character\n\tsys.stdout.write(rowcount[1:-2] + '\\n')\n\n\t#write string of random command\n\trandom = \"SELECT * FROM incidents ORDER BY RANDOM() LIMIT 5\"\n\t#for loop iterare execute command\n\tfor row in c.execute(random):\n\t\t#write rows\n\t\tsys.stdout.write(str(row) + '\\n')\n\t#commits connection\n\tconn.commit()\n\t#closes connection\n\tconn.close()\n\n\n","repo_name":"zackwwhite/normanpd","sub_path":"normanpd/normanpd/norman.py","file_name":"norman.py","file_ext":"py","file_size_in_byte":6167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30221601995","text":"from django.core.management.base import BaseCommand\nfrom backend.utils.database import Database\n\n\nmonitors = [\n dict(\n modelNumber='VE208T',\n name='Widescreen LED Monitor',\n quantity=5,\n weight=11.4,\n weightFormat='kg',\n price=139.99,\n priceFormat='CAD',\n brandName='ASUS',\n type='monitor',\n size=20,\n sizeFormat='inch'\n ),\n dict(\n modelNumber='VP239H',\n name='Wall Mountable IPS Frame-less Monitor',\n quantity=5,\n weight=12.6,\n weightFormat='lbs',\n price=189.99,\n priceFormat='CAD',\n brandName='ASUS',\n type='monitor',\n size=23,\n sizeFormat='inch'\n ),\n dict(\n modelNumber='34UM58',\n name='IPS Widescreen LED Monitor',\n quantity=5,\n weight=13.5,\n weightFormat='lbs',\n price=429.00,\n priceFormat='CAD',\n brandName='LG',\n type='monitor',\n size=25,\n sizeFormat='inch'\n ),\n dict(\n modelNumber='U2415',\n name='Ultrasharp LED Monitor',\n quantity=5,\n weight=18,\n weightFormat='lbs',\n price=419.99,\n priceFormat='CAD',\n brandName='Dell',\n type='monitor',\n size=24,\n sizeFormat='inch'\n ),\n dict(\n modelNumber='MB168B',\n name='Portable USB-powered monitor',\n quantity=5,\n weight=10.6,\n weightFormat='lbs',\n price=199.00,\n priceFormat='CAD',\n brandName='Dell',\n type='monitor',\n size=13,\n sizeFormat='inch'\n ),\n dict(\n modelNumber='23MP48HQ',\n name='IPS Widescreen LED Monitor',\n quantity=5,\n weight=11.5,\n weightFormat='lbs',\n price=159.99,\n priceFormat='CAD',\n brandName='LG',\n type='monitor',\n size=23,\n sizeFormat='inch'\n ),\n dict(\n modelNumber='Z35P',\n name='Ultra-Wide Curved Monitor',\n quantity=5,\n weight=17.6,\n weightFormat='lbs',\n price=1299.00,\n priceFormat='CAD',\n brandName='Acer',\n type='monitor',\n size=35,\n sizeFormat='inch'\n ),\n dict(\n modelNumber='LH55DMEPLGA',\n name='Direct-Lit LED Digital Signage Display',\n quantity=5,\n weight=33.9,\n weightFormat='lbs',\n price=2399.00,\n priceFormat='CAD',\n brandName='Samsung',\n type='monitor',\n size=55,\n sizeFormat='inch'\n ),\n dict(\n modelNumber='C24F390F',\n name='Curved VA FreeSync Widescreen LED Monitor',\n quantity=5,\n weight=10.3,\n weightFormat='lbs',\n price=279.99,\n priceFormat='CAD',\n brandName='Samsung',\n type='monitor',\n size=23.5,\n sizeFormat='inch'\n ),\n dict(\n modelNumber='E2060SWD',\n name='Curved VA FreeSync Widescreen LED Monitor',\n quantity=5,\n weight=8.3,\n weightFormat='lbs',\n price=109.99,\n priceFormat='CAD',\n 
brandName='NEC',\n type='monitor',\n size=23.5,\n sizeFormat='inch'\n )\n]\n\nmonitorDisplayIDs = [\n dict(\n serialNum='11111',\n modelNum='VE208T',\n isLocked=0\n ),\n dict(\n serialNum='22222',\n modelNum='VE208T',\n isLocked=0\n ),\n dict(\n serialNum='33333',\n modelNum='VE208T',\n isLocked=0\n ),\n dict(\n serialNum='44444',\n modelNum='VE208T',\n isLocked=0\n ),\n dict(\n serialNum='55555',\n modelNum='VE208T',\n isLocked=0\n ),\n dict(\n serialNum='66666',\n modelNum='VE208T',\n isLocked=0\n ),\n dict(\n serialNum='77777',\n modelNum='VE208T',\n isLocked=0\n ),\n dict(\n serialNum='88888',\n modelNum='VE208T',\n isLocked=0\n ),\n dict(\n serialNum='99999',\n modelNum='VE208T',\n isLocked=0\n ) \n]\n\n\nclass Command(BaseCommand):\n help = 'Populate monitor display table'\n\n def handle(self, *args, **options):\n\n with Database() as cursor:\n\n for monitor in monitors:\n query = \"\"\"\n INSERT INTO monitorDisplay (modelNumber, quantity, name, weight, weightFormat, price, priceFormat,\n brandName, type, size, sizeFormat)\n VALUES ('{modelNumber}', {quantity}, '{name}', {weight}, '{weightFormat}', {price}, '{priceFormat}',\n '{brandName}', '{type}', {size}, '{sizeFormat}');\n \"\"\".format(**monitor)\n\n try:\n cursor.execute(query)\n except Exception as error:\n print(error)\n\n for monitorDisplayID in monitorDisplayIDs:\n query = \"\"\"\n INSERT INTO monitorDisplayID (serialNum, modelNum, isLocked)\n VALUES ('{serialNum}', '{modelNum}', {isLocked});\n \"\"\".format(**monitorDisplayID)\n\n try:\n cursor.execute(query)\n except Exception as error:\n print(error)\n","repo_name":"ModestosV/DarthVendor","sub_path":"backend/apps/v1/inventory/management/commands/populate_monitor_display_table.py","file_name":"populate_monitor_display_table.py","file_ext":"py","file_size_in_byte":5259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71581367448","text":"from Transform import Transform\n\nfrom OpenGL.GL import *\n\nfrom custom_logging import LOG\n\nimport numpy as np\n\nimport Material\n\nfrom Programs import Programs\n\nclass Mesh:\n def __init__(self, vertices: np.ndarray, faces: np.ndarray, normals: np.ndarray, uvs: np.ndarray):\n self.vertices = vertices\n self.faces = faces\n\n self.normals = normals\n self.uvs = uvs\n\n self.vbo = glGenBuffers(1) #vertex positions\n self.ibo = glGenBuffers(1) #indices\n\n self.nbo = glGenBuffers(1) #normals\n self.tbo = glGenBuffers(1) #texture coordinates\n\n self.tanbo = glGenBuffers(1) #tangents\n self.bitbo = glGenBuffers(1) #bitangents\n\n self.transform = Transform()\n\n self.bounds = self.getBoundingBox()\n\n self.material = None\n\n self.name = \"unnamed\"\n\n LOG(f\"Bounds (including scale): {self.bounds[0] * self.transform.scale} - {self.bounds[1] * self.transform.scale}\")\n\n self.isIcon = False\n\n self.scripts = []\n\n # normal map\n self.tangents = np.zeros(self.vertices.shape, dtype=\"f\")\n self.bitangents = np.zeros(self.vertices.shape, dtype=\"f\")\n\n self.initialize()\n self.calculateNormalMap()\n\n def calculateNormalMap(self):\n \"\"\"\n Calculates the normal map of the mesh\n \"\"\"\n for f in range(self.faces.shape[0]):\n v0 = self.vertices[self.faces[f, 0]]\n v1 = self.vertices[self.faces[f, 1]]\n v2 = self.vertices[self.faces[f, 2]]\n\n uv0 = self.uvs[self.faces[f, 0]]\n uv1 = self.uvs[self.faces[f, 1]]\n uv2 = self.uvs[self.faces[f, 2]]\n\n deltaPos1 = v1 - v0\n deltaPos2 = v2 - v0\n\n deltaUV1 = uv1 - uv0\n deltaUV2 = uv2 - uv0\n\n r = 1.0 / (deltaUV1[0] * 
deltaUV2[1] - deltaUV1[1] * deltaUV2[0])\n            tangent = (deltaPos1 * deltaUV2[1] - deltaPos2 * deltaUV1[1]) * r\n            bitangent = (deltaPos2 * deltaUV1[0] - deltaPos1 * deltaUV2[0]) * r\n\n            for j in range(3):\n                self.tangents[self.faces[f,j],:] += tangent\n                self.bitangents[self.faces[f,j],:] += bitangent\n\n        self.tangents /= np.linalg.norm(self.tangents, axis=1, keepdims=True)\n        self.bitangents /= np.linalg.norm(self.bitangents, axis=1, keepdims=True)\n\n    def add_script(self, script):\n        \"\"\"\n        Adds a script to the mesh\n        \n        :param script: The script to add\n        \"\"\"\n        program = Programs[script]\n        self.scripts.append(program)\n\n    def set_material(self, material: Material):\n        \"\"\"\n        Sets the material of the mesh\n        \n        :param material: The material to set\n        \"\"\"\n        self.material = material\n\n    def getBoundingBox(self):\n        \"\"\"\n        Returns the bounding box of the mesh, i.e. the smallest box that contains the mesh\n        \n        :return: The bounding box of the mesh\"\"\"\n        # start from +/- infinity so meshes lying entirely on one side of the origin are measured correctly\n        box_min = [float('inf'), float('inf'), float('inf')]\n        box_max = [float('-inf'), float('-inf'), float('-inf')]\n\n        for i in range(len(self.vertices)):\n            for j in range(len(self.vertices[i])):\n                if self.vertices[i][j] < box_min[j]:\n                    box_min[j] = self.vertices[i][j]\n                if self.vertices[i][j] > box_max[j]:\n                    box_max[j] = self.vertices[i][j]\n\n        return [box_min, box_max]\n\n    def recalculate_normals(self):\n        \"\"\"\n        Recalculates the normals of the mesh and updates the VBO\n        \"\"\"\n        self.normals = np.zeros(self.vertices.shape, dtype=\"f\")\n        \n        for f in range(self.faces.shape[0]):\n            a = self.vertices[self.faces[f, 1]] - self.vertices[self.faces[f, 0]]\n            b = self.vertices[self.faces[f, 2]] - self.vertices[self.faces[f, 0]]\n            n = np.cross(a, b)\n\n            for j in range(3):\n                self.normals[self.faces[f,j],:] += n\n\n        self.normals /= np.linalg.norm(self.normals, axis=1, keepdims=True)\n\n        LOG(f\"Recalculated normals for mesh {self.name} with {len(self.normals)} normals\")\n\n        self.initialize()\n\n    def initialize(self):\n        \"\"\"\n        Initializes the mesh, i.e. 
uploads the data to the GPU\n \"\"\"\n glEnableClientState(GL_VERTEX_ARRAY)\n\n # vertex positions\n glBindBuffer(GL_ARRAY_BUFFER, self.vbo)\n glBufferData(GL_ARRAY_BUFFER, self.vertices, GL_STATIC_DRAW)\n\n # indices (faces)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ibo)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, self.faces, GL_STATIC_DRAW)\n\n # normals\n glBindBuffer(GL_ARRAY_BUFFER, self.nbo)\n glBufferData(GL_ARRAY_BUFFER, self.normals, GL_STATIC_DRAW)\n\n # texture coordinates (UVs)\n glBindBuffer(GL_ARRAY_BUFFER, self.tbo)\n glBufferData(GL_ARRAY_BUFFER, self.uvs, GL_STATIC_DRAW)\n\n # tangents\n glBindBuffer(GL_ARRAY_BUFFER, self.tanbo)\n glBufferData(GL_ARRAY_BUFFER, self.tangents, GL_STATIC_DRAW)\n\n # bitangents\n glBindBuffer(GL_ARRAY_BUFFER, self.bitbo)\n glBufferData(GL_ARRAY_BUFFER, self.bitangents, GL_STATIC_DRAW)\n\n def draw(self):\n \"\"\"\n Draws the mesh using the currently bound shader\n \"\"\"\n glEnableClientState(GL_VERTEX_ARRAY)\n\n glEnableVertexAttribArray(0)\n glBindBuffer(GL_ARRAY_BUFFER, self.vbo)\n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, None)\n\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ibo)\n\n glEnableVertexAttribArray(1)\n glBindBuffer(GL_ARRAY_BUFFER, self.nbo)\n glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, None)\n\n glEnableVertexAttribArray(2)\n glBindBuffer(GL_ARRAY_BUFFER, self.tbo)\n glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 0, None)\n\n glEnableVertexAttribArray(3)\n glBindBuffer(GL_ARRAY_BUFFER, self.tanbo)\n glVertexAttribPointer(3, 3, GL_FLOAT, GL_FALSE, 0, None)\n\n glEnableVertexAttribArray(4)\n glBindBuffer(GL_ARRAY_BUFFER, self.bitbo)\n glVertexAttribPointer(4, 3, GL_FLOAT, GL_FALSE, 0, None)\n\n\n glDrawElements(GL_TRIANGLES, self.faces.size, GL_UNSIGNED_INT, None)\n\n glBindBuffer(GL_ARRAY_BUFFER, 0)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)\n\n glDisableVertexAttribArray(0)\n glDisableVertexAttribArray(1)\n glDisableVertexAttribArray(2)\n glDisableVertexAttribArray(3)\n glDisableVertexAttribArray(4)\n\n\n def update(self, dt: float):\n \"\"\"\n Updates the mesh position, rotation and scale\n \"\"\"\n for script in self.scripts:\n script(self, dt)\n\n @staticmethod\n def CreateScreenQuad():\n \"\"\"\n Creates a quad mesh that fills the entire screen\n \"\"\"\n vertices = np.array([\n [-1, -1, 0],\n [1, -1, 0],\n [1, 1, 0],\n [-1, 1, 0]\n ], dtype=\"f\")\n\n faces = np.array([\n [0, 1, 2],\n [0, 2, 3]\n ], dtype=\"i\")\n\n normals = np.array([\n [0, 0, 1],\n [0, 0, 1],\n [0, 0, 1],\n [0, 0, 1]\n ], dtype=\"f\")\n \n uvs = np.array([\n [0, 0],\n [1, 0],\n [1, 1],\n [0, 1]\n ], dtype=\"f\")\n\n mesh = Mesh(vertices, faces, normals, uvs)\n mesh.name = \"ScreenQuad\"\n\n return mesh\n\n @staticmethod\n def CreateQuad(divisions):\n \"\"\"\n Creates a quad mesh with the given number of divisions\n \n :param divisions: The number of divisions\n :return: The quad mesh\n \"\"\"\n vertices = []\n faces = []\n normals = []\n uvs = []\n\n for i in range(divisions + 1):\n for j in range(divisions + 1):\n vertices.append([i / divisions, j / divisions, 0])\n normals.append([0, 0, 1])\n uvs.append([i / divisions, j / divisions])\n\n for i in range(divisions):\n for j in range(divisions):\n faces.append([i * (divisions + 1) + j, i * (divisions + 1) + j + 1, (i + 1) * (divisions + 1) + j + 1])\n faces.append([i * (divisions + 1) + j, (i + 1) * (divisions + 1) + j + 1, (i + 1) * (divisions + 1) + j])\n\n vertices = np.array(vertices, dtype=\"f\")\n faces = np.array(faces, dtype=\"i\")\n normals = np.array(normals, dtype=\"f\")\n uvs 
= np.array(uvs, dtype=\"f\")\n\n        mesh = Mesh(vertices, faces, normals, uvs)\n        mesh.name = \"Quad\"\n\n        return mesh\n\n    \n    ","repo_name":"killereks/3D-experiments","sub_path":"Proper Rasterizer/Mesh.py","file_name":"Mesh.py","file_ext":"py","file_size_in_byte":8483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3136330853","text":"import logging\nfrom typing import Any, Dict, List, Optional, Text\n\nfrom rasa.nlu.extractors import EntityExtractor\nfrom rasa.nlu.model import Metadata\nfrom rasa.nlu.training_data import Message\n\nimport os\nimport shutil\nimport kashgari\nfrom kashgari.embeddings import BERTEmbedding\nimport kashgari.tasks.labeling as labeling\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard\n\nlogger = logging.getLogger(__name__)\n\n\nclass KashgariEntityExtractor(EntityExtractor):\n    provides = [\"entities\"]\n\n    defaults = {\n        \"bert_model_path\": None,\n        \"sequence_length\": \"auto\",\n        \"layer_nums\": 4,\n        \"trainable\": False,\n        \"labeling_model\": \"BiLSTM_CRF_Model\",\n        \"epochs\": 10,\n        \"batch_size\": 32,\n        \"validation_split\": 0.2,\n        \"patience\": 5,\n        \"factor\": 0.5,  # factor by which to reduce the learning rate each time\n        \"verbose\": 1,\n        \"use_cudnn_cell\": False\n    }\n\n    def __init__(self,\n                 component_config=None,\n                 model=None):\n        super(KashgariEntityExtractor, self).__init__(component_config)\n\n        bert_model_path = self.component_config.get('bert_model_path')\n        sequence_length = self.component_config.get('sequence_length')\n        layer_nums = self.component_config.get('layer_nums')\n        trainable = self.component_config.get('trainable')\n        use_cudnn_cell = self.component_config.get('use_cudnn_cell')\n\n        kashgari.config.use_cudnn_cell = use_cudnn_cell\n\n        self.labeling_model = self.component_config.get('labeling_model')\n\n        self.bert_embedding = BERTEmbedding(bert_model_path,\n                                            task=kashgari.LABELING,\n                                            layer_nums=layer_nums,\n                                            trainable=trainable,\n                                            sequence_length=sequence_length)\n\n        self.model = model\n\n\n    def train(self, training_data, cfg, **kwargs):\n        # look the labeling class up on the module instead of using eval\n        labeling_model = getattr(labeling, self.labeling_model)\n\n        epochs = self.component_config.get('epochs')\n        batch_size = self.component_config.get('batch_size')\n        validation_split = self.component_config.get('validation_split')\n        patience = self.component_config.get('patience')\n        factor = self.component_config.get('factor')\n        verbose = self.component_config.get('verbose')\n\n        filtered_entity_examples = self.filter_trainable_entities(training_data.training_examples)\n\n        X, Y = self._create_dataset(filtered_entity_examples)\n\n        train_x, validate_x, train_y, validate_y = train_test_split(X, Y, test_size=validation_split, random_state=100)\n\n        self.model = labeling_model(self.bert_embedding)\n\n        checkpoint = ModelCheckpoint(\n            'entity_weights.h5',\n            monitor='val_loss',\n            save_best_only=True,\n            save_weights_only=False,\n            verbose=verbose)\n        early_stopping = EarlyStopping(\n            monitor='val_loss',\n            patience=patience)\n        reduce_lr = ReduceLROnPlateau(\n            monitor='val_loss',\n            factor=factor,\n            patience=patience,\n            verbose=verbose)\n\n        self.model.fit(\n            train_x,\n            train_y,\n            validate_x,\n            validate_y,\n            epochs=epochs,\n            batch_size=batch_size,\n            callbacks=[checkpoint, early_stopping, reduce_lr]\n        )\n\n    def _create_dataset(self, examples):\n        X, Y = [], []\n        for example in examples:\n            entity_offsets = self._convert_example(example)\n            data, label = 
self._predata(example.text, entity_offsets)\n X.append(data)\n Y.append(label)\n return X, Y\n\n def _convert_example(self, example):\n def convert_entity(entity):\n return entity[\"start\"], entity[\"end\"], entity[\"entity\"]\n return [convert_entity(ent) for ent in example.get(\"entities\", [])]\n\n def _predata(self, text, entity_offsets):\n value = 'O'\n bilou = [value for _ in text]\n\n for (start, end, entity) in entity_offsets:\n if start is not None and end is not None:\n bilou[start] = 'B-' + entity\n for i in range(start+1, end):\n bilou[i] = 'I-' + entity\n\n return list(text), bilou\n\n def process(self, message, **kwargs):\n extracted = self.add_extractor_name(self.extract_entities(message))\n\n message.set(\"entities\",\n message.get(\"entities\", []) + extracted,\n add_to_output=True)\n\n def extract_entities(self, message):\n if self.model is not None:\n entities, result = self.model.predict_entities([list(message.text)], join_chunk=''), []\n\n for item in entities[0]['labels']:\n result.append({\n 'start': item['start'],\n 'end': item['start'] + len(item['value']),\n 'value': item['value'],\n 'entity': item['entity']\n })\n\n return result\n else:\n return []\n\n\n def persist(self,\n file_name: Text,\n model_dir: Text) -> Optional[Dict[Text, Any]]:\n model_path = os.path.join(model_dir, file_name)\n\n self.model.save(model_path)\n\n remove_file = os.path.join(model_path, 'model_weights.h5')\n os.remove(remove_file)\n shutil.move('entity_weights.h5', model_path)\n os.rename(os.path.join(model_path, 'entity_weights.h5'), os.path.join(model_path, 'model_weights.h5'))\n\n return {\"file\": file_name} \n\n @classmethod\n def load(cls,\n meta: Dict[Text, Any],\n model_dir: Optional[Text]=None,\n model_metadata: Optional['Metadata']=None,\n cached_component: Optional[EntityExtractor]=None,\n **kwargs: Any\n ) -> 'KashgariEntityExtractor':\n\n if model_dir and meta.get(\"file\"):\n file_name = meta.get(\"file\")\n labeling_model = os.path.join(model_dir, file_name)\n loaded_model = kashgari.utils.load_model(labeling_model)\n \n return cls(component_config=meta,\n model=loaded_model)\n else:\n logger.warning(\"Failed to load entity model. 
Maybe path {} \"\n                           \"doesn't exist\"\n                           \"\".format(os.path.abspath(model_dir)))\n            return cls(component_config=meta)\n","repo_name":"GaoQ1/rasa_nlu_gq","sub_path":"rasa_nlu_gao/extractors/kashgari_entity_extractor.py","file_name":"kashgari_entity_extractor.py","file_ext":"py","file_size_in_byte":6604,"program_lang":"python","lang":"en","doc_type":"code","stars":300,"dataset":"github-code","pt":"31"} +{"seq_id":"37845683218","text":"#!/bin/python3\nimport sys, os, glob\nimport Lexer\nimport Parser\nfrom Generator import *\n\ndef compile_file(source: str):\n    with open(source, \"r\") as file:\n        source_code = file.read()\n\n    lexer = Lexer.Lexer(source_code)\n    parser = Parser.Parser(lexer.Lex(), generator)\n\n    parser.program()\n    path = source.split('.')[0] + \".py\"\n    generator.write(path)\n    print(\"Finished compilation\")\n\ndef main():\n    if len(sys.argv) == 1:\n        sys.exit(\"No file path passed as an argument\")\n    else:\n        input_path = sys.argv[1]\n\n    if os.path.isfile(input_path):\n        compile_file(input_path)\n    elif os.path.isdir(input_path):\n        file_paths = glob.glob(os.path.join(input_path, '*.txt'))\n        for source_code in file_paths:\n            compile_file(source_code)\n    else:\n        sys.exit(\"Invalid file name or directory.\")\n\nif __name__ == \"__main__\":\n    main()","repo_name":"watermoon7/OCR-to-Python-Compiler","sub_path":"ocrcompiler/Compile.py","file_name":"Compile.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72994949848","text":"import numpy as np\nimport urllib.request\nimport os\nimport mxnet as mx\nimport logging\n\n\ndef get_model(prefix, code, model_dir):\n    if not os.path.exists(model_dir):\n        os.makedirs(model_dir)\n    download(prefix + '-symbol.json', model_dir)\n    download(prefix + '-%04d.params' % code, model_dir)\n\n\n# obtain the pre-trained model\ndef download(url, model_dir):\n    filename = url.split('/')[-1]\n    if not os.path.exists(model_dir + filename):\n        # urlretrieve lives in urllib.request under Python 3\n        urllib.request.urlretrieve(url, model_dir + filename)\n\n\ndef spec_context(param, ctx):\n    \"\"\"\n    This func specifies the device context(computation source:CPU/GPU)\n    of the NDArray\n\n    Inputs:\n    - param: dict of str to NDArray\n    - ctx: the device context(Context or list of Context)\n\n    Returns:\n    None\n    \"\"\"\n    for k, v in param.items():\n        param[k] = v.as_in_context(ctx)\n\n    return\n\n\ndef load_pretrained_model(prefix, model_name, epoch, model_dir, ctx=None):\n    \"\"\"\n    This func is a wrapper of the mx.model.load_checkpoint. 
It can\n    also specify the context(computation source:CPU/GPU) that will hold\n    the params\n\n    Inputs:\n    - prefix: string indicating prefix of model name\n    - epoch: int indicating epoch number\n    - ctx: the device context(Context or list of Context)\n\n    Returns:\n    - arg_params: dict of str to NDArray of net's weights\n    - aux_params: dict of str to NDArray of net's auxiliary states\n    \"\"\"\n    get_model(prefix, epoch, model_dir)\n    sym, arg_params, aux_params = mx.model.load_checkpoint(model_dir + model_name, epoch)\n    logging.info('The pretrained model has been loaded successfully!')\n    return sym, arg_params, aux_params\n","repo_name":"jay1204/st-resnet","sub_path":"utils/model_helper.py","file_name":"model_helper.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"31"} +{"seq_id":"74958865688","text":"__author__ = 'gru'\n\nfrom os import listdir\nfrom os.path import isfile, isdir, join\nimport os.path\nimport platform\n\nimport soundfile as sf\nimport numpy as np\n\nfrom core.audio_cwe import watermarking_utils\nfrom core.audio_cwe.xs_wm_scheme import XsWMSystem\n\n\n# Specify input directories\ninput_dir = '../../../res/testing/robustness_eval/32_bits_step_5_t_50' \\\n            '/marked_test_files'\ninput_dir_b = '../../../res/testing/robustness_eval/32_bits_step_5_t_50' \\\n              '/stirmark/attacked_test_files'\n\n# Retrieve marked test files\nfiles = [f for f in listdir(input_dir) if isfile(join(input_dir, f))]\n\nif platform.system() == 'Darwin' and files[0].startswith(\".\"):\n    files = files[1:]  # exclude .ds_store on mac\n\n# Retrieve directories of attacked test files\nattacked_filedirs = [join(input_dir_b, d) for d in listdir(input_dir_b) if isdir(join(input_dir_b, d))]\nprint(attacked_filedirs)\n\nwav_files = [f for f in files if f.endswith('.wav')]\niv_files = [f for f in files if f.endswith('iv')]\nkeys = [f for f in files if f.endswith('key')]\nmarks = [f for f in files if f.endswith('mark')]\n\nresults = []\nfor i, f in enumerate(wav_files):\n    print(f)\n\n    # Init WM system\n    wm_sys = XsWMSystem.from_file(join(input_dir, iv_files[i]))\n    # Read key\n    bin_pairs = watermarking_utils.read_keyfile(input_dir + \"/\" + keys[i])\n    # Read embedded WMK\n    wmk = np.loadtxt(input_dir + \"/\" + marks[i], dtype=int)  # plain int: np.int was removed from NumPy\n\n    attacked_files = [f for f in listdir(attacked_filedirs[i]) if\n                      isfile(join(attacked_filedirs[i], f)) and f.endswith('.wav')]\n\n    for j, af in enumerate(attacked_files):\n        print(af)\n\n        # Read marked and attacked sound file\n        samples, samplerate = sf.read(join(attacked_filedirs[i], af), dtype=np.int16)\n\n        # Extract watermark\n        recovered_wmk = wm_sys.extract_watermark(samples, syn=wmk, key=bin_pairs)\n\n        # Check whether the detected watermark is correct\n        print('=============================================')\n        print('Result:')\n        print('---------------------------------------------')\n        if np.array_equal(wmk, recovered_wmk):\n            print('Original watermark and detected watermark match perfectly')\n        else:\n            print('Original watermark and detected watermark do not match ')\n            while len(wmk) > len(recovered_wmk):\n                recovered_wmk = np. 
append(recovered_wmk, 0)\n\n            ber = watermarking_utils.calc_bit_error_rate(wmk, recovered_wmk)\n            print('BER: ', ber)\n            results.append((af, ber))\n        print('---------------------------------------------')\n        print(wmk.tolist())\n        print(recovered_wmk)\n\nnp.savetxt(join(os.path.split(input_dir_b)[0], 'v2_BER_stirmark_attacks_' +\n                str(len(wmk)) + '_bits'), results, fmt='%s')\n","repo_name":"jgru/audio-cwe-framework","sub_path":"experimental_testing/robustness_eval/eval_stirmark_robustness.py","file_name":"eval_stirmark_robustness.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17168893309","text":"# import libraries\nimport time\nimport datetime\nimport pandas as pd\nfrom rich import print as rprint\nfrom rich.pretty import pprint\nfrom rich.progress import Progress\nimport re\nfrom rich.progress import track\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.service import Service as ChromeService\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.firefox.service import Service\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver import Firefox\n# ------------------------------------------------------------------\n# TODO: functions\n# ------------------------------------------------------------------\n\n# These helper functions search for elements in the document DOM\n\n\ndef findElementTextBySelector(selector, exception):\n    try:\n        element = i.find_element(\n            By.CSS_SELECTOR, selector).text.strip().upper()\n    except Exception:\n        element = exception\n    return element\n\n\ndef findElementNumberBySelector(selector, exception):\n    try:\n        element = i.find_element(\n            By.CSS_SELECTOR, selector).text.replace('$', '')\n        element = \"\".join([ch for ch in element if ch.isdigit()])\n    except Exception:\n        element = exception\n    return element\n\n\ndef findElementBy(by, selector, t):\n    open_modal = driver.find_element(by, selector)\n    open_modal.click()\n    time.sleep(t)\n\n\ndef findElementByAndSendKey(by, selector, key, t):\n    open_modal = driver.find_element(by, selector)\n    open_modal.click()\n    open_modal.send_keys(key)\n    open_modal.send_keys(Keys.TAB)\n    time.sleep(t)\n\n\n\ndef findElementNumberByXPATH(selector, exception):\n    try:\n        element = i.find_element(\n            By.XPATH, selector).text.replace('$', '')\n        element = \"\".join([ch for ch in element if ch.isdigit()])\n    except Exception:\n        element = exception\n    return element\n\n\ndef scrollDownPage(driver, t):\n    time.sleep(t)\n    driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n\ndef scrollDownFullPage(driver):\n    height = driver.execute_script(\"return document.body.scrollHeight\")\n    for i in range(height):\n        # scroll by 20 on each iteration\n        driver.execute_script('window.scrollBy(0,20)')\n        # reset height to the new height after scroll-triggered elements have been loaded.\n        height = driver.execute_script(\"return document.body.scrollHeight\")\n        time.sleep(0.05)\n\n# Function Beautiful View\n\n\ndef process_data():\n    time.sleep(0.02)\n\n# Date \n\ntoday = datetime.date.today() \n\n# Categories of brands that should be 
considered for search results\n\n\nshops = [\"Bogota\", \"Medellin\"]\ncategories = ['whisky', 'vodka', 'cremas',\n 'ginebra', 'ron', 'tequila-y-mezcal']\n\n# ------------------------------------------------------------------\n# TODO: Extract the data for shop EXITO\n# ------------------------------------------------------------------\n\nfor city in shops:\n for category in categories:\n # Bar progress -> comment\n for _ in track(range(100), description=f'[green]Iniciando Scraping en Diageo categoria: {category} en la ciudad: {city}'):\n process_data()\n options = webdriver.ChromeOptions()\n # options.add_argument(\"--headless\")\n options.add_argument(\"start-maximized\")\n options.add_experimental_option('excludeSwitches', ['enable-logging'])\n driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()), options=options)\n # driver.maximize_window()\n\n # Open the Page\n\n driver.get(f\"https://co.thebar.com/{category}\")\n\n time.sleep(10)\n\n # Click on Modal Window\n # findElementBy(\n # By.XPATH, \"//*[@id='btn-si']\", 1)\n findElementBy(\n By.XPATH, f\"//button[normalize-space()='{city}']\", 3)\n\n findElementBy(\n By.XPATH, \"//button[normalize-space()='Si, soy mayor de 18 años']\", 5) \n\n # findElementBy(\n # By.CSS_SELECTOR, \"#termsAndConditions\", 2)\n \n # Click for city selection\n # findElementBy(\n # By.XPATH, \"//select[@id='ciudadAgeVerification']\", 2)\n # Select City\n # findElementByAndSendKey(\n # By.ID, \"ciudadAgeVerification\", city, 2)\n\n # scrollDownPage(driver, 15)\n # scrollDownFullPage(driver)\n\n initial_XPATH = \"//div[contains(@class,'vtex-button__label flex items-center justify-center h-100 ph5')]\"\n # define the max clicks for page for default 30\n max_click_SHOW_MORE = 35\n # count the number of clicks\n count = 1\n # This loop search the button load more and apply the click until the end of page\n while count <= max_click_SHOW_MORE:\n try:\n WebDriverWait(driver, 30).until(\n EC.visibility_of_element_located((By.XPATH, initial_XPATH)))\n time.sleep(5) \n WebDriverWait(driver, 20).until(\n EC.element_to_be_clickable((By.XPATH, initial_XPATH))).click()\n count += 1\n time.sleep(2)\n # Bar progress -> comment\n for i in track(range(4), description=f\"[red]Explorando Pagina Web iter {count - 1}..........\"):\n time.sleep(1)\n\n except TimeoutException:\n break\n\n # Search the elements of the page\n items = driver.find_elements(\n By.CSS_SELECTOR, \".vtex-product-summary-2-x-element.pointer.pt3.pb4.flex.flex-column.h-100\")\n # Create a frame empty for the data\n data = []\n # iterate over each element\n for i in items:\n name = findElementTextBySelector(\n \".vtex-product-summary-2-x-productBrand.vtex-product-summary-2-x-brandName.t-body\", \"SIN DESCRIPCION\")\n brand = findElementTextBySelector(\n \".class\", \"SIN MARCA\")\n price_prime = findElementNumberBySelector(\n \".class\", \"0\")\n price_regular = findElementNumberBySelector(\n \".vtex-product-price-1-x-listPriceValue.strike\", \"0\")\n price_now = findElementNumberBySelector(\n \".vtex-product-price-1-x-sellingPriceValue\", \"0\")\n discount = findElementNumberBySelector(\n \".vtex-store-components-3-x-discountInsideContainer.t-mini.white.absolute.right-0.pv2.ph3.bg-emphasis.z-1\", \"0\")\n\n data.append({f\"shop\": \"DIAGEO\",\n \"city\": city,\n \"location\": city,\n \"category\": category,\n \"name\": name,\n \"unit_measure\": \"\",\n \"brand\": brand,\n \"price_prime\": price_prime,\n \"price_regular\": price_regular,\n \"price_now\": price_now,\n 
\"conditional_discount\": \"\",\n \"conditional_discount_2\": \"\",\n \"discount\": discount,\n \"date\": today\n })\n\n df = pd.DataFrame(data)\n df.to_csv(f'/home/pydev/workflow/dt_web_scraping/prod/data/diageo_{city}_{category}_data_{today}.txt',\n index=False, encoding='utf-8')\n\n time.sleep(1)\n driver.quit()\n\ntime.sleep(3)\ndriver.quit()\n","repo_name":"edwinmesa/dt_web_scraping","sub_path":"prod/diageo/web_scraping_diageo_v1.py","file_name":"web_scraping_diageo_v1.py","file_ext":"py","file_size_in_byte":7721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16101329318","text":"from sklearn.datasets import fetch_mldata\n\nmnist = fetch_mldata('MNIST original', data_home='data/')\n\nX, y = mnist[\"data\"], mnist[\"target\"]\n\n# import matplotlib\n# import matplotlib.pyplot as plt\n# some_digit = X[36000]\n# some_digit_image = some_digit.reshape(28, 28)\n# plt.imshow(some_digit_image, cmap = matplotlib.cm.binary, interpolation=\"nearest\")\n# plt.axis(\"off\")\n# plt.show()\n\nX_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]\n\n# 打乱训练集\nimport numpy as np\nshuffle_index = np.random.permutation(60000)\nX_train, y_train = X_train[shuffle_index], y_train[shuffle_index]","repo_name":"applepip/machine_learning","sub_path":"mnist_classification/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25130742029","text":"from odoo import models, fields, api, _\nfrom odoo.exceptions import UserError\n\n\nclass HmsPatient(models.Model):\n _inherit = 'hms.patient'\n\n new_patient_id = fields.Many2one('hms.patient', string='New Patient')\n\n duplicate_patient_count = fields.Integer(String=\"Duplicate Patients\")\n duplicate_patient_ids = fields.One2many(\n comodel_name='hms.patient',\n inverse_name='new_patient_id',\n string='Detected Same Name Patients',\n )\n\n @api.onchange('name')\n def onchange_name(self):\n patient_ids = self.env['hms.patient'].search(\n [('name', '=', self.name)])\n self.duplicate_patient_count = len(patient_ids)\n\n if len(patient_ids) > 0:\n patient_ids = self.env['hms.patient'].search(\n [('name', '=', self.name)])\n self.duplicate_patient_ids = patient_ids.ids\n else:\n self.duplicate_patient_ids = []\n\n def action_get_duplicacte_patients(self):\n print('Checking duplicate patients')\n patient_ids = self.env['hms.patient'].search(\n [('name', '=', self.name)])\n self.duplicate_patient_ids = patient_ids.ids\n","repo_name":"butirpadi/aa_hms_mode","sub_path":"models/hms_patient.py","file_name":"hms_patient.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"72288554967","text":"import matplotlib.pyplot as plt\nimport random\nimport math\nfrom sklearn.cluster import KMeans\nfrom copy import deepcopy\nfrom itertools import combinations\n\n# Test datapoints\nnum_points = 100\npoints_list = list()\nfor _ in range(num_points):\n p = random.randint(0,10)\n if p<6:\n point = (random.uniform(-4, 3), random.uniform(-4, 3))\n elif p>=6 and p<8:\n point = (random.uniform(3, 5), random.uniform(3, 5))\n else:\n point = (random.uniform(4, 7), random.uniform(4, 7))\n points_list.append(point)\n\nplt.scatter([pair[0] for pair in points_list],[pair[1] for pair in points_list], marker = '.')\n\ndef kmeans(k, points_list, max_iter, initialization, n_init=3):\n 
best_centroids = dict()\n best_inertia = float('inf')\n for n in range(n_init):\n centroids_dict = dict() # map centroid tuple to unique and index\n if initialization=='random':\n dictinct_points = list(set(points_list))\n selected_cent = set()\n for cluster_idx in range(k):\n random_idx = random.randrange(0, len(dictinct_points))\n while random_idx in selected_cent:\n random_idx = random.randrange(0, len(dictinct_points))\n selected_cent.add(random_idx)\n centroids_dict[dictinct_points[random_idx]] = cluster_idx\n # centroids is the list of initial centorids\n elif initialization=='farthest':\n #initialize the initial centroids by farthest distance from each other\n curr_sum_distance = 0\n closest_distance = 0 \n for kth_centroid in range(k): #first centroid is still randomly picked\n if kth_centroid == 0:\n random_idx = random.randrange(0, len(points_list))\n centroids_dict[points_list[random_idx]] = kth_centroid\n else:\n curr_sum_distance = 0\n # compute sum of distance to the current centroids to each other point\n closest_centroid = points_list[0] # in case there is only one point in points_list\n for point in points_list:\n if point not in centroids_dict.keys():\n for curr_cent in centroids_dict.keys():\n curr_sum_distance += euclideanDist(curr_cent, point)\n if curr_sum_distance > closest_distance:\n closest_distance = curr_sum_distance\n closest_centroid = point\n curr_sum_distance = 0\n centroids_dict[closest_centroid] = kth_centroid\n else:\n return 'initialization method not recognized'\n\n convergence = False\n curr_iter = 0\n while not convergence and curr_iter Iterator[list[T]]:\n # cf. split_after from more-itertools\n \"\"\"\n Return a generator of subsequences of ``iterable``, split after each\n element ``x`` for which ``predicate(x)`` is true.\n\n If the predicate is true for the last element of ``iterable``, no trailing\n empty list is emitted.\n\n If ``iterable`` is empty, a generator of one empty list is returned.\n \"\"\"\n empty = True\n chunk: list[T] = []\n for obj in iterable:\n empty = False\n chunk.append(obj)\n if predicate(obj):\n yield chunk\n chunk = []\n if empty or chunk:\n yield chunk\n","repo_name":"jwodder/pywodlib","sub_path":"src/pywodlib/lists/split_after.py","file_name":"split_after.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16997817467","text":"from django.core.signing import Signer, BadSignature\nfrom django.conf import settings\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.shortcuts import reverse\n\n\nclass UserUnsubscriber(object):\n _salt = 'pharm'\n\n def __init__(self):\n self.signer = Signer(salt=self._salt)\n\n def make_unsubscribe_link(self, email):\n base64_email = urlsafe_base64_encode(email)\n token = self.signer.sign(email).split(':')[1]\n\n return '{hostname}{path}'.format(\n hostname=settings.HOSTNAME,\n path=reverse('unsubscribe_news', args=(base64_email, token)),\n )\n\n def parse_unsubscribe_link(self, base64_email, token):\n \"\"\"\n If link is valid, return user's email\n \"\"\"\n email = urlsafe_base64_decode(base64_email)\n\n try:\n unsigned_token = self.signer.unsign('{}:{}'.format(email, token))\n\n if unsigned_token == email:\n return email\n except BadSignature:\n pass\n\n return 
False\n","repo_name":"nigil/pharm_bricks","sub_path":"apps/news/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13628622056","text":"# 4-3. 왕실의 나이트(p.115)\n# 시뮬레이션(Simulation) 유형\n\nl = str(input())\nrow = int(l[1])\ncol = int(ord(l[0])) - int(ord('a')) + 1 # ord : one-charater string에 대한 unicode 반환\ncnt = 0\n\n# 나이트가 이동가능한 모든 방향 경우의 수\ndir = [(2, 1), (2, -1), (-2, 1), (-2, -1), (1, 2), (1, -2), (-1, 2), (-1, -2)]\n\nfor i in dir:\n n_row = row + i[0]\n n_col = col + i[1]\n if 1 <= n_row <= 8 and 1 <= n_col <= 8: # 새로운 좌표가 공간 내에 있을 경우 cnt 1 추가\n cnt += 1\n\nprint(cnt)\n","repo_name":"jwshin0908/Algorithm","sub_path":"python-for-coding-test/ch04_구현/4-3.py","file_name":"4-3.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36462482524","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/10/25 19:12\n# @Author : Big Huang\n# @Email : kenhuang866@qq.com\n# @File : main2.py\n# @Software: PyCharm Community Edition\nimport tensorflow as tf\nimport pandas as pd\n\nimport numpy as np\nfrom sklearn.datasets import fetch_california_housing\nhousing = fetch_california_housing()\nm, n = housing.data.shape\nY_array = housing.target.reshape(-1, 1)\nconstant_array = np.ones_like(Y_array)\nX_array = np.c_[constant_array, housing.data]\nX = tf.constant(X_array, dtype=tf.float32, name=\"X\")\nY = tf.constant(Y_array, dtype=tf.float32, name=\"Y\")\nXT = tf.transpose(X)\ntheta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), Y)\nwith tf.Session() as sess:\n theta_value = theta.eval()\nprint(theta_value)\n\n\nimport sklearn\nfrom sklearn.preprocessing import scale\nhousing = fetch_california_housing()\nm, n = housing.data.shape\nhousing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]\nscaled_housing_data_plus_bias = scale(housing_data_plus_bias)\nn_epochs = 1000\nlearning_rate = 0.01\nX = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name=\"X\")\ny = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name=\"y\")\ntheta = tf.Variable(tf.random_uniform([n+1, 1], -1.0, 1.0), name=\"theta\")\ny_pred = tf.matmul(X, theta, name=\"predictions\")\nerror = y_pred - y\nmse = tf.reduce_mean(tf.square(error), name=\"mse\")\ngradients = 2/m * tf.matmul(tf.transpose(X), error)\ntraining_op = tf.assign(theta, theta-learning_rate*gradients)\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n sess.run(init)\n for epoch in range(n_epochs):\n if epoch % 100 == 0:\n print(\"Epoch\", epoch, \"MSE = \", mse.eval())\n sess.run(training_op)\n best_theta = theta.eval()\n\n\n\n\n\n","repo_name":"bighuang866/own_code","sub_path":"python/20181025_tensorflow/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"277089503","text":"import regex\nimport json\nimport pymongo\nfrom pymongo import MongoClient\n\ndef importData(collection):\n with open('sinndeslebens.txt', 'r') as sinnfile:\n sinn_file = sinnfile.readlines()\n \n print(\"Import started...\")\n for line in sinn_file:\n modified = regex.sub(r'db\\.fussball\\.insert\\(','',line)\n modified = regex.sub(r'\\);','',modified)\n modified = regex.sub(r',\\sgruendung.+\\)','',modified)\n modified = regex.sub(r'','',modified)\n keys = 
regex.findall(r'\\w+:',modified)\n for key in keys:\n modified = regex.sub(key,'\"'+key[0:-1]+'\":' ,modified)\n modified = modified.replace(\" \",\"\")\n modified = modified.replace(\"'\",'\"')\n modified = regex.sub(r',\\]',']',modified)\n modified = regex.sub(r',}','}',modified)\n modified = regex.sub(r',}','}',modified)\n keys = regex.findall(r',\\w+\"',modified)\n for key in keys:\n modified = regex.sub(key,',\"'+key[1:] ,modified)\n sinn_data = json.loads(modified)\n collection.insert(sinn_data)\n print(\"Import complete...\")\n\nclient = MongoClient()\nclient = MongoClient('localhost', 27017)\ndatabase = client.admin\ncollist = database.list_collection_names()\ncollection = None\nif not \"nosql\" in collist:\n collection = database[\"nosql\"]\n importData(collection)\nelse:\n print(\"Already exist\")\n#collection = database[\"nosql\"]\n#importData(collection)\n\n","repo_name":"smartuni/AIT-SS2021","sub_path":"bosch_database/MongoDB Code Examples/Import/importsinn.py","file_name":"importsinn.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6770515472","text":"from weightedgraph import WeightedGraph\nfrom priorityqueue import PriorityQueue\nimport random\n\n\ndef createRandomCompleteWeightedGraph(n: int):\n g = WeightedGraph()\n all_nodes = []\n \n #Create nodes\n for i in range(n):\n node = g.addNode(i)\n all_nodes.append(node)\n \n #Connect all nodes to each other\n for n1 in all_nodes:\n for n2 in all_nodes:\n if n1 is not n2:\n g.addWeightedEdge(n1, n2, random.randint(1, 100))\n \n return g\n\ndef createLinkedList(n: int):\n g = WeightedGraph()\n prev = None\n for i in range(n):\n n = g.addNode(i)\n if prev:\n prev.addNeighbor(n)\n prev = n\n return g\n \ndef dijkstras(start):\n queue = PriorityQueue()\n queue.put(start, 0)\n visited = []\n distance = {start: 0}\n previous = {start: None}\n inf = float('inf')\n \n while not queue.empty():\n u = queue.get()\n visited.append(u)\n\n for v in u.neighbors:\n if v not in visited:\n tempDistance = distance.get(u, inf) + u.getWeight(v)\n if tempDistance < distance.get(v, inf):\n distance[v] = tempDistance\n queue.put(v, tempDistance)\n previous[v] = u\n\n return distance\n\n\n#test methods\nran_graph = createRandomCompleteWeightedGraph(5)\nran_graph.printAdjacencyList()\nprint(\"dijkstras from node \", ran_graph.first, \":\")\nprint(dijkstras(ran_graph.first))\n\nprint()\nlink_list = createLinkedList(5)\nlink_list.printAdjacencyList()\nprint(\"dijkstras from node \", link_list.first, \":\")\nprint(dijkstras(link_list.first))","repo_name":"kingartie/CS435","sub_path":"Project2/part2/5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17803423955","text":"from time import sleep\nimport context\nimport mocks\nfrom menu import AsyncAction, Action\n\n\nsomeMenu = None\n\n\nclass AsyncActionForTest(AsyncAction):\n\n def __init__(self, name, delay=0):\n super(AsyncActionForTest, self).__init__(name)\n self._delay = delay\n self.runCnt = 0\n\n def _asyncRun(self):\n sleep(self._delay)\n self.runCnt += 1\n\n\ndef test_shouldBeSubClassOfFolder():\n assert isinstance(AsyncAction(\"\"), Action)\n\n\ndef test_name_shouldReturnName():\n action = AsyncAction(\"myName\")\n assert action.name() == \"myName\"\n\n\ndef test_run_shouldBeAsynchron():\n action = AsyncActionForTest(\"name\", 0.1)\n action.run(someMenu)\n 
assert action.runCnt == 0\n sleep(0.2)\n assert action.runCnt == 1\n\n\ndef test_run_shouldNotRunIfCurrentlyRunning():\n action = AsyncActionForTest(\"name\", 0.1)\n action.run(someMenu)\n action.run(someMenu)\n sleep(0.2)\n assert action.runCnt == 1\n","repo_name":"spielzeugland/kodi-control-panel","sub_path":"test/menu_asyncAction_test.py","file_name":"menu_asyncAction_test.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1624596498","text":"#! /usr/bin/env python\nimport numpy as np\nimport pandas as pd\nimport h5py\nimport os\nfrom tqdm import tqdm\n\ncountrys = np.array(['AUS', 'AUT', 'BEL', 'BGR', 'BRA', 'CAN', 'CHN', 'CYP', 'CZE',\n 'DEU', 'DNK', 'ESP','EST', 'FIN', 'FRA', 'GBR', 'GRC', 'HUN', 'IDN',\n 'IND', 'IRL', 'ITA', 'JPN', 'KOR', 'LTU', 'LUX', 'LVA', 'MEX',\n 'MLT', 'NLD', 'POL', 'PRT', 'ROM', 'RUS', 'SVK', 'SVN', 'SWE',\n 'TUR', 'TWN', 'USA', 'RoW'])\nindexnames = ['L3_1_DVA_FIN','L3_4_RDV','L3_6_FVA_INT','L3_2_DVA_INT',\n 'L3_5_FVA_FIN','L3_7_DDC','L3_3_DVA_INTrex','L3_5_MVA','L3_8_FDC']\nyears = np.arange(1995,2012).astype('str')\n\ndef get_data(indexname,year,country,option):\n '''\n index\n year\n country you want like USA\n option :tochn or chnto\n 2*17*9*41\n '''\n if option == 'tochn':\n if os.path.exists('out/'+year+'/'+year+'_'+indexname+'_tochn.csv'):\n data = pd.read_csv('out/'+year+'/'+year+'_'+indexname+'_tochn.csv')\n ind =np.where(country==countrys)[0][0]\n dat = np.array(data.iloc[:,35*ind+1:35*(ind+1)+1])\n return True,dat, 'year: '+year+', indicator: '+indexname +', country: '+country+' '+option,opt+'/'+year+'/'+country+'/'+index\n else:\n #print 'no such file: '+ 'out/'+year+'/'+year+'_'+indexname+'_tochn.csv'\n return False,'no such file: '+ 'out/'+year+'/'+year+'_'+indexname+'_chnto.csv'\n elif option =='chnto':\n if os.path.exists('out/'+year+'/'+year+'_'+indexname+'_chnto.csv'):\n data = pd.read_csv('out/'+year+'/'+year+'_'+indexname+'_chnto.csv')\n ind =np.where(country==countrys)[0][0]\n dat = np.array(data.iloc[:,35*ind+1:35*(ind+1)+1])\n return True,dat, 'year: '+year+', indicator: '+indexname +', country: '+country+' '+option,opt+'/'+year+'/'+country+'/'+index\n else:\n #print 'no such file: '+ 'out/'+year+'/'+year+'_'+indexname+'_chnto.csv'\n return False,'no such file: '+ 'out/'+year+'/'+year+'_'+indexname+'_chnto.csv'\n\nwith h5py.File('wholedata') as f:\n for opt in tqdm(['tochn','chnto']):\n for year in tqdm(years):\n for country in tqdm(countrys):\n for index in indexnames:\n if get_data(index,year,country,'tochn')[0]:\n f.create_dataset(get_data(index,year,country,'tochn')[3],data=get_data(index,year,country,'tochn')[1])\n else:\n get_data(index,year,country,'tochn')[1]\n","repo_name":"mengmeng12/economic_data_extraction","sub_path":"bin/reorganize_chndata.py","file_name":"reorganize_chndata.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25552913230","text":"def params(opt):\n opt['learner'] = 'model.matching-net-classifier'\n opt['metaLearner'] = 'model.baselines.pre-trained-SGD'\n\n\n opt['trainFull'] = True\n opt['nClasses.train'] = 64\n\n opt['learningRate'] = 0.001\n opt['trainBatchSize'] = 64\n\n opt['learningRates'] = [0.5, 0.1, 0.01, 0.001, 0.0001, 0.00001]\n opt['learningRateDecays'] = [1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 0]\n opt['nUpdates'] = [15]\n\n opt['nEpochs'] = 30000\n opt['nValidationEpisode'] = 100\n 
opt['printPer'] = 1000\n    opt['useCUDA'] = True\n    return opt\n\n","repo_name":"gitabcworld/FewShotLearning","sub_path":"config/baselines/train-pre-trained-SGD.py","file_name":"train-pre-trained-SGD.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":248,"dataset":"github-code","pt":"31"} +{"seq_id":"34393592942","text":"from __future__ import (absolute_import, division, print_function,\n                        unicode_literals)\n\nimport argparse\nimport collections\nimport datetime # For datetime objects\n\nimport backtrader as bt\n\n\nMAINSIGNALS = collections.OrderedDict(\n    (('longshort', bt.SIGNAL_LONGSHORT),\n     ('longonly', bt.SIGNAL_LONG),\n     ('shortonly', bt.SIGNAL_SHORT),)\n)\n\nEXITSIGNALS = {\n    'longexit': bt.SIGNAL_LONGEXIT,\n    'shortexit': bt.SIGNAL_SHORTEXIT,\n}\n\n\ndef parse_args(pargs=None):\n    parser = argparse.ArgumentParser(\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n        description='Sample for Signal concepts')\n\n    # Data Feed\n    parser.add_argument('--data', required=False,\n                        default='./data/WIN$N_5M_2015.05.22_2021.01.22_.csv',\n                        help='Specific data to be read in')\n    parser.add_argument('--fromdate', required=False, #default=None,\n                        default=datetime.datetime(2015, 11, 17),\n                        help='Starting date in YYYY-MM-DD format')\n    parser.add_argument('--todate', required=False, #default=None,\n                        # default=datetime.datetime(2012, 10, 1),\n                        default=datetime.datetime(2015, 11, 25),\n                        help='Ending date in YYYY-MM-DD format')\n    parser.add_argument('--noheaders', action='store_true', default=False,\n                        required=False,\n                        help='Do not use header rows')\n    parser.add_argument('--noprint', action='store_true', default=True,\n                        help='Print the dataframe')\n    parser.add_argument('--writercsv', '-wcsv', action='store_true',\n                        help='Tell the writer to produce a csv stream')\n\n    # Strategy\n    parser.add_argument('--cash', required=False, action='store',\n                        type=float, default=1000000,\n                        help='Cash to start with')\n    parser.add_argument('--signal', required=False, action='store',\n                        default=list(MAINSIGNALS)[0], choices=MAINSIGNALS,\n                        help='Signal type to use for the main signal')\n    parser.add_argument('--exitsignal', required=False, action='store',\n                        default=None, choices=EXITSIGNALS,\n                        help='Signal type to use for the exit signal')\n\n    parser.add_argument('--exitperiod', required=False, action='store',\n                        type=int, default=20,\n                        help=('Period for the exit control ATR'))\n\n    # Optimization\n    parser.add_argument('--maxcpus', '-m', type=int, required=False, default=0,\n                        help=('Number of CPUs to use in the optimization \\n'\n                              ' - 0 (default): use all available CPUs\\n'\n                              ' - 1 -> n: use as many as specified\\n'))\n    parser.add_argument('--no-runonce', action='store_true', required=False,\n                        help='Run in next mode')\n    parser.add_argument('--exactbars', required=False, type=int, default=0,\n                        help=('Use the specified exactbars still compatible with preload\\n'\n                              ' 0 No memory savings\\n'\n                              ' -1 Moderate memory savings\\n'\n                              ' -2 Less moderate memory savings\\n'))\n    parser.add_argument('--no-optdatas', action='store_true', required=False,\n                        help='Do not optimize data preloading in optimization')\n    parser.add_argument('--no-optreturn', action='store_true', required=False,\n                        help='Do not optimize the returned values to save time')\n    # ## RSI\n    # parser.add_argument('--period_rsi_low', type=int, default=10, required=False,\n    #                     help='RSI period range to 
optimize')\n # ## SMA\n # parser.add_argument('--ma_low', type=int, default=10, required=False,\n # help='SMA range low to optimize')\n # parser.add_argument('--ma_high', type=int, default=14, required=False,\n # help='SMA range high to optimize')\n # parser.add_argument('--m1_low', type=int, default=12, required=False,\n # help='MACD Fast MA range low to optimize')\n # parser.add_argument('--m1_high', type=int, default=15, required=False,\n # help='MACD Fast MA range high to optimize')\n # parser.add_argument('--m2_low', type=int, default=26, required=False,\n # help='MACD Slow MA range low to optimize')\n # parser.add_argument('--m2_high', type=int, default=28, required=False,\n # help='MACD Slow MA range high to optimize')\n # parser.add_argument('--m3_low', type=int, default=9, required=False,\n # help='MACD Signal range low to optimize')\n # parser.add_argument('--m3_high', type=int, default=12, required=False,\n # help='MACD Signal range high to optimize')\n\n # Cerebro\n parser.add_argument('--cerebro', required=False, default='',\n metavar='kwargs', help='kwargs in key=value format')\n parser.add_argument('--broker', required=False, default='',\n metavar='kwargs', help='kwargs in key=value format')\n parser.add_argument('--sizer', required=False, default='',\n metavar='kwargs', help='kwargs in key=value format')\n parser.add_argument('--strat', required=False, default='',\n metavar='kwargs', help='kwargs in key=value format')\n\n # Plot options\n parser.add_argument('--plot', '-p', nargs='?', required=False, default=True,\n metavar='kwargs', const=True,\n help=('Plot the read data applying any kwargs passed\\n'\n '\\n'\n 'For example:\\n'\n '\\n'\n ' --plot style=\"candle\" (to plot candles)\\n'))\n\n if pargs is not None:\n print(\"detected pargs \\n\")\n args = parser.parse_args()\n args.__dict__.update(pargs)\n return args\n\n args = parser.parse_args()\n print(\"not detected pargs \\n\")\n return args\n","repo_name":"paulobh/tradingsystem","sub_path":"src/helpers/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":6369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"33790349505","text":"import logging\nimport time\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch_geometric.nn import radius_graph\n\nfrom torch_geometric.utils import sort_edge_index\n\nfrom ocpmodels.models.scn.scn import SphericalChannelNetwork as SCN\n\nfrom ocpmodels.common.registry import registry\nfrom ocpmodels.common.utils import (\n conditional_grad,\n get_pbc_distances,\n radius_graph_pbc,\n)\nfrom ocpmodels.models.base import BaseModel\nfrom ocpmodels.models.scn.sampling import CalcSpherePoints\nfrom ocpmodels.models.scn.smearing import (\n GaussianSmearing,\n LinearSigmoidSmearing,\n SigmoidSmearing,\n SiLUSmearing,\n)\nfrom ocpmodels.models.scn.spherical_harmonics import SphericalHarmonicsHelper\n\ntry:\n import e3nn\n from e3nn import o3\nexcept ImportError:\n pass\n\n\n@registry.register_model(\"scn_charge\")\nclass SCN_Charge(SCN):\n \"\"\"Spherical Channel Network\n Paper: Spherical Channels for Modeling Atomic Interactions\n\n Args:\n use_pbc (bool): Use periodic boundary conditions\n regress_forces (bool): Compute forces\n otf_graph (bool): Compute graph On The Fly (OTF)\n max_num_neighbors (int): Maximum number of neighbors per atom\n cutoff (float): Maximum distance between nieghboring atoms in Angstroms\n max_num_elements (int): Maximum atomic number\n\n num_interactions (int): Number of layers 
in the GNN\n lmax (int): Maximum degree of the spherical harmonics (1 to 10)\n mmax (int): Maximum order of the spherical harmonics (0 or 1)\n num_resolutions (int): Number of resolutions used to compute messages, further away atoms has lower resolution (1 or 2)\n sphere_channels (int): Number of spherical channels\n sphere_channels_reduce (int): Number of spherical channels used during message passing (downsample or upsample)\n hidden_channels (int): Number of hidden units in message passing\n num_taps (int): Number of taps or rotations used during message passing (1 or otherwise set automatically based on mmax)\n\n use_grid (bool): Use non-linear pointwise convolution during aggregation\n num_bands (int): Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2)\n\n num_sphere_samples (int): Number of samples used to approximate the integration of the sphere in the output blocks\n num_basis_functions (int): Number of basis functions used for distance and atomic number blocks\n distance_function (\"gaussian\", \"sigmoid\", \"linearsigmoid\", \"silu\"): Basis function used for distances\n basis_width_scalar (float): Width of distance basis function\n distance_resolution (float): Distance between distance basis functions in Angstroms\n\n show_timing_info (bool): Show timing and memory info\n \"\"\"\n\n def __init__(\n self,\n name = 'scn_charge',\n **kwargs,\n ):\n \n self.atomic = kwargs['atomic']\n self.probe = kwargs['probe']\n kwargs.pop('atomic')\n kwargs.pop('probe')\n \n if 'max_num_neighbors' not in kwargs:\n kwargs['max_num_neighbors'] = 10000\n print('ping')\n if 'show_timing_info' not in kwargs:\n kwargs['show_timing_info'] = False\n \n super().__init__(\n num_atoms = 1,\n bond_feat_dim = 1,\n num_targets = 1,\n otf_graph = False,\n **kwargs,\n )\n\n\n @conditional_grad(torch.enable_grad())\n def _forward_helper(self, data):\n atomic_numbers = data.atomic_numbers.long()\n \n num_atoms = len(atomic_numbers)\n pos = data.pos\n \n # Necessary for _rank_edge_distances\n data.edge_index = sort_edge_index(data.edge_index.flipud()).flipud()\n \n (\n edge_index,\n edge_distance,\n edge_distance_vec,\n cell_offsets,\n _, # cell offset distances\n neighbors,\n ) = self.generate_graph(data)\n\n ###############################################################\n # Initialize data structures\n ###############################################################\n \n # Calculate which message block each edge should use. 
Based on edge distance rank.\n edge_rank = self._rank_edge_distances(\n edge_distance, edge_index, self.max_num_neighbors,\n )\n\n # Reorder edges so that they are grouped by distance rank (lowest to highest)\n last_cutoff = -0.1\n message_block_idx = torch.zeros(len(edge_distance), device=pos.device)\n edge_distance_reorder = torch.tensor([], device=self.device)\n edge_index_reorder = torch.tensor([], device=self.device)\n edge_distance_vec_reorder = torch.tensor([], device=self.device)\n cutoff_index = torch.tensor([0], device=self.device)\n for i in range(self.num_resolutions):\n mask = torch.logical_and(\n edge_rank.gt(last_cutoff), edge_rank.le(self.cutoff_list[i])\n )\n last_cutoff = self.cutoff_list[i]\n message_block_idx.masked_fill_(mask, i)\n edge_distance_reorder = torch.cat(\n [\n edge_distance_reorder,\n torch.masked_select(edge_distance, mask),\n ],\n dim=0,\n )\n edge_index_reorder = torch.cat(\n [\n edge_index_reorder,\n torch.masked_select(\n edge_index, mask.view(1, -1).repeat(2, 1)\n ).view(2, -1),\n ],\n dim=1,\n )\n edge_distance_vec_mask = torch.masked_select(\n edge_distance_vec, mask.view(-1, 1).repeat(1, 3)\n ).view(-1, 3)\n edge_distance_vec_reorder = torch.cat(\n [edge_distance_vec_reorder, edge_distance_vec_mask], dim=0\n )\n cutoff_index = torch.cat(\n [\n cutoff_index,\n torch.tensor(\n [len(edge_distance_reorder)], device=self.device\n ),\n ],\n dim=0,\n )\n\n edge_index = edge_index_reorder.long()\n edge_distance = edge_distance_reorder\n edge_distance_vec = edge_distance_vec_reorder\n\n # Compute 3x3 rotation matrix per edge\n edge_rot_mat = self._init_edge_rot_mat(\n data, edge_index, edge_distance_vec\n )\n\n # Initialize the WignerD matrices and other values for spherical harmonic calculations\n for i in range(self.num_resolutions):\n self.sphharm_list[i].InitWignerDMatrix(\n edge_rot_mat[cutoff_index[i] : cutoff_index[i + 1]],\n )\n\n ###############################################################\n # Initialize node embeddings\n ###############################################################\n\n # Init per node representations using an atomic number based embedding\n x = torch.zeros(\n num_atoms,\n self.sphere_basis,\n self.sphere_channels,\n device=pos.device,\n )\n x[:, 0, :] = self.sphere_embedding(atomic_numbers)\n\n ###############################################################\n # Update spherical node embeddings\n ###############################################################\n \n if self.atomic:\n atom_representations = []\n for i, interaction in enumerate(self.edge_blocks):\n if i > 0:\n x = x + interaction(\n x, atomic_numbers, edge_distance, edge_index, cutoff_index\n )\n atom_representations.append(x)\n else:\n x = interaction(\n x, atomic_numbers, edge_distance, edge_index, cutoff_index\n )\n atom_representations.append(x)\n return atom_representations\n \n \n if self.probe:\n atom_indices = torch.nonzero(data.atomic_numbers).flatten()\n probe_indices = (data.atomic_numbers == 0).nonzero().flatten()\n \n for i, interaction in enumerate(self.edge_blocks):\n if i > 0:\n x = x + interaction(\n x, atomic_numbers, edge_distance, edge_index, cutoff_index\n )\n x[atom_indices] = data.atom_representations[i]\n else:\n x = interaction(\n x, atomic_numbers, edge_distance, edge_index, cutoff_index\n )\n x[atom_indices] = data.atom_representations[i]\n \n ###############################################################\n # Predict electron density\n ###############################################################\n \n # Create a roughly evenly 
distributed point sampling of the sphere\n sphere_points = CalcSpherePoints(\n self.num_sphere_samples, x.device\n ).detach()\n sphharm_weights = o3.spherical_harmonics(\n torch.arange(0, self.lmax + 1).tolist(), sphere_points, False\n ).detach()\n\n # Density estimation\n node_energy = torch.einsum(\n \"abc, pb->apc\", x, sphharm_weights\n ).contiguous()\n node_energy = node_energy.view(-1, self.sphere_channels)\n node_energy = self.act(self.energy_fc1(node_energy))\n node_energy = self.act(self.energy_fc2(node_energy))\n node_energy = self.energy_fc3(node_energy)\n node_energy = node_energy.view(-1, self.num_sphere_samples, 1)\n node_density = torch.sum(node_energy, dim=1) / self.num_sphere_samples\n\n return node_density[probe_indices]","repo_name":"emsunshine/charge-density-models","sub_path":"cdm/models/scn_charge.py","file_name":"scn_charge.py","file_ext":"py","file_size_in_byte":10093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"22988393746","text":"\r\n\"\"\"Design a calculator which will correctly solve all problems except the following once:\r\n 45 * 3=555, 56+9=77, 56/6=4\r\n your program should take operator and the two numbers as input from the user and then return the result: \"\"\"\r\n\r\n# Faulty calculator\r\n# print(\"This is faulty calculator.. \")\r\n# while(True):\r\n# a = input(\"\\nenter your operator which operation you perform: \\n\")\r\n# b = int(input(\"enter the first number: \"))\r\n# c = int(input(\"enter the second number: \"))\r\n# if a==\"+\":\r\n# if b==56 and c==9:\r\n# print(\"The addition is: 77\")\r\n# else:\r\n# print(\"The addition is: \", b+c)\r\n# #exit()\r\n#\r\n# if a==\"-\":\r\n# print(\"The substraction is: \", b-c)\r\n# #exit()\r\n#\r\n# if a==\"*\":\r\n# if b==45 and c==3:\r\n# print(\"The multiplication is: 555\")\r\n# else:\r\n# print(\"The multiplication is:\",b*c)\r\n# # exit()\r\n#\r\n# if a==\"/\":\r\n# if b==56 and c==6:\r\n# print(\"The division is: 4\")\r\n# else:\r\n# print(\"The division is: \", b/c)\r\n# #exit()\r\n#\r\n# if a==\"**\":\r\n# print(\"The power is: \", b**c)\r\n# #exit()\r\n#\r\n# print(\"\\nDo you want to use calculator again...??\")\r\n# D=input(\"Press Y for yes nd N for no: \")\r\n# if D ==\"Y\":\r\n# continue\r\n# if D ==\"N\":\r\n# break\r\n\r\n#this is my code\r\nprint(\"Faulty calculator!!\")\r\nwhile(True):\r\n m = input(\"Enter the operator: \")\r\n n = int(input(\"Enter the 1st num: \"))\r\n o = int(input(\"Enter the 2nd num: \"))\r\n\r\n if m==\"+\":\r\n if n==23 and o==12:\r\n print(\"the addition is: 34\")\r\n else:\r\n print(\"the addition is: \", n+o)\r\n\r\n\r\n if m==\"-\":\r\n if n==56 and o==40:\r\n print(\"the substraction is: 12\")\r\n else:\r\n print(\"the substraction is: \", n-o)\r\n\r\n if m==\"*\":\r\n if n==4 and o==7:\r\n print(\"the multiplication is: 78\")\r\n else:\r\n print(\"the multiplication is: \",n*o)\r\n\r\n if m==\"/\":\r\n print(\"the division is: \", n/o)\r\n\r\n if m==\"**\":\r\n print(\"the answer is:\", n**o)\r\n\r\n print(\"Do you want calculate again..??\")\r\n k=input(\"Y for yes & N for no: \")\r\n if k==\"Y\":\r\n continue\r\n else:\r\n print(\"Thanks for using this calculator..!\")\r\n break\r\n\r\n","repo_name":"JadhaAM/python-programs","sub_path":"Exercise3.py","file_name":"Exercise3.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6581220089","text":"#encoding=utf-8\n\nfrom __future__ import 
absolute_import\n\nimport addonHandler\naddonHandler.initTranslation()\n\nimport wx\nimport gui\n\ntray = gui.mainFrame.sysTrayIcon\ntoolsMenu = tray.toolsMenu\n\nclass Menu(wx.Menu):\n\tdef __init__(self):\n\t\tsuper(Menu, self).__init__()\n\t\t# Translators: Subtitle Reader menu on the NVDA tools menu\n\t\tself.menuItem = toolsMenu.AppendSubMenu(self, _(u'字幕閱讀器 (&R)'))\n\t\t# Translators: Reader toggle switch on the Subtitle Reader menu\n\t\tself.switch = self.AppendCheckItem(wx.ID_ANY, _(u'閱讀器開關 (&S)'))\n\t\tself.switch.Check(True)\n\t\t\n\t\t# Translators: toggle reading when video window not in foreground on the Subtitle Reader menu\n\t\tself.backgroundReading = self.AppendCheckItem(wx.ID_ANY, _(u'背景閱讀 (&B)'))\n\t\tself.backgroundReading.Check(True)\n\t\t\n\t\tself.youtube = wx.Menu()\n\t\tself.youtubeMenuItem = self.AppendSubMenu(self.youtube, _('Youtube 相關設定'))\n\t\t\n\t\t# Translators: toggle Youtube menu item whether to read the chat message when the new chat message already appeared\n\t\tself.readChat = self.youtube.AppendCheckItem(wx.ID_ANY, _(u'閱讀聊天室(&R)'))\n\t\tself.readChat.Check(True)\n\t\t\n\t\t# Translators: toggle Youtube menu item whether to read the chat message sender. \n\t\tself.readChatSender = self.youtube.AppendCheckItem(wx.ID_ANY, _(u'閱讀聊天室訊息發送者(&A)'))\n\t\tself.readChatSender.Check(True)\n\t\t\n\t\t# Translators: toggle Youtube menu item whether to read the chat gift sponser message. \n\t\tself.readChatGiftSponser = self.youtube.AppendCheckItem(wx.ID_ANY, _(u'閱讀會籍贈送(&G)'))\n\t\tself.readChatGiftSponser.Check(True)\n\t\t\n\t\t# Translators: toggle Youtube menu item whether to omit graphic when reading the chats\n\t\tself.omitChatGraphic = self.youtube.AppendCheckItem(wx.ID_ANY, _(u'閱讀聊天室時掠過圖片名稱(&G)'))\n\t\tself.omitChatGraphic.Check(True)\n\t\t\n\t\t# Translators: toggle menu item whether to prompt wher Youtube info card is already appear\n\t\tself.infoCardPrompt = self.youtube.AppendCheckItem(wx.ID_ANY, _(u'資訊卡提示(&I)'))\n\t\tself.infoCardPrompt.Check(True)\n\t\t\n\t\t# Translators: toggle Youtube menu item whether to notify current chapter name when chapter is changed\n\t\tself.readChapter = self.youtube.AppendCheckItem(wx.ID_ANY, _(u'讀出章節(&C)'))\n\t\tself.readChapter.Check(True)\n\t\t\n\t\t# Translators: This menu item performs a check for updates to the reader\n\t\tself.checkForUpdate = self.Append(wx.ID_ANY, _(u'立即檢查更新(&C)'))\n\t\t# Translators: This is menu item that open the changelog\n\t\tself.openChangeLog = self.Append(wx.ID_ANY, _(u'開啟更新日誌(&O)'))\n\t\t# Translators: This menu item that can toggle automatic check for update when Subtitle Reader is start\n\t\tself.checkUpdateAutomatic = self.AppendCheckItem(wx.ID_ANY, _(u'自動檢查更新(&A)'))\n\t\tself.checkUpdateAutomatic.Check(True)\n\t\n\nclass UpdateDialog(wx.Dialog):\n\tdef __init__(self, version):\n\t\tsuper(UpdateDialog, self).__init__(gui.mainFrame, title=_(u'字幕閱讀器 V') + str(version) + _(u' 新版資訊'))\n\t\tself.sizer = wx.BoxSizer(wx.VERTICAL)\n\t\t# Translators: This label means the edit box content is changelog\n\t\tself.changeLogLabel = wx.StaticText(self, label=_(u'更新日誌'))\n\t\tself.changelogText = wx.TextCtrl(self, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH2 | wx.HSCROLL, size=(700, -1))\n\t\tself.progress = wx.Gauge(self, style=wx.GA_VERTICAL)\n\t\t# Translators: This button means now run the update process\n\t\tself.updateNow = wx.Button(self, label=_(u'現在更新(&U)'))\n\t\t# Translators: This button means that the automatic check for updates will skip this version\n\t\tself.skipVersion = 
wx.Button(self, label=_(u'跳過此版本(&S)'))\n\t\t# Translators: This button means close window until next automatic or manual check for update\n\t\tself.later = wx.Button(self, label=_(u'晚點再說(&L)'))\n\t\tself.SetSizerAndFit(self.sizer)\n\t\n","repo_name":"maxe-hsieh/subtitle_reader","sub_path":"addon/globalPlugins/subtitle_reader/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"31"} +{"seq_id":"9323699473","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# Copyright (C) 2010 ZHENG Zhong \r\n#\r\n# Created on 2010-04-14.\r\n# $Id$\r\n#\r\n\r\nimport re\r\n\r\n\r\n__all__ = (\"Browser\",)\r\n\r\n\r\n_BROWSERS = (\r\n\r\n # Opera likes to pretend to be some other browser, so detect it first.\r\n (re.compile(r\"(?POpera)[/\\s](?P[\\w\\.\\-]+)\"), \"Opera\"),\r\n\r\n (re.compile(r\"(?PMSIE)\\s(?P[\\w\\.\\-]+)\"), \"Microsoft Internet Explorer\"),\r\n (re.compile(r\"(?PFirefox)/(?P[\\w\\.\\-]+)\"), \"Mozilla Firefox\"),\r\n (re.compile(r\"(?PChrome)/(?P[\\w\\.\\-]+)\"), \"Google Chrome\"),\r\n (re.compile(r\"(?PSafari)/(?P[\\w\\.\\-]+)\"), \"Safari\"),\r\n)\r\n\r\n\r\ndef _parse_browser(user_agent):\r\n user_agent = user_agent or \"\"\r\n for expr, browser_name in _BROWSERS:\r\n match = expr.search(user_agent)\r\n if match:\r\n return (match.group(\"id\"), browser_name, match.group(\"version\"))\r\n return (None, None, None)\r\n\r\n\r\ndef _parse_platform(user_agent):\r\n user_agent = user_agent or \"\"\r\n expr = re.compile(r\"(?PWindows|Linux|Mac\\sOS\\sX|FreeBSD|OpenBSD)\")\r\n match = expr.search(user_agent)\r\n if match:\r\n return match.group(\"platform\")\r\n else:\r\n return None\r\n\r\n\r\nclass Browser(object):\r\n\r\n def __init__(self, user_agent):\r\n self.user_agent = user_agent or None\r\n self.id, self.name, self.version = _parse_browser(user_agent)\r\n self.platform = _parse_platform(user_agent)\r\n\r\n def __str__(self):\r\n return \"%s %s (%s)\" % (self.name, self.version, self.platform)\r\n\r\n def __unicode__(self):\r\n return u\"%s %s (%s)\" % (self.name, self.version, self.platform)\r\n\r\n\r\n# EOF\r\n","repo_name":"BGCX262/zzheng-hg-to-git","sub_path":"friday/website/friday/common/browsers.py","file_name":"browsers.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9769295282","text":"def wrapper(f):\n def fun(l):\n # complete the function\n arr = []\n for number in l:\n length = len(number)\n data = \"+91 {} {}\".format(number[length - 10: length - 5], number[length - 5: length])\n arr.append(data)\n x = f(arr)\n return x\n\n return fun\n\n\n@wrapper\ndef sort_phone(l):\n print(*sorted(l), sep='\\n')\n\n\nif __name__ == '__main__':\n l = [input() for _ in range(int(input()))]\n sort_phone(l)\n","repo_name":"truongductri01/Hackerrank_problems_solutions","sub_path":"Python_Basics/Closures_Decorators/Standardize_mobile_number.py","file_name":"Standardize_mobile_number.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12651877591","text":"\n\nimport unittest\n# import os\n# print(\"CWD:\", os.getcwd())\n\n# import pdb\n# pdb.set_trace()\n\nimport MangaCMS.lib.logSetup\nimport MangaCMS.cleaner.processDownload\nfrom MangaCMS import db as mdb\nfrom MangaCMS.db import db_models as db_models\n\nimport settings\nassert \"test\" in 
settings.NEW_DATABASE_DB_NAME.lower(), \"Running tests on non-test database!\"\n\nclass TestSequenceFunctions(unittest.TestCase):\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\n\tdef setUp(self):\n\t\t# print(\"Doing Setup!\")\n\t\tMangaCMS.lib.logSetup.DISABLE_REENTRANT_WARNING=True\n\t\tMangaCMS.lib.logSetup.initLogging(logToDb=False)\n\t\tself.addCleanup(self.dropDatabase)\n\n\tdef base_setup(self):\n\t\twith mdb.session_context() as sess:\n\t\t\tf1 = db_models.ReleaseFile(\n\t\t\t\tdirpath = \"lol1\",\n\t\t\t\tfilename = \"lol2\",\n\t\t\t\tfhash = \"lol3\",\n\t\t\t\t)\n\t\t\tf2 = db_models.ReleaseFile(\n\t\t\t\tdirpath = \"wat1\",\n\t\t\t\tfilename = \"wat2\",\n\t\t\t\tfhash = \"wat3\",\n\t\t\t\t)\n\n\t\t\tsess.add(f1)\n\t\t\tsess.add(f2)\n\t\t\tsess.flush()\n\t\t\t# print(f1.id)\n\t\t\t# print(f2.id)\n\n\t\t\tf1.hentai_tags.add(\"t1\")\n\t\t\tf1.hentai_tags.add(\"t2\")\n\t\t\tf1.hentai_tags.add(\"t3\")\n\t\t\tf1.manga_tags.add(\"t4\")\n\t\t\tf1.manga_tags.add(\"t5\")\n\t\t\tf1.manga_tags.add(\"t6\")\n\n\t\t\tf2.hentai_tags.add(\"t7\")\n\t\t\tf2.hentai_tags.add(\"t8\")\n\t\t\tf2.hentai_tags.add(\"t9\")\n\t\t\tf2.manga_tags.add(\"t10\")\n\t\t\tf2.manga_tags.add(\"t11\")\n\t\t\tf2.manga_tags.add(\"t12\")\n\n\t\t\tf1id = f1.id\n\t\t\tf2id = f2.id\n\n\t\twith mdb.session_context() as sess:\n\t\t\tr1 = db_models.MangaReleases(\n\t\t\t\tsource_site = 'test',\n\t\t\t\tsource_id = 'r1',\n\t\t\t\tfileid = f1id,\n\t\t\t\t)\n\n\t\t\tr2 = db_models.MangaReleases(\n\t\t\t\tsource_site = 'test',\n\t\t\t\tsource_id = 'r2',\n\t\t\t\tfileid = f1id,\n\t\t\t\t)\n\n\t\t\tr3 = db_models.MangaReleases(\n\t\t\t\tsource_site = 'test',\n\t\t\t\tsource_id = 'r3',\n\t\t\t\tfileid = f2id\n\t\t\t\t)\n\n\t\t\tr4 = db_models.MangaReleases(\n\t\t\t\tsource_site = 'test',\n\t\t\t\tsource_id = 'r4',\n\t\t\t\tfileid = f2id\n\t\t\t\t)\n\t\t\tsess.add(r1)\n\t\t\tsess.add(r2)\n\t\t\tsess.add(r3)\n\t\t\tsess.add(r4)\n\n\t\t\tr1.tags.add(\"m_r_t1\")\n\t\t\tr1.tags.add(\"m_r_t2\")\n\t\t\tr1.tags.add(\"m_r_t3\")\n\t\t\tr1.tags.add(\"m_r_t4\")\n\t\t\tr1.tags.add(\"m_r_t5\")\n\t\t\tr1.tags.add(\"m_r_t6\")\n\n\t\t\tr2.tags.add(\"m_r_t7\")\n\t\t\tr2.tags.add(\"m_r_t8\")\n\t\t\tr2.tags.add(\"m_r_t9\")\n\t\t\tr2.tags.add(\"m_r_t10\")\n\t\t\tr2.tags.add(\"m_r_t11\")\n\t\t\tr2.tags.add(\"m_r_t12\")\n\n\t\t\tr3.tags.add(\"m_r_t1\")\n\t\t\tr3.tags.add(\"m_r_t2\")\n\t\t\tr3.tags.add(\"m_r_t3\")\n\t\t\tr3.tags.add(\"m_r_t4\")\n\t\t\tr3.tags.add(\"m_r_t5\")\n\t\t\tr3.tags.add(\"m_r_t6\")\n\n\t\t\tr4.tags.add(\"m_r_t7\")\n\t\t\tr4.tags.add(\"m_r_t8\")\n\t\t\tr4.tags.add(\"m_r_t9\")\n\t\t\tr4.tags.add(\"m_r_t10\")\n\t\t\tr4.tags.add(\"m_r_t11\")\n\t\t\tr4.tags.add(\"m_r_t12\")\n\n\t\twith mdb.session_context() as sess:\n\t\t\tr5 = db_models.HentaiReleases(\n\t\t\t\tsource_site = 'test',\n\t\t\t\tsource_id = 'r5',\n\t\t\t\tfileid = f1id,\n\t\t\t\t)\n\n\t\t\tr6 = db_models.HentaiReleases(\n\t\t\t\tsource_site = 'test',\n\t\t\t\tsource_id = 'r6',\n\t\t\t\tfileid = f1id,\n\t\t\t\t)\n\n\t\t\tr7 = db_models.HentaiReleases(\n\t\t\t\tsource_site = 'test',\n\t\t\t\tsource_id = 'r7',\n\t\t\t\tfileid = f2id\n\t\t\t\t)\n\n\t\t\tr8 = db_models.HentaiReleases(\n\t\t\t\tsource_site = 'test',\n\t\t\t\tsource_id = 'r8',\n\t\t\t\tfileid = 
f2id\n\t\t\t\t)\n\t\t\tsess.add(r5)\n\t\t\tsess.add(r6)\n\t\t\tsess.add(r7)\n\t\t\tsess.add(r8)\n\n\t\t\tr5.tags.add(\"h_r_t1\")\n\t\t\tr5.tags.add(\"h_r_t2\")\n\t\t\tr5.tags.add(\"h_r_t3\")\n\t\t\tr5.tags.add(\"h_r_t4\")\n\t\t\tr5.tags.add(\"h_r_t5\")\n\t\t\tr5.tags.add(\"h_r_t6\")\n\n\t\t\tr6.tags.add(\"h_r_t7\")\n\t\t\tr6.tags.add(\"h_r_t8\")\n\t\t\tr6.tags.add(\"h_r_t9\")\n\t\t\tr6.tags.add(\"h_r_t10\")\n\t\t\tr6.tags.add(\"h_r_t11\")\n\t\t\tr6.tags.add(\"h_r_t12\")\n\n\t\t\tr7.tags.add(\"h_r_t1\")\n\t\t\tr7.tags.add(\"h_r_t2\")\n\t\t\tr7.tags.add(\"h_r_t3\")\n\t\t\tr7.tags.add(\"h_r_t4\")\n\t\t\tr7.tags.add(\"h_r_t5\")\n\t\t\tr7.tags.add(\"h_r_t6\")\n\n\t\t\tr8.tags.add(\"h_r_t7\")\n\t\t\tr8.tags.add(\"h_r_t8\")\n\t\t\tr8.tags.add(\"h_r_t9\")\n\t\t\tr8.tags.add(\"h_r_t10\")\n\t\t\tr8.tags.add(\"h_r_t11\")\n\t\t\tr8.tags.add(\"h_r_t12\")\n\n\t\twith mdb.session_context() as sess:\n\t\t\tf1 = sess.query(db_models.ReleaseFile).filter(db_models.ReleaseFile.fhash=='lol3').one()\n\t\t\tf2 = sess.query(db_models.ReleaseFile).filter(db_models.ReleaseFile.fhash=='wat3').one()\n\n\t\t\tself.assertEqual(set(f1.hentai_tags), set([\"t1\", \"t2\", \"t3\"]))\n\t\t\tself.assertEqual(set(f1.manga_tags), set([\"t4\", \"t5\", \"t6\"]))\n\t\t\tself.assertEqual(set(f2.hentai_tags), set([\"t7\", \"t8\", \"t9\"]))\n\t\t\tself.assertEqual(set(f2.manga_tags), set([\"t10\", \"t11\", \"t12\"]))\n\n\n\t\t\tf1_h_ids = [tmp.source_id for tmp in f1.hentai_releases]\n\t\t\tf1_m_ids = [tmp.source_id for tmp in f1.manga_releases]\n\t\t\tf2_h_ids = [tmp.source_id for tmp in f2.hentai_releases]\n\t\t\tf2_m_ids = [tmp.source_id for tmp in f2.manga_releases]\n\n\t\t\tself.assertEqual(set(f1_h_ids), set(['r5', 'r6']))\n\t\t\tself.assertEqual(set(f1_m_ids), set(['r1', 'r2']))\n\t\t\tself.assertEqual(set(f2_h_ids), set(['r7', 'r8']))\n\t\t\tself.assertEqual(set(f2_m_ids), set(['r3', 'r4']))\n\n\t\twith mdb.session_context() as sess:\n\n\t\t\tr1 = sess.query(db_models.MangaReleases) \\\n\t\t\t\t.filter(db_models.MangaReleases.source_site=='test') \\\n\t\t\t\t.filter(db_models.MangaReleases.source_id=='r1') \\\n\t\t\t\t.one()\n\t\t\tr2 = sess.query(db_models.MangaReleases) \\\n\t\t\t\t.filter(db_models.MangaReleases.source_site=='test') \\\n\t\t\t\t.filter(db_models.MangaReleases.source_id=='r2') \\\n\t\t\t\t.one()\n\t\t\tr3 = sess.query(db_models.MangaReleases) \\\n\t\t\t\t.filter(db_models.MangaReleases.source_site=='test') \\\n\t\t\t\t.filter(db_models.MangaReleases.source_id=='r3') \\\n\t\t\t\t.one()\n\t\t\tr4 = sess.query(db_models.MangaReleases) \\\n\t\t\t\t.filter(db_models.MangaReleases.source_site=='test') \\\n\t\t\t\t.filter(db_models.MangaReleases.source_id=='r4') \\\n\t\t\t\t.one()\n\n\t\t\tr5 = sess.query(db_models.HentaiReleases) \\\n\t\t\t\t.filter(db_models.HentaiReleases.source_site=='test') \\\n\t\t\t\t.filter(db_models.HentaiReleases.source_id=='r5') \\\n\t\t\t\t.one()\n\t\t\tr6 = sess.query(db_models.HentaiReleases) \\\n\t\t\t\t.filter(db_models.HentaiReleases.source_site=='test') \\\n\t\t\t\t.filter(db_models.HentaiReleases.source_id=='r6') \\\n\t\t\t\t.one()\n\t\t\tr7 = sess.query(db_models.HentaiReleases) \\\n\t\t\t\t.filter(db_models.HentaiReleases.source_site=='test') \\\n\t\t\t\t.filter(db_models.HentaiReleases.source_id=='r7') \\\n\t\t\t\t.one()\n\t\t\tr8 = sess.query(db_models.HentaiReleases) \\\n\t\t\t\t.filter(db_models.HentaiReleases.source_site=='test') \\\n\t\t\t\t.filter(db_models.HentaiReleases.source_id=='r8') \\\n\t\t\t\t.one()\n\n\t\t\tr1_t = set(r1.tags)\n\t\t\tr2_t = 
set(r2.tags)\n\t\t\tr3_t = set(r3.tags)\n\t\t\tr4_t = set(r4.tags)\n\t\t\tr5_t = set(r5.tags)\n\t\t\tr6_t = set(r6.tags)\n\t\t\tr7_t = set(r7.tags)\n\t\t\tr8_t = set(r8.tags)\n\n\t\t\tself.assertEqual(r1_t, {'m_r_t4', 'm_r_t2', 'm_r_t5', 'm_r_t1', 'm_r_t6', 'm_r_t3'})\n\t\t\tself.assertEqual(r2_t, {'m_r_t12', 'm_r_t8', 'm_r_t11', 'm_r_t7', 'm_r_t9', 'm_r_t10'})\n\t\t\tself.assertEqual(r3_t, {'m_r_t4', 'm_r_t2', 'm_r_t5', 'm_r_t1', 'm_r_t6', 'm_r_t3'})\n\t\t\tself.assertEqual(r4_t, {'m_r_t12', 'm_r_t8', 'm_r_t11', 'm_r_t7', 'm_r_t9', 'm_r_t10'})\n\t\t\tself.assertEqual(r5_t, {'h_r_t6', 'h_r_t4', 'h_r_t2', 'h_r_t5', 'h_r_t3', 'h_r_t1'})\n\t\t\tself.assertEqual(r6_t, {'h_r_t9', 'h_r_t10', 'h_r_t12', 'h_r_t11', 'h_r_t7', 'h_r_t8'})\n\t\t\tself.assertEqual(r7_t, {'h_r_t6', 'h_r_t4', 'h_r_t2', 'h_r_t5', 'h_r_t3', 'h_r_t1'})\n\t\t\tself.assertEqual(r8_t, {'h_r_t9', 'h_r_t10', 'h_r_t12', 'h_r_t11', 'h_r_t7', 'h_r_t8'})\n\n\n\tdef dropDatabase(self):\n\t\t# print(\"Cleanup!\")\n\t\twith mdb.session_context() as sess:\n\t\t\t# First, delete the link table entries\n\t\t\tsess.query(db_models.manga_files_tags_link).delete(synchronize_session=False)\n\t\t\tsess.query(db_models.manga_releases_tags_link).delete(synchronize_session=False)\n\t\t\tsess.query(db_models.hentai_files_tags_link).delete(synchronize_session=False)\n\t\t\tsess.query(db_models.hentai_releases_tags_link).delete(synchronize_session=False)\n\n\n\t\t\t# Delete the releases/tags\n\t\t\tsess.query(db_models.MangaReleases).delete()\n\t\t\tsess.query(db_models.HentaiReleases).delete()\n\t\t\tsess.query(db_models.MangaTags).delete()\n\t\t\tsess.query(db_models.HentaiTags).delete()\n\n\t\t\t# Finally, the files\n\t\t\tsess.query(db_models.ReleaseFile).delete()\n\n\n\tdef test_basic_1(self):\n\t\tself.base_setup()\n\n\tdef test_tag_reuse(self):\n\t\tself.base_setup()\n\n\t\twith mdb.session_context() as sess:\n\t\t\tm_tag_c_1 = sess.query(db_models.MangaTags).count()\n\t\t\th_tag_c_1 = sess.query(db_models.HentaiTags).count()\n\n\t\twith mdb.session_context() as sess:\n\t\t\tf2 = sess.query(db_models.ReleaseFile).filter(db_models.ReleaseFile.fhash=='wat3').one()\n\t\t\tf2.hentai_tags.add(\"t1\")\n\t\t\tf2.hentai_tags.add(\"t2\")\n\t\t\tf2.hentai_tags.add(\"t3\")\n\t\t\tf2.manga_tags.add(\"t4\")\n\t\t\tf2.manga_tags.add(\"t5\")\n\t\t\tf2.manga_tags.add(\"t6\")\n\n\t\twith mdb.session_context() as sess:\n\t\t\tm_tag_c_2 = sess.query(db_models.MangaTags).count()\n\t\t\th_tag_c_2 = sess.query(db_models.HentaiTags).count()\n\n\t\tself.assertEqual(m_tag_c_1, m_tag_c_2)\n\t\tself.assertEqual(h_tag_c_1, h_tag_c_2)\n\n\tdef test_file_relink_fid(self):\n\t\tself.base_setup()\n\n\t\tm_dlproc = MangaCMS.cleaner.processDownload.MangaProcessor()\n\t\th_dlproc = MangaCMS.cleaner.processDownload.HentaiProcessor()\n\t\twith mdb.session_context() as sess:\n\n\t\t\tf2 = sess.query(db_models.ReleaseFile).filter(db_models.ReleaseFile.fhash=='wat3').one()\n\n\t\t\ta_r3 = sess.query(db_models.MangaReleases) \\\n\t\t\t\t.filter(db_models.MangaReleases.source_site=='test') \\\n\t\t\t\t.filter(db_models.MangaReleases.source_id=='r3') \\\n\t\t\t\t.one()\n\t\t\ta_r4 = sess.query(db_models.MangaReleases) \\\n\t\t\t\t.filter(db_models.MangaReleases.source_site=='test') \\\n\t\t\t\t.filter(db_models.MangaReleases.source_id=='r4') \\\n\t\t\t\t.one()\n\n\t\t\tself.assertEqual(f2.id, a_r3.fileid)\n\t\t\tself.assertEqual(f2.id, a_r4.fileid)\n\n\t\tm_dlproc._create_or_update_file_entry_path(\"wat1/wat2\", \"lol1/lol2\")\n\n\t\twith mdb.session_context() as sess:\n\n\t\t\tf1 = 
sess.query(db_models.ReleaseFile).filter(db_models.ReleaseFile.fhash=='lol3').one()\n\n\t\t\tb_r3 = sess.query(db_models.MangaReleases) \\\n\t\t\t\t.filter(db_models.MangaReleases.source_site=='test') \\\n\t\t\t\t.filter(db_models.MangaReleases.source_id=='r3') \\\n\t\t\t\t.one()\n\t\t\tb_r4 = sess.query(db_models.MangaReleases) \\\n\t\t\t\t.filter(db_models.MangaReleases.source_site=='test') \\\n\t\t\t\t.filter(db_models.MangaReleases.source_id=='r4') \\\n\t\t\t\t.one()\n\n\t\t\tself.assertEqual(f1.id, b_r3.fileid)\n\t\t\tself.assertEqual(f1.id, b_r4.fileid)\n\n\n\tdef test_file_relink_tags(self):\n\t\tself.base_setup()\n\n\t\tm_dlproc = MangaCMS.cleaner.processDownload.MangaProcessor()\n\t\th_dlproc = MangaCMS.cleaner.processDownload.HentaiProcessor()\n\t\twith mdb.session_context() as sess:\n\n\t\t\tf2 = sess.query(db_models.ReleaseFile).filter(db_models.ReleaseFile.fhash=='wat3').one()\n\t\t\tf3 = sess.query(db_models.ReleaseFile).filter(db_models.ReleaseFile.fhash=='lol3').one()\n\n\t\t\tf2_m_tags = set(f2.manga_tags)\n\t\t\tf2_h_tags = set(f2.hentai_tags)\n\t\t\tf3_m_tags = set(f3.manga_tags)\n\t\t\tf3_h_tags = set(f3.hentai_tags)\n\n\t\t\tself.assertEqual(f2_m_tags, {'t11', 't12', 't10'} )\n\t\t\tself.assertEqual(f2_h_tags, {'t9', 't8', 't7'} )\n\t\t\tself.assertEqual(f3_m_tags, {'t5', 't6', 't4'} )\n\t\t\tself.assertEqual(f3_h_tags, {'t1', 't2', 't3'})\n\n\n\t\tm_dlproc._create_or_update_file_entry_path(\"wat1/wat2\", \"lol1/lol2\")\n\n\t\twith mdb.session_context() as sess:\n\n\t\t\tf1 = sess.query(db_models.ReleaseFile).filter(db_models.ReleaseFile.fhash=='lol3').one()\n\n\t\t\tf1_m_tags = set(f1.manga_tags)\n\t\t\tf1_h_tags = set(f1.hentai_tags)\n\n\n\t\t\tself.assertEqual(f1_m_tags, f2_m_tags | f3_m_tags)\n\t\t\tself.assertEqual(f1_h_tags, f2_h_tags | f3_h_tags)","repo_name":"herp-a-derp/MangaCMS","sub_path":"MangaCMS/test/duper_test.py","file_name":"duper_test.py","file_ext":"py","file_size_in_byte":11034,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"24367215121","text":"import sys\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Input, Dropout, Flatten, Dense, Embedding, concatenate, multiply\nfrom keras import applications\nfrom keras import Model\n\n\nfrom keras import optimizers\n\nimport numpy as np\n\nimport src.utils.utils as ut\n\nclass basic_models:\n def __init__(self):\n pass\n\n def imagemodel(self):\n input_tensor = Input(shape=(ut.params.image_heigth, ut.params.image_width, 3), name='im_input')\n vgg_model = applications.VGG16(weights='imagenet',\n include_top=False,\n input_tensor=input_tensor)\n layer_dict = dict([(layer.name, layer) for layer in vgg_model.layers])\n\n x = layer_dict['block5_conv3'].output\n\n vgg_model_flat = Flatten(name='im_flatten_1')(x)\n vgg_model_dense = Dense(128, activation='relu', name='im_dense_1')(vgg_model_flat)\n vgg_model_drop = Dropout(0.5, name='im_drop_1')(vgg_model_dense)\n\n custom_model = Model(input=vgg_model.input, output=vgg_model_drop)\n\n for layer in vgg_model.layers:\n layer.trainable = False\n\n return custom_model\n\n def textmodel(self):\n NLP_input = Input(shape=(ut.params.n_words, ), name='text_input')\n NLP_embedding = Embedding(ut.params.n_vocab, 128, input_length=ut.params.n_words, name='text_embedding')(NLP_input)\n NLP_flatten = Flatten(name='text_flatten_1')(NLP_embedding)\n NLP_dense = Dense(128, activation='relu', name='text_dense_1')(NLP_flatten)\n\n custom_model = Model(input=NLP_input, output=NLP_dense)\n\n return 
custom_model\n\n\n def combined(self):\n image = self.imagemodel()\n text = self.textmodel()\n\n merged = multiply([image.output, text.output], name='merged')\n merged_dense = Dense(128, activation='relu', name='merged_dense_1')(merged)\n merged_drop = Dropout(0.5, name='merged_drop_1')(merged_dense)\n\n custom_model = Model(inputs=[image.input, text.input],\n output=merged_drop)\n\n return custom_model\n\nclass custom(basic_models):\n def __init__(self, classmode, modelmode, n_classes=None):\n self.classmode = classmode\n self.modelmode = modelmode\n if(self.classmode == 'multilabel'):\n assert (len(n_classes) > 1)\n self.n_classes_pc = n_classes[0]\n self.n_classes_pt = n_classes[1]\n self.n_classes_pd = n_classes[2]\n else:\n assert (type(n_classes) != list)\n self.n_classes = n_classes\n\n def make_model(self):\n if(self.classmode == 'multiclass'):\n model = None\n if(self.modelmode == 'image'):\n model = basic_models.imagemodel(self)\n elif(self.modelmode == 'text'):\n model = basic_models.textmodel(self)\n else:\n model = basic_models.combined(self)\n\n custom_out = Dense(self.n_classes, activation='sigmoid', name='output')(model.output)\n if(self.n_classes > 1):\n custom_out = Dense(self.n_classes, activation='softmax', name='output')(model.output)\n custom_model = Model(input=model.inputs, output=[custom_out])\n\n return custom_model\n\n elif(self.classmode == 'multilabel'):\n model = None\n if(self.modelmode == 'image'):\n model = basic_models.imagemodel(self)\n elif(self.modelmode == 'text'):\n model = basic_models.textmodel(self)\n else:\n model = basic_models.combined(self)\n\n custom_out_pc = Dense(self.n_classes_pc, activation='sigmoid', name='output_1')(model.output)\n custom_out_pt = Dense(self.n_classes_pt, activation='sigmoid', name='output_2')(model.output)\n custom_out_pd = Dense(self.n_classes_pd, activation='sigmoid', name='output_3')(model.output)\n if(self.n_classes_pc > 1):\n custom_out_pc = Dense(self.n_classes_pc,\n activation='linear', name='output_1')(model.output)\n if(self.n_classes_pt > 1):\n custom_out_pt= Dense(self.n_classes_pt,\n activation='softmax', name='output_2')(model.output)\n if(self.n_classes_pd > 1):\n custom_out_pd = Dense(self.n_classes_pd,\n activation='softmax', name='output_3')(model.output)\n custom_model = Model(input=model.inputs, output=[custom_out_pc, custom_out_pt, custom_out_pd])\n\n return custom_model\n else:\n print('Make proper classmode and modelmode choice')\n sys.exit()\n\n def make_compiled_model(self, learning_rate):\n if(self.classmode == 'multiclass'):\n model = self.make_model()\n model.compile(optimizer=optimizers.Adam(lr=learning_rate),\n loss='mse',\n metrics=['accuracy'])\n return model\n elif(self.classmode == 'multilabel'):\n model = self.make_model()\n model.compile(optimizer=optimizers.Adam(lr=learning_rate),\n loss=['mse', 'categorical_crossentropy', 'categorical_crossentropy'],\n #loss_weights=[1,1,2],\n metrics=[\"accuracy\"])\n return model\n else:\n print('Make proper classmode and modelmode choice')\n sys.exit()\n","repo_name":"lbhesse/e-commerce","sub_path":"src/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"73260407768","text":"# %%\nimport glob\nimport json\nimport random\nfrom pathlib import Path\n\nimport cv2\nimport matplotlib.pyplot as plt\nfrom mmcv import Config\nfrom tqdm import tqdm\nfrom vinbigdata.preprocess import ScaledYoloWriter, VocWriter\nfrom 
vinbigdata.utils import is_interactive\nfrom vinbigdata.visualize import plot_bboxes\n\n# %%\nclass_map = {\n 'Atelectasis': 'Atelectasis',\n 'Effusion': 'Pleural effusion',\n 'Calcification': 'Calcification',\n 'Consolidation': 'Consolidation',\n 'Fibrosis': 'Pulmonary fibrosis',\n 'Pneumothorax': 'Pneumothorax',\n 'Mass': 'Nodule/Mass',\n 'Nodule': 'Nodule/Mass'\n}\n\n# %%\nconfig = Config.fromfile('configs/preprocess/chestxdet.py')['config']\nmetadata = []\nfor f in glob.glob(config['metapath']):\n metadata.extend(json.load(open(f, 'r')))\nimages = {Path(path).name: Path(path) for path in glob.glob(config['images'])}\n\n# %%\nimg_writers = [\n VocWriter(config['result_dir'], config['clear'], config['preprocessor']),\n ScaledYoloWriter(config['result_dir'], False, config['preprocessor'])\n]\nids = []\nfor img_meta in tqdm(metadata):\n img_path = images[img_meta['file_name']]\n img = cv2.imread(str(img_path))\n\n bboxes_meta = [(box, (class_map[class_name])) for box, class_name in zip(img_meta['boxes'], img_meta['syms'])\n if class_name in class_map]\n if len(bboxes_meta) - len(img_meta) >= 2:\n continue\n if is_interactive() and config['visualize']:\n img = plot_bboxes(img, [meta[0] for meta in bboxes_meta], [1.0 for meta in bboxes_meta],\n [meta[1] for meta in bboxes_meta])\n plt.figure(figsize=(10, 10))\n plt.imshow(img)\n plt.show()\n for writer in img_writers:\n writer.process_image(\n img_name=img_path.name,\n img=cv2.cvtColor(img, cv2.COLOR_RGB2GRAY),\n bboxes=[meta[0] for meta in bboxes_meta],\n classes=[meta[1] for meta in bboxes_meta])\n ids.append(img_path.stem)\n\n# %%\nrandom.seed(221288)\nrandom.shuffle(ids)\nfor writer in img_writers:\n writer.write_image_set(ids, 'train_chestxdet.txt')\n","repo_name":"Beaver48/kaggle-vinbigdata","sub_path":"scripts/preprocess_chestxdet.py","file_name":"preprocess_chestxdet.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69796745368","text":"import reports\n\n\ndef filmsReportMenuOptions():\n options = 0\n\n optionsList = [\"1\", \"2\", \"3\", \"4\", \"5\"]\n\n userChoices = \"FilmFlix Reports\\n Reports Menu:\\n1. Show details of all films.\\n2. Show all Animation Films.\\n3. Show Films Rated PG\\n4. Show Films from 2023.\\n5. 
Exit.\"\n\n while options not in optionsList:\n print(userChoices)\n options = input(\n \"Enter an option from the FilmFlix Report choices above: \")\n\n if options not in optionsList:\n print(f\"{options} is not a valid choice in the FilmFlix Report menu!\")\n\n return options\n\n\ndef runReportsMenu():\n secondaryProgram = True\n while secondaryProgram:\n mainMenu2 = filmsReportMenuOptions()\n\n if mainMenu2 == \"1\":\n reports.details()\n elif mainMenu2 == \"2\":\n reports.genre()\n elif mainMenu2 == \"3\":\n reports.rating()\n elif mainMenu2 == \"4\":\n reports.year()\n else:\n secondaryProgram = False\n input(\"Press ENTER key to EXIT\")\n","repo_name":"Mab2095/FilmFlix","sub_path":"reportsMenu.py","file_name":"reportsMenu.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34938495291","text":"import os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport Model\nimport Dataset\n\ndef train(data_loader, model, optimizer, loss_fn, device, epoch, writer):\n \n model.train()\n \n total_loss = []\n # ミニバッチごとにループ\n for x_batch, y_batch_lt, y_batch_rt, y_batch_lb, y_batch_rb, _ in data_loader:\n \n x_batch = x_batch.to(device, dtype=torch.float)\n y_batch_lt = y_batch_lt.to(device, dtype=torch.float)\n y_batch_rt = y_batch_rt.to(device, dtype=torch.float)\n y_batch_lb = y_batch_lb.to(device, dtype=torch.float)\n y_batch_rb = y_batch_rb.to(device, dtype=torch.float)\n \n optimizer.zero_grad()\n lt, rt, lb, rb = model(x_batch)\n\n loss_lt = loss_fn(lt, y_batch_lt)\n loss_rt = loss_fn(rt, y_batch_rt)\n loss_lb = loss_fn(lb, y_batch_lb)\n loss_rb = loss_fn(rb, y_batch_rb)\n \n #loss = (0.25 * loss_lt) + (0.25 * loss_rt) + (0.25 * loss_lb) + (0.25 * loss_rb)\n loss = loss_lt + loss_rt + loss_lb + loss_rb\n total_loss.append(loss.item())\n\n loss.backward()\n optimizer.step()\n\n for idx, loss in enumerate(total_loss) :\n writer.add_scalar('data/loss', loss, (epoch * len(total_loss)) + idx)\n\n return np.median(total_loss), np.max(total_loss), np.min(total_loss)\n\n\ndef main(epochs=25, batch_size=4, lr=0.001):\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n device = 'cpu'\n\n # Transform を作成する。\n transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])\n\n dataset = Dataset.Dataset(\n './datasets/DirectionSignboard/data_list.csv', \n './datasets/DirectionSignboard/images',\n transform\n )\n\n # DataLoader を作成する。\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n\n model = Model.Model().to(device)\n optimizer = optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))\n\n loss_fn = nn.MSELoss()\n\n logs_writer = SummaryWriter('F:/corner_detector_pytorch/traind_model/DirectionSignboard/tb_logs')\n #dummy_input = {'source_image': torch.rand([batch_size, 3, 240, 240], device = device)}\n #logs_writer.add_graph(model, dummy_input)\n os.makedirs('traind_model/DirectionSignboard', exist_ok=True)\n\n print('Starting training...') \n best_val_loss = float(\"inf\")\n\n for epoch in range(epochs):\n loss_med, loss_max, loss_min = train(dataloader, model, optimizer, loss_fn, device, epoch, logs_writer)\n\n print(f'[{epoch+1}], {loss_max}, {loss_med}, {loss_min}')\n\n is_best = loss_max < best_val_loss\n best_val_loss = min(loss_max, best_val_loss)\n\n 
torch.save(\n model.to('cpu').state_dict(), \n os.path.join(\n './traind_model/DirectionSignboard',\n f'epoch_{epoch+1}_model.pth'\n )\n )\n if is_best:\n torch.save(\n model.to('cpu').state_dict(), \n os.path.join(\n './traind_model/DirectionSignboard',\n f'best_model.pth'\n )\n )\n\n\nif __name__ == '__main__':\n epochs = 200\n lr = 0.0001\n batch_size=4\n\n main(epochs, batch_size, lr)\n\n\n","repo_name":"hrkknt20001/corner_detector_pytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"20543121686","text":"import time\nimport shutil\nimport os\nfrom os import path\n\n\ndef move_recent_files(src,dst):\n time_now = time.time()\n \n for file in os.listdir(src):\n if file.endswith(\".txt\"):\n src_name = os.path.join(src,file)\n modified_time = os.path.getmtime(src_name)\n timenow = time.time()\n elapsed_time = time_now - modified_time\n if elapsed_time < 86400: \n shutil.copy(os.path.join(src, file),dst)\n \n \n\n","repo_name":"AMilesBarnes/Python","sub_path":"file_move_function.py","file_name":"file_move_function.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"31290010793","text":"import requests\nimport re\nimport json\n\n\ndef request_dandan(url):\n try:\n response = requests.get(url)\n if response.status_code == 200:\n return response.text\n except requests.RequestException as e:\n print(e)\n return None\n\n\ndef parse_result(html):\n pattern = re.compile(\n '<li>.*?list_num.*?(\\\d+).</div>.*?<img src=\"(.*?)\".*?class=\"name\".*?title=\"(.*?)\">.*?class=\"star\">.*?class=\"tuijian\">(.*?)</span>.*?class=\"publisher_info\">.*?target=\"_blank\">(.*?)</a>.*?class=\"biaosheng\">.*?<span>(.*?)</span></div>.*?<p><span\\\sclass=\"price_n\">&yen;(.*?)</span>.*?</li>', re.S)\n items = re.findall(pattern, html)\n\n for item in items:\n yield {\n 'range': item[0],\n 'image': item[1],\n 'title': item[2],\n 'recommend': item[3],\n 'author': item[4],\n 'times': item[5],\n 'price': item[6]\n }\n\n\ndef write_item_to_file(item):\n print('Writing item ====> ' + str(item))\n with open('book.txt', 'a', encoding='UTF-8') as f:\n f.write(json.dumps(item, ensure_ascii=False) + '\\n')\n\n\ndef main(page):\n url = 'http://bang.dangdang.com/books/fivestars/01.00.00.00.00.00-recent30-0-0-1-' + str(page)\n html = request_dandan(url)\n items = parse_result(html) # parse out the fields we want\n for item in items:\n write_item_to_file(item)\n\n\nif __name__ == \"__main__\":\n for i in range(1, 26):\n main(i)\n","repo_name":"wistbean/learn_python3_spider","sub_path":"dangdang_top_500.py","file_name":"dangdang_top_500.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":14022,"dataset":"github-code","pt":"3"}
{"seq_id":"6396465486","text":"__author__ = 'Emanuele Albini'\n__all__ = [\n 'save_booster',\n 'load_booster',\n 'load_booster_params',\n 'make_booster_agnostic',\n]\n\nimport os\nimport time\nimport random\nimport json\nimport logging\nimport numpy as np\nfrom ._json import load_json, save_json\nfrom ._utils import _remove_extension_from_filename\n\nBOOSTER_SUFFIX = '.bin'\nBOOSTER_TXT_SUFFIX = '.bin.txt'\nBOOSTER_PARAMS_SUFFIX = '.bin.json'\n\nFORMATS = ['bin.txt', 'bin.json', 'binary', 'bin', 'json', 'txt']\n\nDEFAULT_XGBOOST_NTHREAD = 24\n\n\ndef save_booster(model, path):\n \"\"\"Save the booster model in 3 files:\n - .bin > Booster file (agnostic to feature names)\n - .bin.json > Booster parameters\n - .bin.txt > Booster textual description (agnostic to feature names)\n\n Args:\n model (xgboost.Booster): The model\n path (str): The path without extension or '.bin'\n \"\"\"\n path = _remove_extension_from_filename(path, formats=FORMATS)\n\n # Create directory if not exists\n dir_name = os.path.dirname(path)\n if dir_name != '':\n os.makedirs(dir_name, exist_ok=True)\n\n # Dump textual description of the trees (with feature names)\n model.dump_model(path + BOOSTER_TXT_SUFFIX)\n\n # Dump raw booster parameters\n save_json(json.loads(model.save_config()), path + BOOSTER_PARAMS_SUFFIX)\n\n # Make the model agnostic to feature names\n model = make_booster_agnostic(model)\n\n # Dump booster object\n logging.info(f'Saving model in {path + BOOSTER_SUFFIX} ...')\n model.save_model(path + BOOSTER_SUFFIX)\n\n\ndef load_booster_params(path):\n if path.endswith('.bin'):\n path = path[:-4]\n\n path = path + BOOSTER_PARAMS_SUFFIX\n if not os.path.exists(path):\n raise FileNotFoundError(f'Booster params not found: {path}')\n\n return load_json(path)\n\n\ndef load_booster(path):\n from xgboost import Booster\n\n if path.endswith('.bin'):\n path = path[:-4]\n\n path = path + BOOSTER_SUFFIX\n if not os.path.exists(path):\n raise FileNotFoundError(f'Booster file not found: {path}')\n\n booster = Booster(model_file=path)\n\n return booster\n\n\ndef make_booster_agnostic(booster):\n from xgboost import Booster\n\n temp_file = f'temp-{time.perf_counter()}-{random.random()}.bin'\n booster.save_model(temp_file)\n booster = Booster(model_file=temp_file)\n os.remove(temp_file)\n\n return booster\n\n\n# def prepare_booster(booster, **kwargs):\n# booster.set_param({'nthread': DEFAULT_XGBOOST_NTHREAD}) # This has been deprecated (but we keep it for compatibility)\n# booster.set_param({'n_jobs': DEFAULT_XGBOOST_NTHREAD})\n# return booster\n
booster\n","repo_name":"emanuele-albini/feature-selection","sub_path":"utils/file/_xgboost.py","file_name":"_xgboost.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14745319034","text":"from hsd.common import *\nfrom hsd.tree import Element\n\n__all__ = [ \"HSDQueryError\", \"HSDMissingTagException\", \"HSDInvalidTagException\",\n \"HSDInvalidTagValueException\", \"HSDMissingAttributeException\",\n \"HSDInvalidAttributeException\", \"HSDInvalidAttributeValueException\",\n \"HSDQuery\"]\n\n\nclass HSDQuery:\n \"\"\"Class providing methods for querying a HSD-tree.\"\"\"\n \n def __init__(self, chkuniqueness=False, markprocessed=False):\n \"\"\"Initializes a query object.\n \n Args:\n chkuniqueness: If True, all query methods except findchildren()\n checks, whether the child found is unique. (Default: False)\n markprocessed: If True, nodes which have been queried are marked\n as processed. \n \"\"\" \n self.chkunique = chkuniqueness\n self.mark = markprocessed\n \n def findchild(self, node, name, optional=False):\n \"\"\"Finds a child of a node with a given name.\n \n Args:\n node: Parent node.\n name: Name of the child to look for.\n optional: Flags, whether the child is optional only.\n \n Returns:\n A hsd node if child has been found or None.\n \n Raises:\n HSDMissingTagException: if child was not found and the optional\n flag was False.\n HSDInvalidTagException: if there are duplicates of the child\n and the query object was initialized with chkuniqueness=True.\n \"\"\"\n if self.chkunique:\n children = node.findall(name)\n if len(children > 1):\n raise HSDInvalidTagException(node=children[1],\n msg=\"Double occurance of unique tag '{}'.\".format(name))\n child = children[0] if children else None\n else:\n child = node.find(name)\n if child is None and not optional:\n raise HSDMissingTagException(\n msg=\"Required tag '{}' not found.\".format(name), node=node)\n self.markprocessed(child)\n return child\n \n def findchildren(self, node, name, optional=False):\n \"\"\"Finds children of a node with given name.\n \n Args:\n node: Parent node.\n name: Name of the children to look for.\n optional: Flags, whether the presence of at least one child is\n optional or not.\n \n Returns:\n List of child nodes or empty list.\n \n Raises:\n HSDMissingTagException: if no children were not found and the\n optional flag was False.\n \"\"\"\n children = node.findall(name)\n if not children and not optional:\n raise HSDMissingTagException(node=node, msg=\"No occurrence of \"\n \"required tag '{}' found.\".format(name))\n self.markprocessed(*children)\n return children\n \n def getonlychild(self, node, defchild=None, hsdblock=False):\n \"\"\"Returns the first child of a node, which must be the only child.\n \n Args:\n node: Parent node.\n defchild: Default value for the child. If specified and no child is\n found, the default is appended to the tree and returned.\n hsdblock: If True, the the given value is added in hsd block\n notation (enclosed in curly braces) instead of an assignment. 
\n \n Returns:\n The only child of the node.\n \n Raises:\n HSDInvalidTagException: If tag has more than one child.\n HSDMissingTagException: If tag has no child.\n \"\"\" \n if len(node) > 1:\n raise HSDInvalidTagException(msg=\"Tag '{}' is only allowed to \"\n \"have one child.\".format(node.tag), node=node)\n elif len(node) == 1:\n child = node[0]\n elif defchild is None:\n raise HSDMissingTagException(msg=\"Tag '{}' must have exactly one \"\n \"child.\".format(node.tag), node=node)\n else:\n child = defchild\n node.append(child)\n if not hsdblock:\n node.hsdattrib[HSDATTR_EQUAL] = True\n self.markprocessed(child)\n return child\n \n def getchild(self, node, name, deftext=None, defattribs=None):\n \"\"\"Returns a child with a given name or sets a default if not found and\n default values had been specified.\n \n Args:\n node: Parent node.\n name: Name of the child to look for.\n deftext: Default text value for the child.\n defattribs: Default attribute dictionary for the child.\n \n Returns:\n The child with the given name. Either from the original hsd-tree\n or the one, which had been created using the provided default\n values. In latter case, the appropriate child is inserted into\n the tree.\n \n Raises:\n HSDMissingTagException: if the child was not found and no default\n value had been specified.\n \"\"\"\n optional = deftext is not None\n child = self.findchild(node, name, optional)\n # findchild only returns if child has been found or optional is True.\n if child is None:\n child = Element(name, defattribs or {})\n child.text = deftext\n self.markprocessed(child)\n node.append(child)\n return child\n \n def getvalue(self, node, name, converter, defvalue=None, defattribs=None,\n hsdblock=False):\n \"\"\"Returns the value (text) stored in a child with a given name. The \n value is converted using the provided converter.\n \n Args:\n node: Parent node.\n name: Name of the child to look for.\n converter: Object with methods fromhsd() and tohsd() which can\n convert between the hsd element and the desired type. See\n converters in hsd.converter for examples.\n defvalue: Optional default value used if child has not been found.\n defattribs: Optional default attribute dictionary used if child\n has not been found.\n hsdblock: If True, the the given value is added in hsd block\n notation (enclosed in curly braces) instead of an assignment. \n \n Returns:\n The converted value of the child node's text or the default value\n if the child had not been found. In latter case, an appropriate\n node with the appropriate text representation of the default\n value is inserted into the tree.\n \n Raises:\n HSDMissingTagException: if child was not found and no default\n value had been specified.\n Any other excepction raised by the converter.\n \"\"\"\n optional = defvalue is not None\n child = self.findchild(node, name, optional)\n if child is not None:\n return converter.fromhsd(child)\n else:\n child = converter.tohsd(name, defvalue, defattribs or {})\n self.markprocessed(child)\n if not hsdblock:\n child.hsdattrib[HSDATTR_EQUAL] = True\n node.append(child)\n return defvalue\n \n def getvaluenode(self, node, name, converter, defvalue=None,\n defattribs=None, hsdblock=False):\n \"\"\"Returns the child node of a child with a given name. The child node\n can have only one child. 
This is converted via the provided converter.\n \n Args:\n node: Parent node.\n name: Name of the child to look for.\n converter: Object with methods fromhsd() and tohsd() which can\n convert between the hsd element and the desired type. See\n converters in hsd.converter for examples.\n defvalue: Optional default value used if child has not been found.\n defattribs: Optional default attribute dictionary used if child\n has not been found.\n hsdblock: If True, the the given value is added in hsd block\n notation (enclosed in curly braces) instead of an assignment. \n \n Returns:\n The converted node of the child node's first child or the default\n value if the child had not been found. In latter case, an appropriate\n node with the provided default subnode is inserted into the tree.\n \n Raises:\n HSDMissingTagException: if child was not found and no default\n value had been specified.\n HSDInvalidTagException: If child has more than one child.\n \"\"\"\n optional = defvalue is not None\n child = self.findchild(node, name, optional)\n if child is not None:\n if len(child) > 1:\n raise HSDInvalidTagException(\"Tag '{}' is not allowed to have\"\n \" more than one child\".format(child.tag), node=child)\n self.markprocessed(child, child[0])\n return converter.fromhsd(child)\n else:\n child = converter.tohsd(name, defvalue, defattribs or {})\n if not hsdblock:\n child.hsdattrib[HSDATTR_EQUAL] = True\n self.markprocessed(child, child[0])\n node.append(child)\n return defvalue\n \n def markprocessed(self, *nodes):\n \"\"\"Marks nodes as having been processed, if the query object had been\n initialized with the appropriate option.\n \n Args:\n *nodes: List of nodes to mark as processed.\n \"\"\" \n if self.mark:\n for node in nodes:\n if node is not None:\n node.hsdattrib[HSDATTR_PROC] = True\n \n def findunprocessednodes(self, node, allnodes=False):\n \"\"\"Returns list of all nodes which had been not marked as processed.\n \n Args:\n node: Parent node.\n \n Returns:\n List of all nodes, which have not been queried by a HSDQuery\n instance.\n \"\"\"\n unprocessed = []\n for child in node:\n if child.hsdattrib.get(HSDATTR_PROC, None) is None:\n unprocessed.append(child)\n if not allnodes:\n continue\n unprocessed += self.findunprocessednodes(child, allnodes)\n return unprocessed\n\n\nif __name__ == \"__main__\":\n from io import StringIO\n from hsd.treebuilder import HSDTreeBuilder\n from hsd.parser import HSDParser\n from hsd.tree import HSDTree\n from hsd.converter import *\n parser = HSDParser(defattrib=\"unit\")\n builder = HSDTreeBuilder(parser=parser)\n \n # Defining force type (scalar, list)\n force_units = { \"eV/AA\": 0.0194469050555 }\n hsdforce = HSDScalarUnit(TxtFloat(),\n MultiplicativeUnitConverter(force_units))\n def hsdforcelist(nitem=-1):\n return HSDListUnit(TxtFloat(), MultiplicativeUnitConverter(force_units),\n nitem)\n \n stream = StringIO(\"\"\"\nDriver {}\n# Driver = None\n#Driver = ConjugateGradient {\n# MaxForceComponent [eV/AA] = 1e-2\n#}\nHamiltonian = DFTB {\n # SCC = True\n # SCCTolerance = 1e-4\n # MaxSCCIterations = 100\n MaxAngularMomentum {\n O = \"p\"\n H = \"s\"\n }\n Mixer = Broyden {\n MixingParameter [dfd] = 0.2\n }\n #ReadInitialCharges = No\n KPointsAndWeights {\n 0.0 0.0 0.0 0.25\n 0.25 0.25 0.25 0.75\n }\n}\n\nOptions {\n WriteAutotestTag = Yes\n UnknownOption = No\n}\n\n#ParserOptions {\n# ParserVersion = 4\n#}\n\"\"\")\n root = builder.build(stream)\n qy = HSDQuery(markprocessed=True)\n # Driver can be either a node or the text None. 
Default is the latter.\n driver = qy.getchild(root, \"Driver\", \"None\")\n if driver.text == \"None\":\n dtype = None\n else:\n # Driver not set to \"None\": query all possibilities:\n dtype = qy.getonlychild(driver, Element(\"ConjugateGradient\"))\n if dtype.tag == \"ConjugateGradient\":\n forcetol = qy.getvalue(dtype, \"MaxForceComponent\", hsdforce, 1e-4)\n elif dtype.tag == \"SteepestDescent\":\n forcetol = qy.getvalue(dtype, \"MaxForceComponent\", hsdforce, 1e-4)\n stepsize = qy.getvalue(dtype, \"StepSize\", hsdfloat, 40.0)\n else:\n raise HSDInvalidTagException(node=dtype, msg=\"Unknown driver type \"\n \"'{}'.\".format(dtype.tag))\n print(\"DTYPE:\", dtype) \n ham = qy.getchild(root, \"Hamiltonian\")\n dftb = qy.getchild(ham, \"DFTB\")\n scc = qy.getvalue(dftb, \"SCC\", hsdbool, defvalue=True)\n scctol = qy.getvalue(dftb, \"SCCTolerance\", hsdfloat, defvalue=1e-4)\n scciter = qy.getvalue(dftb, \"MaxSCCIterations\", hsdint, 100)\n mangmom = qy.getchild(dftb, \"MaxAngularMomentum\")\n maxangs = [ qy.getvalue(mangmom, species, hsdstr)\n for species in [\"O\", \"H\"] ]\n mixer = qy.getvaluenode(dftb, \"Mixer\", hsdnode, Element(\"Broyden\"))\n if mixer.tag == \"Broyden\":\n mixparam = qy.getvalue(mixer, \"MixingParameter\", hsdfloat, 0.2)\n else:\n raise HSDInvalidTagException(node=mixer,\n msg=\"Unknown mixer type '{}'.\".format(mixer.tag))\n readcharges = qy.getvalue(dftb, \"ReadInitialCharges\", hsdbool, False)\n kpoints = qy.getvalue(dftb, \"KPointsAndWeights\", hsdfloatlist())\n options = qy.getchild(root, \"Options\", \"\")\n autotest = qy.getvalue(options, \"WriteAutotestTag\", hsdbool, False)\n parseroptions = qy.getchild(root, \"ParserOptions\", \"\")\n parserversion = qy.getvalue(parseroptions, \"ParserVersion\", hsdint, 4)\n tree = HSDTree(root)\n tree.writehsd()\n print(\"\\nUnprocessed: \", qy.findunprocessednodes(root))\n \n","repo_name":"bhourahine/hsdparser","sub_path":"python/src/hsd/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":13892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1586498170","text":"from fastapi import APIRouter,Form\nfrom .schemas.parametri import Parametri\nfrom .utils import parse_and_scramble,revese_mescola\nfrom fastapi.responses import FileResponse\nfrom fastapi_sqlalchemy import db\nimport xlsxwriter\n\nrouter = APIRouter(tags=['Gestione Mescola'])\n\n\n\n@router.post('/mescola', name=\"Mescola i giocatori del listone secondo i parametri dati\",)\ndef mescola(\n portieri: bool = Form(...),\n difensori: bool = Form(...),\n centrocampisti: bool = Form(...),\n attaccanti: bool = Form(...),\n alfabetico: bool = Form(...)\n\n ) -> int:\n return parse_and_scramble(\n Parametri(\n portieri=portieri,\n difensori=difensori,\n centrocampisti=centrocampisti,\n attaccanti=attaccanti,\n alfabetico=alfabetico\n )\n )\n@router.get('/mescola', name=\"Mescola i giocatori del listone secondo i parametri dati\",)\ndef get_config_mescola() -> Parametri:\n return revese_mescola()\n\n@router.get(\"/mescolati\")\ndef get_mescolati():\n mescola=db.session.execute(\"select ordine,nome_giocatore,ruolo,squadra from mescola inner join listone on id_giocatore=listone.id;\")\n workbook = xlsxwriter.Workbook('tmp/Mescolati.xlsx')\n worksheet = workbook.add_worksheet()\n worksheet.write_row(0, 0, ['Ordine', 'Nome', 'Squadra', 'Ruolo'], workbook.add_format({'bold': True}))\n row = 1\n worksheet.autofilter('A1:D{}'.format(mescola.rowcount + 1))\n for giocatore in mescola:\n 
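# one output row per player; str(...).rstrip() strips trailing whitespace the raw DB values may carry\n 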
worksheet.write_row(row, 0, [str(giocatore.ordine).rstrip(), str(giocatore.nome_giocatore).rstrip(),\n str(giocatore.squadra).rstrip(), str(giocatore.ruolo).rstrip()])\n row += 1\n workbook.close()\n db.session.commit()\n return FileResponse('tmp/Mescolati.xlsx', filename=\"Mescolati.xlsx\")","repo_name":"ftp21/fantacalcio-fastapi-mysql","sub_path":"app/routes/mescola/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20096643558","text":"import math\nimport os\nimport shutil\nimport sys\nimport warnings\nimport sklearn.preprocessing\nimport numpy as np\nfrom joblib import load, dump\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import linear_sum_assignment\n#matplotlib.use('Agg')\n\nFILENAME = \"staNMFDicts_\"\n\n\ndef load_example(reweight=False):\n '''\n Loads full data matrix from WuExampleExpression.csv file into numpy array;\n weights column names by their number of replicate occurrences\n\n Parameters\n ----------\n reweight : bool, optional with default False\n whether to reweight the data matrix by 1 / sqrt(occurrences)\n\n Returns\n -------\n X : array, shape (n_samples, n_features)\n data matrix extracted from WuExampleExpression.csv\n\n Examples\n --------\n >>> X = load_example()\n\n '''\n\n workingmatrix = pd.read_csv('../Demo/WuExampleExpression.csv', index_col=0)\n\n if reweight:\n # weight each column (gene) by 1 / sqrt(its occurrences in replicates)\n colnames = workingmatrix.columns.values\n colnames = [(str(x).split('.'))[0] for x in colnames]\n weight = np.zeros(len(colnames))\n\n for i in range(len(colnames)):\n weight[i] = 1/colnames.count(colnames[i]) ** .5\n\n workingmatrix = workingmatrix.apply(\n lambda x: weight * x,\n axis=1,\n )\n\n X = (np.array(workingmatrix).astype(float)).T\n return X\n\n\ndef findcorrelation(A, B):\n '''\n Construct k by k matrix of cosine similarities (inner products of\n L2-normalized columns) for every combination of two columns in A and B\n\n Parameters\n ----------\n A : array, shape (n_features, n_components)\n first NMF solution matrix\n\n B : array, shape (n_features, n_components)\n second NMF solution matrix, of same dimensions as A\n\n Returns\n -------\n X : array shape (n_components, n_components)\n array[a][b] is the similarity between column 'a' of A\n and column 'b' of B\n\n '''\n A_std = sklearn.preprocessing.normalize(A, axis=0)\n B_std = sklearn.preprocessing.normalize(B, axis=0)\n return A_std.T @ B_std\n\n\ndef amariMaxError(correlation):\n '''\n Compute what Wu et al. 
(2016) described as an 'Amari-type error'\n based on average distance between factorization solutions\n\n Parameters\n ----------\n correlation: array, shape (n_components, n_components)\n cross correlation matrix\n\n Returns\n -------\n distM : double/float\n Amari distance\n\n '''\n\n n, m = correlation.shape\n maxCol = np.absolute(correlation).max(0)\n colTemp = np.mean((1-maxCol))\n maxRow = np.absolute(correlation).max(1)\n rowTemp = np.mean((1-maxRow))\n distM = (rowTemp + colTemp)/(2)\n\n return distM\n\n\ndef HungrianError(correlation):\n '''\n Compute the matching error via the Hungarian algorithm,\n based on average distance between factorization solutions\n\n Parameters\n ----------\n correlation: array, shape (n_components, n_components)\n cross correlation matrix\n\n Returns\n -------\n distM : double/float\n Hungarian distance\n\n '''\n\n n, m = correlation.shape\n correlation = np.absolute(correlation) # ignore the sign of corr\n x, y = linear_sum_assignment(-correlation)\n distM = np.mean([1 - correlation[xx, yy] for xx, yy in zip(x, y)])\n return distM\n\n\n# Define a worker function useful for parallelism\ndef f(model, X, K, seed, l, path):\n # set the number of components\n model.set_n_components(K)\n # set the random state\n if seed is not None:\n model.random_state = seed + 100 * l\n model.fit(X) # fit nmf model\n # write model to a joblib file in the path folder\n outputfilename = (\n \"nmf_model_\" + model.__class__.__name__\n + '_' + str(l) + \".joblib\"\n )\n outputfilepath = os.path.join(path, outputfilename)\n dump(model, outputfilepath)\n\n\nclass staNMF:\n '''Python 3 implementation of Siqi Wu's 03/2016 Stability NMF (staNMF)\n\n Solves non-negative matrix factorization for a range of principal patterns\n (PPs) with either different initializations or bootstrapped samples.\n\n Parameters\n ----------\n X : numpy array, shape (n_samples, n_features)\n 2d numpy array containing the data.\n\n folderID : str, optional with default \"\"\n allows user to specify a unique (to the user's working directory)\n identifier for the FILENAME folder that the runNMF method creates.\n\n processes : int, optional with default 3\n the number of processes to use when parallel is True\n\n K1 : int, optional with default 15\n lowest number of PP's (K) tested\n\n K2 : int, optional with default 30\n highest number of PP's (K) tested\n\n seed : int, optional with default 123\n set numpy random seed\n\n replicates : int or tuple of ints of length 2, optional with default\n int 100\n specify the bootstrapped repetitions to be performed on each value of K\n for use in stability analysis; if a list of length 2: self.replicates\n is set to a list of ints between the first and second elements of\n this tuple. If int: self.replicates is set to range(integer).\n\n NMF_finished : bool, optional with default False\n True if runNMF has been completed for the dataset. 
To skip the NMF step\n when the fileID folder already contains factorization solutions for X in\n your range [K1, K2], set to True.\n\n parallel_mode : str, optional with default \"sequential\"\n \"sequential\": Each task runs sequentially.\n \"multiprocessing\": Use multiprocessing for parallel computation.\n \"pyspark\": Use pyspark to do parallel computation.\n\n chunksize : int, optional with default 10\n the smallest number of tasks to assign to each worker\n\n '''\n\n def __init__(self, X, folderID=\"\", K1=15, K2=30,\n seed=123, replicates=100, processes=3,\n NMF_finished=False, parallel_mode=\"sequential\",\n chunksize=10):\n warnings.filterwarnings(\"ignore\")\n self.K1 = K1\n self.K2 = K2\n self.seed = seed\n self.guess = np.array([])\n self.guessdict = {}\n self.parallel_mode = parallel_mode\n self.processes = processes\n if isinstance(replicates, int):\n self.replicates = range(replicates)\n elif isinstance(replicates, tuple):\n start, stop = replicates\n self.replicates = range(replicates[0], replicates[1])\n self.X = X\n self.folderID = folderID\n self.NMF_finished = NMF_finished\n self.instabilitydict = {}\n self.instability_std = {}\n self.instabilityarray = []\n self.instabilityarray_std = []\n self.stability_finished = False\n self.chunksize = chunksize\n\n def runNMF(self, nmf_model):\n '''\n Iterate through the integers between the provided K1 and K2 (by\n default, K1=15 and K2=30), run NMF using the model, and write the\n fitted models to disk (.joblib form).\n\n Parameters\n ----------\n nmf_model : a model that can be used to fit NMF models\n\n Returns\n -------\n None\n\n Side effects\n ------------\n (K2-K1+1) folders, each containing a file for every replicate\n (labeled nmf_model__.joblib).\n\n Raises\n ------\n OSError\n the path cannot be created.\n\n '''\n\n self.NMF_finished = False\n numPatterns = np.arange(self.K1, self.K2+1)\n if self.parallel_mode == \"multiprocessing\":\n from multiprocessing import Pool\n pool = Pool(self.processes)\n elif self.parallel_mode == 'pyspark':\n import pyspark\n from pyspark.sql.functions import (\n pandas_udf,\n PandasUDFType,\n explode,\n lit,\n array,\n )\n from pyspark.sql.types import (\n IntegerType,\n FloatType,\n ArrayType,\n StructField,\n StructType,\n )\n spark = pyspark.sql.SparkSession.builder \\\n .master(\"local\") \\\n .appName(\"Demo\") \\\n .getOrCreate()\n sc = spark.sparkContext\n sc.addPyFile(\"../staNMF/nmf_models/sklearn_nmf.py\")\n\n for k in range(len(numPatterns)):\n K = numPatterns[k]\n path = (\n \"./\" + FILENAME + self.folderID + \"/K=\" + str(K) + \"/\"\n )\n try:\n os.makedirs(path)\n except OSError:\n if not (os.path.isdir(path)):\n raise\n m, n = np.shape(self.X)\n\n print(\"Working on K = {}...\".format(K))\n\n if self.parallel_mode == 'sequential':\n # fit nmf_models\n for l in self.replicates:\n # set the number of components\n nmf_model.set_n_components(K)\n # set the random state\n if self.seed is not None:\n nmf_model.random_state = self.seed + 100 * l\n nmf_model.fit(self.X) # fit nmf model\n # write model to a joblib file in the path folder\n outputfilename = (\n \"nmf_model_\" + nmf_model.__class__.__name__\n + '_' + str(l) + \".joblib\"\n )\n outputfilepath = os.path.join(path, outputfilename)\n dump(nmf_model, outputfilepath)\n elif self.parallel_mode == 'multiprocessing':\n parameters = [\n (nmf_model, self.X, K, self.seed, l, path)\n for l in self.replicates\n ]\n pool.starmap(f, parameters, chunksize=self.chunksize)\n elif self.parallel_mode == 'pyspark':\n sqlCtx = pyspark.sql.SQLContext(spark)\n 
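# wrap X in a Spark DataFrame (one column per feature) so that the replicate fits can be distributed across workers\n 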
spark_df = sqlCtx.createDataFrame(\n pd.DataFrame(\n self.X,\n columns=['F{}'.format(i) for i in range(n)],\n dtype=float,\n )\n )\n seed = array([\n lit(i) for i in self.replicates if i % self.chunksize == 0\n ])\n spark_df = spark_df.withColumn(\"seed\", seed)\n spark_df = spark_df.withColumn(\"seed\", explode(spark_df.seed))\n spark_df = spark_df.repartitionByRange(self.processes,\"seed\")\n K_broadcast = sc.broadcast(K)\n params_broadcast = sc.broadcast(nmf_model.get_params())\n chunksize_broadcast = sc.broadcast(self.chunksize)\n output_schema = StructType([\n StructField('seed', IntegerType(), True),\n StructField('l2_error', FloatType(), True),\n StructField('components', ArrayType(FloatType()), True)\n ])\n @pandas_udf(output_schema, PandasUDFType.GROUPED_MAP)\n def fit_sklearn_nmf(df):\n from sklearn_nmf import sklearn_nmf\n import numpy as np\n out = []\n for i in range(df.seed[0], df.seed[0] + chunksize_broadcast.value):\n ml = sklearn_nmf()\n ml.set_params(**params_broadcast.value)\n ml.n_components=K_broadcast.value\n ml.random_state=i\n X = np.array(df.drop(\"seed\", axis=1))\n ml.fit(X)\n coefs = ml.transform(X)\n l2_error = np.sum((coefs @ ml.components_ - X) ** 2) / np.sum(X**2)\n out.append([ml.random_state, l2_error, ml.components_.flatten()])\n return pd.DataFrame(out, columns=['seed', 'l2_error', 'components'])\n result = spark_df.groupBy(\"seed\").apply(fit_sklearn_nmf).toPandas()\n # Record all the results\n # set the number of components\n nmf_model.set_n_components(K)\n for l in result.index:\n # set the random state\n nmf_model.random_state = result.loc[l, 'seed']\n nmf_model.l2_error = result.loc[l, 'l2_error']\n nmf_model.components_ = np.array(result.loc[l, 'components']).reshape((K, n))\n # write model to a joblib file in the path folder\n outputfilename = (\n \"nmf_model_\" + nmf_model.__class__.__name__\n + '_' + str(l) + \".joblib\"\n )\n outputfilepath = os.path.join(path, outputfilename)\n dump(nmf_model, outputfilepath)\n\n else:\n raise ValueError(\n \"self.parallel_mode({}) is not acceptable.\".format(\n self.parallel_mode\n )\n )\n\n self.NMF_finished = True\n if self.parallel_mode == 'multiprocessing':\n pool.close()\n elif self.parallel_mode == 'pyspark':\n spark.stop()\n\n def instability(self, tag, k1=0, k2=0):\n '''\n Performs instability calculation for NMF models for each K\n within the range entered\n\n Parameters\n ----------\n tag : str\n the name of the nmf model to compute the stability\n\n k1 : int, optional, default self.K1\n lower bound of K to compute stability\n\n k2 : int, optional, default self.K2\n upper bound of K to compute instability\n\n Returns\n -------\n None\n\n Side effects\n ------------\n \"instability.csv\" containing instability index\n for each K between and including k1 and k2; updates\n self.instabilitydict (required for makeplot())\n '''\n if k1 == 0:\n k1 = self.K1\n if k2 == 0:\n k2 = self.K2\n\n numReplicates = len(self.replicates)\n\n if self.NMF_finished is False:\n print(\"staNMF Error: runNMF is not complete\\n\")\n else:\n numPatterns = np.arange(k1, k2+1)\n n_features = self.X.shape[1]\n # loop through each number of PPs\n for k in numPatterns:\n print(\"Calculating instability for \" + str(k))\n # load the dictionaries\n path = (\n \"./\" + FILENAME + self.folderID + \"/K=\" + str(k)+\"/\"\n )\n Dhat = np.zeros((numReplicates, n_features, k))\n\n for replicate in range(numReplicates):\n inputfilename = (\n \"nmf_model_\" + tag\n + \"_\" + str(replicate) + \".joblib\"\n )\n inputfilepath = 
os.path.join(path, inputfilename)\n model = load(inputfilepath)\n Dhat[replicate] = model.components_.T\n\n # compute the distance matrix between each pair of dicts\n distMat = np.zeros(shape=(numReplicates, numReplicates))\n\n for i in range(numReplicates):\n for j in range(i, numReplicates):\n x = Dhat[i]\n y = Dhat[j]\n\n CORR = findcorrelation(x, y)\n distMat[i][j] = HungrianError(CORR)\n distMat[j][i] = distMat[i][j]\n\n # compute the instability and the standard deviation\n self.instabilitydict[k] = (\n np.sum(distMat) / (numReplicates * (numReplicates-1))\n )\n # The standard deviation of the instability is tricky:\n # it is a U-statistic, whose standard deviation is hard to\n # compute in general. Fortunately, there is an easy-to-understand\n # upper bound.\n self.instability_std[k] = (\n np.sum(distMat ** 2)\n / (numReplicates * (numReplicates - 1))\n - self.instabilitydict[k] ** 2\n ) ** .5 * (2 / distMat.shape[0]) ** .5\n # write the result into csv file\n outputfile = path + \"instability.csv\"\n pd.DataFrame({\n 'K': [k],\n 'instability': [self.instabilitydict[k]],\n 'instability_std': [self.instability_std[k]],\n }).to_csv(outputfile, mode='a', header=False, index=False)\n # set the stability_finished to be True\n self.stability_finished = True\n\n def get_instability(self):\n '''\n Retrieves instability values calculated in this instance of staNMF\n\n Returns\n -------\n self.instabilitydict : dict\n dictionary with keys K, values instability index\n\n '''\n\n if self.stability_finished:\n return self.instabilitydict\n else:\n print(\"Instability has not yet been calculated for your NMF \"\n \"results. Use staNMF.instability() to continue.\")\n\n def plot(self, dataset_title=\"Drosophila Spatial Expression Data\", xmax=0,\n xmin=-1, ymin=0, ymax=0, xlab=\"K\", ylab=\"Instability Index\"):\n '''\n Plots instability results for all K's between and including K1 and K2\n with K on the X axis and instability on the Y axis\n\n Parameters\n ----------\n\n dataset_title : str, optional, default \"Drosophila\n Expression Data\"\n The title used in the plot\n\n ymax : float, optional, default\n largest Y + largest std(Y) * 2 + (largest Y/ # of points)\n the maximum y axis limit\n\n xmax : float, optional, default K2+1\n\n xlab : string, default \"K\"\n x-axis label\n\n ylab : string, default \"Instability Index\"\n y-axis label\n\n Returns\n -------\n None\n\n Side effects\n ------------\n A png file named dataset_title + \".png\" is saved.\n\n '''\n kArray = []\n self.instabilityarray = []\n self.instabilityarray_std = []\n for K in range(self.K1, self.K2+1):\n kpath = (\n \"./\" + FILENAME + \"{}/K={}/instability.csv\"\n ).format(self.folderID, K)\n df = pd.read_csv(kpath, header=None, index_col=False)\n kArray.append(int(df.iloc[-1, 0]))\n self.instabilityarray.append(float(df.iloc[-1, 1]))\n self.instabilityarray_std.append(float(df.iloc[-1, 2]))\n if xmax == 0:\n xmax = self.K2 + 1\n if xmin == -1:\n xmin = self.K1 - .1\n if ymax == 0:\n ymax = max(self.instabilityarray) \\\n + max(self.instabilityarray_std) * 2 \\\n + (max(self.instabilityarray) / len(self.instabilityarray))\n plt.errorbar(x=kArray,\n y=self.instabilityarray,\n yerr=np.array(self.instabilityarray_std)*2)\n plt.axis([xmin, xmax, ymin, ymax])\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.axes.titlesize = 'smaller'\n plt.title(str(dataset_title))\n plotname = str(dataset_title + \".png\")\n plt.savefig(plotname)\n\n def ClearDirectory(self, k_list):\n '''\n A storage-saving option that clears the entire directory of each K\n requested, including the 
instability.csv file in each folder\n\n Parameters\n ----------\n k_list : list\n list of K's to delete corresponding directories of\n\n Notes\n -----\n This should only be used after stability has been calculated for\n each K you wish to delete.\n '''\n\n for K in k_list:\n path = (\"./\" + FILENAME + \"{}/K={}/\").format(self.folderID, K)\n shutil.rmtree(path)\n","repo_name":"Yu-Group/staNMF","sub_path":"staNMF/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20021,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"23451934437","text":"import numpy as np\nimport pandas as pd\nimport argparse\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\n\nfrom src.SyntheticData import SyntheticData\nfrom src.SupervisedLearningModel import SupervisedLearningModel\nfrom src.ReweighingAnalysisVisualizer import ReweighingAnalysisVisualizer\nfrom src.MetricFrameGenerator import MetricFrameGenerator\nfrom src.EqualizedOddsPostProcesser import EqualizedOddsPostProcesser\nfrom src.EqualizedOddsPostProcessingAnalysis import EqualizedOddsPostProcessingAnalysis\nfrom src.FairnessMetricVisualizer import FairnessMetricVisualizer\n\nif __name__ == '__main__':\n\n FIGURE_DIRECTORY = \"./replication\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--seed\", help=\"random integer seed\", default=0)\n args = parser.parse_args()\n seed = int(args.seed)\n\n c, k = 0.1, 1.6\n num_points = 100000\n\n model_list = ['propensity', 'observational', 'counterfactual']\n\n synthetic_data = SyntheticData(\n treatment_effect = c, \n treatment_assignment_bias = k,\n seed = seed)\n df, config = synthetic_data.generate(num_points = num_points)\n\n for model_key in model_list:\n params = config.copy()\n clf = LogisticRegression(penalty = 'none')\n model = SupervisedLearningModel(model = clf, name = model_key, seed = seed)\n if model_key == 'propensity':\n params['target'] = params['treat']['name']\n else:\n params['target'] = params['outcome']['name']\n model.fit(df, params)\n train = df[params['features']['training']]\n if model_key == 'propensity':\n df[model_key] = clf.predict_proba(train)[:, 1:]\n else:\n df[model_key] = clf.predict_proba(train)[:, :1]\n\n reweighing_analysis = ReweighingAnalysisVisualizer(config)\n reweighing_analysis.visualize_base_rates(df, save = f\"{FIGURE_DIRECTORY}/reweighing/fig_seed_{str(seed).zfill(3)}.png\")\n\n ## Postprocess test and training datasets via equalized odds\n metric_frame = MetricFrameGenerator()\n equalized_odds = EqualizedOddsPostProcesser(config)\n observational = config['target']['observational']\n sensitive = config['features']['sensitive']\n _df, postprocessed = df[df.columns.difference(['treat', 'outcome'], sort=False)], {}\n datasets = train_test_split(_df, test_size=0.3, random_state=0)\n for key, raw_data in zip(['train', 'test'], datasets):\n data = raw_data.copy()\n metricframe = metric_frame.generate(data[observational], data['counterfactual'], data[sensitive])\n if key == 'train':\n mix_rates = equalized_odds.mix_rates(data, metricframe)\n probs = equalized_odds.post_process(data, metricframe, mix_rates)\n for sensitive_class, prob in probs.items():\n data.loc[data[sensitive] == sensitive_class, 'eo_fair_pred'] = prob\n postprocessed[key] = data\n\n eo_analysis = EqualizedOddsPostProcessingAnalysis(config)\n _eo = eo_analysis.visualize_roc(postprocessed['test'], save = 
f\"{FIGURE_DIRECTORY}/post_processed/fig_roc_seed_{str(seed).zfill(3)}.png\")\n errors = eo_analysis.error_analysis(postprocessed['test'])\n errors = errors[['Group', 'Method', 'cGFPR', 'cGFNR', 'oGFPR', 'oGFNR']]\n with open( f\"{FIGURE_DIRECTORY}/post_processed/fig_roc_seed_{str(seed).zfill(3)}.tex\", 'w') as f:\n f.write(errors.to_latex(column_format='llrrrr', index=False))\n\n test_df = postprocessed['test']\n roc = FairnessMetricVisualizer(metric = 'roc', parameters = config)\n _roc = roc.visualize_metric(test_df, save = f\"{FIGURE_DIRECTORY}/roc/fig_seed_{str(seed).zfill(3)}.png\")\n\n precision_recall = FairnessMetricVisualizer(metric = 'precision_recall', parameters = config)\n _pr = precision_recall.visualize_metric(test_df, save = f\"{FIGURE_DIRECTORY}/precision_recall/fig_seed_{str(seed).zfill(3)}.png\")\n\n calibration = FairnessMetricVisualizer(metric = 'calibration', parameters = config)\n _calibration = calibration.visualize_metric(test_df, save = f\"{FIGURE_DIRECTORY}/calibration/fig_seed_{str(seed).zfill(3)}.png\")","repo_name":"wcheung-code/rescience-counterfactual","sub_path":"mode/replication/synthetic_experiments.py","file_name":"synthetic_experiments.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29209981583","text":"import datetime\nimport json\nimport pickle\nimport os.path\nimport time\nimport warnings\n\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom pip._vendor.colorama import Fore\n\nimport modification\n\n\n\nclass Event:\n \"\"\"\n Represents an appointment that can be compared in time with other appointments,\n and holds all the attributes Google needs to insert it into the google calendar\n \"\"\"\n def __init__(self, start:datetime.datetime, company_name:str, location:str, duration=None,\n time_boundery:int=modification.timeBoundery(),\n description:str=\"\", notifications:()=(60, 240, 1440), notif_color:str=\"green\", end=None):\n\n assert(duration or end), \"duration or end must be given to describe the Event exactly\"\n assert(duration is None or isinstance(duration, int)), \"duration must be either None or an int --> minutes\"\n\n self.f = Fore.MAGENTA\n self.start = start\n self.end = end\n self.company_name = company_name\n self.location = location\n self.duration = duration if duration else None\n self.time_boundery = time_boundery\n self.description = description\n self.notifications = notifications\n self.notif_color = notif_color\n self.hashing_help = str(time.time())\n\n @property\n def duration(self):\n if self.__dict__[\"duration\"]:\n return self.__dict__[\"duration\"]\n datetime_result = self.end - self.start\n return datetime_result.seconds / 60\n\n @duration.setter\n def duration(self, value):\n self.__dict__[\"duration\"] = value\n\n @property\n def end(self):\n if not self.__dict__[\"end\"]:\n return self.start + datetime.timedelta(minutes=self.duration)\n return self.__dict__[\"end\"]\n\n @end.setter\n def end(self, value:datetime.datetime):\n self.__dict__[\"end\"] = value\n\n def haveTimeConflictWith(self, other):\n \"\"\"compares two appointments to check whether both can be managed in time or whether doing both gets difficult\n :param other: google_tools.Event()\n :return: True if the timing gets problematic (+/-45min), otherwise False\n \"\"\"\n compare_min = 
self.start - datetime.timedelta(minutes=self.time_boundery)\n compare_max = self.end + datetime.timedelta(minutes=self.time_boundery)\n if other.start > compare_max or other.end < compare_min:\n return False\n else:\n return True\n\n def mabyChangedApointment(self, other):\n return self.isSameCompany(other)\n\n def isSameCompany(self, other):\n return self.company_name == other.company_name\n\n def isSameTime(self, other):\n return self.start.time() == other.start.time()\n\n def isSameEvent(self, other):\n return self.isSameTime(other) and self.isSameCompany(other)\n\n def isSameDate(self, other):\n return self.start.date() == other.start.date()\n\n @classmethod\n def compareFsWithGoogleEvents(cls, fs_events, google_events):\n \"\"\"compares FsEvents with GoogleEvents\n :param fs_events:\n :param google_events:\n :return: new_events, maybe_changed_time_test_list, conflicting_test_list\n \"\"\"\n new_events = fs_events[:]\n\n conflicting_test_list = []\n maybe_changed_time_test_list = []\n\n for fs_event in fs_events:\n for google_event in google_events:\n\n if fs_event.isSameDate(google_event):\n try:\n if fs_event.isSameEvent(google_event):\n new_events.remove(fs_event)\n elif fs_event.mabyChangedApointment(google_event):\n maybe_changed_time_test_list.append((fs_event, google_event))\n elif fs_event.haveTimeConflictWith(google_event):\n conflicting_test_list.append((fs_event, google_event))\n except:\n continue\n new_events = [(x, None) for x in new_events]\n return new_events, maybe_changed_time_test_list, conflicting_test_list\n\n def oneLineAdress(self):\n return self.location.replace(\"\\n\", \" \")\n\n def __str__(self):\n return f\"<{self.company_name}, {self.start} bis {self.end} {self.duration}min, {self.oneLineAdress()}>\"\n\n def __repr__(self):\n return str(self)\n\n def __eq__(self, other):\n return self.description == other.description and self.start == other.start\n\n def __hash__(self):\n return hash(f\"{self} {self.hashing_help}\")\n\n @classmethod\n def stripDuplicates(cls, maybe_changed_events, conflicting_events, all_google_events):\n all_events = [*maybe_changed_events, *conflicting_events]\n itter_double = all_events[:]\n for event_tuple in all_events:\n event, second = event_tuple\n for g_e in all_google_events:\n a_e:Event\n g_e:Event\n if event.isSameEvent(g_e):\n try:\n itter_double.remove(event_tuple)\n except:\n continue\n return itter_double\n\n\nclass MyGoogleCalendarConnection:\n def __init__(self, auto_connect=True):\n self.f = Fore.BLUE\n self.scope = ['https://www.googleapis.com/auth/calendar'] #nicht in modi, muss zu credentials passen und soll nicht einfach änderbar sein\n self.service = self.connect() if auto_connect else None\n\n def connect(self):\n \"\"\"\n connect to google-calendar-api\n :return:\n \"\"\"\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', self.scope)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return build('calendar', 
'v3', credentials=creds)\n\n def createReminderStats(self, popup_minutes=(modification.popupMinutes()), email_minutes=(modification.emailMinutes())):\n \"\"\"\n creates list that contains notification-stats\n :param popup_minutes: tuple of ints minutes before actual event notification appears as popup\n :param email_minutes: tuple of ints minutes before actual event notification appears as email\n :return:\n \"\"\"\n\n print(f\"popup_minutes: {popup_minutes}, email_minutes: {email_minutes}\")\n assert(len(popup_minutes) + len(email_minutes) <= 5), \"at most 5 reminders possible\"\n remninder_list = []\n if popup_minutes:\n for minutes in popup_minutes:\n remninder_list.append({'method': 'popup', 'minutes': minutes})\n if email_minutes:\n for minutes in email_minutes:\n remninder_list.append({'method': 'email', 'minutes': minutes})\n return remninder_list\n\n def _deleteEvents(self, min_time:datetime.datetime, time_delta_in_days:int=60,\n calender_id:str=modification.calendar_id(), max_results=30):\n \"\"\"WARNING: dev and debug tool\"\"\"\n warnings.warn(\"WARNING: you are about to DELETE from the calendar!!!!\", UserWarning)\n if not input(f\"(Y/N)\") in (\"Y\"):\n return\n\n google_events = self.fetchGoogleEvents(min_time=min_time, calender_id=calender_id,\n timedelta_in_days=time_delta_in_days, max_results=max_results,\n single_events=True)\n for google_event in google_events:\n self.service.events().delete(calendarId='primary', eventId=google_event[\"id\"]).execute()\n\n def createEventDict(self, start_time:datetime.datetime, end_time:datetime.datetime, company, description,\n location, reminder_list, color_id=modification.color_id()):\n \"\"\"\n :return: google event dict as needed by service.events().insert\n \"\"\"\n # event = {\n # 'summary': 'Google I/O 2015',\n # 'location': '800 Howard St., San Francisco, CA 94103',\n # 'description': 'A chance to hear more about Google\\'s developer products.',\n # 'start': {\n # 'dateTime': '2015-05-28T09:00:00-07:00',\n # 'timeZone': 'America/Los_Angeles',\n # },\n # 'end': {\n # 'dateTime': '2015-05-28T17:00:00-07:00',\n # 'timeZone': 'America/Los_Angeles',\n # },\n # 'recurrence': [\n # 'RRULE:FREQ=DAILY;COUNT=2'\n # ],\n # 'attendees': [\n # {'email': 'lpage@example.com'},\n # {'email': 'sbrin@example.com'},\n # ],\n # 'reminders': {\n # 'useDefault': False,\n # 'overrides': [\n # {'method': 'email', 'minutes': 24 * 60},\n # {'method': 'popup', 'minutes': 10},\n # ],\n # },\n # }\n # return event\n\n event = {\n 'summary': company,\n 'location': location,\n 'description': description,\n 'start': {\n\n 'dateTime': self.datetimeToTimeString(start_time)\n },\n 'end': {\n 'dateTime': self.datetimeToTimeString(end_time)\n },\n 'colorId' : color_id,\n 'reminders': {\n 'useDefault': False,\n 'overrides': reminder_list\n },\n }\n return event\n\n def createEvents(self, events, calender_id=modification.calendar_id(), color_id=modification.color_id()):\n print(f\"{self.f}all events here in createEvents: {events} {Fore.RESET}\")\n \n for ev, second in events:\n print(f\"google event is about to be created!!!! event: {ev}\")\n self.createGoogleEvent(my_event=ev, calender_id=calender_id, color_id=color_id)\n\n\n\n def createGoogleEvent(self, my_event:Event, calender_id=modification.calendar_id(), color_id=modification.color_id()):\n \"\"\"\n creates a google event from a google_tools.Event()\n :return:\n \"\"\"\n reminder_stats = self.createReminderStats()\n\n event = self.createEventDict(start_time=my_event.start, end_time=my_event.end, company=my_event.company_name,\n description=my_event.description, location=my_event.location,\n reminder_list=reminder_stats, color_id=color_id)\n print(f\"about to be pushed to google: calendar_id: {calender_id}, body: {event}\")\n event = self.service.events().insert(calendarId=calender_id, body=event).execute()\n print('Event created: %s' % (event.get('htmlLink')))\n\n\n def datetimeToTimeString(self, date_time:datetime.datetime, offset=2, offset_sighn=\"+\"):\n time_string = str(date_time).replace(\" \", \"T\")\n return time_string + f\"{offset_sighn}{offset:02d}:00\"\n\n def timeStringToDateTime(self, time_sting:str):\n \"\"\"\n converts a google time string '2020-05-06T12:00:00+02:00' into a datetime object;\n '2020-05-06T12:00:00+02:00' means: it is actually 12 o'clock for us, but with an offset of 2\n from standard time\n :param time_sting: google time string\n :return: datetime.datetime\n \"\"\"\n datetime_str, offset_sighn, offset_time = time_sting[:19], time_sting[19:20], time_sting[20:]\n print(f\"datetime: {datetime_str}, offset sign: {offset_sighn}, offset_time: {offset_time}\")\n try:\n datetime_object = time.strptime(datetime_str, '%Y-%m-%dT%H:%M:%S')\n except ValueError as e:\n print(f\"{Fore.RED}ERROR #09oi234knjw --> {e.__traceback__.tb_lineno}, {repr(e.__traceback__)}, {repr(e)}, {e.__cause__}{Fore.RESET}\")\n\n\n datetime_object = time.strptime(datetime_str, '%Y-%m-%d')\n\n datetime_object = datetime.datetime(*datetime_object[:6])\n\n return datetime_object\n\n def fetchGoogleEvents(self, min_time:datetime.datetime, timedelta_in_days:int=90,\n calender_id:str=modification.calendar_id(), order_by=\"startTime\",\n max_results:int=500, single_events=True):\n \"\"\"\n fetches google events and returns them as raw google event dicts\n :param min_time: time from which on events should be fetched\n :param timedelta_in_days: period for which events should be fetched\n :param order_by: \"startTime\"or\"updated\"\n :return: list(google_events-->dict)\n \"\"\"\n max_time = min_time + datetime.timedelta(days=timedelta_in_days)\n print(f\"{self.f}CalendarId here!!!!!!!!! : {calender_id} {Fore.RESET}\")\n \n raw_google_events = self.service.events().list(\n calendarId=calender_id, timeMin=self.datetimeToTimeString(min_time),\n timeMax=self.datetimeToTimeString(max_time), maxResults=max_results,\n singleEvents=single_events, orderBy=order_by).execute()\n\n fetched_google_events = raw_google_events.get(\"items\", [])\n print(f\"{self.f}fetched_google_events: {fetched_google_events} {Fore.RESET}\")\n return fetched_google_events\n\n\n def fetchEvents(self, min_time:datetime.datetime, timedelta_in_days:int=90,\n calender_id:str=modification.calendar_id(), order_by=\"startTime\",\n max_results:int=500, time_boundery=modification.timeBoundery()):\n \"\"\"\n fetches google events and returns them in our own Event data type\n :param min_time: time from which on events should be fetched\n :param timedelta_in_days: period for which events should be fetched\n :param order_by: \"startTime\"or\"updated\"\n :param time_boundery: time margin that makes too tightly packed consecutive appointments detectable\n :return: list(google_tools.Event()'s)\n \"\"\"\n\n events = []\n google_event: dict\n for google_event in self.fetchGoogleEvents(\n min_time=min_time, timedelta_in_days=timedelta_in_days , calender_id=calender_id, order_by=order_by,\n max_results=max_results, single_events=True):\n\n start = google_event[\"start\"]\n starttime_string = start.get(\"dateTime\", None) if start.get(\"dateTime\", None) else start.get(\"date\", None)\n end = google_event[\"end\"]\n endtime_string = end.get(\"dateTime\", None) if end.get(\"dateTime\", None) else end.get(\"date\", None)\n\n event = Event(start=self.timeStringToDateTime(starttime_string),\n end=self.timeStringToDateTime(endtime_string),\n company_name=google_event[\"summary\"],\n location=google_event.get(\"location\", \" \"), duration=None, time_boundery=time_boundery,\n description=google_event.get(\"description\", \"\"))\n events.append(event)\n\n return events\n\n\n def getCalendarIDs(self, calendarID):\n\n complete_answer = self.service.calendarList().get(calendarId=calendarID).execute()\n summary_exzerpt = complete_answer[\"summary\"]\n return summary_exzerpt, complete_answer\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\n event_a = Event(start=datetime.datetime(*time.localtime()[:6]), company_name=\"schlecker\",\n location=\"Address\", duration=50,\n time_boundery=45, description=\"should work\")\n print(f\"start: {event_a.start}, end: {event_a.end}, duration: {event_a.duration}\")\n print(event_a)\n\n event_b = Event(start=datetime.datetime(*time.localtime()[:6]), company_name=\"schlecker\",\n location=\"Address\", duration=None,\n end=datetime.datetime(*time.localtime()[:6]) + datetime.timedelta(minutes=60),\n time_boundery=45, description=\"should work\")\n print(f\"start: {event_b.start}, end: {event_b.end}, duration: {event_b.duration}\")\n print(event_b)\n\n test_set = set()\n test_set.add((event_a, event_b))\n print(test_set)\n test_set.add((event_b, event_a))\n print(test_set)\n test_set.add((event_a, event_b))\n print(test_set)\n\n\n google_connection = MyGoogleCalendarConnection()\n google_events = google_connection.fetchGoogleEvents(min_time=datetime.datetime(*time.localtime()[:6]))\n print(f\"google_events: {google_events}\")\n\n now_time = datetime.datetime(*time.localtime()[:6])\n #\n # for i in range(2, 20):\n # starttime = now_time + datetime.timedelta(minutes=60*i)\n # event_here = Event(start=starttime, duration=modification.standardDuration(), company_name=f\"color: {i%12}\",\n # 
location=\"Flaumweg 23,\\n66111 Saarbrücken\", description=\"das ist ein test der Teste\")\n # google_connection.createGoogleEvent(event_here, color_id=i%12)\n #\n # my_events = google_connection.fetchEvents(min_time=now_time)\n # for my_event in my_events:\n # print(my_event)\n\n google_connection._deleteEvents(now_time-datetime.timedelta(days=3), max_results=260, time_delta_in_days=365)\n\n","repo_name":"peterQpan/FoodsharingCalendar","sub_path":"google_tools.py","file_name":"google_tools.py","file_ext":"py","file_size_in_byte":18009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39911966515","text":"import fractions\nimport numpy as np\nfrom pylab import *\n\ndef totient(x):\n n = 0\n for k in range(0,x+1):\n if fractions.gcd(k,x) == 1:\n n += 1\n return n\n \nx = np.arange(1,5001)\nout = [totient(y) for y in x]\nplot(x,out,'r.')\ntitle('Totient Function')\nxlabel('denominator m')\nylabel('# of new excluded regions')\nshow()","repo_name":"thaaemis/fractal","sub_path":"totient.py","file_name":"totient.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41028841065","text":"import pytest\nimport numpy as np\nimport openpnm as op\nimport matplotlib.pyplot as plt\nfrom openpnm._skgraph import generators as gen\nfrom openpnm._skgraph import tools\nfrom numpy.testing import assert_allclose\n\n\nclass SKGRGeneratorToolsTest:\n\n def setup_class(self):\n self.ws = op.Workspace()\n\n def test_get_centroid_2D(self):\n pts = np.array([[0, 0],\n [0, 1],\n [1, 1],\n [1, 0]], dtype=float)\n pt = gen.tools.get_centroid(pts, mode='rigorous')\n assert_allclose(pt, [0.5, 0.5], rtol=1e-7)\n pt = gen.tools.get_centroid(pts, mode='fast')\n assert_allclose(pt, [0.5, 0.5], rtol=1e-12)\n pts = np.array([[0, 0],\n [0, 0.2],\n [0, 0.4],\n [0, 0.6],\n [0, 0.8],\n [0, 1],\n [1, 1],\n [1, 0]], dtype=float)\n pt = gen.tools.get_centroid(pts, mode='rigorous')\n assert_allclose(pt, [0.5, 0.5], rtol=1e-7)\n pt = gen.tools.get_centroid(pts, mode='fast')\n assert_allclose(pt, [0.25, 0.5], rtol=1e-12)\n\n def test_get_centroid_3D(self):\n pts = np.array([[0, 0, 0],\n [0, 1, 0],\n [1, 0, 0],\n [1, 1, 0],\n [0, 0, 1],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 1]], dtype=float)\n pt = gen.tools.get_centroid(pts, mode='rigorous')\n assert_allclose(pt, [0.5, 0.5, 0.5], rtol=1e-12)\n pt = gen.tools.get_centroid(pts, mode='fast')\n assert_allclose(pt, [0.5, 0.5, 0.5], rtol=1e-12)\n pts = np.array([[0, 0, 0],\n [0, 0.2, 0],\n [0, 0.4, 0],\n [0, 0.6, 0],\n [0, 0.8, 0],\n [0, 1, 0],\n [1, 0, 0],\n [1, 0.2, 0],\n [1, 0.4, 0],\n [1, 0.6, 0],\n [1, 0.8, 0],\n [1, 1, 0],\n [0, 0, 1],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 1]], dtype=float)\n pt = gen.tools.get_centroid(pts, mode='rigorous')\n assert_allclose(pt, [0.5, 0.5, 0.5], rtol=1e-8)\n pt = gen.tools.get_centroid(pts, mode='fast')\n assert_allclose(pt, [0.5, 0.5, 0.25], rtol=1e-12)\n\n def test_parse_points(self):\n pts = gen.tools.parse_points(shape=[1, 1, 1], points=10)\n assert pts.shape == (10, 3)\n assert pts[:, 2].max() > 0\n pts = gen.tools.parse_points(shape=[1, 1, 0], points=10)\n assert pts.shape == (10, 3)\n assert pts[:, 2].max() == 0\n pts = gen.tools.parse_points(shape=[1, 1], points=10)\n assert pts.shape == (10, 3)\n assert pts[:, 2].max() > 0\n r, q, z = tools.cart2cyl(*pts.T)\n assert r.max() < 1\n assert r.max() > 0\n pts = gen.tools.parse_points(shape=[1, 0], points=10)\n assert pts.shape == (10, 3)\n assert 
pts[:, 2].max() == 0\n r, q, z = tools.cart2cyl(*pts.T)\n assert r.max() < 1\n assert r.max() > 0\n pts = gen.tools.parse_points(shape=[1], points=10)\n assert pts.shape == (10, 3)\n assert pts[:, 2].max() > 0\n r, q, p = tools.cart2sph(*pts.T)\n assert r.max() < 1\n assert r.max() > 0\n\n def test_add_all_label(self):\n d = gen.cubic(shape=[3, 3, 3], node_prefix='pore', edge_prefix='throat')\n assert len(d.keys()) == 2\n d = gen.tools.add_all_label(d)\n assert len(d.keys()) == 4\n assert 'pore.all' in d.keys()\n assert 'throat.all' in d.keys()\n\n def test_label_faces_cubic(self):\n d = gen.cubic(shape=[3, 3, 3], node_prefix='pore', edge_prefix='throat')\n d = gen.tools.label_faces_cubic(d, rtol=0.1)\n assert d['pore.left'].sum() == 9\n assert d['pore.right'].sum() == 9\n assert d['pore.front'].sum() == 9\n assert d['pore.back'].sum() == 9\n assert d['pore.top'].sum() == 9\n assert d['pore.bottom'].sum() == 9\n\n def test_template_sphere_shell(self):\n im = gen.tools.template_sphere_shell(r_outer=10, r_inner=0)\n assert im.sum() == 4139\n im = gen.tools.template_sphere_shell(r_outer=10, r_inner=5)\n assert im.sum() == 3624\n\n def test_template_cylinder_annulus(self):\n im = gen.tools.template_cylinder_annulus(z=10, r_outer=10, r_inner=0)\n assert im.sum() == 3050\n im = gen.tools.template_cylinder_annulus(z=10, r_outer=10, r_inner=5)\n assert im.sum() == 2240\n im = gen.tools.template_cylinder_annulus(z=0, r_outer=10, r_inner=0)\n assert im.sum() == 305\n im = gen.tools.template_cylinder_annulus(z=0, r_outer=10, r_inner=5)\n assert im.sum() == 224\n\n def test_generate_base_points_and_reflect(self):\n f = gen.tools.generate_base_points\n pts = f(20, domain_size=[1, 1, 1], reflect=False)\n assert pts.shape == (20, 3)\n pts = f(20, domain_size=[1, 1, 1], reflect=True)\n assert pts.shape == (140, 3)\n pts = f(20, domain_size=[1, 1, 0], reflect=False)\n assert pts.shape == (20, 3)\n pts = f(20, domain_size=[1, 1, 0], reflect=True)\n assert pts.shape == (100, 3)\n pts = f(20, domain_size=[1, 1], reflect=False)\n assert pts.shape == (20, 3)\n pts = f(20, domain_size=[1, 1], reflect=True)\n assert pts.shape == (120, 3)\n pts = f(20, domain_size=[1, 0], reflect=False)\n assert pts.shape == (20, 3)\n pts = f(20, domain_size=[1, 0], reflect=True)\n assert pts.shape == (40, 3)\n pts = f(20, domain_size=[1], reflect=False)\n assert pts.shape == (20, 3)\n pts = f(20, domain_size=[1], reflect=True)\n assert pts.shape == (40, 3)\n\n\nif __name__ == '__main__':\n\n t = SKGRGeneratorToolsTest()\n self = t\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print(f'Running test: {item}')\n t.__getattribute__(item)()\n","repo_name":"PMEAL/OpenPNM","sub_path":"tests/unit/skgraph/test_generator_tools.py","file_name":"test_generator_tools.py","file_ext":"py","file_size_in_byte":6256,"program_lang":"python","lang":"en","doc_type":"code","stars":404,"dataset":"github-code","pt":"3"} +{"seq_id":"29170328367","text":"import pytest\nimport kern\n\n\n@pytest.mark.xfail(not kern.kernel_in(\"4.19.17-2\", \"5.4.14-1\"), reason=\"not implemented\")\n@pytest.mark.parametrize('cfq_period,cfq_quota,num_jobs,expected_wait', [\n pytest.param(100, 50, 1, 500),\n pytest.param(100, 100, 1, 0),\n pytest.param(100, 100, 2, 1000),\n pytest.param(100, 100, 3, 2000)\n])\ndef test_cpuacct_wait(logger, make_cgroup, find_bin, make_task,\n cfq_period, cfq_quota, num_jobs, expected_wait):\n \"\"\"\n Comit 4000e0a (\"sched/cpuacct: account time tasks waiting for execution\")\n \"\"\"\n\n runtime = 3\n runtime_ns 
= runtime * (10 ** 9)\n stress_bin = find_bin('stress-ng')\n\n cg = make_cgroup('cpu')\n assert cg.has_attr('cpuacct.wait')\n\n cg['cpu.cfs_quota_us'] = cfq_quota * 1000\n cg['cpu.cfs_period_us'] = cfq_period * 1000\n\n task = make_task(cgroups=[cg])\n assert cg.has_task(task.pid)\n assert cg.get_int(\"cpuacct.wait\") == 0\n\n task.check_call([stress_bin, '--cpu', str(num_jobs), \"--timeout\", str(runtime)])\n\n wait = cg.get_int(\"cpuacct.wait\")\n wait_percpu = cg.get_percpus(\"cpuacct.wait_percpu\")\n assert wait == sum(wait_percpu)\n\n usage = cg.get_int(\"cpuacct.usage\")\n usage_percpu = cg.get_percpus(\"cpuacct.usage_percpu\")\n assert usage == sum(usage_percpu)\n\n assert usage == pytest.approx((cfq_quota * runtime_ns) / cfq_period, rel=0.15)\n assert wait == pytest.approx(expected_wait * runtime * (10 ** 6), abs=0.15 * runtime_ns)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"infra/test/misc/test_cgroup_cpuacct_knobs.py","file_name":"test_cgroup_cpuacct_knobs.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71391925203","text":"import datetime\nimport logging\nimport os\nimport time\nfrom urllib.parse import urljoin\n\nimport humanize\nfrom django.contrib.auth.models import User\nfrom django.db.models import Sum\nfrom product_classifier.models import ProductClass, ProductClassContent\nfrom product_register.models import ExternalProcess\nfrom rest_framework import serializers\n\nfrom .models import *\n\nlogger = logging.getLogger(__name__)\n\n\nclass ProductSerializer(serializers.HyperlinkedModelSerializer):\n prd_class = serializers.PrimaryKeyRelatedField(\n queryset=ProductClass.objects.all(), many=False)\n\n # Atributos da product_classifier.ProductClass\n # pcl_name = serializers.SerializerMethodField()\n pcl_display_name = serializers.SerializerMethodField()\n pcl_is_system = serializers.SerializerMethodField()\n\n # Atributos da product_classifier.ProductGroup\n pgr_group = serializers.SerializerMethodField()\n # pgr_name = serializers.SerializerMethodField()\n pgr_display_name = serializers.SerializerMethodField()\n\n # epr_original_id = Original Process ID\n epr_original_id = serializers.SerializerMethodField()\n\n # epr_original_id = Original Process ID\n prd_filter = serializers.SerializerMethodField()\n\n # Related Products\n prl_related = serializers.SerializerMethodField()\n prl_cross_identification = serializers.SerializerMethodField()\n prl_cross_property = serializers.SerializerMethodField()\n\n tablename = serializers.SerializerMethodField()\n\n productlog = serializers.SerializerMethodField()\n\n class Meta:\n model = Product\n\n fields = (\n 'id',\n 'prd_name',\n 'prd_display_name',\n 'prd_user_display_name',\n 'prd_class',\n 'prd_is_public',\n # 'pcl_name',\n 'pcl_display_name',\n 'pcl_is_system',\n 'pgr_group',\n # 'pgr_name',\n 'pgr_display_name',\n 'epr_original_id',\n 'prd_filter',\n 'prl_related',\n 'prl_cross_identification',\n 'prl_cross_property',\n 'tablename',\n 'productlog'\n )\n\n def get_pcl_name(self, obj):\n return obj.prd_class.pcl_name\n\n def get_pcl_display_name(self, obj):\n return obj.prd_class.pcl_display_name\n\n def get_pcl_is_system(self, obj):\n return obj.prd_class.pcl_is_system\n\n def get_pgr_group(self, obj):\n return obj.prd_class.pcl_group.id\n\n def get_pgr_name(self, obj):\n return obj.prd_class.pcl_group.pgr_name\n\n def get_pgr_display_name(self, obj):\n return obj.prd_class.pcl_group.pgr_display_name\n\n 
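# resolver methods: each get_* method below backs a SerializerMethodField declared above\n 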
def get_epr_original_id(self, obj):\n try:\n return obj.prd_process_id.epr_original_id\n except:\n return None\n\n def get_prd_filter(self, obj):\n try:\n return obj.prd_filter.filter\n except:\n return None\n\n def get_prl_related(self, obj):\n try:\n related = ProductRelated.objects.get(prl_product=obj.pk, prl_relation_type=\"join\")\n return related.prl_related.pk\n except:\n return None\n\n def get_prl_cross_identification(self, obj):\n try:\n related = ProductRelated.objects.get(prl_product=obj.pk, prl_relation_type=\"join\")\n return related.prl_cross_identification.pk\n except:\n return None\n\n def get_prl_cross_property(self, obj):\n try:\n related = ProductRelated.objects.get(prl_product=obj.pk, prl_relation_type=\"join\")\n return related.prl_cross_identification.pcn_column_name.lower()\n except:\n return None\n\n def get_tablename(self, obj):\n try:\n if obj.table.tbl_schema is not None:\n return \"%s.%s\" % (obj.table.tbl_schema, obj.table.tbl_name)\n else:\n return obj.table.tbl_name\n except:\n return None\n\n def get_productlog(self, obj):\n try:\n site = obj.prd_process_id.epr_site.sti_url\n return urljoin(site, \"VP/getViewProcessCon?process_id=%s\" % obj.prd_process_id.epr_original_id)\n\n except:\n return None\n\n\nclass FileSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = File\n\n fields = (\n 'id',\n 'fli_base_path',\n 'fli_name'\n )\n\n\nclass TableSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Table\n\n fields = (\n 'id',\n 'tbl_schema',\n 'tbl_name'\n )\n\n\nclass CatalogSerializer(serializers.HyperlinkedModelSerializer):\n owner = serializers.SerializerMethodField()\n prd_process_id = serializers.PrimaryKeyRelatedField(\n queryset=ExternalProcess.objects.all(), many=False)\n\n prd_class = serializers.PrimaryKeyRelatedField(\n queryset=ProductClass.objects.all(), many=False)\n\n # Atributos da product_classifier.ProductClass\n # pcl_name = serializers.SerializerMethodField()\n pcl_display_name = serializers.SerializerMethodField()\n pcl_is_system = serializers.SerializerMethodField()\n\n # Atributos da product_classifier.ProductGroup\n pgr_group = serializers.SerializerMethodField()\n # pgr_name = serializers.SerializerMethodField()\n pgr_display_name = serializers.SerializerMethodField()\n\n # Atributos da product_register.ExternalProcess\n epr_original_id = serializers.SerializerMethodField()\n epr_name = serializers.SerializerMethodField()\n epr_username = serializers.SerializerMethodField()\n epr_start_date = serializers.SerializerMethodField()\n epr_end_date = serializers.SerializerMethodField()\n epr_readme = serializers.SerializerMethodField()\n epr_comment = serializers.SerializerMethodField()\n\n # epr_site = models.CharField(max_length=128)\n release_id = serializers.SerializerMethodField()\n release_display_name = serializers.SerializerMethodField()\n\n is_owner = serializers.SerializerMethodField()\n\n tbl_rows = serializers.SerializerMethodField()\n\n class Meta:\n model = Catalog\n\n fields = (\n 'id',\n 'owner',\n 'prd_process_id',\n 'prd_name',\n 'prd_display_name',\n 'prd_user_display_name',\n 'prd_class',\n 'prd_date',\n 'prd_is_public',\n\n 'pcl_display_name',\n 'pcl_is_system',\n\n 'pgr_group',\n 'pgr_display_name',\n\n 'epr_original_id',\n 'epr_name',\n 'epr_username',\n 'epr_start_date',\n 'epr_end_date',\n 'epr_readme',\n 'epr_comment',\n\n 'tbl_schema',\n 'tbl_name',\n 'tbl_size',\n 'tbl_num_columns',\n 'tbl_rows',\n\n 'release_id',\n 'release_display_name',\n\n 'is_owner'\n )\n\n def 
get_owner(self, obj):\n try:\n return obj.prd_owner.username\n except:\n return None\n\n def get_pcl_name(self, obj):\n return obj.prd_class.pcl_name\n\n def get_pcl_display_name(self, obj):\n return obj.prd_class.pcl_display_name\n\n def get_pcl_is_system(self, obj):\n return obj.prd_class.pcl_is_system\n\n def get_pgr_group(self, obj):\n return obj.prd_class.pcl_group.id\n\n def get_pgr_name(self, obj):\n return obj.prd_class.pcl_group.pgr_name\n\n def get_pgr_display_name(self, obj):\n return obj.prd_class.pcl_group.pgr_display_name\n\n def get_epr_original_id(self, obj):\n try:\n return obj.prd_process_id.epr_original_id\n except:\n return None\n\n def get_epr_name(self, obj):\n try:\n return obj.prd_process_id.epr_name\n except:\n return None\n\n def get_epr_username(self, obj):\n try:\n return obj.prd_process_id.epr_username\n except:\n return None\n\n def get_epr_start_date(self, obj):\n try:\n return obj.prd_process_id.epr_start_date\n except:\n return None\n\n def get_epr_end_date(self, obj):\n try:\n return obj.prd_process_id.epr_end_date\n except:\n return None\n\n def get_epr_readme(self, obj):\n try:\n return obj.prd_process_id.epr_readme\n except:\n return None\n\n def get_epr_comment(self, obj):\n try:\n return obj.prd_process_id.epr_comment\n except:\n return None\n\n def get_release_id(self, obj):\n try:\n r = obj.productrelease_set.first()\n return r.release.id\n except:\n return None\n\n def get_release_display_name(self, obj):\n try:\n r = obj.productrelease_set.first()\n return r.release.rls_display_name\n except:\n return None\n\n def get_is_owner(self, obj):\n current_user = self.context['request'].user\n if obj.prd_owner.pk == current_user.pk:\n return True\n else:\n return False\n\n def get_tbl_rows(self, obj):\n if obj.tbl_rows is None:\n return obj.ctl_num_objects\n else:\n return obj.tbl_rows\n\n\nclass MapSerializer(ProductSerializer):\n class Meta(ProductSerializer.Meta):\n model = Map\n\n fields = ProductSerializer.Meta.fields + ('id', 'mpa_nside', 'mpa_ordering')\n\n\nclass CutoutJobSerializer(serializers.HyperlinkedModelSerializer):\n cjb_product = serializers.PrimaryKeyRelatedField(\n queryset=Product.objects.all(), many=False)\n\n owner = serializers.SerializerMethodField()\n execution_time = serializers.SerializerMethodField()\n h_file_sizes = serializers.SerializerMethodField()\n is_owner = serializers.SerializerMethodField()\n\n status_name = serializers.CharField(\n source='get_cjb_status_display', read_only=True\n )\n\n class Meta:\n model = CutOutJob\n\n fields = (\n 'id',\n 'cjb_product',\n 'cjb_display_name',\n 'cjb_status',\n 'status_name',\n 'cjb_tag',\n 'cjb_xsize',\n 'cjb_ysize',\n 'cjb_make_fits',\n 'cjb_fits_colors',\n 'cjb_make_stiff',\n 'cjb_stiff_colors',\n 'cjb_make_lupton',\n 'cjb_lupton_colors',\n 'cjb_label_position',\n 'cjb_label_properties',\n 'cjb_label_colors',\n 'cjb_label_font_size',\n 'cjb_start_time',\n 'cjb_finish_time',\n 'cjb_description',\n 'cjb_files',\n 'cjb_file_size',\n 'cjb_error',\n 'owner',\n 'execution_time',\n 'h_file_sizes',\n 'is_owner'\n )\n\n def get_owner(self, obj):\n return obj.owner.username\n\n def get_execution_time(self, obj):\n try:\n tdelta = obj.cjb_finish_time - obj.cjb_start_time\n seconds = tdelta.total_seconds()\n execution_time = str(datetime.timedelta(seconds=seconds)).split('.')[0]\n\n return execution_time\n except:\n return None\n\n def get_h_file_sizes(self, obj):\n try:\n return humanize.naturalsize(obj.cjb_file_size)\n except:\n return None\n\n def get_is_owner(self, obj):\n current_user 
= self.context['request'].user\n if obj.owner.pk == current_user.pk:\n return True\n else:\n return False\n\n\nclass CutoutSerializer(serializers.HyperlinkedModelSerializer):\n cjb_cutout_job = serializers.PrimaryKeyRelatedField(\n queryset=CutOutJob.objects.all(), many=False)\n\n cjb_des_job = serializers.PrimaryKeyRelatedField(\n queryset=Desjob.objects.all(), many=False)\n\n ctt_img_color = serializers.CharField(source='ctt_filter.filter')\n ctt_file_source = serializers.SerializerMethodField()\n\n class Meta:\n model = Cutout\n\n fields = (\n 'id',\n 'cjb_cutout_job',\n 'cjb_des_job',\n 'ctt_object_id',\n 'ctt_object_ra',\n 'ctt_object_dec',\n 'ctt_img_format',\n 'ctt_img_color',\n # 'ctt_filter',\n # 'ctt_file_path',\n 'ctt_file_name',\n 'ctt_file_type',\n 'ctt_file_size',\n # 'ctt_jobid',\n 'ctt_file_source',\n )\n\n def get_ctt_file_source(self, obj):\n try:\n # Exemplo do source para o arquivo de imagem.\n # http://localhost/data/cutouts/18/7bd2a79749974decab360f401310bf60/DES0305-3415/DESJ030506.1606-341532.4000/DESJ030506.1606-341532.4000_gri_stiff.png\n\n # Recuperar o Host\n host = settings.BASE_HOST\n\n # Substituir o path de Archive por /data que é o alias\n base_source = host + obj.ctt_file_path.replace(\"/archive\", \"/data\")\n\n # Adicionar o filename\n source = \"{}/{}?_dc={}\".format(base_source, obj.ctt_file_name, time.time())\n return source\n except Exception as e:\n return None\n\n\nclass MaskSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Mask\n\n fields = (\n 'id',\n 'msk_filter'\n )\n\n\nclass ProductContentSerializer(serializers.HyperlinkedModelSerializer):\n pcn_product_id = serializers.PrimaryKeyRelatedField(\n queryset=Product.objects.all(), many=False)\n\n class Meta:\n model = ProductContent\n\n fields = (\n 'id',\n 'pcn_product_id',\n 'pcn_column_name',\n )\n\n\nclass ProductContentAssociationSerializer(serializers.HyperlinkedModelSerializer):\n pca_product = serializers.PrimaryKeyRelatedField(\n queryset=Product.objects.all(), many=False)\n\n pca_class_content = serializers.PrimaryKeyRelatedField(\n queryset=ProductClassContent.objects.all(), many=False)\n\n pca_product_content = serializers.PrimaryKeyRelatedField(\n queryset=ProductContent.objects.all(), many=False)\n\n # Atributos da product_classifier.ProductClassContent\n pcc_category = serializers.SerializerMethodField()\n pcc_display_name = serializers.SerializerMethodField()\n pcc_ucd = serializers.SerializerMethodField()\n pcc_unit = serializers.SerializerMethodField()\n pcc_reference = serializers.SerializerMethodField()\n pcc_mandatory = serializers.SerializerMethodField()\n\n # Atributos da product.ProductContent\n pcn_column_name = serializers.SerializerMethodField()\n\n class Meta:\n model = ProductContentAssociation\n\n fields = (\n 'id',\n 'pca_product',\n 'pca_class_content',\n 'pca_product_content',\n 'pcc_category',\n 'pcc_display_name',\n 'pcc_ucd',\n 'pcc_unit',\n 'pcc_reference',\n 'pcc_mandatory',\n 'pcn_column_name',\n )\n\n read_only_fields = ('id',)\n\n def get_pcc_category(self, obj):\n try:\n return obj.pca_class_content.pcc_category.cct_name\n except:\n return None\n\n def get_pcc_display_name(self, obj):\n return obj.pca_class_content.pcc_display_name\n\n def get_pcc_ucd(self, obj):\n return obj.pca_class_content.pcc_ucd\n\n def get_pcc_unit(self, obj):\n return obj.pca_class_content.pcc_unit\n\n def get_pcc_reference(self, obj):\n return obj.pca_class_content.pcc_reference\n\n def get_pcc_mandatory(self, obj):\n return 
obj.pca_class_content.pcc_mandatory\n\n def get_pcn_column_name(self, obj):\n return obj.pca_product_content.pcn_column_name\n\n\nclass AssociationSerializer(serializers.HyperlinkedModelSerializer):\n # Atributos da product_classifier.ProductClassContent\n pcc_ucd = serializers.SerializerMethodField()\n\n # Atributos da product.ProductContent\n pcn_column_name = serializers.SerializerMethodField()\n\n class Meta:\n model = ProductContentAssociation\n\n fields = (\n 'pcc_ucd',\n 'pcn_column_name'\n )\n\n def get_pcc_ucd(self, obj):\n return obj.pca_class_content.pcc_ucd\n\n def get_pcn_column_name(self, obj):\n return obj.pca_product_content.pcn_column_name\n\n\nclass ProductAssociationSerializer(serializers.ModelSerializer):\n # Atributos da product_classifier.ProductClassContent\n pcc_ucd = serializers.SerializerMethodField()\n\n # Atributos da product.ProductContent\n pcn_column_name = serializers.SerializerMethodField()\n\n class Meta:\n model = ProductContentAssociation\n\n fields = (\n 'id',\n 'pca_product',\n 'pca_class_content',\n 'pca_product_content',\n 'pcc_ucd',\n 'pcn_column_name'\n )\n\n read_only_fields = ('id',)\n\n def get_pcc_ucd(self, obj):\n return obj.pca_class_content.pcc_ucd\n\n def get_pcn_column_name(self, obj):\n return obj.pca_product_content.pcn_column_name.lower()\n\n\nclass ProductRelatedSerializer(serializers.ModelSerializer):\n prl_cross_name = serializers.SerializerMethodField()\n\n class Meta:\n model = ProductRelated\n\n fields = (\n 'id',\n 'prl_product',\n 'prl_related',\n 'prl_relation_type',\n 'prl_cross_identification',\n 'prl_cross_name'\n )\n\n def get_prl_cross_name(self, obj):\n try:\n return obj.prl_cross_identification.pcn_column_name\n\n except:\n return None\n\n\nclass AllProductsSerializer(serializers.HyperlinkedModelSerializer):\n ctl_num_objects = serializers.SerializerMethodField()\n tbl_rows = serializers.SerializerMethodField()\n mpa_nside = serializers.SerializerMethodField()\n mpa_ordering = serializers.SerializerMethodField()\n prd_table_ptr = serializers.SerializerMethodField()\n pgr_display_name = serializers.SerializerMethodField()\n pcl_display_name = serializers.SerializerMethodField()\n prd_process_id = serializers.PrimaryKeyRelatedField(\n queryset=ExternalProcess.objects.all(), many=False)\n epr_username = serializers.SerializerMethodField()\n epr_end_date = serializers.SerializerMethodField()\n epr_original_id = serializers.SerializerMethodField()\n exp_username = serializers.SerializerMethodField()\n exp_date = serializers.SerializerMethodField()\n # Dados do Release\n prd_release_id = serializers.SerializerMethodField()\n prd_release_display_name = serializers.SerializerMethodField()\n # Dados do Field\n prd_tags = serializers.SerializerMethodField()\n prd_tags_name = serializers.SerializerMethodField()\n\n prd_filter = serializers.SerializerMethodField()\n\n class Meta:\n model = Product\n\n fields = (\n 'id',\n 'prd_name',\n 'prd_display_name',\n 'prd_user_display_name',\n 'ctl_num_objects',\n 'tbl_rows',\n 'mpa_nside',\n 'mpa_ordering',\n \"pgr_display_name\",\n 'pcl_display_name',\n 'prd_process_id',\n 'epr_username',\n 'epr_end_date',\n 'prd_release_id',\n 'prd_release_display_name',\n 'prd_tags',\n 'prd_tags_name',\n 'epr_original_id',\n 'prd_filter',\n 'prd_table_ptr',\n 'exp_username',\n 'exp_date'\n\n )\n\n def get_ctl_num_objects(self, obj):\n try:\n return obj.table.catalog.ctl_num_objects\n except AttributeError:\n return None\n\n def get_tbl_rows(self, obj):\n try:\n return obj.table.catalog.tbl_rows\n except 
AttributeError:\n return None\n\n def get_prd_filter(self, obj):\n try:\n return obj.prd_filter.filter\n\n except AttributeError:\n return None\n\n def get_mpa_nside(self, obj):\n try:\n return obj.table.map.mpa_nside\n except AttributeError:\n return None\n\n def get_mpa_ordering(self, obj):\n try:\n return obj.table.map.mpa_ordering\n except AttributeError:\n return None\n\n def get_prd_table_ptr(self, obj):\n try:\n return str(obj.table.map.table_ptr)\n except AttributeError:\n return str(obj.table.catalog.table_ptr)\n\n def get_pgr_display_name(self, obj):\n return obj.prd_class.pcl_group.pgr_display_name\n\n def get_pcl_display_name(self, obj):\n return obj.prd_class.pcl_display_name\n\n def get_epr_original_id(self, obj):\n return obj.prd_process_id.epr_original_id\n\n def get_epr_username(self, obj):\n return obj.prd_process_id.epr_username\n\n def get_epr_end_date(self, obj):\n return obj.prd_process_id.epr_end_date\n\n def get_exp_username(self, obj):\n try:\n r = obj.prd_process_id.export_set.first()\n return r.exp_username\n except AttributeError:\n return None\n\n def get_exp_date(self, obj):\n try:\n r = obj.prd_process_id.export_set.first()\n return r.exp_date\n except AttributeError:\n return None\n\n def get_prd_release_id(self, obj):\n try:\n r = obj.releases.first()\n return r.id\n except AttributeError:\n return None\n\n def get_prd_release_display_name(self, obj):\n try:\n r = obj.releases.first()\n return r.rls_display_name\n except AttributeError:\n return None\n\n def get_prd_tags_name(self, obj):\n try:\n tags = list()\n for tag in obj.tags.values():\n tags.append(tag.get('tag_display_name'))\n\n return tags\n except AttributeError:\n return None\n\n def get_prd_tags(self, obj):\n try:\n tags = list()\n for tag in obj.tags.values():\n tags.append(tag.get('id'))\n\n return tags\n except AttributeError:\n return None\n\n\nclass ProductSettingSerializer(serializers.ModelSerializer):\n owner = serializers.SerializerMethodField()\n editable = serializers.SerializerMethodField()\n\n class Meta:\n model = ProductSetting\n\n fields = (\n 'id',\n 'cst_product',\n 'cst_display_name',\n 'cst_description',\n 'cst_is_public',\n 'cst_is_editable',\n 'owner',\n 'editable'\n )\n\n def get_owner(self, obj):\n return obj.owner.username\n\n def get_editable(self, obj):\n current_user = self.context['request'].user\n if obj.owner.pk == current_user.pk:\n return True\n else:\n return obj.cst_is_editable\n\n\nclass CurrentSettingSerializer(serializers.ModelSerializer):\n editable = serializers.SerializerMethodField()\n\n class Meta:\n model = CurrentSetting\n\n fields = (\n 'id',\n 'cst_product',\n 'cst_setting',\n 'editable'\n )\n\n def get_editable(self, obj):\n current_user = self.context['request'].user\n if obj.cst_setting.owner.pk == current_user.pk:\n return True\n else:\n return obj.cst_setting.cst_is_editable\n\n\nclass ProductContentSettingSerializer(serializers.ModelSerializer):\n display_name = serializers.SerializerMethodField()\n unit = serializers.SerializerMethodField()\n\n class Meta:\n model = ProductContentSetting\n\n fields = (\n 'id',\n 'pcs_content',\n 'pcs_setting',\n 'pcs_is_visible',\n 'pcs_order',\n 'display_name',\n 'unit'\n )\n\n def get_display_name(self, obj):\n try:\n association = obj.pcs_content.productcontentassociation_set.first()\n return association.pca_class_content.pcc_display_name\n except:\n return obj.pcs_content.pcn_column_name\n\n def get_unit(self, obj):\n try:\n association = obj.pcs_content.productcontentassociation_set.first()\n return 
association.pca_class_content.pcc_unit\n except:\n return None\n\n\nclass PermissionUserSerializer(serializers.ModelSerializer):\n prm_product = serializers.PrimaryKeyRelatedField(\n queryset=Product.objects.all(), many=False)\n prm_user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False)\n username = serializers.SerializerMethodField()\n\n class Meta:\n model = Permission\n\n fields = (\n 'id',\n 'prm_product',\n 'prm_user',\n 'username',\n )\n\n def get_username(self, obj):\n return obj.prm_user.username\n\n\nclass PermissionWorkgroupUserSerializer(serializers.ModelSerializer):\n wgu_workgroup = serializers.PrimaryKeyRelatedField(\n queryset=Product.objects.all(), many=False)\n wgu_user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False)\n\n workgroup = serializers.SerializerMethodField()\n username = serializers.SerializerMethodField()\n\n class Meta:\n model = Permission\n\n fields = (\n 'id',\n 'wgu_workgroup',\n 'wgu_user',\n 'workgroup',\n 'username'\n )\n\n def get_workgroup(self, obj):\n return obj.wgu_workgroup.wgp_workgroup\n\n def get_username(self, obj):\n return obj.wgu_user.username\n\n\nclass PermissionSerializer(serializers.ModelSerializer):\n prm_product = serializers.PrimaryKeyRelatedField(\n queryset=Product.objects.all(), many=False)\n prm_user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False, allow_null=True)\n prm_workgroup = serializers.PrimaryKeyRelatedField(queryset=Workgroup.objects.all(), many=False, allow_null=True)\n\n class Meta:\n model = Permission\n\n fields = (\n 'id',\n 'prm_product',\n 'prm_user',\n 'prm_workgroup',\n )\n\n\nclass WorkgroupSerializer(serializers.ModelSerializer):\n owner = serializers.PrimaryKeyRelatedField(read_only=True)\n\n class Meta:\n model = Workgroup\n\n fields = (\n 'id',\n 'wgp_workgroup',\n 'owner',\n )\n\n\nclass WorkgroupUserSerializer(serializers.ModelSerializer):\n wgu_user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False, allow_null=True)\n wgu_workgroup = serializers.PrimaryKeyRelatedField(queryset=Workgroup.objects.all(), many=False, allow_null=True)\n username = serializers.SerializerMethodField()\n\n class Meta:\n model = WorkgroupUser\n\n fields = (\n 'id',\n 'wgu_workgroup',\n 'wgu_user',\n 'username'\n )\n\n def get_username(self, obj):\n return obj.wgu_user.username\n\n\nclass FiltersetSerializer(serializers.ModelSerializer):\n class Meta:\n model = Filterset\n\n fields = (\n 'id',\n 'product',\n 'fst_name'\n )\n\n\nclass FilterConditionSerializer(serializers.ModelSerializer):\n property_name = serializers.SerializerMethodField()\n property_display_name = serializers.SerializerMethodField()\n operator_display_name = serializers.SerializerMethodField()\n\n class Meta:\n model = FilterCondition\n\n fields = (\n 'id',\n 'filterset',\n 'fcd_property',\n 'fcd_property_name',\n 'fcd_operation',\n 'fcd_value',\n 'property_name',\n 'property_display_name',\n 'operator_display_name'\n )\n\n def get_property_name(self, obj):\n try:\n return obj.fcd_property.pcn_column_name\n except:\n return obj.fcd_property_name\n\n def get_property_display_name(self, obj):\n try:\n association = obj.fcd_property.productcontentassociation_set.first()\n return association.pca_class_content.pcc_display_name\n except:\n try:\n return obj.pcs_content.pcn_column_name\n except:\n return obj.fcd_property_name\n\n def get_operator_display_name(self, obj):\n try:\n operators = dict({\n '=': 'is equal to',\n '!=': 'is not equal to',\n '>': 
'is greater than',\n '>=': 'is greater than or equal to',\n '<': 'is less than',\n '<=': 'is less than or equal to'\n })\n\n return operators.get(obj.fcd_operation)\n except:\n return None\n\n\nclass FConditionSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is a slimmer version of FilterConditionSerializer,\n containing only the attributes needed to build the where clause in the SQLAlchemy format\n https://github.com/zzzeek/sqlalchemy/blob/master/lib/sqlalchemy/sql/operators.py#L16\n \"\"\"\n column = serializers.SerializerMethodField()\n op = serializers.SerializerMethodField()\n value = serializers.SerializerMethodField()\n\n class Meta:\n model = FilterCondition\n\n fields = (\n 'column',\n 'op',\n 'value',\n )\n\n def get_column(self, obj):\n property = \"\"\n try:\n property = obj.fcd_property.pcn_column_name\n except:\n property = obj.fcd_property_name\n\n property = property.lower().strip()\n\n return property\n\n def get_op(self, obj):\n\n op = obj.fcd_operation\n if op == \"=\":\n op = \"eq\"\n\n elif op == \"!=\":\n op = \"ne\"\n\n elif op == \"<\":\n op = \"lt\"\n\n elif op == \"<=\":\n op = \"le\"\n\n elif op == \">\":\n op = \"gt\"\n\n elif op == \">=\":\n op = \"ge\"\n\n return op\n\n def get_value(self, obj):\n return obj.fcd_value\n\n\n# ---------------------------------- Bookmark ----------------------------------\n\nclass BookmarkedSerializer(serializers.ModelSerializer):\n owner = serializers.SerializerMethodField()\n is_owner = serializers.SerializerMethodField()\n\n class Meta:\n model = BookmarkProduct\n\n fields = (\n 'id',\n 'product',\n 'owner',\n 'is_starred',\n 'is_owner'\n )\n\n def get_owner(self, obj):\n return obj.owner.username\n\n def get_is_owner(self, obj):\n current_user = self.context['request'].user\n if obj.owner.pk == current_user.pk:\n return True\n else:\n return False\n","repo_name":"linea-it/dri","sub_path":"api/product/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":30615,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"15422948954","text":"import sys\r\n\r\nfrom PyQt5 import uic, QtWidgets\r\n\r\nqtCreatorFile = \"P4_FranciscoGatos.ui\" # File name goes here.\r\n\r\nUi_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)\r\n\r\n\r\nclass MyApp(QtWidgets.QMainWindow, Ui_MainWindow):\r\n def __init__(self):\r\n QtWidgets.QMainWindow.__init__(self)\r\n Ui_MainWindow.__init__(self)\r\n self.setupUi(self)\r\n\r\n # Signals area\r\n self.btn_conv.clicked.connect(self.conversion)\r\n\r\n # Slots area\r\n def conversion(self):\r\n\r\n pesos = int(self.txt_pesos.text())\r\n gatos = 0\r\n\r\n if pesos < 5:\r\n gatos = 1\r\n else:\r\n gatos = pesos / 5\r\n\r\n self.txt_michis.setText(str(gatos))\r\n\r\n def mensaje(self, msj):\r\n m = QtWidgets.QMessageBox()\r\n m.setText(msj)\r\n m.exec_()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = QtWidgets.QApplication(sys.argv)\r\n window = MyApp()\r\n window.show()\r\n sys.exit(app.exec_())","repo_name":"JuanEduardoArellano/PIP_UNIDAD_1_EJEJERCICIOS","sub_path":"P4_Main_FranciscoGatos.py","file_name":"P4_Main_FranciscoGatos.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30118096067","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\nfrom dm.skills.Common._common import CommonSkill\nfrom utilities import CooldownType\n\nif TYPE_CHECKING:\n from dm.core.contexts import 
AttackContext\n from dm.core.game.game import DMGame\n from dm.core.objects.unit import DMUnit\n################################################################################\n\n__all__ = (\"MirrorArmor\",)\n\n################################################################################\nclass MirrorArmor(CommonSkill):\n\n def __init__(self, state: DMGame, parent: DMUnit = None):\n\n super().__init__(\n state, parent,\n _id=\"SKL-182\",\n name=\"Mirror Armor\",\n description=(\n \"Gain 3 Mirror at the beginning of battle. Upon receiving \"\n \"8th damage, gain 1 Mirror.\"\n ),\n rank=4,\n cooldown=CooldownType.Passive\n )\n\n################################################################################\n def execute(self, ctx: AttackContext) -> None:\n\n if self.owner == ctx.target:\n if self.atk_count % 8 == 0:\n self.owner.add_status(\"Mirror\", 1, self)\n\n################################################################################\n def on_acquire(self) -> None:\n\n self.listen(\"hero_spawn\")\n\n################################################################################\n def notify(self, unit: DMUnit) -> None:\n\n if self.owner == unit:\n self.owner.add_status(\"Mirror\", 3, self)\n\n################################################################################\n","repo_name":"AllegroVivo/DungeonDefense","sub_path":"dm/skills/Common/SRank/MirrorArmor.py","file_name":"MirrorArmor.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33550598023","text":"# pylint: disable=not-callable, no-member, invalid-name, line-too-long, wildcard-import, unused-wildcard-import, missing-docstring\nimport itertools\nimport unittest\n\nimport torch\n\nfrom e3nn import o3, rs\nfrom e3nn.non_linearities.s2 import S2Activation\nfrom e3nn.non_linearities.rescaled_act import swish, tanh, sigmoid, softplus, identity, quadratic\n\n\nclass Tests(unittest.TestCase):\n\n def test_equivariance(self):\n torch.set_default_dtype(torch.float64)\n\n Rs = [(1, l, (-1) ** l) for l in range(4 + 1)]\n\n def test(act, normalization):\n x = rs.randn(2, Rs, normalization=normalization)\n ac = S2Activation(Rs, act, 120, normalization=normalization, lmax_out=6, random_rot=True)\n\n a, b, c = o3.rand_angles()\n y1 = ac(x) @ rs.rep(ac.Rs_out, a, b, c, 1).T\n y2 = ac(x @ rs.rep(Rs, a, b, c, 1).T)\n self.assertLess((y1 - y2).abs().max(), 1e-10 * y1.abs().max())\n\n acts = [tanh, swish, sigmoid, softplus, identity, quadratic]\n\n for act, normalization in itertools.product(acts, ['norm', 'component']):\n test(act, normalization)\n\n def test_equivariance_parity(self):\n torch.set_default_dtype(torch.float64)\n\n lmax = 5\n\n def test(Rs, act):\n x = rs.randn(2, Rs)\n ac = S2Activation(Rs, act, 200, lmax_out=lmax + 1, random_rot=True)\n\n a, b, c, p = *torch.rand(3), 1\n y1 = ac(x) @ rs.rep(ac.Rs_out, a, b, c, p).T\n y2 = ac(x @ rs.rep(Rs, a, b, c, p).T)\n self.assertLess((y1 - y2).abs().max(), 3e-4 * y1.abs().max())\n\n Rss = [\n [(1, l, -(-1) ** l) for l in range(lmax + 1)],\n [(1, l, (-1) ** l) for l in range(lmax + 1)],\n [(1, l, -1) for l in range(lmax + 1)],\n [(1, l, 1) for l in range(lmax + 1)],\n ]\n\n acts = [torch.tanh, torch.abs]\n\n for Rs, act in itertools.product(Rss, acts):\n test(Rs, act)\n\n Rss = [\n [(1, l, (-1) ** l) for l in range(lmax + 1)],\n [(1, l, 1) for l in range(lmax + 1)],\n ]\n\n acts = [torch.relu, torch.sigmoid]\n\n for Rs, act in itertools.product(Rss, acts):\n test(Rs, 
act)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"drorlab/gert","sub_path":"e3nn/tests/non_linearities/s2_test.py","file_name":"s2_test.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"22582951540","text":"import mysql.connector\nimport MySQLdb\nimport pandas as pd\nimport pandas.io.sql\nimport csv\nimport numpy as np\nimport sys\nsys.path.append('c:\\\\Users\\\\quach\\\\Desktop\\\\Personal\\\\FPT University\\\\SEMESTER 9\\\\Arranging-rooms-based-on-student-information')\nfrom Recomender.Rec_main import RS\n\n\n\nclass dataBASE():\n\n    def __init__(self) -> None:\n        \n        self.connection = MySQLdb.connect(host='localhost',\n                             database='dormitory',\n                             user='root',\n                             password='security2KL')\n        # if self.connection.is_connected():\n        #     self.cursor = self.connection.cursor()\n        #     self.cursor.execute(\"select database();\")\n        #     self.record = self.cursor.fetchone()\n        #     print(\"You're connected to database: \", self.record)\n\n    def insert_Data(self, csv_path = 'C:\\\\Users\\\\quach\\\\Desktop\\\\Personal\\\\FPT University\\\\SEMESTER 9\\\\Dataset\\\\SORT_dATA.csv'):\n        with open(csv_path, encoding= \"utf-8\") as file_obj:\n            cursor = self.connection.cursor()\n            query_dl = \"DELETE FROM dormitory.student\"\n            cursor.execute(query_dl)\n            # Skips the heading\n            # Using next() method\n            heading = next(file_obj)\n            \n            # Create reader object by passing the file \n            # object to reader method\n            reader_obj = csv.reader(file_obj)\n            \n            # Iterate over each row in the csv file \n            # using reader object\n            query_id = \"SELECT coalesce(max(Student.id), 0) from Student\"\n            i = 1\n            id_student = 0\n            for row in reader_obj:\n                cursor.execute(query_id)\n                result = cursor.fetchall()\n                for k in result:\n                    id_student = k[0]\n                id_student +=1\n                gender = 0\n                if row[2].lower() == \"nam\":\n                    gender = 1\n                row = (row[0], row[1], gender, row[3], row[4], row[5],row[6],row[7],row[8],row[9],row[10])\n\n                sql = \"INSERT INTO Student (id, name, gender, hometown, Bio_personality, food_drink, hob_inter, smoking, refer_roommate, Cleanliess, Privacy) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n                cursor.execute(sql, row)\n                self.connection.commit()\n                print(i)\n                i = i+1\n        last_id = str(cursor.lastrowid)\n    def get_students(self):\n        cursor = self.connection.cursor()\n        cursor.execute(\"SELECT * FROM Student\")\n        myresult = cursor.fetchall()\n        np_result = np.array(myresult)\n        return np.delete(np_result,1,1)\n    \n    def user_room(self):\n        pass\nif __name__ == '__main__':\n    DBer = dataBASE()\n    DBer.get_students()\n","repo_name":"quadang21cen/Arranging-rooms-based-on-student-information","sub_path":"Database/Create_DB.py","file_name":"Create_DB.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4343514055","text":"import sys\n\nnum1 = int(sys.stdin.readline())\n\ncount = 0\nsix = 666\nwhile 1:\n    if '666' in str(six): # check by converting the number to a string\n        count += 1\n        if count == num1:\n            print(six)\n            break\n    six += 1\n\n# Looking at this problem, you have to notice that the answer must contain three consecutive 6s.\n# Also, whenever a problem asks \"where inside the number does a digit appear\",\n# remember that it can be solved by converting the number to a string.\n# Always watch the input range when solving problems.\n# As a rule of thumb, if the estimated operation count stays under about 100 million, even fairly heavy computation passes within 1 second.","repo_name":"kalelpark/Baekjoon-Programmers","sub_path":"브루트포스/1436.py","file_name":"1436.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
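A minimal sketch (an editorial illustration, not one of the dataset records; the helper name nth_number_containing is hypothetical) generalizing the string-conversion trick from the 1436 solution above: find the n-th positive integer whose decimal digits contain a given pattern.

def nth_number_containing(pattern: str, n: int) -> int:
    # Brute-force scan, same idea as the 1436 solution: test candidates in
    # increasing order and use substring search on str(candidate).
    found = 0
    candidate = 0
    while found < n:
        candidate += 1
        if pattern in str(candidate):
            found += 1
    return candidate

# The 1436 answer is nth_number_containing('666', num1); the first two hits:
assert nth_number_containing('666', 1) == 666
assert nth_number_containing('666', 2) == 1666

Per the translated comments above, this stays well under ~1e8 operations for the problem's input range, so it comfortably finishes within the 1-second limit.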
+{"seq_id":"10460840288","text":"# EXERCISE :\n\nfrom random import randint\n\n\ndef throw_dice():\n return randint(1, 6)\n\n\ndef throw_until_doubles():\n both_same = False\n count = 0\n while not both_same:\n count += 1\n dice1 = throw_dice()\n dice2 = throw_dice()\n if dice2 == dice1:\n break\n return count\n\nprint(throw_until_doubles())\n\ndef main():\n counter = 0\n for i in range(1, 100):\n counter += throw_until_doubles()\n return counter\n\n\ntotal = main()\n\nprint(f'''\nTotal throws: {total}\nAverage throws to reach doubles: {total/100}\n''')\n\n","repo_name":"emmanuelle1234/di_exercices","sub_path":"week_7/day_2/exercise_gold_/week_7_day_2_exercise_gold.py","file_name":"week_7_day_2_exercise_gold.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23381254609","text":"\"\"\"\n@Author: Dustin Xu\n@Date: 2020/2/9 14:56 PM\n@Description: the method of evaluate with window size\n\"\"\"\nimport numpy as np\nfrom dictionary.tornado_dictionary import TornadoDic\nfrom plotter.performance_plotter import *\n\nclass EvaluateWithWindowSize:\n def __init__(self, learner, detector, project, windowSize=500):\n self.window_size = windowSize\n self.learner = learner\n self.detector = detector\n\n self.count = 0\n self.allCount = 0\n self.accuracy = 0\n self.accuracyList = []\n self.accuracyAverageList = []\n\n self.__project_path = project.get_path()\n self.__project_name = project.get_name()\n self.learner_name = TornadoDic.get_short_names(self.learner.LEARNER_NAME)\n self.detector_name = self.detector.DETECTOR_NAME\n\n self.prediction_status = False\n\n def calculate_accuracy(self, prob, ground_truth, output_size=1, output_flag=False):\n \"\"\"\n :param prob: Predicted value\n :param ground_truth: Ground truth\n :param output_size: Calculate the step size of accuracy\n :param output_flag: Print out ?\n :return: True or False\n \"\"\"\n predicted_class = prob.index(max(prob))\n if predicted_class == ground_truth:\n self.count += 1\n self.prediction_status = True\n else:\n self.prediction_status = False\n self.allCount += 1\n self.accuracy = self.count / self.allCount\n if self.allCount % output_size == 0:\n self.accuracyList.append(self.accuracy)\n average_accuracy = round(sum(self.accuracyList)/len(self.accuracyList), 4)\n self.accuracyAverageList.append(average_accuracy)\n if output_flag:\n print(\"Current Accuracy:\", self.accuracy)\n print(\"Average Accuracy:\", average_accuracy)\n return self.prediction_status\n\n def store_stats(self):\n # 保存模型的准确率\n np.save(self.__project_path + self.learner_name + '+' + self.detector_name + '_accuracy.npy', np.vstack([self.accuracyList, self.accuracyAverageList]))\n # README\n # 保存模型准确率等等等各种前端数据\n # stats_writer = open(self.__project_path + \"INFORMATION.txt\", \"w\")\n # stats_writer.\n\n def plot(self, step=1, dataset=None, data=None):\n\n file_name = self.__project_name + \"_single\"\n\n Plotter.plot_single(self.learner_name + '+' + self.detector_name, self.accuracyList, \"Accuracy\",\n self.__project_name, self.__project_path, file_name, None, 'upper right', 200,\n datasetName=dataset, dataName=data, step=step)","repo_name":"dongshixu/ConceptExplorer","sub_path":"tornado/evaluate_xu/evaluateWithWindowSize.py","file_name":"evaluateWithWindowSize.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29040083873","text":"from django import forms\nfrom 
django.forms import ValidationError\nfrom django.utils.translation import ugettext as _\n\nfrom ganeti_webmgr.ganeti_web import constants\nfrom ganeti_webmgr.utils import cluster_default_info\n\n\nclass RoleForm(forms.Form):\n    \"\"\"\n    Form for editing roles\n    \"\"\"\n    role = forms.ChoiceField(initial='',\n                             choices=constants.ROLE_CHOICES,\n                             label='New Role')\n    force = forms.BooleanField(initial=False, required=False)\n\n\nclass MigrateForm(forms.Form):\n    \"\"\" Form used for migrating primary Virtual Machines off a Node \"\"\"\n    mode = forms.ChoiceField(choices=constants.MODE_CHOICES)\n\n\nclass EvacuateForm(forms.Form):\n    EMPTY_FIELD = constants.EMPTY_CHOICE_FIELD\n\n    iallocator = forms.BooleanField(initial=False, required=False,\n                                    label='Automatic Allocation')\n    iallocator_hostname = forms.CharField(initial='', required=False,\n                                          widget=forms.HiddenInput())\n    node = forms.ChoiceField(initial='', choices=[EMPTY_FIELD], required=False)\n\n    def __init__(self, cluster, node, *args, **kwargs):\n        super(EvacuateForm, self).__init__(*args, **kwargs)\n\n        node_list = [str(h) for h in cluster.nodes.exclude(pk=node.pk)\n                     .values_list('hostname', flat=True)]\n        nodes = list(zip(node_list, node_list))  # list() so insert() also works on Python 3\n        nodes.insert(0, self.EMPTY_FIELD)\n        self.fields['node'].choices = nodes\n\n        defaults = cluster_default_info(cluster)\n        if defaults['iallocator'] != '':\n            self.fields['iallocator'].initial = True\n            self.fields['iallocator_hostname'].initial = defaults['iallocator']\n\n    def clean(self):\n        data = self.cleaned_data\n\n        iallocator = data['iallocator']\n        node = data['node'] if 'node' in data else None\n\n        if iallocator:\n            data['node'] = None\n        elif node:\n            data['iallocator_hostname'] = None\n        else:\n            raise ValidationError(_('Must choose automatic allocation '\n                                    'or a specific node'))\n\n        return data\n","repo_name":"osuosl/ganeti_webmgr","sub_path":"ganeti_webmgr/nodes/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"3"} +{"seq_id":"43540493970","text":"# Write a function that takes a string with the absolute\n# path to a file as input. The function returns a tuple of three\n# elements: the path, the file name, and the file extension.\n\nimport os\npath_ex = 'X:/Geek/Developer/block/GitHub/workshop/README.md'\n\ndef parse_path(str_path: str) -> tuple:\n    path, file_ext = os.path.split(str_path)\n    file_name, ext = file_ext.rsplit('.', 1)  # rsplit keeps dots inside the file name intact\n    return path, file_name, ext\n\nprint(parse_path(path_ex))","repo_name":"railgum/immersion_in_Python","sub_path":"lesson_5_Iterators_Generators/hw_5/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33818231416","text":"# Create a program that has a tuple completely filled with a count written out in words, from zero up to twenty.\n# Your program should read a number from the keyboard (between 0 and 20) and show it written out.\nlista = ('Zero', 'Um', 'Dois', 'Três', 'Quatro',\n         'Cinco', 'Seis', 'Sete', 'Oito', 'Nove',\n         'Dez', 'Onze', 'Doze', 'Treze', 'Quatorze',\n         'Quinze', 'Dezesseis', 'Dezessete', 'Dezoito',\n         'Dezenove', 'Vinte')\nresp = ''\nwhile resp in 'S':\n    num = int(input('Digite um valor entre 0 e 20: '))\n    while num > 20 or num < 0:\n        num = int(input('Tente novamente. Digite um valor entre 0 e 20: '))\n    print(f'Você digitou {lista[num]}.')\n    resp = str(input('Quer continuar? 
[S/N] ')).strip().upper()[0]\n","repo_name":"mroya/ProjectGit","sub_path":"Python/Curso em Video Python/Aula16-Tuplas/ex072_Número por extenso.py","file_name":"ex072_Número por extenso.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"20215369479","text":"import pytz\nfrom datetime import datetime, timezone\nimport awsapi as aws\nimport json\nimport logging\nimport os\nimport inspect\n\nhome = os.path.dirname(os.path.realpath(__file__))\nlogger = logging.getLogger(str(inspect.getouterframes(inspect.currentframe()\n                                )[-1].filename))\n#-----------------\n# This variable defines the mode in which the calculator will execute\n# Feel free to define new functions that initialize with different heuristics\n# values: experimenting (does not save history, take actions on users or send data to the server)\n#         testing (does not send data to the server or take actions on users, but saves history)\n#         monitoring (sends data to the server, takes actions and saves history)\nMODE = 'monitoring'\n#-----------------\n\n#--------------------------BUCKET.PY-----------------------------------\n# The DEMANDS dict tells the system the limits that differentiate demands for each finality\n# Feel free to define new finalities and new demands\n# To define a new finality, you should edit selections.py so the system can categorize a VM\n# To define a new demand, you just need to add it to the DEMANDS dict. It must be reverse sorted.\n# default: execution, with 'high' demand higher than 90% utilization\n#          with 'low' demand higher than 10% utilization\n#          with 'idle' demand higher than 0% utilization\n#          server, 'high' 80%, 'low' 10%, 'idle' 0%\n#          interaction, 'high' 70%, 'low' 10%, 'idle' 0%\n\nBUCKET_DEMANDS = {'execution': {'high': 90, 'low': 10, 'idle': 0},\n                  'server': {'high': 80, 'low': 10, 'idle': 0},\n                  'interaction': {'high': 70, 'low': 10, 'idle': 0}}\n\n# The DEMAND_METRIC dict tells the system the metric to use to categorize the demand\n# default: all finalities use the vCPU utilization\n\nBUCKET_DEMAND_METRIC = {'execution': 'system.cpu.util[all,user,avg1]',\n                        'server': 'system.cpu.util[all,user,avg1]',\n                        'interaction': 'system.cpu.util[all,user,avg1]'}\n#--------------------------------------------------------------------------\n\n#--------------------------SELECTIONS.PY-----------------------------------\n# The RESOURCES dict tells the system the resources used to filter the instances.\n# Each resource has a metric, which is the metric used to gather the utilization data,\n# and a unit, which is the unit of the utilization\n# Our system only uses percentage as unit\n# default: vcpu, gpu and memory, all with percentage unit and their respective Zabbix items\n\nSELECTIONS_RESOURCES = {\n    'vcpu': {'metric': 'system.cpu.util[all,user,avg1]', 'unit': 'percentage'},\n    'gpu': {'metric': 'gpu.utilization', 'unit': 'percentage'},\n    'memory': {'metric': 'vm.memory.size[pused]', 'unit': 'percentage'}\n}\n#--------------------------------------------------------------------------\n\n\n#--------------------------ACTIONS.PY----------------------------------\n# This variable defines the time IN SECONDS between actions of the same bucket \n# for the same instance\n# default: 10 minutes\nACTION_TIME_BETWEEN_ACTIONS = 600\n\n# This variable defines the number of actions that the system takes for a bucket\n# for an instance before notifying the admins\n# default: 5 times\nACTION_AMOUNT_OF_ACTIONS_TAKEN = 5\n\n# The THRESHOLDS dict tells the system the 
thresholds and the actions to be taken\n# You can define thresholds and actions for each finality and each demand\n# You can also not define thresholds and actions for a bucket, \n# but once you have a threshold, you must define an action \n# The bucket can have accumulative and reset quantification thresholds\n# Each one has a thresholds list, which are hours of wastage\n# and an actions list, which are the actions taken by the respective threshold\n# default: execution-high demand has accumulative quantification \n# and a threshold at 3 hours of wastage and 'notification' as an action\n# This means that once the VM reaches the wastage in $ related to\n# 3 hours, a notification will be sended\n# For example, if the VM costs $2.00/h, \n# 3 hours of wastage means 3h*$2.00/h=$6.00\n# So the user will be notified when the wastage reaches $6.00 \n# execution-high-accumulative: 3 notification\n# execution-low-accumulative: 2 recommendation\n# execution-idle-accumulative: 2 notification\n# execution-idle-reset: 1 intervention\n# server-high-accumulative: 3 notification\n# server-high-reset: 1 recommendation\n# server-low-accumulative: 3 recommendation\n# server-idle-accumulative: 3 recommendation\n# interaction-high-accumulative: 3 notification\n# interaction-high-reset: 1 recommendation\n# interaction-low-accumulative: 3 recommendation\n# interaction-idle-accumulative: 3 recommendation\n\n\nACTION_THRESHOLDS = {'execution': {'high': {'accumulative': {\n 'thresholds': [3], \n 'action': ['notification']}},\n 'low': {'accumulative': {\n 'thresholds': [2], \n 'action': ['recommendation']}},\n 'idle': {'accumulative': {\n 'thresholds': [2], \n 'action': ['notification']}, \n 'reset': {\n 'thresholds': [1], \n 'action': ['intervention']}}},\n 'server': {'high': {'accumulative': {\n 'thresholds': [3], \n 'action': ['notification']},\n 'reset': {\n 'thresholds': [1], \n 'action': ['recommendation']}},\n 'low': {'accumulative': {\n 'thresholds': [3], \n 'action': ['recommendation']}},\n 'idle': {'accumulative': {\n 'thresholds': [3], \n 'action': ['recommendation']}}},\n 'interaction': {'high': {'accumulative': {\n 'thresholds': [3], \n 'action': ['notification']},\n 'reset': {\n 'thresholds': [1], \n 'action': ['recommendation']}},\n 'low': {'accumulative': {\n 'thresholds': [3], \n 'action': ['recommendation']}},\n 'idle': {'accumulative': {\n 'thresholds': [3], \n 'action': ['recommendation']}}}\n }\n#----------------------------------------------------------------------\n\n\n#----------------------------------------------------------------------\n# This variable defines the time IN SECONDS to update the available instances \n# and their prices\n# This information is used by selections.py to select a instance\n# default: one day\nUPDATE_AVAILABLE_INSTANCES = 60*60*24\n\n# This variable defines the metric which the sytem uses to quantify wastage\n# default: vCPU utilization\nWASTAGE_QUANTIFICATION_METRIC = 'system.cpu.util[all,user,avg1]'\n#----------------------------------------------------------------------\n\nAVAILABLE_INSTANCES_FILE = home+\"/files/availableinstances.json\"\nAVAILABLE_INSTANCES = {}\nINSTANCES_RESOURCES = {}\nINSTANCES_PRICES = {}\n\ndef initialize_instances():\n global AVAILABLE_INSTANCES \n global INSTANCES_PRICES\n global INSTANCES_RESOURCES\n try:\n AVAILABLE_INSTANCES = json.loads((open(AVAILABLE_INSTANCES_FILE, 'r')).read())\n if NOW - AVAILABLE_INSTANCES['timestamp'] > UPDATE_AVAILABLE_INSTANCES:\n AVAILABLE_INSTANCES = {}\n providers = (open(home+'/private/providers', 
\"r\")).read().splitlines()\n for p in providers:\n if p == 'aws':\n AVAILABLE_INSTANCES['aws'] = aws.get_instance_types()\n (open(AVAILABLE_INSTANCES_FILE, 'w+')).write(json.dumps({\n 'timestamp': NOW, 'data': AVAILABLE_INSTANCES}))\n else:\n AVAILABLE_INSTANCES = AVAILABLE_INSTANCES['data']\n for p in AVAILABLE_INSTANCES:\n INSTANCES_RESOURCES[p] = {}\n INSTANCES_PRICES[p] = {}\n for r in AVAILABLE_INSTANCES[p]:\n INSTANCES_RESOURCES[p][r] = {}\n INSTANCES_PRICES[p][r] = {}\n for i in AVAILABLE_INSTANCES[p][r]:\n INSTANCES_RESOURCES[p][r][i] = {}\n INSTANCES_PRICES[p][r][i] = {}\n for a in AVAILABLE_INSTANCES[p][r][i]:\n if a == 'resources':\n INSTANCES_RESOURCES[p][r][i] = AVAILABLE_INSTANCES[p][r][i][a]\n else:\n INSTANCES_PRICES[p][r][i][a] = AVAILABLE_INSTANCES[p][r][i][a]\n\n except (FileNotFoundError, json.decoder.JSONDecodeError):\n AVAILABLE_INSTANCES = {}\n providers = (open(home+'/private/providers', \"r\")).read().splitlines()\n for p in providers:\n if p == 'aws':\n AVAILABLE_INSTANCES['aws'] = aws.get_instance_types()\n (open(AVAILABLE_INSTANCES_FILE, 'w+')).write(json.dumps({\n 'timestamp': NOW, 'data': AVAILABLE_INSTANCES}))\n AVAILABLE_INSTANCES = AVAILABLE_INSTANCES['data']\n for p in AVAILABLE_INSTANCES:\n INSTANCES_RESOURCES[p] = {}\n INSTANCES_PRICES[p] = {}\n for r in AVAILABLE_INSTANCES[p]:\n INSTANCES_RESOURCES[p][r] = {}\n INSTANCES_PRICES[p][r] = {}\n for i in AVAILABLE_INSTANCES[p][r]:\n INSTANCES_RESOURCES[p][r][i] = {}\n INSTANCES_PRICES[p][r][i] = {}\n for a in AVAILABLE_INSTANCES[p][r][a]:\n if a == 'resources':\n INSTANCES_RESOURCES[p][r][i] = AVAILABLE_INSTANCES[p][r][a]\n else:\n INSTANCES_PRICES[p][r][i][a] = AVAILABLE_INSTANCES[p][r][a]\n\n\n\ndef initialize_monitoring():\n global NOW\n global MODE\n global VOLUMES_CALCULATION\n global HEURISTIC_COMPARE\n global HEURISTIC_TYPE\n global AVAILABLE_INSTANCES \n global INSTANCES_PRICES\n global INSTANCES_RESOURCES\n global BUCKET_DEMANDS\n global BUCKET_DEMAND_METRIC\n NOW = int(datetime.timestamp(datetime.utcnow().astimezone(pytz.utc)))\n MODE = 'monitoring'\n VOLUMES_CALCULATION = ['equation-1', 'cost']\n HEURISTIC_COMPARE = 'price'\n HEURISTIC_TYPE = 'cpu'\n initialize_instances()\n\ndef initialize_testing():\n global NOW\n global MODE\n global VOLUMES_CALCULATION\n global HEURISTIC_COMPARE\n global HEURISTIC_TYPE\n global AVAILABLE_INSTANCES \n global INSTANCES_PRICES\n global INSTANCES_RESOURCES\n global BUCKET_DEMANDS\n global BUCKET_DEMAND_METRIC\n NOW = int(datetime.utcnow().replace(tzinfo=timezone.utc).timestamp())\n MODE = 'testing'\n VOLUMES_CALCULATION = ['cost', 'equation-1']\n HEURISTIC_COMPARE = 'price'\n HEURISTIC_TYPE = 'vcpu'\n initialize_instances()\n\ndef initialize_experimenting():\n global NOW\n global MODE\n global VOLUMES_CALCULATION\n global HEURISTIC_COMPARE\n global HEURISTIC_TYPE\n global AVAILABLE_INSTANCES \n global INSTANCES_PRICES\n global INSTANCES_RESOURCES\n global BUCKET_DEMANDS\n global BUCKET_DEMAND_METRIC\n NOW = int(datetime.timestamp(datetime.utcnow().astimezone(pytz.utc)))\n MODE = 'experimenting'\n VOLUMES_CALCULATION = ['cost', 'equation-1']\n HEURISTIC_COMPARE = 'price'\n HEURISTIC_TYPE = 'cpu'\n initialize_instances()\n\n\ndef calculator_init():\n if MODE == 'testing':\n initialize_testing()\n elif MODE == 'monitoring':\n initialize_monitoring()\n elif MODE == 'experimenting':\n initialize_experimenting()\n else:\n logger.error(\"NO MODE 
DETECTED\")","repo_name":"lmcad-unicamp/hpcc-monitor","sub_path":"calculatorsetting.py","file_name":"calculatorsetting.py","file_ext":"py","file_size_in_byte":12458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19702741478","text":"\"\"\"\nJoin lab eICU dataset with the patient.csv eICU dataset based on a specific lab measurement\n\"\"\"\n\nimport csv\n\nimport sys\n\n\ndef join(file2, file1, labname):\n with open(\"../../../data/eicu/\" + file1, 'r') as csv_file1:\n csv_reader1 = csv.reader(csv_file1, delimiter=',')\n\n id_to_lab_result = {}\n id_col1 = 1\n for row in csv_reader1:\n if row[4] == labname:\n id_to_lab_result[row[id_col1]] = row[5]\n\n with open(\"../../../data/eicu/\" + file2, 'r') as csv_file2:\n csv_reader2 = csv.reader(csv_file2, delimiter=',')\n with open(\"../../../data/eicu/\" + file1 + \".\" + file2, 'w') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',')\n for row in csv_reader2:\n id = row[0]\n row.append(id_to_lab_result[id] if id in id_to_lab_result else \"\")\n csv_writer.writerow(row)\n\n\n########################################################################\n\n\ndata1 = sys.argv[1] # Patient.csv dataset (each row is a unique patient GUID in file)\ndata2 = sys.argv[2] # Lab file\nlabname = sys.argv[3]\n\njoin(data1, data2, labname)\n\n","repo_name":"tbj128/CS221-Project","sub_path":"src/extractors/eicu/join_datasets_by_guid.py","file_name":"join_datasets_by_guid.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1630936970","text":"import random\nimport time\nimport math\nimport copy\nimport os\n\nimport pygame\n\nfrom tools import *\nfrom blocks import *\n\npygame.init()\n\nplayer1_img = {\n 'down': pygame.image.load('img/characters/P1/down.gif'),\n 'right': pygame.image.load('img/characters/P1/right.gif'),\n 'left': pygame.image.load('img/characters/P1/left.gif'),\n 'up': pygame.image.load('img/characters/P1/up.gif'),\n 'down_hurt': pygame.image.load('img/characters/P1/down_hurt.gif'),\n 'right_hurt': pygame.image.load('img/characters/P1/right_hurt.gif'),\n 'left_hurt': pygame.image.load('img/characters/P1/left_hurt.gif'),\n 'up_hurt': pygame.image.load('img/characters/P1/up_hurt.gif')\n}\n\n\nplayer2_img = {\n 'down': pygame.image.load('img/characters/P2/down.gif'),\n 'right': pygame.image.load('img/characters/P2/right.gif'),\n 'left': pygame.image.load('img/characters/P2/left.gif'),\n 'up': pygame.image.load('img/characters/P2/up.gif'),\n 'down_hurt': pygame.image.load('img/characters/P2/down_hurt.gif'),\n 'right_hurt': pygame.image.load('img/characters/P2/right_hurt.gif'),\n 'left_hurt': pygame.image.load('img/characters/P2/left_hurt.gif'),\n 'up_hurt': pygame.image.load('img/characters/P2/up_hurt.gif')\n}\n\n\ndef distance(a, b):\n x1, y1 = a\n x2, y2 = b\n return abs(math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2))\n\n\nclass Player:\n speed = 0.15\n hit_speed = 0.5\n flash_time = 0.15\n \n @classmethod\n def find_spot(cls, map, block_size, other_pos):\n spot = [0, 0]\n spot[0] = random.randint(5, len(map[0]) - 7)\n spot[1] = random.randint(0, len(map) - 2)\n \n while map[spot[1]][spot[0]] != None and distance(spot, other_pos) > 5:\n spot[0] = random.randint(5, len(map[0]) - 7)\n spot[1] = random.randint(0, len(map) - 2)\n\n spot[0] *= block_size\n spot[1] *= block_size\n spot = [spot[0] + 15, spot[1] + 15]\n return spot\n\n def __init__(self, map, controls, use_key, other_pos, 
player_img, turn_key):\n self.position = Player.find_spot(map, 56, other_pos)\n self.player_img = player_img\n self.other_pos = other_pos\n self.controls = controls\n self.move_time = Player.speed\n self.hit_time = Player.hit_speed\n self.direction = 'down'\n self.use_key = use_key\n self.being_hurt = False\n self.last_flashed = 0\n self.last_added = 0\n self.level = 0\n self.index = 0\n self.turn_key = turn_key\n\n self.inventory = [WoodWall, Windmill]#, StoneWall, SpikyWall,\n #FasterWindmill, BoosterPad, FastestWindmill, VerySpikyWall]\n self.tools = [Standard()]\n self.resources = {\n 'wood': 0,\n 'stone': 0,\n 'food': 0\n }\n \n self.max_health = 60\n self.health = self.max_health\n self.coins = 0\n self.xp = 0\n\n def render(self, screen):\n image = self.player_img[self.direction + ('' if not self.being_hurt else '_hurt')]\n screen.blit(image, [self.position[0], self.position[1]])\n\n def flash(self):\n self.being_hurt = True\n self.last_flashed = time.time()\n\n def stop_flash(self):\n if time.time() - self.last_flashed > Player.flash_time:\n self.being_hurt = False\n\n def moveable(self):\n return time.time() - self.move_time > Player.speed\n\n def hitable(self):\n return time.time() - self.hit_time > Player.hit_speed\n \n\n def move(self, direction, map, other_player, players, turning=False):\n self.direction = direction\n self.move_time = time.time()\n\n if turning: return\n\n inventory = self.tools + ['food'] + self.inventory\n \n old_pos = copy.deepcopy(self.position)\n \n if direction == 'up':\n self.position[1] -= 56\n elif direction == 'down':\n self.position[1] += 56\n elif direction == 'left':\n self.position[0] -= 56\n elif direction == 'right':\n self.position[0] += 56\n\n try:\n block = map[self.position[1] // 56][self.position[0] // 56]\n except IndexError:\n pass\n \n if self.position[1] // 56 > len(map) - 1 or \\\n self.position[0] // 56 > len(map[0]) - 6 or \\\n self.position[0] < 300 or \\\n self.position[1] < 0:\n # Undo the move\n self.position = old_pos\n elif block != None:\n if not isinstance(block, BoosterPad):\n self.position = old_pos\n\n # Give resources\n if self.hitable() and isinstance(inventory[self.index], Tool):\n if isinstance(block, Tree):\n self.resources['wood'] += inventory[self.index].resources\n elif isinstance(block, Mine):\n self.resources['stone'] += inventory[self.index].resources\n elif isinstance(block, FruitTree):\n self.resources['food'] += inventory[self.index].resources\n elif isinstance(block, GoldMine):\n self.coins += inventory[self.index].resources\n elif isinstance(block, SpikyWall) or isinstance(block, VerySpikyWall):\n block.health -= inventory[self.index].wall_damage // 2\n block.last_player = self\n self.health -= block.damage_per_hit\n self.flash()\n elif isinstance(block, BoosterPad):\n pass\n else:\n block.health -= inventory[self.index].wall_damage // 2\n block.last_player = self\n\n self.xp += 1\n\n self.hit_time = time.time()\n\n if self.position == other_player.position:\n self.position = old_pos\n \n other_player.health -= 5\n other_player.flash()\n \n self.health -= 5\n self.flash()\n \n\n","repo_name":"ruffleduck/LandOfBloks-pygame","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":6000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36851558892","text":"import sys\nimport qdarkstyle\nfrom PyQt5 import QtGui, QtWidgets, QtCore\nfrom mainwindow import Ui_MainWindow\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import 
*\nfrom PyQt5.QtGui import *\nimport cv2\nimport numpy as np\n#from CameraClassWrapper import IDSCam\nimport csv\nimport os\nimport math \nfrom scipy.optimize import curve_fit\n#import nicelib\n#from instrumental import instrument, list_instruments\n#from instrumental.drivers.cameras import uc480\n\nimport os, time\n#from queue import Queue\nfrom camWork import camWork\n#from pyueye import ueye\n#from pyueye_example_utils import (MemoryInfo, uEyeException, Rect, get_bits_per_pixel,ImageBuffer, check)\n\nimageSizeUIx = 600\nimageSizeUIy = 480\n\n\ndef readSpec(specName, delimiter):\n x = np.array([])\n y = np.array([])\n with open(specName, newline='') as f:\n fr = csv.reader(f, delimiter=delimiter, quotechar='|')\n for row in fr:\n if row != []:\n y = np.append(y, row[0])\n x = np.append(x, row[1])\n \n x = x.astype(np.float)\n y = y.astype(np.float)\n return y, x\ntx, ty = readSpec('transmission_losgatos.dat', ' ') \n\ndef func(x, a, b, c, d, e, f):\n return (a + b * x + c * x ** 2 + d * x ** 3 + e * x ** 4 + f * x ** 5)\n \ndef transM(y, x, ty, tx):\n \n xmin = min(np.where(tx >= min(x))[0])\n xmax = max(np.where(tx <= max(x))[0])\n xmax = max(np.where(tx <= max(x))[0])\n tx1 = tx[xmin:xmax]\n ty1 = ty[xmin:xmax]\n popt, pcov = curve_fit(func, tx1, 1/ty1)\n y1 = func(x,*popt)\n y1 = (y1 > 0) * y1\n y = y * y1\n y = y / y.max()\n \n return y,x\n \ndef checkName(file, add):\n fileList = os.listdir('data')\n fileGood = False\n ii = -1\n while fileGood is not True:\n ii += 1\n if file + str(ii) + add not in fileList:\n file = file + str(ii)\n fileGood = True\n file = 'data/' + file + add\n return file\n \ndef writeSpec(file, spec):\n file = checkName(file, '.csv')\n \n with open(file, mode='w', newline='') as f:\n fw = csv.writer(f, delimiter = ',')\n for ii in range(spec[0].shape[0]):\n fw.writerow([spec[0][ii], spec[1][ii]])\n \ndef makeSpec(arr, topY, bottomY, pointR, pointG, waveR, waveG, backGround, backGroundUse, specLogUse):\n arr3 = np.copy(arr)\n backGround = np.copy(backGround)\n if len(arr3.shape) == 3:\n arr3 = cv2.cvtColor(arr3, cv2.COLOR_BGR2GRAY)\n \n #threshold = arr3.max()/5\n #arr3 = (arr3 > threshold) * arr3\n if pointR == pointG: \n pointR = pointR + 1\n b = (waveR - waveG) / (pointR - pointG)\n a = waveR - b * pointR\n #topY = int(topY * arr3.shape[0]/imageSizeUIy)\n #bottomY = int(bottomY * arr3.shape[0]/imageSizeUIy)\n arr3[0:bottomY] = 0\n arr3[topY:] = 0\n pixSum = np.array([])\n pixSum = pixSum.astype(int)\n pixSum = np.sum(arr3, axis = 0)\n #print(max(pixSum))\n if backGroundUse and backGround.shape == arr3.shape: \n #print(np.sum(backGround - backGround.astype(int)))\n #backGround = backGround.astype(int)\n backGround[0:bottomY] = 0\n backGround[topY:] = 0\n backSum = np.array([])\n backSum = np.sum(backGround, axis = 0)\n #print(max(backSum))\n diffSum = np.array([])\n diffSum = (pixSum > backSum) * (pixSum - backSum)\n #print(max(diffSum))\n yAxes = (diffSum != 0) * (diffSum/ max(diffSum)) #erase condition?\n else:\n yAxes = (pixSum != 0) * (pixSum/ max(pixSum))\n \n xAxes = np.linspace(a + b * 0, a + b * arr3.shape[1], arr3.shape[1])\n if specLogUse:\n yAxes, xAxes = transM(yAxes, xAxes, ty, tx)\n #if not (np.isnan(yAxes[0])) and yAxes[0] > 0:\n #yAxes, xAxes = transM(yAxes, xAxes)\n return yAxes, xAxes\n\ndef getCenter(image):\n image = np.copy(image)\n if len(image.shape) == 3:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n #threshold = image.max()/4\n #image = (image>=threshold)*image\n #moments = cv2.moments(image, 0) \n #dM01 = moments['m01'] # pixel 
sum in row\n #dM10 = moments['m10'] # pixel sum in column\n #dArea = moments['m00'] # pixel sum\n yAxes = np.sum(image, axis = 0) / ((max(np.sum(image, axis = 0))) + 1)\n #xAxes = np.linspace(a + b * 0, a + b * arr3.shape[1], arr3.shape[1])\n x = np.argmax(yAxes)\n y = 0\n #if dArea:\n # x = int(dM10 / dArea)\n # y = int(dM01 / dArea) \n #else :\n # y, x = image.shape\n # y = y // 2\n # x = x // 2\n return (int(x),int(y))\n \nclass Thread(QThread):\n changePixmap = pyqtSignal(QImage)\n changePixmap2 = pyqtSignal(tuple)\n \n changeFps = pyqtSignal(float)\n changeExposure = pyqtSignal(float)\n changeBoost = pyqtSignal(int)\n \n getCenterL = pyqtSignal(int)\n getCenterR = pyqtSignal(int)\n save = False\n backTake = False\n backGroundUse = False\n gainBoost = False\n specLogUse = False\n backGround = np.array([])\n backGroundAvg = []\n arrAvL = []\n topY = 640\n bottomY = 0\n WVLeft = 0\n WVRight = 10\n pointWVLeft = 10\n pointWVRight = 580\n fps = 3\n exposure = 0.02\n boost = 2000\n triggerDelay = 9960\n avgVid = 1\n findCenterLeft = False\n findCenterRight = False\n exposureMaxChecked = False\n iAv = 0\n iFAv = 0\n #camCommandSignal = pyqtSignal([list])\n cam = camWork(20)\n \n @pyqtSlot(int)\n def takeTopLevelS(self, topY):\n self.topY = topY\n \n @pyqtSlot(int)\n def takeBottomLevelS(self, bottomY):\n self.bottomY = bottomY\n \n @pyqtSlot(int)\n def takepointWVLeftS(self, pointWVLeft):\n self.pointWVLeft = pointWVLeft\n \n @pyqtSlot(int)\n def takepointWVRightS(self, pointWVRight):\n self.pointWVRight = pointWVRight\n\n @pyqtSlot(float)\n def takeWVLeftS(self, WVLeft):\n self.WVLeft = WVLeft\n\n @pyqtSlot(float)\n def takeWVRightS(self, WVRight):\n self.WVRight = WVRight\n \n @pyqtSlot()\n def takeCalimPointLeftS(self):\n self.findCenterLeft = True \n \n @pyqtSlot()\n def takeCalimPointRightS(self):\n self.findCenterRight = True\n \n @pyqtSlot(float)\n def takeFpsS(self, fps):\n self.fps = fps\n self.cam.SetFramerate(self.fps)\n \n @pyqtSlot(float)\n def takeExposureS(self, exposure):\n self.exposure = exposure\n self.cam.SetExposure(self.exposure)\n \n @pyqtSlot(int)\n def takeBoostS(self, boost):\n self.boost = boost\n self.cam.SetGain(self.boost)\n \n @pyqtSlot(int)\n def takeTriggerDelayS(self, triggerDelay):\n self.triggerDelay = triggerDelay\n self.cam.setTriggerDelay(self.triggerDelay)\n \n @pyqtSlot(int)\n def takeAvgVidS(self, avgVid):\n self.avgVid = avgVid\n \n @pyqtSlot()\n def saveS(self):\n self.save = True\n\n @pyqtSlot()\n def backTakeS(self):\n self.backTake = True\n \n @pyqtSlot(bool)\n def backUseS(self, checked):\n self.backGroundUse = checked\n \n @pyqtSlot(bool)\n def gainBoostS(self, checked):\n self.cam.setGainBoost(checked)\n \n #self.gainBoost = checked\n \n @pyqtSlot(bool)\n def triggerS(self, checked):\n self.cam.setExternalTrigger(checked)\n self.cam.setTriggerDelay(self.triggerDelay)\n #self.gainBoost = checked\n \n @pyqtSlot(bool)\n def startStopS(self, checked):\n self.cam.freezeVIdeo(checked)\n \n @pyqtSlot(bool)\n def exposureMaxS(self, checked):\n self.exposureMaxChecked = checked\n \n @pyqtSlot(bool)\n def SpecLogS(self, checked):\n self.specLogUse = checked\n \n def run(self):\n self.cam.SetFramerate(self.fps)\n self.cam.SetExposure(self.exposure)\n self.cam.SetGain(self.boost)\n\n #self.cam.GetExposure(),\n #self.cam.GetFramerate(),\n #self.cam.GetPixelclock()\n self.cam.captureVideo()\n \n iSum = 0\n arrSum = []\n \n while True: \n frame = self.cam.takeImage()\n self.iFAv = self.iFAv + 1\n self.arrAvL.append(np.copy(frame))\n \n if self.iFAv == 
self.avgVid:\n arr = np.sum(self.arrAvL, axis = 0) / self.iFAv #np.copy(frame)\n arr = arr.astype(dtype = 'uint8')\n self.arrAvL = []\n self.iFAv = 0\n \n \n #here we compare real values and ui values\n if(self.boost != self.cam.GetGain()):\n self.boost = self.cam.GetGain()\n self.changeBoost.emit(self.boost)\n \n if(self.exposure != self.cam.GetExposure()):\n self.exposure = self.cam.GetExposure()\n self.changeExposure.emit(self.exposure)\n \n if(self.fps != self.cam.GetFramerate()):\n self.fps = self.cam.GetFramerate()\n self.changeFps.emit(self.fps)\n \n if self.exposureMaxChecked:\n if self.cam.getExposureMax != self.cam.GetExposure():\n self.cam.setExposureMax()\n \n if self.backTake:\n self.backGroundAvg.append(arr)\n self.iAv += 1\n if self.iAv == 1:\n self.backGroundAvg = np.array(self.backGroundAvg)\n self.backGround = np.sum(self.backGroundAvg, axis = 0)/ self.iAv\n self.backGround = self.backGround.astype(dtype = 'uint8') #dtype = 'uint')\n self.backGroundAvg = []\n self.iAv = 0\n self.backTake = False\n \n spectrum = makeSpec(arr, self.topY, self.bottomY, self.pointWVRight, self.pointWVLeft, self.WVRight, self.WVLeft, self.backGround, self.backGroundUse, self.specLogUse)\n \n #arr[0:self.topY] = 0\n #arr[self.bottomY:] = 0\n #arr[0:self.bottomY] = 0\n #arr[self.topY:] = 0\n if self.findCenterLeft == True:\n self.findCenterLeft = False\n xCenterL, yCenterL = getCenter(arr)\n self.getCenterL.emit(xCenterL)\n\n if self.findCenterRight == True:\n self.findCenterRight = False\n xCenterR, yCenterR = getCenter(arr)\n self.getCenterR.emit(xCenterR)\n \n if self.backGround.shape == arr.shape and self.backGroundUse:\n arr = (arr>self.backGround) * (arr - self.backGround)\n \n if len(arr.shape) == 3:\n h, w, ch = arr.shape \n bytesPerLine = ch * w\n convertToQtFormat = QtGui.QImage(arr.data, w, h, bytesPerLine, QtGui.QImage.Format_Grayscale8)\n else:\n h, w = arr.shape\n bytesPerLine = w\n \n convertToQtFormat = QtGui.QImage(arr.data, w, h, bytesPerLine, QtGui.QImage.Format_Grayscale8)\n p = convertToQtFormat.scaled(imageSizeUIx, imageSizeUIy)#, Qt.KeepAspectRatio)\n #print(p.height())\n #print(p.width())\n painter = QPainter()\n painter.begin(p)\n painter.setPen(QPen(Qt.blue, 3))\n topYUI = int(self.topY * imageSizeUIy / arr.shape[0])\n bottomYUI = int(self.bottomY * imageSizeUIy / arr.shape[0])\n #print(self.bottomY)\n #print(topYUI)\n pointWVRightUI = self.pointWVRight * imageSizeUIx / arr.shape[1]\n pointWVLeftUI = self.pointWVLeft * imageSizeUIx / arr.shape[1]\n painter.drawLine(0, topYUI, imageSizeUIx, topYUI)\n painter.drawLine(0, bottomYUI, imageSizeUIx, bottomYUI)\n painter.setPen(QPen(Qt.green, 3))\n painter.drawLine(pointWVRightUI, 0, pointWVRightUI, imageSizeUIy)\n painter.drawLine(pointWVLeftUI, 0, pointWVLeftUI, imageSizeUIy)\n \n painter.end() \n self.changePixmap.emit(p)\n self.changePixmap2.emit(spectrum)\n \n if self.save:\n self.save = False\n fileIm = checkName('img', '.png')\n cv2.imwrite(fileIm, arr)\n writeSpec('spec', spectrum)\n \ndef main():\n app = QtWidgets.QApplication( sys.argv )\n app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi( MainWindow )\n #ui.graphicsVideo\n #image = QPixmap(\"lolo.png\")\n #image = image.scaled(ui.graphicsVideoL.width(), ui.graphicsVideoL.width(), Qt.KeepAspectRatio, Qt.FastTransformation)\n #ui.setImage(image)\n #ui.setImage(image)\n\n MainWindow.show()\n app.aboutToQuit.connect(ui.closeEvent)\n th = Thread()\n th.changePixmap.connect(ui.setImage)\n 
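# Descriptive note on the two connections above and below: the camera thread exposes two Qt signals wired to the UI here; changePixmap carries the annotated QImage frame for the video view, and changePixmap2 carries the (intensity, wavelength) spectrum tuple built by makeSpec for the plot widget.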
th.changePixmap2.connect(ui.setSpectrum)\n ui.takeTopLevel.connect(th.takeTopLevelS)\n ui.takeBottomLevel.connect(th.takeBottomLevelS)\n ui.takeWVLeft.connect(th.takeWVLeftS)\n ui.takeWVRight.connect(th.takeWVRightS)\n ui.takePointWVLeft.connect(th.takepointWVLeftS)\n ui.takePointWVRight.connect(th.takepointWVRightS)\n ui.takeCalimPointLeft.connect(th.takeCalimPointLeftS)\n ui.takeCalimPointRight.connect(th.takeCalimPointRightS)\n ui.takeFps.connect(th.takeFpsS)\n ui.takeExposure.connect(th.takeExposureS)\n ui.takeBoost.connect(th.takeBoostS)\n ui.takeTriggerDelay.connect(th.takeTriggerDelayS)\n ui.takeAvgVid.connect(th.takeAvgVidS)\n ui.pushButtonSave.clicked.connect(th.saveS)\n ui.pushButtonBackTake.clicked.connect(th.backTakeS)\n ui.backUse.connect(th.backUseS)\n ui.gainBoost.connect(th.gainBoostS)\n ui.trigger.connect(th.triggerS)\n ui.startStop.connect(th.startStopS)\n ui.exposureMax.connect(th.exposureMaxS)\n \n ui.SpecLog.connect(th.SpecLogS)\n th.changeFps.connect(ui.spinBoxFps.setValue)\n th.changeExposure.connect(ui.spinBoxExposure.setValue)\n th.changeBoost.connect(ui.spinBoxBoost.setValue)\n \n th.getCenterL.connect(ui.spinBoxPointWLLeft.setValue)\n th.getCenterR.connect(ui.spinBoxPointWLRight.setValue)\n #th..connect(ui.checkBoxGainBoost.isChecked)\n \n th.start()\n \n ui.spinBoxTop.setValue(999)\n ui.spinBoxBottom.setValue(452)\n ui.doubleSpinBoxWLLeft.setValue(587.49 )\n ui.doubleSpinBoxWLRight.setValue(578.74)\n ui.spinBoxPointWLRight.setValue(445)\n ui.spinBoxPointWLLeft.setValue(100)\n \n sys.exit( app.exec_() )\n \nif __name__ == \"__main__\":\n main()","repo_name":"kzsanam/lab","sub_path":"spec/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"19409345867","text":"import numpy as np\r\nimport cv2\r\n\r\n\r\ndef removeBackGround(frame, lower_color, higher_color):\r\n hsvFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n # extracting only the pitch\r\n mask = cv2.inRange(hsvFrame, lower_color, higher_color)\r\n mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)\r\n # maskedFrame = cv2.bitwise_and(hsvFrame, hsvFrame, mask=mask)\r\n maskedFrame = frame & mask\r\n\r\n # convert hsv to gray for thresholding\r\n maskedFrame_gray = cv2.cvtColor(maskedFrame, cv2.COLOR_BGR2GRAY)\r\n\r\n # Performing closing to remove noise\r\n kernel = np.ones((10, 10), np.uint8)\r\n thresholdedMask = cv2.threshold(maskedFrame_gray, 0, 255, cv2.THRESH_BINARY)[1]\r\n thresholdedMask = cv2.morphologyEx(thresholdedMask, cv2.MORPH_CLOSE, kernel)\r\n thresholdedMask = cv2.cvtColor(thresholdedMask, cv2.COLOR_GRAY2BGR)\r\n\r\n # subtracting to get only the players without the background\r\n removedBackground = frame - (frame & thresholdedMask)\r\n\r\n return removedBackground\r\n\r\n\r\ndef imgHistogram (image, mask=None, maskFlag = 0, channelNo = 0):\r\n imageHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\r\n\r\n # if maskFlag=1 and mask is None, the histogram covers the whole image\r\n # if maskFlag=0, only the parts of the image selected by the mask are represented in the histogram\r\n if maskFlag != 1:\r\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\r\n thresholdedMask = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)[1]\r\n else:\r\n thresholdedMask = mask\r\n\r\n # hist will be a list of upperHistRange elements, each holding the number of pixels with this value\r\n # (H, for example, if channelNo is 0)\r\n if channelNo == 0:\r\n upperHistRange = 180\r\n else:\r\n upperHistRange = 
256\r\n hist = cv2.calcHist([imageHSV], [channelNo], thresholdedMask, [upperHistRange], [0, upperHistRange])\r\n return hist\r\n\r\n\r\ndef maxRangeFromHisto (maxIndexH):\r\n # assigning every color to a certain range according to the hist max value\r\n startIndex = 0\r\n endIndex = 0\r\n if maxIndexH <= 19 or maxIndexH > 170: # Red\r\n startIndex = 170\r\n endIndex = 19\r\n elif maxIndexH <= 31: # Yellow\r\n startIndex = 20\r\n endIndex = 31\r\n elif maxIndexH <= 60: # Green stadium\r\n startIndex = 32\r\n endIndex = 60\r\n elif maxIndexH <= 88: # dark green\r\n startIndex = 61\r\n endIndex = 88\r\n # elif maxIndexH <= 103: # light blue\r\n # startIndex = 89\r\n # endIndex = 103\r\n elif maxIndexH <= 169: # blue\r\n startIndex = 104\r\n endIndex = 169\r\n\r\n return startIndex, endIndex\r\n\r\n\r\ndef calculateChangeColor(newColor, originalColor):\r\n # this function computes the color shift to add to the original color so that it becomes the new color chosen by the user\r\n\r\n # detecting if the color is dark blue\r\n blue = False\r\n darkblue = False\r\n if 105 <= originalColor[0] <= 168:\r\n blue = True\r\n if blue and originalColor[0] > 119:\r\n darkblue = True\r\n if blue and originalColor[1] > 35:\r\n darkblue = True\r\n\r\n temp = np.mod((newColor[0] - originalColor[0]), 180)\r\n\r\n # if the color is dark blue, we have to change S and V of the transfer color\r\n if darkblue:\r\n temp1 = (255 - originalColor[1])/2\r\n temp2 = (255 - originalColor[2])/2\r\n else:\r\n temp1 = 0\r\n temp2 = 0\r\n\r\n return np.array([temp, temp1, temp2])\r\n\r\n\r\ndef detectSuitableFrame(img):\r\n # used to detect a frame in the video, which contains green background and players (wide shot frame)\r\n # this frame is used for color detection\r\n hist = imgHistogram(img, None, 1, 0)\r\n sumi = 0\r\n for i in range(32, 61):\r\n sumi = sumi + hist[i][0]\r\n average = sumi/(61-32)\r\n if average > 20000 and average < 25000:\r\n return True\r\n return False \r\n\r\n","repo_name":"3omar3allam/salah-ly","sub_path":"OpenCV_Processing/AuxFunctions.py","file_name":"AuxFunctions.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"69814032083","text":"import random\r\neasy = 10\r\nhard = 5\r\nnumber = random.randint(1,100)\r\nprint(\"--Welcome to Guess The Number--\")\r\nprint(\"I'm thinking of a number between 1 and 100\")\r\nlevel = input(\"Enter Difficulty 'easy' or 'hard' \")\r\n\r\ndef gameplay(x):\r\n print(f\"You will have {x} Guesses\")\r\n print(number)\r\n while(x != 0):\r\n print(f\"You have {x} attempts remaining to guess the number\")\r\n guess_no=int(input(\"Guess The Number :\"))\r\n if(number == guess_no):\r\n print(\"You guessed it correctly\")\r\n break\r\n elif number < guess_no:\r\n print(\"Too High\")\r\n x -=1\r\n elif number > guess_no:\r\n print(\"Too Low\")\r\n x -=1\r\n if x==0:\r\n print(\"Game Over. 
You haven't guessed the number\")\r\n\r\nif level==\"easy\":\r\n gameplay(easy)\r\nelif level==\"hard\":\r\n gameplay(hard)\r\n\r\n\r\n","repo_name":"Aditya-wani02/Python","sub_path":"u-py/GuessTheNumber.py","file_name":"GuessTheNumber.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20164299912","text":"from flask import Flask, render_template, request, Markup\nfrom flask_socketio import SocketIO, Namespace, emit, disconnect\nimport time, math, select, os\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app, async_mode=None)\n\nweb_port = 33333\n\ndefault_actions = \\\n {'-10_': ['ir03111800','ir03111000','KEY_LEFT' ,'gpio6' ,'rf3F402350','rf3F405350'],\n '-1_': ['ir03201800','ir03201000','KEY_UP' ,'gpio27','rf77082350','rf77085350'],\n '+1_': ['ir03211800','ir03211000','KEY_DOWN' ,'gpio22','rf7B042350','rf7B045350'],\n '+10_': ['ir03101800','ir03101000','KEY_RIGHT' ,'gpio5' ,'rf5F202350','rf5F205350'],\n 'auto_':['ir030C1000','ir030C1800','KEY_POWER' ,'gpio17','rf7E012350','rf7E015350'],\n 'menu_':['ir030D1000','ir030D1800','KEY_MUTE' ,'gpio23','rf7D022350','rf7D025350'],\n 'mode_':['ir030B1000','ir030B1800','KEY_SELECT','gpio18','rf6F102350','rf6F105350'],\n\n 'tack port': ['rf37482350', 'rf37485350', 'rf37482950', 'rf37485950'],\n 'tack starboard': ['rf5B242350', 'rf5B245350', 'rf5B242950', 'rf5B245950'],\n\n '-10': ['rf3F402950', 'rf3F405950'],\n '-1': ['rf77082950', 'rf77085950'],\n '+1': ['rf7B042950', 'rf7B045950'],\n '+10': ['rf5F202950', 'rf5F205950'],\n 'standby': ['rf7E012950', 'rf7E015950'],\n 'compass mode': ['rf7D022950', 'rf7D025950'],\n 'gps mode': ['rf6F102950', 'rf6F105950'],\n 'nav mode': ['rf6D122950', 'rf6D125950'],\n 'wind mode': ['rf7F002930', 'rf7F005930'],\n 'true wind mode': ['rf6F102930', 'rf6F105930']\n }\n\ntry:\n from flask_babel import Babel, gettext\n babel = Babel(app)\n\n LANGUAGES = os.listdir(os.path.dirname(os.path.abspath(__file__)) + '/translations')\n\n @babel.localeselector\n def get_locale():\n return request.accept_languages.best_match(LANGUAGES)\n \nexcept Exception as e:\n print('failed to import flask_babel, translations not possible!!', e)\n def _(x): return x\n app.jinja_env.globals.update(_=_)\n babel = None\n\nclass WebConfig(Namespace):\n def __init__(self, name, pipe, config):\n super(Namespace, self).__init__(name)\n socketio.start_background_task(target=self.background_thread)\n self.pipe = pipe\n self.config = config\n self.status = 'N/A'\n self.profiles = False\n\n self.last_key = False\n\n ind = 0\n acts = ['', '']\n names = Markup('[')\n cols = 1\n col = 0\n acts[ind] += Markup('

    ')\n i = 0\n actions = config['actions']\n for name in actions:\n if name.startswith('profile '):\n continue\n\n n = name.replace(' ', '_')\n n = n.replace('+', 'plus')\n if i == 7:\n acts[ind] += Markup('
    ')\n ind = 1\n acts[ind] += Markup('')\n col = 0\n i+=1\n \n if col == 0:\n acts[ind] += Markup('')\n acts[ind] += Markup('')\n if col == cols-1:\n acts[ind] += Markup('')\n col = 0\n else:\n col += 1\n names += Markup('\"' + n + '\", ')\n\n acts[ind] += Markup('
    ')\n names += Markup('\"\"]')\n\n adc_channels = Markup('')\n for i in range(3):\n #adc_channels += Markup('
    ')\n adc_channels += Markup('
    ')\n adc_channels += Markup('Channel ' + str(i))\n adc_channels += Markup('')\n adc_channels += Markup('
    ')\n \n ir = Markup(' raspberry')\n ir += Markup(' arduino')\n \n nmea = Markup(' Input Output')\n\n remote = Markup('')\n\n @app.route('/')\n def index():\n return render_template('index.html', async_mode=socketio.async_mode, web_port=web_port, actionkeys = acts, action_names = names, adc_channels = adc_channels, ir_settings = ir, nmea_settings = nmea, remote_settings = remote)\n\n def on_ping(self):\n emit('pong')\n\n def on_keys(self, command):\n actions = self.config['actions']\n if command == 'clear':\n for name in actions:\n actions[name] = []\n self.emit_keys()\n return\n\n if command == 'default':\n for name in actions:\n actions[name] = []\n\n for name, keys in default_actions.items():\n actions[name] = keys.copy()\n\n self.emit_keys()\n return\n\n if command.startswith('clearcodes'):\n command = command[10:]\n if command in actions:\n actions[command] = []\n self.emit_keys()\n return\n\n if not self.last_key:\n return\n\n # remove this key from any actions\n for name, keys in actions.items():\n while self.last_key in keys:\n keys.remove(self.last_key)\n\n # add the last key to the action\n if command != 'none':\n if not command in actions:\n actions[command] = []\n actions[command].append(self.last_key)\n self.emit_keys()\n\n def on_config(self, config):\n self.pipe.send(config)\n\n def emit_keys(self):\n actions = self.config['actions']\n for name, keys in actions.items():\n keys = {'name': name.replace(' ', '_'), 'keys': keys}\n socketio.emit('action_keys', keys)\n self.pipe.send({'actions': actions})\n\n def on_connect(self):\n if self.profiles:\n socketio.emit('profiles', self.profiles)\n self.emit_keys()\n\n print('web client connected', request.sid)\n\n\n def on_disconnect(self):\n print('web client disconnected', request.sid)\n\n def background_thread(self):\n print('web process on port', web_port)\n last_key_time = time.monotonic()\n x = 0\n polls_sent = {}\n while True:\n socketio.sleep(.5)\n\n if self.last_key:\n dtc = time.monotonic() - last_key_time\n if dtc > 8:\n self.last_key = False\n socketio.emit('key', 'N/A')\n socketio.emit('action', '')\n\n if not self.pipe:\n continue\n\n while True:\n msg = self.pipe.recv()\n if not msg:\n break\n\n if 'key' in msg:\n self.last_key = msg['key']\n last_key_time = time.monotonic()\n for name in msg:\n d = msg[name]\n if name != 'profiles':\n d = str(d)\n socketio.emit(name, d)\n if 'status' in msg:\n self.status = msg['status']\n #socketio.emit('status', self.status)\n if 'profiles' in msg:\n self.profiles = msg['profiles']\n self.emit_keys()\n\ndef web_process(pipe, config):\n print('web process', os.getpid())\n path = os.path.dirname(__file__)\n os.chdir(os.path.abspath(path))\n socketio.on_namespace(WebConfig('', pipe, config))\n socketio.run(app, debug=False, host='0.0.0.0', port=web_port)\n \nif __name__ == '__main__':\n config = {'host': 'localhost', 'actions': {},\n 'pi.ir': True, 'arduino.ir': False,\n 'arduino.nmea.in': False, 'arduino.nmea.out': False,\n 'arduino.nmea.baud': 4800,\n 'lcd': {},\n 'actions': default_actions.copy()}\n web_process(None, config)\n","repo_name":"pypilot/pypilot","sub_path":"hat/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":9696,"program_lang":"python","lang":"en","doc_type":"code","stars":210,"dataset":"github-code","pt":"3"} +{"seq_id":"73166602002","text":"\"\"\"\nab test service related content\n\"\"\"\n# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Time : 2021/10/19\n# @Author : jiang.hu\n# @File : __init__.py.py\n\n# pylint: disable=E0401\nimport datetime\nimport 
hashlib\nimport json\n\nfrom ab_test.abtest_user.ab_client import ABTestClient\nfrom ab_test.bucket.bucket_operation import BucketOperation\nfrom ab_test.bucket.bucket_param import BucketParam\nfrom ab_test.config.extra_config import PARAM_CONFIG_FILE\nfrom ab_test.experiment.experiment_operation import ExperimentalOperation\nfrom ab_test.layer.layer_operation import LayerOperation\nfrom ab_test.scene.scene_operation import SceneOperation\nfrom ab_test.strategy.diversion_strategy import DiversionStrategy\nfrom ab_test.strategy.diverter import Diverted\nfrom ab_test.strategy.driver_white_list_type import DiverWhiteListType\nfrom ab_test.strategy.white_list_info import WhiteListInfo\nfrom ab_test.util.resolve_input_parameters import resolve_params\nfrom common import Singleton\nfrom common.mysql_util import MySqlDbUtil\n\nfrom common.name_template import gen_ab_name, gen_recall_group_stra_name, gen_fusion_stra_name\n\n# pylint: disable=R0903,R0205\nfrom conf.config import MysqlConf, logger\nfrom infoflow.config import ChannelInfoConfig\n\nmysql_db = MySqlDbUtil(**MysqlConf.HADOOP_DB_DICT)\nmysql_db.connect()\n\n# white-list strategy, default id is 1\nWHITE_LIST = 1\n\n\n# @Singleton\nclass ABService(object):\n \"\"\"\n ab test service\n \"\"\"\n\n def __init__(self):\n self.eop = ExperimentalOperation(mysql_db)\n self.sop = SceneOperation(mysql_db)\n self.lop = LayerOperation(mysql_db)\n self.bop = BucketOperation(mysql_db)\n self.ds = DiversionStrategy(mysql_db)\n self.bp = BucketParam(mysql_db)\n self.dwlt = DiverWhiteListType(mysql_db)\n self.wli = WhiteListInfo(mysql_db)\n\n # pylint: disable=R0201,W0613\n def abtest(self, params: dict) -> dict:\n \"\"\"\n :param params:\n :return:\n \"\"\"\n # user_id = params[\"user_id\"]\n # bucket = int(hashlib.sha1(str(user_id).encode(\"utf8\")).hexdigest(), 16) % 100\n ab_config = {\n \"name\": gen_ab_name(\"test\", \"1.0.0.0\")\n }\n if True:\n ab_config.update({\n \"9999\": {\n \"use_rs\": True,\n \"recall_stra\": gen_recall_group_stra_name(\n ChannelInfoConfig.recommender_channel_id, \"1.0.0.0\"),\n \"fusion_stra\": gen_fusion_stra_name(\n ChannelInfoConfig.recommender_channel_id, \"1.0.0.0\"),\n \"rank_stra\": \"RS:R:ARL:1.0.0.0\",\n \"rerank_stra\": \"RS:RR:CRL:1.0.0.0\",\n \"use_cache\": False,\n \"cache_stra\": \"RS:CC:RI:1.0.1.0\"\n },\n ChannelInfoConfig.ipr_channel_id: {\n \"use_rs\": True,\n \"recall_stra\": gen_recall_group_stra_name(\n ChannelInfoConfig.ipr_channel_id, \"1.0.0.0\"),\n \"fusion_stra\": gen_fusion_stra_name(\n ChannelInfoConfig.ipr_channel_id, \"1.0.0.0\"),\n \"rank_stra\": \"RS:R:ARL:1.0.0.0\",\n \"rerank_stra\": \"RS:RR:CRL:1.0.0.0\",\n \"use_cache\": False,\n \"cache_stra\": \"RS:CC:RI:1.0.1.0\"\n },\n ChannelInfoConfig.publicy_subsidy_channel_id: {\n \"use_rs\": True,\n \"recall_stra\": gen_recall_group_stra_name(\n ChannelInfoConfig.publicy_subsidy_channel_id, \"1.0.0.0\"),\n \"fusion_stra\": gen_fusion_stra_name(\n ChannelInfoConfig.publicy_subsidy_channel_id, \"1.0.0.0\"),\n \"rank_stra\": \"RS:R:ARL:1.0.0.0\",\n \"rerank_stra\": \"RS:RR:CRL:1.0.0.0\",\n \"use_cache\": False,\n \"cache_stra\": \"RS:CC:RI:1.0.1.0\"\n },\n ChannelInfoConfig.other_channel_id: {\n \"use_rs\": True,\n \"recall_stra\": gen_recall_group_stra_name(\n ChannelInfoConfig.other_channel_id, \"1.0.0.0\"),\n \"fusion_stra\": gen_fusion_stra_name(\n ChannelInfoConfig.other_channel_id, \"1.0.0.0\"),\n \"rank_stra\": \"RS:R:ARL:1.0.0.0\",\n \"rerank_stra\": \"RS:RR:CRL:1.0.0.0\",\n \"use_cache\": False,\n \"cache_stra\": \"RS:CC:RI:1.0.1.0\"\n },\n 
ChannelInfoConfig.pc_channel_id: {\n \"use_rs\": True,\n \"recall_stra\": gen_recall_group_stra_name(\n ChannelInfoConfig.pc_channel_id, \"1.0.0.0\"),\n \"fusion_stra\": gen_fusion_stra_name(\n ChannelInfoConfig.pc_channel_id, \"1.0.0.0\"),\n \"rank_stra\": \"RS:R:ARL:1.0.0.0\",\n \"rerank_stra\": \"RS:RR:CRL:1.0.1.0\",\n \"use_cache\": False,\n \"cache_stra\": \"RS:CC:RI:1.0.1.0\"\n }\n })\n\n return ab_config\n\n def create_and_insert_db_abtest(self, filepath):\n \"\"\"\n Create a new abtest from the config and store it in the database\n \"\"\"\n experiment_info = resolve_params(filepath)\n\n # create the experiment; if the experiment name is empty, exit\n experiment_name = experiment_info.get(\"experiment_name\")\n if not experiment_name:\n return\n # create the experiment and get the experiment id by its name\n self.eop.create_and_update_experiment({\"name\": experiment_name})\n exp_id = self.eop.get_experiment_id_by_name(experiment_name)\n\n # after creating the experiment, create the strategy and get the strategy id\n # while creating the strategy, check for a white-list strategy and build the matching white list\n exp_diversion_strategy = experiment_info.get(\"exp_diversion_strategy\")\n if not exp_diversion_strategy:\n return\n exp_diversion_strategy_id = self.create_and_resolve_strategy_param(exp_diversion_strategy,\n {\"experiment_id\": exp_id})\n # update the experiment's strategy id\n self.eop.update_diversion_strategy_id_field({\"diversion_strategy_id\": exp_diversion_strategy_id}, exp_id)\n\n # create the scenes\n scene_info = experiment_info.get(\"scene_info\", [])\n for scene in scene_info:\n scene_name = scene.get(\"scene_name\")\n if not scene_name:\n logger.info(\"no scene name configured\")\n continue\n\n # create the scene and get its scene_id by name\n self.sop.create_and_update_scene(\n [{\"name\": scene_name, \"experiment_id\": exp_id, \"allocation_ratio\": 1}])\n scene_id = self.sop.get_scene_id_by_name(scene_name, exp_id)\n\n # create the traffic layers\n layer_info = scene.get(\"layer_info\")\n for layer in layer_info:\n layer_name = layer.get(\"layer_name\")\n if not layer_name:\n logger.info(\"no layer name configured\")\n continue\n\n # create the layer and get its layer_id\n self.lop.create_and_update_later({\"name\": layer_name, \"experiment_id\": exp_id, \"scene_id\": scene_id})\n layer_id = self.lop.get_layer_id_by_name(layer_name, exp_id, scene_id)\n\n # fetch and create the user-configured layer -> bucket diversion strategy and get the strategy id\n layer_diversion_strategy = layer.get(\"layer_diversion_strategy\", {})\n if not layer_diversion_strategy:\n logger.info(\"no diversion strategy configured\")\n continue\n _process_ids = {\"experiment_id\": exp_id, \"scene_id\": scene_id, \"layer_id\": layer_id}\n\n # get the strategy id\n layer_diversion_strategy_id = self.create_and_resolve_strategy_param(layer_diversion_strategy,\n _process_ids)\n\n # update the layer's diversion strategy id\n self.lop.update_diversion_strategy_id_field({\"diversion_strategy_id\": layer_diversion_strategy_id},\n layer_id)\n\n # create the non-white-list buckets\n bucket_info = layer.get(\"bucket_info\", [])\n self.create_and_update_bucket_util(_process_ids, bucket_info, is_white_list=0)\n\n def create_and_update_bucket_util(self, process_ids, bucket_info, is_white_list=0):\n \"\"\"\n Create the buckets\n Create the bucket params\n \"\"\"\n for bucket_dic in bucket_info:\n bucket_name = bucket_dic.get(\"bucket_name\")\n if not bucket_name:\n continue\n param_info = {\"name\": bucket_name}\n param_info.update(process_ids)\n self.bop.create_and_update_bucket(param_info, is_white_list=is_white_list)\n bucket_id = self.bop.get_bucket_id_by_name(bucket_name, process_ids, is_white_list)\n bucket_param = bucket_dic.get(\"bucket_param\")\n bucket_param_list = []\n for param_name, param_value in bucket_param.items():\n bucket_param_list.append(\n {\"name\": param_name, \"value\": param_value, \"bucket_id\": bucket_id})\n self.bp.create_and_update_bucket_param(bucket_param_list)\n\n def create_and_resolve_strategy_param(self, 
diversion_strategy, process_ids: dict):\n \"\"\"\n Parse the data,\n create the diversion strategy,\n create a white list if a white-list strategy is configured,\n and return the strategy id\n \"\"\"\n if not diversion_strategy:\n logger.info(\"no diversion strategy configured\")\n return\n diversion_strategy_name = diversion_strategy.get(\"diversion_strategy_name\")\n diversions = diversion_strategy.get(\"diversions\")\n\n # strategy type; currently two kinds are supported: white list and user info\n diversion_type = diversions.get(\"diversion_type\")\n\n # priority\n diversions_priority = diversions.get(\"diversions_priority\")\n # create the strategy and get the strategy id\n param_info = {\"name\": diversion_strategy_name,\n \"diver_config\": str(dict(zip(diversions_priority, diversion_type)))}\n param_info.update(process_ids)\n self.ds.create_diver_strategy(param_info)\n diver_id = self.ds.get_diver_strategy_id_by_name(diversion_strategy_name, process_ids)\n\n # check whether the strategy selects a white-list strategy and create the white list\n # exp --> scene white list\n # layer --> bucket white list\n if WHITE_LIST in diversion_type:\n white_list_info = diversions.get(\"white_list_info\", [])\n self.create_white_list(white_list_info, process_ids)\n return diver_id\n\n def create_white_list(self, white_list_info: list, process_ids: dict):\n \"\"\"\n Create the scene and bucket white lists\n \"\"\"\n for dic in white_list_info:\n white_list_type_name = dic.get(\"white_list_type_name\")\n if not white_list_type_name:\n continue\n param_info = {\"name\": white_list_type_name}\n param_info.update(process_ids)\n # create the white-list type and get the type id\n self.dwlt.create_white_list_type(param_info)\n white_list_type_id = self.dwlt.get_white_list_type_id_by_name(white_list_type_name, process_ids)\n\n white_list_users = dic.get(\"white_list\")\n white_lists = []\n for user in white_list_users:\n white_lists.append({\"user_id\": user, \"white_list_id\": white_list_type_id})\n # create and store the white-list users\n self.wli.create_white_list(white_lists)\n\n white_list_bucket_info = dic.get(\"white_list_bucket_info\", {})\n # create the white-list strategy bucket and its bucket_param entries\n self.create_and_update_bucket_util(process_ids, [white_list_bucket_info], is_white_list=white_list_type_id)\n\n\nif __name__ == '__main__':\n logger.info(\"starting experiment creation...\")\n logger.info(\"please upload the experiment config file...\")\n path = ''\n ab = ABService()\n logger.info(\"creating the experiment...\")\n ab.create_and_insert_db_abtest(path)\n ac_c = ABTestClient()\n logger.info(\"experiment created...\")\n logger.info(\"please import the user info...\")\n parameter = {\"user_id\": \"000007\", \"exp_id\": 1}\n # ac_c.get_bucket_param_info()\n lst = ac_c.get_bucket_param_by_user_operation(parameter)\n print(lst)\n","repo_name":"cn-hujiang/abtest","sub_path":"src/ab_test/service/ab_service.py","file_name":"ab_service.py","file_ext":"py","file_size_in_byte":12573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34070467426","text":"\"\"\"\n\nSmall one-liner utilities that are too small to be placed in a module\n\n\"\"\"\n\nimport os\nimport random\nimport string\nfrom typing import List\n\nfrom flask import Request\nfrom marshmallow import Schema\nfrom marshmallow_jsonschema import JSONSchema\n\nfrom licenseware.utils.logger import log\n\n\ndef get_flask_request_dict(flask_request: Request):\n from licenseware.common.constants import envs\n\n \"\"\"Convert flask request object into a dict\"\"\"\n flask_headers = dict(flask_request.headers) if flask_request.headers else {}\n\n flask_json = {}\n if flask_request.json is not None:\n flask_json = {\"flask_request_json\": flask_request.json}\n\n flask_args = dict(flask_request.args) if flask_request.args else {}\n\n data = {**flask_json, **flask_headers, **flask_args}\n\n if envs.DESKTOP_ENVIRONMENT and data.get(\"tenant_id\", None) is None:\n for tid in 
[\"tenant_id\", \"TenantId\", \"Tenantid\"]:\n data[tid] = envs.DESKTOP_TENANT_ID\n data[\"Authorization\"] = \"Authorization not needed on desktop\"\n\n return data\n\n\ndef serialize_flask_request(flask_request: Request):\n log.warning(\n \"Please use `get_flask_request_dict` instead. This was a bad naming function, will be removed\"\n )\n return get_flask_request_dict(flask_request)\n\n\ndef set_environment_variables(*, envs: dict = None, env_path: str = \".env\"):\n \"\"\"\n\n In the case we need to set some environment variables\n either providing a dict or loading from .env file this function may come in handy\n\n Make sure to call the `set_environment_variables` before the envs are needed!\n\n ```py\n\n from licenseware.utils.miscellaneous import set_environment_variables\n set_environment_variables()\n\n\n App = AppBuilder(\n name = 'App Name',\n description = 'App long description',\n flags = [flags.BETA]\n )\n\n ```\n\n \"\"\"\n\n if os.getenv(\"ENVIRONMENT\") not in [\"local\", None]:\n return\n\n if envs:\n os.environ.update(envs)\n return\n\n with open(env_path, \"r\") as f:\n env_vars = {}\n for v in f.readlines():\n vli = v.strip().split(\"=\")\n if len(vli) == 2:\n env_vars[vli[0]] = vli[1]\n else:\n env_vars[vli[0]] = \"\"\n\n os.environ.update(env_vars)\n\n\ndef generate_id(length=6):\n \"\"\"Create a random series of digits of length specified\"\"\"\n return \"\".join([random.choice(list(string.digits)) for _ in range(length)])\n\n\ndef flat_dict(li: List[dict]) -> dict:\n \"\"\"\n - input_list = [{'width': 'full'}, {'height': '100vh'}]\n - output_dict = {'width': 'full', 'height': '100vh'}\n \"\"\"\n return {k: v for dict_ in li for k, v in dict_.items()}\n\n\ndef get_json_schema(schema: Schema):\n \"\"\"\n Generate json schema from marshmallow schema class\n \"\"\"\n\n json_schema = JSONSchema().dump(schema())[\"definitions\"][schema.__name__]\n\n return json_schema\n\n\ndef build_restx_model(ns, schema: Schema, model_name: str = None):\n \"\"\"\n !DEPRECIATED - use `marshmallow_to_restx_model`function instead\n\n Convert a marshmallow schema to a flask restx model\n Resulted restx model can be used for swagger (body, marshal_with, expect, etc)\n \"\"\"\n\n model_name = model_name or schema.__name__\n\n json_schema = get_json_schema(schema)\n restx_model = ns.schema_model(model_name, json_schema)\n\n return restx_model\n\n\nhttp_methods = [\"GET\", \"POST\", \"PUT\", \"DELETE\"]\n\nswagger_authorization_header = {\n \"Tenantid\": {\"type\": \"apiKey\", \"in\": \"header\", \"name\": \"Tenantid\"},\n \"Authorization\": {\"type\": \"apiKey\", \"in\": \"header\", \"name\": \"Authorization\"},\n}\n","repo_name":"licenseware/licenseware-sdk-v2","sub_path":"licenseware/utils/miscellaneous.py","file_name":"miscellaneous.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"11340287372","text":"from test_framework import generic_test\nfrom test_framework.test_failure import TestFailure\nfrom list_node import ListNode\n\n\nclass Stack:\n\n def __init__(self):\n self.elements = []\n self.current_max = []\n\n def empty(self) -> bool:\n return not self.elements\n\n def max(self) -> int:\n return self.current_max[-1]\n\n def peek(self):\n return self.elements[-1]\n\n def pop(self) -> int:\n self.current_max.pop()\n return self.elements.pop()\n\n def push(self, x: int) -> None:\n if self.empty():\n self.current_max.append(x)\n else:\n self.current_max.append(max(x, self.max()))\n 
self.elements.append(x)\n\n\ndef stack_tester(ops):\n try:\n s = Stack()\n\n for (op, arg) in ops:\n if op == 'Stack':\n s = Stack()\n elif op == 'push':\n s.push(arg)\n elif op == 'pop':\n result = s.pop()\n if result != arg:\n raise TestFailure('Pop: expected ' + str(arg) + ', got ' +\n str(result))\n elif op == 'max':\n result = s.max()\n if result != arg:\n raise TestFailure('Max: expected ' + str(arg) + ', got ' +\n str(result))\n elif op == 'empty':\n result = int(s.empty())\n if result != arg:\n raise TestFailure('Empty: expected ' + str(arg) +\n ', got ' + str(result))\n else:\n raise RuntimeError('Unsupported stack operation: ' + op)\n except IndexError:\n raise TestFailure('Unexpected IndexError exception')\n\n\nif __name__ == '__main__':\n exit(\n generic_test.generic_test_main('stack_with_max.py',\n 'stack_with_max.tsv', stack_tester))\n","repo_name":"Axelphiman/Elements-of-Programming","sub_path":"epi_judge_python/stack_with_max.py","file_name":"stack_with_max.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42813416649","text":"from typing import List\n\n\nclass Solution:\n def permute(self, nums: List[int]) -> List[List[int]]:\n res = []\n N = len(nums)\n\n def dfs(t, visited):\n if len(t) == N:\n res.append(t[:])\n return\n for i in range(N):\n if not visited[i]:\n t.append(nums[i])\n visited[i] = True\n dfs(t, visited)\n t.pop()\n visited[i] = False\n dfs([], [False] * N)\n return res\n\n\nif __name__ == \"__main__\":\n s = Solution()\n result = s.permute([1, 2, 3])\n print(result)\n","repo_name":"kenwoov/PlayLeetCode","sub_path":"Algorithms/Medium/46. Permutations/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39709694493","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import rc\nplt.rcParams['mathtext.fontset'] = 'custom'\nplt.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'\nplt.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'\nplt.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'\nrc('text', usetex=True)\n\n# The number of sweeps and L must be set here to match simulation.cpp.\n# The marker size may also need to be adjusted.\nSweeps = 5*1000\nL = 100\n\nt = np.linspace(1, Sweeps, Sweeps)\ns = t/(L*L)\n\nx_1, y_1 = np.genfromtxt('spinup_1.txt', unpack=True)\na_1, b_1 = np.genfromtxt('spindown_1.txt', unpack=True)\n\nplt.figure(1, (10, 10))\nplt.scatter(x_1, y_1, c='darkblue', s=40, linewidth=0, label=r'Spin up')\nplt.scatter(a_1, b_1, c='darkorange', s=40, linewidth=0, label=r'Spin down')\nplt.legend(loc='best', fontsize='small')\nplt.xlabel('$x$')\nplt.ylabel('$y$')\nplt.xlim(-1, L)\nplt.ylim(-1, L)\nplt.savefig('momentaufnahme_1.pdf')\nplt.clf()\n\nx_2, y_2 = np.genfromtxt('spinup_2.txt', unpack=True)\na_2, b_2 = np.genfromtxt('spindown_2.txt', unpack=True)\n\nplt.figure(1, (10, 10))\nplt.scatter(x_2, y_2, c='darkblue', s=40, linewidth=0, label=r'Spin up')\nplt.scatter(a_2, b_2, c='darkorange', s=40, linewidth=0, label=r'Spin down')\nplt.legend(loc='best', fontsize='small')\nplt.xlabel('$x$')\nplt.ylabel('$y$')\nplt.xlim(-1, L)\nplt.ylim(-1, L)\nplt.savefig('momentaufnahme_2.pdf')\nplt.clf()\n\nx_3, y_3 = np.genfromtxt('spinup_3.txt', unpack=True)\na_3, b_3 = np.genfromtxt('spindown_3.txt', unpack=True)\n\nplt.figure(1, (10, 10))\nplt.scatter(x_3, y_3, 
c='darkblue', s=40, linewidth=0, label=r'Spin up')\nplt.scatter(a_3, b_3, c='darkorange', s=40, linewidth=0, label=r'Spin down')\nplt.legend(loc='best', fontsize='small')\nplt.xlabel(r'$x$')\nplt.ylabel(r'$y$')\nplt.xlim(-1, L)\nplt.ylim(-1, L)\nplt.savefig('momentaufnahme_3.pdf')\nplt.clf()\n\nplt.figure(1, (7, 5))\nenergie_equi_geordnet_1 = np.genfromtxt('Energie_Equi_geordnet_1.txt', unpack=True)\nenergie_equi_geordnet_2 = np.genfromtxt('Energie_Equi_geordnet_2.txt', unpack=True)\nenergie_equi_geordnet_3 = np.genfromtxt('Energie_Equi_geordnet_3.txt', unpack=True)\nplt.xlabel(r'$t$')\nplt.ylabel(r'$e(t)$')\nplt.plot(s, energie_equi_geordnet_1, c='darkblue', label=r'$k_B T = 1.5$')\nplt.plot(s, energie_equi_geordnet_2, c='darkorange', label=r'$k_B T = 2.27$')\nplt.plot(s, energie_equi_geordnet_3, c='darkgreen', label=r'$k_B T = 3$')\nplt.grid()\nplt.xlim(0, 15)\nplt.legend(loc='best')\nplt.savefig('Energie_Equi_geordnet.pdf')\nplt.clf()\n\nplt.figure(1, (7, 5))\nenergie_equi_1 = np.genfromtxt('Energie_Equi_1.txt', unpack=True)\nenergie_equi_2 = np.genfromtxt('Energie_Equi_2.txt', unpack=True)\nenergie_equi_3 = np.genfromtxt('Energie_Equi_3.txt', unpack=True)\nplt.xlabel(r'$t$')\nplt.ylabel(r'$e(t)$')\nplt.plot(s, energie_equi_1, c='darkblue', label=r'$k_B T = 1.5$')\nplt.plot(s, energie_equi_2, c='darkorange', label=r'$k_B T = 2.27$')\nplt.plot(s, energie_equi_3, c='darkgreen', label=r'$k_B T = 3$')\nplt.grid()\nplt.xlim(0, 15)\nplt.legend(loc='best')\nplt.savefig('Energie_Equi.pdf')\nplt.clf()\n\nplt.figure(1, (7, 5))\nenergie_1 = np.genfromtxt('Energie_1.txt', unpack=True)\nenergie_2 = np.genfromtxt('Energie_2.txt', unpack=True)\nenergie_3 = np.genfromtxt('Energie_3.txt', unpack=True)\nplt.xlabel(r'$t$')\nplt.ylabel(r'$e(t)$')\nplt.plot(t, energie_1, c='darkblue', label=r'$k_B T = 1.5$')\nplt.plot(t, energie_2, c='darkorange', label=r'$k_B T = 2.27$')\nplt.plot(t, energie_3, c='darkgreen', label=r'$k_B T = 3$')\nplt.grid()\nplt.legend(loc='best')\nplt.savefig('Energie.pdf')\nplt.clf()\n\nplt.figure(1, (7, 5))\nmagnetisierung_1 = np.genfromtxt('Magnetisierung_1.txt', unpack=True)\nmagnetisierung_2 = np.genfromtxt('Magnetisierung_2.txt', unpack=True)\nmagnetisierung_3 = np.genfromtxt('Magnetisierung_3.txt', unpack=True)\nplt.xlabel(r'$t$')\nplt.ylabel(r'$\\langle m \\rangle$')\nplt.plot(t, magnetisierung_1, c='darkblue', label=r'$k_B T = 1.5$')\nplt.plot(t, magnetisierung_2, c='darkorange', label=r'$k_B T = 2.27$')\nplt.plot(t, magnetisierung_3, c='darkgreen', label=r'$k_B T = 3$')\nplt.grid()\nplt.legend(loc='best')\nplt.savefig('Magnetisierung.pdf')\nplt.clf()\n\nplt.figure(1, (7, 5))\nbetrag_magnetisierung_1 = np.genfromtxt('Betrag_Magnetisierung_1.txt', unpack=True)\nbetrag_magnetisierung_2 = np.genfromtxt('Betrag_Magnetisierung_2.txt', unpack=True)\nbetrag_magnetisierung_3 = np.genfromtxt('Betrag_Magnetisierung_3.txt', unpack=True)\nplt.xlabel(r'$t$')\nplt.ylabel(r'$\\langle | m | \\rangle $')\nplt.plot(t, betrag_magnetisierung_1, c='darkblue', label=r'$k_B T = 1.5$')\nplt.plot(t, betrag_magnetisierung_2, c='darkorange', label=r'$k_B T = 2.27$')\nplt.plot(t, betrag_magnetisierung_3, c='darkgreen', label=r'$k_B T = 3$')\nplt.grid()\nplt.legend(loc='best')\nplt.savefig('Betrag_Magnetisierung.pdf')\nplt.clf()\n","repo_name":"H-berg/Computational-Physics-SS20","sub_path":"Übungsblatt9_2/a_2.py","file_name":"a_2.py","file_ext":"py","file_size_in_byte":4762,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} 
+{"seq_id":"26204630194","text":"from flask import Blueprint, jsonify, request, g\nfrom flask.ext.login import login_required, current_user\n\nfrom app.api import db, auto, Wine\nfrom app.api.categories.model import Category\nfrom app.api.constants import OK, BAD_REQUEST\nfrom app.api.helpers import response_builder\nfrom app.api.favorites.model import Favorite, FavoriteWine\nfrom app.api.photos.model import RecipePhoto\nfrom app.api.recipes.model import Recipe\nfrom app.api.recipes.views_v2 import recipe_response_builder\nfrom app.api.users.constants import PUBLISHED\n\nmod = Blueprint('favorites_v2', __name__, url_prefix='/api_v2/favorites')\n\n\n@auto.doc()\n@mod.route('/', methods=['POST'])\n@login_required\ndef new_favorite():\n \"\"\"\n Add new favorite. List of parameters in json request:\n user_id (required)\n recipe_id (required)\n Example of request:\n {\"user_id\":3, \"recipe_id\":2}\n :return: json with parameters:\n error_code - server response_code\n result - information about created favorite\n \"\"\"\n user_id = request.json.get('user_id')\n recipe_id = request.json.get('recipe_id')\n if user_id is None or recipe_id is None:\n return jsonify({'error_code': BAD_REQUEST, 'result': 'not ok'}), 200 # missing arguments\n if current_user.id == user_id:\n # if True:\n favorite = Favorite.query.filter_by(recipe_id=recipe_id, user_id=user_id).first()\n if not favorite:\n favorite = Favorite(user_id=user_id, recipe_id=recipe_id)\n db.session.add(favorite)\n db.session.commit()\n information = response_builder(favorite, Favorite)\n return jsonify({'error_code': OK, 'result': information}), 201\n else:\n return jsonify({'error_code': BAD_REQUEST, 'result': 'not ok'}), 200\n\n\n@auto.doc()\n@mod.route('/', methods=['GET'])\n@login_required\ndef get_favorite():\n \"\"\"\n Get favorites (recipes and wines) for current user.\n :return: json with parameters:\n error_code - server response_code\n result - information about favorites recipes and wines\n is_last_page - is current page last or not\n \"\"\"\n user_id = current_user.id\n recipes = []\n wines = []\n page = request.args.get('page', type=int)\n vendor_id = request.args.get('vendor_id', type=unicode, default=u'')\n if page is not None:\n # for faster loading\n limit_recipes = 2\n offset_recipes = (page-1)*limit_recipes\n limit_wines = 2\n offset_wines = (page-1)*limit_recipes\n recipes_band = Favorite.query.filter_by(user_id=user_id).slice(start=offset_recipes, stop=limit_recipes+offset_recipes).all()\n wines_band = FavoriteWine.query.filter_by(user_id=user_id).slice(start=offset_wines, stop=limit_wines+offset_wines).all()\n next_recipe = Favorite.query.filter_by(user_id=user_id).slice(start=limit_recipes+offset_recipes, stop=limit_recipes+offset_recipes+1).first()\n next_wine = FavoriteWine.query.filter_by(user_id=user_id).slice(start=limit_wines+offset_wines, stop=limit_wines+offset_wines+1).first()\n if next_recipe or next_wine:\n is_last_page = False\n else:\n is_last_page = True\n else:\n recipes_band = Favorite.query.filter_by(user_id=user_id).all()\n wines_band = FavoriteWine.query.filter_by(user_id=user_id).all()\n is_last_page = True\n for favorite in recipes_band:\n if current_user.is_authenticated() and current_user.role_code == 0:\n recipe_query = Recipe.query\n else:\n recipe_query = Recipe.query.filter_by(type=PUBLISHED)\n recipe = recipe_query.filter_by(id=favorite.recipe_id).first()\n information = recipe_response_builder(recipe, vendor_id)\n information['type_of_object'] = 'recipe'\n recipes.append(information)\n for 
favorite in wines_band:\n wine = Wine.query.get(favorite.wine_id)\n information = response_builder(wine, Wine)\n information['type_of_object'] = 'wine'\n wines.append(information)\n feed = recipes + wines\n return jsonify({'error_code': OK, 'result': feed, 'is_last_page': is_last_page}), 200\n\n\n@auto.doc()\n@mod.route('/', methods=['DELETE'])\n@login_required\ndef delete_favorite(recipe_id):\n \"\"\"\n Delete recipe from current user favorites.\n :param recipe_id: recipe id\n :return: json with parameters:\n error_code - server response_code\n \"\"\"\n favorite = Favorite.query.filter_by(user_id=current_user.id, recipe_id=recipe_id).first()\n if not favorite:\n return jsonify({'error_code': BAD_REQUEST, 'result': 'not ok'}), 200 # favorite doesn't exist\n db.session.delete(favorite)\n db.session.commit()\n return jsonify({'error_code': OK}), 200\n\n\n@auto.doc()\n@mod.route('/wines/', methods=['POST'])\n@login_required\ndef new_favorite_wine():\n \"\"\"\n Add new favorite wine. List of parameters in json request:\n user_id (required)\n wine_id (required)\n Example of request:\n {\"user_id\":3, \"wine_id\":2}\n :return: json with parameters:\n error_code - server response_code\n result - information about the created favorite wine\n \"\"\"\n user_id = request.json.get('user_id')\n wine_id = request.json.get('wine_id')\n if user_id is None or wine_id is None:\n return jsonify({'error_code': BAD_REQUEST, 'result': 'not ok'}), 200 # missing arguments\n if current_user.id == user_id:\n # if True:\n favorite = FavoriteWine.query.filter_by(wine_id=wine_id, user_id=user_id).first()\n if not favorite:\n favorite = FavoriteWine(user_id=user_id, wine_id=wine_id)\n db.session.add(favorite)\n db.session.commit()\n information = response_builder(favorite, FavoriteWine)\n return jsonify({'error_code': OK, 'result': information}), 201\n else:\n return jsonify({'error_code': BAD_REQUEST, 'result': 'not ok'}), 200\n\n\n@auto.doc()\n@mod.route('/wines/', methods=['GET'])\n@login_required\ndef get_favorite_wines():\n \"\"\"\n Get favorite wines for current user.\n :return: json with parameters:\n error_code - server response_code\n recipes - information about favorite wines\n \"\"\"\n user_id = current_user.id\n favorites = FavoriteWine.query.filter_by(user_id=user_id)\n wines = []\n for favorite in favorites:\n wine = Wine.query.get(favorite.wine_id)\n information = response_builder(wine, Wine)\n wines.append(information)\n return jsonify({'error_code': OK, 'wines': wines}), 200\n\n\n@auto.doc()\n@mod.route('/wines/', methods=['DELETE'])\n@login_required\ndef delete_favorite_wine(wine_id):\n \"\"\"\n Delete wine from current user favorite wines.\n :param wine_id: wine id\n :return: json with parameters:\n error_code - server response_code\n \"\"\"\n favorite = FavoriteWine.query.filter_by(user_id=current_user.id, wine_id=wine_id).first()\n if not favorite:\n return jsonify({'error_code': BAD_REQUEST, 'result': 'not ok'}), 200 # favorite doesn't exist\n db.session.delete(favorite)\n db.session.commit()\n return jsonify({'error_code': OK}), 200\n","repo_name":"megge-dream/culinaryon","sub_path":"app/api/favorites/views_v2.py","file_name":"views_v2.py","file_ext":"py","file_size_in_byte":7177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32784340602","text":"# Lesson 4 homework\n\n\n# Load the text \"Hello World!\" into a new variable and print it to the console using print()\n\nmatn = \"hello world \"\nprint(matn)\n\n# Load some text into a variable named xabar and print it to the console, then\n# assign a new value to the variable and print that one too.\n\nxabar = 'nma gap'\nprint(xabar)\n\nxabar = \"tzumisan\"\nprint(xabar)\n\n# Create a variable named class, give it some value and\n# print it to the console (did you get the result you expected?)\n\n# class = \"sayfiddin\"\n# print(class)\n\n# Run the following code:\n \nradius = 70\npi = 3.14159\naylananig_yuzi = pi * radius**2\nprint(\"Radiusi\", radius, \"ga teng aylananig yuzi\", aylananig_yuzi)","repo_name":"sasomiddinov1/AmalYotlar.py","sub_path":"4-dars.py","file_name":"4-dars.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"uz","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"12615325853","text":"\"\"\"\nCreate a user with GDPR P1 PII for manual testing.\nEnrolls the user in the DemoX course.\nOptionally takes in username, email, and course UUID arguments.\n\"\"\"\n\n\nfrom datetime import datetime\nfrom textwrap import dedent\nfrom uuid import uuid4\n\nfrom consent.models import DataSharingConsent\nfrom django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user\nfrom django.core.management.base import BaseCommand\nfrom enterprise.models import (\n EnterpriseCourseEnrollment,\n EnterpriseCustomer,\n EnterpriseCustomerUser,\n PendingEnterpriseCustomerUser\n)\nfrom integrated_channels.sap_success_factors.models import SapSuccessFactorsLearnerDataTransmissionAudit\nfrom opaque_keys.edx.keys import CourseKey\nfrom pytz import UTC\n\nfrom common.djangoapps.entitlements.models import CourseEntitlement, CourseEntitlementSupportDetail\nfrom lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification\nfrom openedx.core.djangoapps.course_groups.models import CourseUserGroup, UnregisteredLearnerCohortAssignments\nfrom openedx.core.djangoapps.profile_images.images import create_profile_images\nfrom openedx.core.djangoapps.profile_images.tests.helpers import make_image_file\nfrom common.djangoapps.student.models import CourseEnrollment, CourseEnrollmentAllowed, PendingEmailChange, UserProfile\n\nfrom ...models import UserOrgTag\n\n\nclass Command(BaseCommand):\n \"\"\"\n Create a user with GDPR P1 PII for manual testing.\n Enrolls the user in the DemoX course.\n Optionally takes in username, email, and course UUID arguments.\n \"\"\"\n help = dedent(__doc__).strip()\n\n def add_arguments(self, parser):\n parser.add_argument(\n '-u', '--username',\n required=False,\n help='Username'\n )\n parser.add_argument(\n '-e', '--email',\n required=False,\n help='Email'\n )\n parser.add_argument(\n '-c', '--course',\n required=False,\n help='Course UUID'\n )\n\n def handle(self, *args, **options):\n \"\"\"\n Execute the command.\n \"\"\"\n\n username = options['username'] if options['username'] else 'gdpr_test_user'\n email = options['email'] if options['email'] else 'gdpr_test_user@example.com'\n course_uuid = options['course'] if options['course'] else uuid4().hex\n\n user, __ = User.objects.get_or_create(\n username=username,\n email=email\n )\n user_info = {\n 'email': email,\n 'first_name': \"GDPR\",\n 'last_name': \"Test\",\n 'is_active': True\n }\n for field, value in user_info.items():\n setattr(user, field, value)\n user.set_password('gdpr test password')\n user.save()\n\n # UserProfile\n profile_image_uploaded_date = datetime(2018, 5, 3, tzinfo=UTC)\n user_profile, __ = UserProfile.objects.get_or_create(\n user=user\n )\n user_profile_info = {\n 'name': 'gdpr test 
name',\n 'meta': '{}',\n 'location': 'gdpr test location',\n 'year_of_birth': 1950,\n 'gender': 'gdpr test gender',\n 'mailing_address': 'gdpr test mailing address',\n 'city': 'Boston',\n 'country': 'US',\n 'bio': 'gdpr test bio',\n 'profile_image_uploaded_at': profile_image_uploaded_date\n }\n for field, value in user_profile_info.items():\n setattr(user_profile, field, value)\n user_profile.save()\n\n # Profile images\n with make_image_file() as image_file:\n create_profile_images(\n image_file,\n {10: \"ten.jpg\"}\n )\n\n # DataSharingConsent\n enterprise_customer, __ = EnterpriseCustomer.objects.get_or_create( # pylint: disable=no-member\n name='test gdpr enterprise customer',\n active=True,\n branding_configuration=None,\n catalog=None,\n enable_audit_enrollment=False,\n enable_data_sharing_consent=False,\n enforce_data_sharing_consent='at_enrollment',\n replace_sensitive_sso_username=True,\n site_id=1\n )\n\n DataSharingConsent.objects.get_or_create(\n username=username,\n enterprise_customer_id=enterprise_customer.uuid\n )\n\n # Sapsf data transmission\n enterprise_customer_user, __ = EnterpriseCustomerUser.objects.get_or_create(\n user_id=user.id,\n enterprise_customer_id=enterprise_customer.uuid\n )\n audit, __ = EnterpriseCourseEnrollment.objects.get_or_create(\n enterprise_customer_user=enterprise_customer_user\n )\n SapSuccessFactorsLearnerDataTransmissionAudit.objects.get_or_create(\n enterprise_course_enrollment_id=audit.id,\n )\n\n # PendingEnterpriseCustomerUser\n PendingEnterpriseCustomerUser.objects.get_or_create(\n user_email=user.email,\n enterprise_customer_id=enterprise_customer.uuid\n )\n\n # EntitlementSupportDetail\n course_entitlement, __ = CourseEntitlement.objects.get_or_create(\n user_id=user.id,\n course_uuid=course_uuid\n )\n CourseEntitlementSupportDetail.objects.get_or_create(\n support_user=user,\n comments='test comments',\n entitlement_id=course_entitlement.id\n )\n\n # Misc. models that may contain PII of this user\n SoftwareSecurePhotoVerification.objects.get_or_create(\n user=user,\n name='gdpr test',\n face_image_url='https://fake_image_url.com',\n photo_id_image_url='gdpr_test',\n photo_id_key='gdpr_test'\n )\n PendingEmailChange.objects.get_or_create(\n user=user,\n activation_key=uuid4().hex\n )\n UserOrgTag.objects.get_or_create(\n user=user\n )\n\n course_id = CourseKey.from_string(\"course-v1:edX+DemoX+Demo_Course\")\n # Objects linked to the user via their original email\n CourseEnrollmentAllowed.objects.get_or_create(\n email=user.email\n )\n course_user_group, __ = CourseUserGroup.objects.get_or_create(\n name='test course user group',\n course_id=course_id\n )\n UnregisteredLearnerCohortAssignments.objects.get_or_create(\n email=user.email,\n course_user_group_id=course_user_group.id\n )\n\n # Enroll the user in a course\n CourseEnrollment.objects.get_or_create(\n course_id=course_id,\n user_id=user.id,\n )\n","repo_name":"openedx/edx-platform","sub_path":"openedx/core/djangoapps/user_api/management/commands/create_user_gdpr_testing.py","file_name":"create_user_gdpr_testing.py","file_ext":"py","file_size_in_byte":6719,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"}