diff --git "a/2962.jsonl" "b/2962.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2962.jsonl"
@@ -0,0 +1,1176 @@
+{"seq_id":"33859626867","text":"import cv2, numpy, matplotlib.pyplot as plt, scipy.interpolate, sys\nfrom collections import defaultdict\n\nimg = cv2.imread(sys.argv[1]) \nH, S, L = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))\nimg_blur = cv2.GaussianBlur(H, (15,15), 0) \nthresh = cv2.threshold(img_blur, 55, 255, cv2.THRESH_BINARY)[1]\nedges = cv2.Canny(image=thresh, threshold1=100, threshold2=200)\nc, hier = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\nbest = max(c, key=lambda x: x.shape[0])[:, 0, :]\nmin_x, max_x = min(best[:, 0]), max(best[:, 0])\nmin_y, max_y = min(best[:, 1]), max(best[:, 1])\nxrange = max_x - min_x\nyrange = max_y - min_y\ncoords = [[((x - min_x) / xrange) * 2 - 1, ((y - min_y) / yrange) * 2 - 1] for x, y in best ]\ncoords.sort(key=lambda x: x[0])\ncoords2 = defaultdict(list)\nfor x, y in coords:\n coords2[x].append(y)\ncoords3x = []\ncoords3y = []\nlast = -1\nfor x, ys in coords2.items():\n coords3y.append(min(ys, key=lambda x: abs(x - last)))\n coords3x.append(x)\n last = coords3y[-1]\ni = scipy.interpolate.CubicSpline(coords3x, coords3y, extrapolate=True)\nxs = numpy.arange(-1.0, 1.0, 0.02)\nplt.plot(coords3x, coords3y, label='data')\nplt.plot(xs, i(xs))\nplt.show()","repo_name":"osmarks/random-stuff","sub_path":"goose2function.py","file_name":"goose2function.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"}
+{"seq_id":"21200893133","text":"from logging.config import dictConfig\nfrom typing import Dict, Optional\n\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom gobexport.config import API_BASE_PATH, API_LOGGING\nfrom gobexport.exporter import CONFIG_MAPPING\n\n\ndef _health():\n \"\"\"\n\n :return: Message telling the API is OK\n \"\"\"\n return 'Connectivity OK'\n\n\ndef _products():\n \"\"\"Returns an overview of all generated products with their filenames.\n\n For example:\n\n \"bag\": {\n \"woonplaatsen\": {\n \"esri_actueel\": [\n \"SHP/BAG_woonplaats.shp\",\n \"SHP/BAG_woonplaats.dbf\",\n \"SHP/BAG_woonplaats.shx\",\n \"SHP/BAG_woonplaats.prj\"\n ],\n \"uva2\": [\n \"UVA2_Actueel/WPL_20201001_N_20201001_20201001.UVA2\"\n ],\n ...\n },\n ...\n },\n ...,\n \"brk\": ...\n\n :return:\n \"\"\"\n result = {}\n\n for catalog_name, catalog in CONFIG_MAPPING.items():\n result[catalog_name] = {}\n for collection_name, config in catalog.items():\n result[catalog_name][collection_name] = {}\n\n for product_name, product in config.products.items():\n filenames = [\n filename() if callable(filename) else filename for filename in\n ([product['filename']] + [extra_file['filename'] for extra_file in product.get('extra_files', [])])\n ]\n\n result[catalog_name][collection_name][product_name] = filenames\n\n return result\n\n\ndef get_flask_app(config: Optional[Dict[str, any]] = None) -> Flask:\n \"\"\"\n Initializes the Flask App\n\n :param config: dictionary to update the flask configuration with.\n :return: Flask App\n \"\"\"\n dictConfig(API_LOGGING)\n ROUTES = [\n # Health check URL\n ('/status/health/', _health, ['GET']),\n (f'{API_BASE_PATH}/products', _products, ['GET']),\n ]\n\n app = Flask(__name__)\n if config is not None:\n app.config.update(config)\n CORS(app)\n\n for route, view_func, methods in ROUTES:\n app.route(rule=route, methods=methods)(view_func)\n\n return app\n","repo_name":"Amsterdam/GOB-Export","sub_path":"src/gobexport/flask_api.py","file_name":"flask_api.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"208434420","text":"#!/usr/bin/python3\nimport copy\nimport heapq\nimport json\n\nfrom timeit import default_timer as timer\nimport os\nimport psutil\n\n## Helper Classes and variables\nstats = {}\n\n## ---> Profiling functions and decorator\ndef get_process_memory():\n process = psutil.Process(os.getpid())\n return process.memory_info().rss\n\ndef profile(store):\n def prof_stor(func):\n def wrapper(*args, **kwargs):\n mem_before = get_process_memory()\n visited = set()\n itr = [0,0]\n start = timer()\n try:\n result = func(*args, visited, itr, **kwargs)\n except Exception as e:\n print(e)\n result = None\n elapsed_time = timer() - start\n mem_after = get_process_memory()\n \n if result and store:\n print('Final ', end='')\n result.print_state()\n stats[store]= {\n 'elapsed_time' : elapsed_time,\n # 'memory' : mem_after - mem_before,\n 'iters' : itr[0],\n 'path_length': itr[1],\n }\n else:\n print('No Soluton found !')\n stats[store]= {\n 'elapsed_time' : elapsed_time,\n # 'memory' : mem_after - mem_before,\n 'iters' : 0,\n 'path_length': 0,\n }\n return result\n return wrapper\n return prof_stor\n## ^^ Simple decorator to make tracking execution time and other statistics easy\n\nclass Board:\n def __init__(self, n: int):\n self.size = n\n self.state = []\n\n def __eq__(self, other):\n return self.h() == other.h()\n def __lt__(self, other):\n return self.h() < other.h()\n\n def is_goal_state(self):\n # determine if the board is in the goal state currently\n return self.h() == 0\n\n def h(self):\n dist = 0\n val = 0\n for i in range(0, self.size):\n for j in range(0, self.size):\n dir_x = [1,0,-1,0]\n dir_y = [0,1,0,-1]\n for k in range(0, 4):\n new_x = i+dir_x[k]\n new_y = j+dir_y[k]\n if new_x >=0 and new_x =0 and new_y < self.size:\n if self.state[new_x][new_y] == self.state[i][j]:\n dist+=1\n return dist\n\n def input_state(self):\n n = self.size\n print('Enter the colour Board state as {0}x{0} matrix, elements separated by a {1}:'.format(n, '\",\"'))\n for i in range(0, n):\n row = input().split(',')\n self.state.append(row)\n self.validate_input()\n\n def validate_input(self):\n vals = []\n for i in range(0, self.size):\n # print(self.state[i])\n if len(self.state[i]) != self.size:\n raise AssertionError('Board specified is not square')\n for i in range(0, self.size):\n for j in range(0, self.size):\n if str(self.state[i][j]).strip() not in ['R','G','B','Y']:\n raise AssertionError('Invalid Colour')\n else:\n self.state[i][j] = str(self.state[i][j]).strip()\n\n def print_state(self):\n n = self.size\n print('Board state: ')\n for i in range(0, n):\n print(self.state[i])\n\n def generate_states(self):\n n = self.size\n new_states = []\n for i in range(0, n):\n for j in range(0, n):\n dir_x = [1,0,-1,0]\n dir_y = [0,1,0,-1]\n bad = False\n for k in range(0, 4):\n new_x = i+dir_x[k]\n new_y = j+dir_y[k]\n if new_x >=0 and new_x =0 and new_y < self.size:\n if self.state[new_x][new_y] == self.state[i][j]:\n bad = True\n if not bad:\n continue\n for k in range(0, 4):\n new_x = i+dir_x[k]\n new_y = j+dir_y[k]\n if new_x >=0 and new_x =0 and new_y < self.size:\n if self.state[new_x][new_y] != self.state[i][j]:\n nboard = Board(n)\n nboard.state = copy.deepcopy(self.state)\n nboard.state[new_x][new_y], nboard.state[i][j] = nboard.state[i][j], nboard.state[new_x][new_y]\n new_states.append(nboard)\n return new_states\n\n########################\n## Search Algorithms ##\n########################\n\n## BFS Algorithm\n@profile(store='bfs')\ndef bfs(cur_state, visited, itr):\n 
print('BFS Initial ', end='')\n cur_state.print_state()\n\n qu = []\n qu.append((cur_state, 0))\n # parent = {}\n\n while(len(qu) != 0):\n itr[0]+=1\n cst, depth = qu.pop(0)\n if cst.is_goal_state():\n itr[1] = depth\n return cst\n for state in cst.generate_states():\n y = repr(state.state)\n if y not in visited:\n visited.add(y)\n # parent[y] = cst\n qu.append((state, depth+1))\n\n return None\n\n# def print_path(parents, goal, initial):\n# # print(parents)\n# temp = goal\n# while repr(temp.state) in parents.keys() and repr(temp.state) != repr(initial.state):\n# temp.print_state()\n# temp = parents[repr(temp.state)]\n# temp.print_state()\n\n## A* Algorithm\n@profile(store='A*')\ndef a_star(cur_state, visited, itr):\n print('A* Initial ', end='')\n cur_state.print_state()\n\n hp = []\n heapq.heappush(hp, (0+cur_state.h(), 0, cur_state))\n # parent = {}\n while(len(hp) != 0):\n itr[0]+=1\n top_ele = heapq.heappop(hp)\n cst = top_ele[2]\n if cst.is_goal_state():\n itr[1] = top_ele[1]\n # print_path(parent, cst, cur_state)\n return cst\n cur_steps = top_ele[1]\n for state in cst.generate_states():\n y = repr(state.state)\n if y not in visited:\n visited.add(y)\n # parent[y] = cst\n cost = cur_steps+1+state.h()\n heapq.heappush(hp, (cost, cur_steps+1, state))\n\n return None\n\n## Main Functions and Input/Output routines\n\nboard = Board(int(input('Enter Board Size: ')))\nboard.input_state()\n# print(board.h())\n# for state in board.generate_states():\n# state.print_state()\nbfs(board)\na_star(board)\n\nprint(json.dumps(stats, indent = 4))\n","repo_name":"virresh/search_strategies","sub_path":"board_color.py","file_name":"board_color.py","file_ext":"py","file_size_in_byte":6673,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"43212057550","text":"#!/usr/bin/env python3\n\n# returns a hashed version of the input msg\ndef hash(msg):\n\thsum = 0 # hash sum\n\n\t# Pad msg to make its length divisible by 4\n\twhile len(msg) % 4:\n\t\tmsg = msg + ' '\n\n\n\tfor i in range(0, len(msg), 4):\n\t\tpsum = 0 # partial sum\n\t\tcharvals = [(ord(c)*16777619)%128 for c in msg[i:i+4]]\n\t\tif i % 16 < 4:\n\t\t\tfor j in range(4):\n\t\t\t\tpsum = (psum * 16777619) ^ charvals[j]\n\n\t\telif i % 16 < 8:\n\t\t\tfor j in range(4):\n\t\t\t\tpsum = psum ^ ((psum << 5) + (psum >> 2) + charvals[j])\n\n\t\telif i % 16 < 12:\n\t\t\tfor j in range(4):\n\t\t\t\tpsum = 33 * psum ^ charvals[j]\n\n\t\telif i % 16 < 16:\n\t\t\tfor j in range(4):\n\t\t\t\tpsum = ((psum << 5) * charvals[j])\n\n\t\thsum = hsum + ((psum**50)*51)%(1<<128) ^ psum\n\n\tif len(hex(hsum)) < 34:\n\t\treturn (hex(hsum)[2:])\n\n\treturn (hex(hsum)[-32:])\n","repo_name":"jthill9/Data-Structures-Final-Project","sub_path":"passhash.py","file_name":"passhash.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"43572181215","text":"\"\"\"\r\n* JBoss, Home of Professional Open Source.\r\n* Copyright 2020 Red Hat, Inc., and individual contributors\r\n* as indicated by the @author tags.\r\n*\r\n* Licensed under the Apache License, Version 2.0 (the \"License\")\r\n* you may not use this file except in compliance with the License.\r\n* You may obtain a copy of the License at\r\n*\r\n* http: // www.apache.org/licenses/LICENSE-2.0\r\n*\r\n* Unless required by applicable law or agreed to in writing, software\r\n* distributed under the License is distributed on an \"AS IS\" BASIS,\r\n* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n* See the License for the specific language governing permissions and\r\n* limitations under the License.\r\n\"\"\"\r\n\r\nimport ipaddress\r\nfrom risk_calculator.DataBases import sqliteDB\r\n\r\n\"\"\"\r\n* Module to check if the IP is already blacklisted i.e. , by checking it's presence\r\n* in popular IP blacklist dumps and return the highest possible Risk Score if found\r\n* \r\n* @author < a href = \"mailto:piyush.palta@outlook.com\" > Piyush Palta < /a >\r\n\"\"\"\r\n\r\nclass blacklistIP:\r\n \"\"\" Create new instance\r\n @param bldata_file : the location of file containing blacklist IPs data \r\n Constructor assigns IP as attribute and initiates search in blacklist dumps\r\n \"\"\"\r\n def __init__(self, bldata_file = 'full_blacklist_database.txt'):\r\n self.bldata_file = bldata_file\r\n self.db = sqliteDB.database()\r\n self.init_db() \r\n\r\n def init_db(self):\r\n self.db.create_table()\r\n f = open(self.bldata_file)\r\n file_data=f.readlines()\r\n ipv4 = []\r\n ipv6 = []\r\n \r\n for line in file_data:\r\n #this is based on the structure of the blacklist data file used\r\n ip_addr = str(line.split('\\t',1)[0])\r\n if('.' in ip_addr):\r\n ip_hash = hashIPv4(ip_addr)\r\n ipv4.append(ip_hash)\r\n else :\r\n ip_hash1,ip_hash2 = hashIPv6(ip_addr)\r\n ipv6.append([ip_hash1,ip_hash2])\r\n\r\n #Intialize database by extracting data from file to db \r\n self.db.insertManyIPv4(ipv4)\r\n self.db.insertManyIPv6(ipv6)\r\n \r\n \r\n def check(self, ip_addr):\r\n if('.' 
in ip_addr):\r\n ip_hash = hashIPv4(ip_addr)\r\n return self.db.searchIPv4(ip_hash)\r\n else:\r\n ip_hash1, ip_hash2 = hashIPv6(ip_addr)\r\n return self.db.searchIPv6(ip_hash1, ip_hash2)\r\n\r\n\"\"\" Hashes IPv6 addresses\r\n @param ip_addr : IPv6 address to be hashed\r\n return (hash1, hash2) : Returns two 64-bit hash numbers\r\n\"\"\"\r\ndef hashIPv6(ip_addr):\r\n ip_addr = str(ipaddress.ip_address(ip_addr).exploded)\r\n ip = ip_addr.split(':')\r\n ip = [int(i,16) for i in ip]\r\n hash1=0\r\n for i in range(4):\r\n hash1 += (ip[i]*( 1<<(16*(3-i))))\r\n hash2=0\r\n for i in range(4):\r\n hash2 += (ip[i+4]*( 1<<(16*(3-i))))\r\n if hash1 >= (1<<63):\r\n hash1-=(1<<64)\r\n if hash2 >= (1<<63):\r\n hash2-=(1<<64)\r\n return (hash1, hash2)\r\n\r\n\r\n\"\"\" Generates IPv6 address from the hash number \r\n @param hash1 : 64-bit first part of the IPv6 hash\r\n @param hash2 : 64-bit second part of the IPv6 hash \r\n return ip_addr : Return IPv6 address\r\n\"\"\"\r\ndef getIPv6(hash1, hash2):\r\n ip = []\r\n for i in range(4):\r\n ip.append(hash2%(1<<16))\r\n hash2//=(1<<16)\r\n for i in range(4):\r\n ip.append(hash1%(1<<16))\r\n hash1//=(1<<16)\r\n ip = ip[::-1]\r\n ip = [str(hex(i)[2:]) for i in ip]\r\n ip_addr = str(ipaddress.ip_address(':'.join(ip)))\r\n return ip_addr \r\n\r\n\r\n\"\"\"Function : Hashes IPv4 addresses\r\n @param ip_addr : IPv4 address to be hashed\r\n return hashNum : Returns a 32-bit hash number\r\n\"\"\"\r\ndef hashIPv4(ip_addr):\r\n ip = ip_addr.split('.')\r\n ip = [int(i) for i in ip]\r\n hashNum=0\r\n for i in range(4):\r\n hashNum += (ip[i]*( 1<<(8*(3-i))))\r\n return hashNum\r\n\r\n\r\n\"\"\" Generates IPv4 address from the hash number \r\n @param hashNum : 32-bit hash number\r\n return ip_addr : Return IPv4 address\r\n\"\"\"\r\ndef getIPv4(hashNum):\r\n ip = []\r\n for i in range(4):\r\n ip.append(hashNum%(1<<8))\r\n hashNum//=(1<<8)\r\n ip = ip[::-1]\r\n ip = [str(i) for i in ip]\r\n ip_addr = '.'.join(ip)\r\n return ip_addr\r\n","repo_name":"piyush-palta/risk-analyzer","sub_path":"risk_calculator/blacklist_ip.py","file_name":"blacklist_ip.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"18488746449","text":"#!/usr/bin/python3\n\"\"\"prints first states objects from database hbtn_0e_6_usa\"\"\"\nfrom model_state import Base, State\nfrom sqlalchemy import create_engine\nfrom sys import argv\nfrom sqlalchemy.orm import sessionmaker\n\nif __name__ == '__main__':\n \"\"\"connect to database and return query\"\"\"\n db_link = \"mysql+mysqldb://{}:{}@localhost:3306/{}\".format(\n argv[1], argv[2], argv[3])\n eng = create_engine(db_link)\n Session = sessionmaker(bind=eng)\n\n session = Session()\n f_state = session.query(State).order_by(State.id).first()\n if (f_state is not None):\n print(\"{}: {}\".format(f_state.id, f_state.name))\n else:\n print(\"Nothing\")\n","repo_name":"Almiviolad/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/8-model_state_fetch_first.py","file_name":"8-model_state_fetch_first.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"5194637162","text":"import string\nfrom . import utils\n\n\nclass Season(object):\n def __init__(self, data, special=False):\n self.id = data.get('id')\n self.url = data.get('url')\n self.number = data.get('number')\n self.name = data.get('name')\n self.num_episodes = data.get('episodeOrder')\n self.episodes = {}\n self.premiere_date = utils.convert_date(data.get('premiereDate'))\n self.end_date = data.get('endDate')\n self.network = data.get('network')\n self.streaming_service = data.get('webChannel')\n self.images = data.get('image')\n self.summary = \"\"\n self.summary = utils.strip_tags(data.get('summary'))\n self.links = data.get('_links')\n\n def __str__(self):\n return string.capwords(' '.join(self.url.split('/')[-1].split('-')))\n","repo_name":"MythTV/mythtv","sub_path":"mythtv/bindings/python/tvmaze/season.py","file_name":"season.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":656,"dataset":"github-code","pt":"91"}
+{"seq_id":"70120069422","text":"import os\nfrom environs import Env\nimport dj_database_url\nfrom django.core.management.utils import get_random_secret_key\nfrom pathlib import Path\n\nenv = Env()\nenv.read_env()\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\", default=get_random_secret_key())\nSECURE_SSL_REDIRECT = env.bool(\"DJANGO_SECURE_SSL_REDIRECT\", default=True)\nSECURE_HSTS_SECONDS = env.int(\"DJANGO_SECURE_HSTS_SECONDS\", default=2592000)\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS\", default=True)\nSECURE_HSTS_PRELOAD = env.bool(\"DJANGO_SECURE_HSTS_PRELOAD\", default=True)\nSESSION_COOKIE_SECURE = env.bool(\"DJANGO_SESSION_COOKIE_SECURE\", default=True)\nCSRF_COOKIE_SECURE = env.bool(\"DJANGO_CSRF_COOKIE_SECURE\", default=True)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DJANGO_DEBUG\", default=False)\n\nALLOWED_HOSTS = ['.herokuapp.com', 'localhost', '127.0.0.1', '0.0.0.0']\n\nRENDER_EXTERNAL_HOSTNAME = os.environ.get('RENDER_EXTERNAL_HOSTNAME')\nif RENDER_EXTERNAL_HOSTNAME:\n ALLOWED_HOSTS.append(RENDER_EXTERNAL_HOSTNAME)\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n # external\n 'tailwind',\n 'django_browser_reload',\n 'crispy_forms',\n 'allauth',\n 'allauth.account',\n 'whitenoise.runserver_nostatic',\n 'storages',\n 'ckeditor',\n # local\n 'accounts',\n 'pages',\n 'theme',\n 'products',\n]\n\n# django allauth config\nSITE_ID = 1\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nLOGIN_REDIRECT_URL = 'home'\nLOGOUT_REDIRECT_URL = 'home'\nACCOUNT_LOGOUT_REDIRECT = 'home'\nACCOUNT_SESSION_REMEMBER = True\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_UNIQUE_EMAIL = True\n\nTAILWIND_APP_NAME = 'theme'\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nAUTH_USER_MODEL = 'accounts.CustomUser'\n\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django_browser_reload.middleware.BrowserReloadMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n]\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\nROOT_URLCONF = 'config.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [str(BASE_DIR.joinpath(\"templates\"))],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 
'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'config.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = {\n 'default': dj_database_url.config(\n # Feel free to alter this value to suit your needs.\n default='postgresql://postgres:postgres@localhost:5432/db',\n conn_max_age=600\n )\n\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\n# STATIC_URL = '/static/'\n# STATICFILES_DIRS = (str(BASE_DIR.joinpath('static')), str(BASE_DIR.joinpath('theme/static')))\n# STATIC_ROOT = str(BASE_DIR.joinpath('staticfiles'))\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\nCRISPY_TEMPLATE_PACK = 'tailwind'\n\n# AWS & Django-storages settings\n# AWS_ACCESS_KEY_ID = os.environ.get(\"AWS_ACCESS_KEY_ID\")\n# AWS_SECRET_ACCESS_KEY = os.environ.get(\"AWS_SECRET_ACCESS_KEY\")\n# AWS_STORAGE_BUCKET_NAME = os.environ.get(\"AWS_STORAGE_BUCKET_NAME\")\n# AWS_S3_FILE_OVERWRITE = False\n# AWS_DEFAULT_ACL = None\n# DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\n# STATICFILES_STORAGE = 'storages.backends.s3boto3.S3StaticStorage'\n# AWS_QUERYSTRING_AUTH = False\n\nUSE_S3 = bool(RENDER_EXTERNAL_HOSTNAME)\n\nif USE_S3:\n # aws settings\n AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')\n AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')\n AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME')\n AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'\n AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=86400'}\n # s3 static settings\n STATIC_LOCATION = 'static'\n STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/'\n STATICFILES_STORAGE = 'config.storage_backends.StaticStorage'\n # s3 public media settings\n PUBLIC_MEDIA_LOCATION = 'media'\n MEDIA_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{PUBLIC_MEDIA_LOCATION}/'\n DEFAULT_FILE_STORAGE = 'config.storage_backends.PublicMediaStorage'\nelse:\n STATIC_URL = '/staticfiles/'\n STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n MEDIA_URL = '/mediafiles/'\n MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')\n\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)\n","repo_name":"nexttrack07/sellerlane","sub_path":"config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"18324056498","text":"#!/usr/bin/env python\nimport sys\nimport os\nimport rospy\nimport numpy as np\nfrom geometry_msgs.msg import Pose, PoseStamped\nfrom sensor_msgs.msg import JointState, Image\nfrom std_msgs.msg import *\nfrom control_msgs.msg import JointTrajectoryControllerState\nfrom trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint\nfrom trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint\nfrom intera_core_msgs.msg import JointCommand\nfrom intera_core_msgs.msg import EndpointState\nfrom gazebo_msgs.msg import ContactsState\nfrom tf import TransformListener\nfrom intera_io import IODeviceInterface\nimport intera_interface\nimport math\nimport cv2\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom string import Template\nimport time\nimport intera_interface\nfrom ddpg.msg import GoalObs\nfrom geometry_msgs.msg import (\n PoseStamped,\n Pose,\n Point,\n Quaternion,\n)\nfrom gazebo_msgs.srv import (\n GetModelState\n)\nfrom intera_core_msgs.srv import (\n SolvePositionFK,\n SolvePositionFKRequest,\n) \nfrom intera_core_msgs.srv import (\n SolvePositionIK,\n SolvePositionIKRequest,\n)\nfrom sawyer_sim_examples.msg import * \nfrom get_model_gazebo_pose import GazeboModel\n\nbase_dir = os.path.dirname(os.path.realpath(__file__))\n\nfixed_orientation = Quaternion(\n x=-0.00142460053167,\n y=0.999994209902,\n z=-0.00177030764765,\n w=0.00253311793936)\n\nN_STEP_RETURN = 10\nGAMMA = 0.97\n\nfrom gym import seeding\nfrom gym import spaces\n# register(\n# id='FetchReach-v0',\n# entry_point='openai_ros:task_envs.fetch_reach.fetch_reach.FetchReachEnv',\n# timestep_limit=1000,\n# )\nACTION_DIM = 3 # Cartesian\nOBS_DIM = (100,100,3) # POMDP\nSTATE_DIM = 24 # MDP\n \nclass robotEnv():\n def __init__(self, max_steps=700, isdagger=False, isPOMDP=False, train_indicator=0):\n \"\"\"An implementation of OpenAI-Gym style robot reacher environment\n \"\"\"\n rospy.init_node(\"robotEnv\")\n # for compatiability\n self.action_space = spaces.Box(-1., 1., shape=(ACTION_DIM,), dtype='float32')\n self.observation_space = spaces.Dict(dict(\n\n observation=spaces.Box(-np.inf, np.inf, shape=obs['observation'].shape, dtype='float32'),\n ))\n\n\n\n self.train_indicator = train_indicator # 0: Train 1:Test\n self.isdagger = isdagger\n self.isPOMDP = isPOMDP\n self._limb = intera_interface.Limb(\"right\")\n if not train_indicator:\n self._tip_name = 'right_gripper_tip'\n else:\n self._tip_name = 'right_hand'\n\n self._gripper = intera_interface.Gripper()\n self.currentDist = 1\n self.previousDist = 1\n self.reached = False\n self.tf = TransformListener()\n print(\"Getting robot state... \")\n self._rs = intera_interface.RobotEnable(intera_interface.CHECK_VERSION)\n self._init_state = self._rs.state().enabled\n print(\"Enabling robot... 
\")\n self._rs.enable()\n self.bridge = CvBridge()\n\n self.joint_names_4d = ['right_j0', 'right_j1', 'right_j2', 'right_j3']\n self.joint_names = ['right_j0', 'right_j1', 'right_j2', 'right_j3', 'right_j4', 'right_j5', 'right_j6']\n self.joint_speeds = [0.0,0.0,0.0,0.0,0.0,0.0,0.0]\n self.joint_positions = [0.0,0.0,0.0,0.0,0.0,0.0,0.0]\n self.joint_velocities = [0.0,0.0,0.0,0.0,0.0,0.0,0.0]\n self.joint_efforts = [0.0,0.0,0.0,0.0,0.0,0.0,0.0]\n self.right_endpoint_position = [0,0,0]\n self.max_steps = max_steps\n self.done = False\n self.reward = 0\n self.reward_rescale = 1.0\n self.isDemo = False\n self.reward_type = 'sparse'\n\n\n self.joint_command = JointCommand()\n self.gripper = intera_interface.Gripper(\"right\")\n \n self.pub = rospy.Publisher('/robot/limb/right/joint_command', JointCommand, tcp_nodelay=True, queue_size=1)\n self.pub3 = rospy.Publisher('/dagger/restart', JointCommand, queue_size=1)\n self.pub4 = rospy.Publisher('/robot/limb/right/joint_command_timeout', Float64, latch=True, queue_size=10)\n self.pub5 = rospy.Publisher('/ddpg/epi0', JointCommand, queue_size=1)\n self.resetPub = rospy.Publisher('/ddpg/reset2/', Bool, queue_size=1)\n\n self.destPos = np.array([0.7, 0.15, -0.12+0.025])\n self.destObj = np.array([0.7, 0.10, 0])\n\n self.color_obs_list = []\n self.depth_obs_list = []\n\n self.resize_factor = 100/400.0\n self.resize_factor_real = 100/650.0\n\n self.gripper_length = 0.176\n self.distance_threshold = 0.1\n self.position = [0.0, 0.0, 0.0]\n self.terminateCount = 0\n self.successCount = 0\n self.color_image = np.ones((400,400,3))\n self.squared_sum_eff = 0.0\n self.isReset = False\n self.daggerPosAction = [0.0,0.0,0.0]\n \n self.joint_vel_command = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n self.cartesian_command = [0.0, 0.0, 0.0]\n # used for per-step elapsed time measurement\n self.tic = 0.0\n self.toc = 0.0\n self.elapsed = 0.0\n\n\n self.starting_joint_angles = {'right_j0': -0.041662954890248294,\n 'right_j1': -1.0258291091425074,\n 'right_j2': 0.0293680414401436,\n 'right_j3': 1.37518162913313,\n 'right_j4': -0.06703022873354225,\n 'right_j5': 0.7968371433926965,\n 'right_j6': 1.7659649178699421}\n\n self._action_scale = 1.0\n\n rospy.Subscriber('/robot/joint_states', JointState , self.jointStateCB)\n rospy.Subscriber('/robot/limb/right/endpoint_state', EndpointState , self.endpoint_positionCB)\n rospy.Subscriber('/teacher/fin', JointCommand , self.doneCB)\n\n if not self.train_indicator: # train\n rospy.Subscriber(\"/dynamic_objects/camera/raw_image\", Image, self.rgb_ImgCB)\n else:\n rospy.Subscriber(\"/camera/color/image_raw\", Image, self.rgb_ImgCB)\n\n rospy.Subscriber(\"/robot/limb/right/joint_command\", JointCommand, self.vel_CommandCB)\n rospy.Subscriber(\"/ddpg/reset/\", Float64, self.resetCB)\n rospy.Subscriber(\"/teacher/pos_cmd_pub/\", PosCmd, self.posCmdCB)\n \n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def posCmdCB(self, poscmd):\n cmd_arr = poscmd.goal_cart_pos\n self.daggerPosAction = cmd_arr\n\n def resetCB(self, isReset):\n if isReset:\n self.isReset = True\n\n def vel_CommandCB(self, data):\n '''\n self.joint_command.mode = 2 # velocity control mode\n self.joint_command.names = self.joint_names\n self.joint_command.velocity =self.joint_speeds\n self.joint_command.header.stamp = rospy.Time.now()\n '''\n _temp_vel = list(data.velocity)\n if len(_temp_vel)>0 and abs(_temp_vel[0]) >0.0:\n self.joint_vel_command = _temp_vel\n\n def doneCB(self, data):\n print (\"Done\")\n self.done = 
True\n\n def rgb_ImgCB(self, data):\n self.rcvd_color = data ## ROS default image\n self.cimg_tstmp = rospy.get_time()\n self.color_image = self.bridge.imgmsg_to_cv2(self.rcvd_color, \"bgr8\") # 640 * 480\n\n def depth_ImgCB(self, data):\n data.encoding = \"mono16\"\n self.rcvd_depth = data ## ROS default image\n self.dimg_tstmp = rospy.get_time()\n self.depth_image2 = self.bridge.imgmsg_to_cv2(self.rcvd_depth2, \"mono16\") # 640 * 480\n\n def depth_ImgCB(self, data):\n pass\n\n def jointStateCB(self,msg): # callback function for joint state readings\n\n self.joint_positions = [self._limb.joint_angle('right_j0'),\n self._limb.joint_angle('right_j1'),\n self._limb.joint_angle('right_j2'),\n self._limb.joint_angle('right_j3'),\n self._limb.joint_angle('right_j4'),\n self._limb.joint_angle('right_j5'),\n self._limb.joint_angle('right_j6')]\n\n self.joint_velocities = [self._limb.joint_velocity('right_j0'),\n self._limb.joint_velocity('right_j1'),\n self._limb.joint_velocity('right_j2'),\n self._limb.joint_velocity('right_j3'),\n self._limb.joint_velocity('right_j4'),\n self._limb.joint_velocity('right_j5'),\n self._limb.joint_velocity('right_j6')]\n self.squared_sum_vel = np.linalg.norm(np.array(self.joint_velocities))\n\n self.joint_efforts = [self._limb.joint_effort('right_j0'),\n self._limb.joint_effort('right_j1'),\n self._limb.joint_effort('right_j2'),\n self._limb.joint_effort('right_j3'),\n self._limb.joint_effort('right_j4'),\n self._limb.joint_effort('right_j5'),\n self._limb.joint_effort('right_j6')]\n self.squared_sum_eff = np.linalg.norm(np.array(self.joint_efforts))\n\n self._limb.set_command_timeout(1.0)\n\n def endpoint_positionCB(self,msg):\n self.right_endpoint_position = [msg.pose.position.x, msg.pose.position.y, msg.pose.position.z]\n\n def apply_action(self, cartesian):\n cartesian = cartesian\n ik_pose = Pose()\n ik_pose.position.x = cartesian[0]\n ik_pose.position.y = cartesian[1]\n ik_pose.position.z = cartesian[2] #- 0.14 if isReal else 0\n ik_pose.orientation.x = fixed_orientation.x\n ik_pose.orientation.y = fixed_orientation.y\n ik_pose.orientation.z = fixed_orientation.z\n ik_pose.orientation.w = fixed_orientation.w\n self._servo_to_pose(ik_pose)\n\n def _servo_to_pose(self, pose, time=1.0, steps=1.0):\n ''' An *incredibly simple* linearly-interpolated Cartesian move '''\n r = rospy.Rate(1/(time/steps)) # Defaults to 100Hz command rate\n current_pose = self._limb.endpoint_pose()\n ik_delta = Pose()\n ik_delta.position.x = pose.position.x / steps + current_pose['position'].x \n ik_delta.position.y = pose.position.y / steps + current_pose['position'].y \n ik_delta.position.z = pose.position.z / steps + current_pose['position'].z #s - 0.14 if isReal else 0\n ik_delta.orientation.x = ( pose.orientation.x) / steps \n ik_delta.orientation.y = ( pose.orientation.y) / steps\n ik_delta.orientation.z = ( pose.orientation.z) / steps\n ik_delta.orientation.w = ( pose.orientation.w) / steps\n joint_angles = self._limb.ik_request(ik_delta, self._tip_name)\n if joint_angles:\n self._limb.set_joint_positions(joint_angles)\n\n def getColor_observation(self):\n return self.color_image\n\n def getAction_Dagger(self):\n return self.daggerPosAction\n\n def getDepth_observation(self):\n return self.depth_image\n\n def getCurrentJointValues(self):\n return self.joint_positions, self.joint_velocities, self.joint_efforts\n\n def getCurrentPose(self):\n return self.right_endpoint_position\n\n def setCartAction(self, action):\n self.cartesian_command = action\n\n def 
setJointValues(self,jvals):\n self.joint_speeds = jvals\n return True\n\n def getDist(self):\n DIST_OFFSET = -0.9+0.025-0.0375\n rospy.wait_for_service('/gazebo/get_model_state')\n try:\n object_state_srv = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)\n object_state = object_state_srv(\"block\", \"world\")\n self.destPos = np.array([object_state.pose.position.x, object_state.pose.position.y, object_state.pose.position.z + DIST_OFFSET])\n except rospy.ServiceException as e:\n rospy.logerr(\"Spawn URDF service call failed: {0}\".format(e)) \n self.position = self.getCurrentPose()\n currentPos = np.array((self.position[0],self.position[1],self.position[2])) \n return np.linalg.norm(currentPos-self.destPos)\n\n def checkForTermination(self):\n \"\"\"Termination triggers done=True\n \"\"\"\n X_RANGE = (0.3, 0.75)\n Y_RANGE = (-0.5, 0.5)\n Z_RANGE = (-0.2, 0.55)\n\n if not (X_RANGE[0] <= self.position[0] <= X_RANGE[1]) or not (Y_RANGE[0] <= self.position[1] <= Y_RANGE[1]) or not (Z_RANGE[0] <= self.position[2] <= Z_RANGE[1]):\n self.terminateCount +=1\n\n if self.terminateCount == 50:\n self.terminateCount =0\n return True\n else:\n return False\n\n def checkForSuccess(self):\n \"\"\"Success triggers done=True\n \"\"\"\n curDist = self.getDist()\n if curDist < self.distance_threshold:\n self.successCount +=1\n self.reward +=1\n if self.successCount == 50:\n self.successCount =0\n return True\n else:\n return False\n\n def _pend_epi_transition(self):\n if self.isdagger:\n print ('All demo trajectories are collected for this EPISODE')\n rospy.set_param('dagger_reset',\"true\") # param_name, param_value \n print ('Waiting for new episode to start') \n while not rospy.is_shutdown():\n if rospy.has_param('epi_start'):\n break \n rospy.delete_param('epi_start') \n print ('Now starts new episode')\n else:\n rospy.set_param('ddpg_reset',\"true\") # param_name, param_value\n print ('Reset param published')\n print ('Now moves to start position')\n _color_obs = self.getColor_observation()\n resetMsg = Bool()\n self.resetPub.publish(resetMsg)\n while not rospy.is_shutdown():\n if self.isReset:\n self.isReset = False\n break\n print ('Now starts new episode')\n\n def _get_color_obs(self): \n _color_obs = self.getColor_observation()\n if self.isReal:\n self.color_obs = cv2.resize(_color_obs,None,fx=self.resize_factor_real,fy=self.resize_factor_real,interpolation=cv2.INTER_CUBIC)\n else:\n self.color_obs = cv2.resize(_color_obs,None,fx=self.resize_factor,fy=self.resize_factor,interpolation=cv2.INTER_CUBIC)\n return self.color_obs\n\n def _get_joint_obs(self):\n joint_pos, joint_vels, joint_effos = self.getCurrentJointValues()\n while not joint_pos:\n print ('waiting joint vals')\n joint_pos, joint_vels, joint_effos = self.getCurrentJointValues()\n return joint_pos, joint_vels, joint_effos\n\n def _get_target_obj_obs(self):\n \"\"\"Return target object pose. 
Experimentally supports only position info.\"\"\"\n if not self.isReal:\n rospy.wait_for_service('/gazebo/get_model_state')\n try:\n object_state_srv = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)\n object_state = object_state_srv(\"block\", \"world\")\n self.destPos = np.array([object_state.pose.position.x, object_state.pose.position.y, object_state.pose.position.z-0.884])\n except rospy.ServiceException as e:\n rospy.logerr(\"Spawn URDF service call failed: {0}\".format(e))\n return self.destPos\n\n def reset(self):\n \"\"\"OpenAI Gym style reset function.\"\"\"\n self.done = False\n self.successCount =0\n self.terminateCount =0\n self.reward = 0\n color_obs = self._get_color_obs()\n joint_pos, joint_vels, joint_effos = self._get_joint_obs()\n obj_pos = self._get_target_obj_obs()\n if self.isPOMDP: # Partially observable\n obs = [color_obs, joint_pos, joint_vels, joint_effos, obj_pos]\n else: # Fully observable\n obs = [joint_pos, joint_vels, joint_effos, obj_pos]\n return obs\n\n def reset_teaching(self):\n \"\"\"OpenAI Gym style reset function.\n Will be used for demo data acquisition.\"\"\"\n self.done = False\n self.successCount =0\n self.terminateCount =0\n self.reward = 0\n\n color_obs = self._get_color_obs()\n joint_pos, joint_vels, joint_effos = self._get_joint_obs()\n obj_pos = self._get_target_obj_obs()\n if self.isPOMDP: # Partially observable\n obs = [color_obs, joint_pos, joint_vels, joint_effos]\n else: # Fully observable\n obs = [joint_pos, joint_vels, joint_effos]\n return obs\n\n def compute_reward(self):\n \"\"\" Reward computation for non-goalEnv.\n \"\"\"\n curDist = self.getDist()\n if self.reward_type == 'sparse':\n return (curDist <= self.distance_threshold).astype(np.float32) # 1 for success else 0\n else:\n return -curDist -self.squared_sum_vel # -L2 distance -l2_norm(joint_vels)\n\n def compute_HER_rewards(env, achieved_goal, desired_goal):\n \"\"\"Re-computed rewards for substituted goals. 
Only supports sparse reward setting.\n Computes batch array of rewards\"\"\"\n batch_dist = np.linalg.norm(achieved_goal - desired_goal, axis=-1)\n return (batch_dist <= env.distance_threshold).astype(np.float32)\n\n def step(self,_act, step):\n \"\"\"\n Function executed each time step.\n Here we get the action execute it in a time step and retrieve the\n observations generated by that action.\n :param action:\n :return: obs, reward, done\n \"\"\"\n self.prev_tic = self.tic\n self.tic = time.time()\n self.elapsed = time.time()-self.prev_tic\n self.done = False\n if step == self.max_steps:\n self.done = True\n\n act = _act.flatten().tolist()\n self.apply_action(act)\n if not self.isReal:\n self.reward = self.compute_reward()\n if self.checkForTermination():\n print ('======================================================')\n print ('Terminates current Episode : OUT OF BOUNDARY')\n print ('======================================================')\n self.done = True\n color_obs = self._get_color_obs()\n joint_pos, joint_vels, joint_effos = self._get_joint_obs()\n obj_pos = self._get_target_obj_obs()\n \n if np.mod(step, 10)==0:\n if not self.isReal:\n print(\"DISTANCE : \", self.getDist())\n print(\"PER STEP ELAPSED : \", self.elapsed)\n print(\"SPARSE REWARD : \", self.reward_rescale*self.reward)\n print(\"Current EE pos: \" ,self.right_endpoint_position)\n print(\"Actions: \", act)\n\n if self.isPOMDP: # Partially observable\n obs = [color_obs, joint_pos, joint_vels, joint_effos]\n else: # Fully observable\n obs = [joint_pos, joint_vels, joint_effos]\n\n return obs, self.reward_rescale*self.reward, self.done\n\n def step_teaching(self,step):\n self.prev_tic = self.tic\n self.tic = time.time()\n self.elapsed = time.time()-self.prev_tic\n self.done = False\n if step == self.max_steps:\n self.done = True\n rospy.set_param('demo_success','true')\n curDist = self.getDist()\n if not self.isReal:\n self.reward = self.compute_reward()\n color_obs = self._get_color_obs()\n joint_pos, joint_vels, joint_effos = self._get_joint_obs()\n obj_pos = self._get_target_obj_obs()\n\n if np.mod(step, 10)==0:\n print(\"PER STEP ELAPSED : \", self.elapsed)\n print(\"DISTANCE : \", curDist)\n print(\"SPARSE REWARD : \", self.reward)\n print(\"Current EE pos: \" ,self.right_endpoint_position)\n if self.isPOMDP: # Partially observable\n obs = [color_obs, joint_pos, joint_vels, joint_effos]\n else: # Fully observable\n obs = [joint_pos, joint_vels, joint_effos]\n return obs, self.reward_rescale*self.reward, self.done\n\n\n\n def close(self): \n rospy.signal_shutdown(\"done\")\n\n","repo_name":"mch5048/catkin_ws_4rl","sub_path":"robotReacher-v0.py","file_name":"robotReacher-v0.py","file_ext":"py","file_size_in_byte":19764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"6520839529","text":"from time import perf_counter\nfrom decimal import Decimal, getcontext\nfrom math import ceil, factorial as fac\n\nstart = perf_counter()\n\nprecision = 99999\n\ngetcontext().prec = precision\ni = ceil(precision / 14)\nc = 426880 * Decimal(10005).sqrt()\ne = 1\nl = 13591409\npartial_sum = Decimal(l)\nfor k in range(1, i):\n m = fac(6*k)//(fac(3 * k)*fac(k)**3)\n l += 545140134\n e *= -262537412640768000\n partial_sum += Decimal(m * l) / e\n\nresult = str(c / partial_sum)[:-1]\nprint(perf_counter() - start)\nprint(result)","repo_name":"runkaiz/Chudnovsky-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"24095357345","text":"\"\"\" This module contains the functions for iterating the social and climate \ncoupled model (SoCCo)\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n\nfrom . import social as sl\nfrom . import climate as cl\n \n#### randomUniformF ############################################################\n\ndef randomUniformF(nSamples=1):\n \"\"\"\n returns random variates on scale (0,1).\n This function can replace perceivedBehavioralControlF()\n and efficacyF().\n \"\"\" \n return np.random.uniform(low=0.0, high=1.0, size=nSamples)\n \n\n#### randomNormalF ############################################################\n \ndef randomNormalF(mean, sd, nSamples=1):\n \"\"\"\n returns normal random variates.\n This function is used to initialize per capita emissions.\n pcEmissions = per capita emissions of CO2; Current values estimated\n by annualGHGemissionsInit/WorldPopnInit resulting if value of 5.049\n per person\n \"\"\" \n \n return np.random.normal(loc=mean,scale=sd,size=nSamples)\n\n\n#### eIncrement ################################################################\n\ndef eIncrement(att, pbc, psn):\n \"\"\"\n eIncrement[att_,pbc_,psn_]: rescales att and psn to -Inf to Inf and \n then multiplies by pbc (0 to 1) to result in a increment in per \n capita emissions\n att = attitude, pbc = perceivedBehavioralControl, psn = perceivedSocialNorm\n \"\"\"\n attInv = stats.norm(loc=0.0,scale=1.0).ppf(att) # InverseCDF\n attInv[attInv==-np.inf]= min(10*attInv[attInv!=-np.inf].min(),-10) # avoid -inf !NEED TO CHANGE VALUE OF 10!\n attInv[attInv==np.inf]= max(10*attInv[attInv!=np.inf].max(),10) # avoid +inf\n \n psnInv = stats.norm(loc=0.0,scale=1.0).ppf(psn) # InverseCDF\n psnInv[psnInv==-np.inf]= min(10*psnInv[psnInv!=-np.inf].min(),-10) # avoid -inf\n psnInv[psnInv==np.inf]= max(10*psnInv[psnInv!=np.inf].max(),10) # avoid +inf\n \n eDelIncrement = -(attInv + psnInv)*pbc\n \n return eDelIncrement\n \n \n#### updatePCEmissions ################################################################\n\ndef updatePCEmissions(pcE, eff, pbc, tData,percepWindowSize,riskSens=1.0):\n \"\"\"\n updatePCEmissions calculates a del pcE and then adds this to current pcE to\n return new pcE in \n \"\"\"\n psn= sl.perceivedSocialNorm(pcE) \n risk = sl.perceivedRisk(percepWindowSize, tData, riskSens)\n att = sl.attitude(risk, eff)\n pcE_Del = eIncrement(att, pbc, psn)\n pcE_New = pcE_Del + pcE\n \n return pcE_New\n \n\n#### iterateOneStep ############################################################\n\n\ndef iterateOneStep(pcE_ts, tData_ts, co2_ts, eff, pbc, popN,percepWindowSize=3,riskSens=1.0):\n \"\"\"\n Updates atm CO2, temperature and per capita emissions for one step (one year).\n \"\"\"\n pcE_updated=updatePCEmissions(pcE_ts[:,-1], eff, pbc,tData_ts,percepWindowSize,riskSens)\n pcE_updated=np.atleast_2d(pcE_updated).transpose()\n pcE_vector=np.concatenate((pcE_ts, pcE_updated),axis=1)\n co2Del_ppm=cl.perCapitaEmissionsToDelPPM(pcE_updated, popN)\n co2_updated = np.array([co2Del_ppm + co2_ts[-1]]) # adds to last element of co2Current\n co2_vector = np.concatenate( [co2_ts, co2_updated] )\n rf = cl.computeRF(co2_vector)\n tDel=cl.compute_deltaT(rf)\n t_updated = np.array([tDel[-1] + tData_ts[-1]]) # adds to last element of co2Current\n t_vector = np.concatenate( [tData_ts, t_updated] )\n \n return pcE_vector,t_vector,co2_vector\n \n \n#### iterateOneStep ############################################################\n\ndef iterateNsteps(pcE_init,tData_init, co2_init, 
nSteps, eff, pbc,popN,\n percepWindowSize=3,riskSens=1.0):\n \"\"\"\n 'Nsteps' updates of per capita emissions, temperature, and atm CO2 with each\n step being 1 year\n \"\"\"\n \n for i in range(nSteps):\n \n pcE_init,tData_init,co2_init = iterateOneStep(pcE_init,tData_init, co2_init,\n eff, pbc,popN,percepWindowSize=percepWindowSize,riskSens=riskSens)\n \n return pcE_init,tData_init,co2_init\n\n\n\n\n\n","repo_name":"OpenClimate/climate_change_model","sub_path":"SoCCo/algorithms/iter.py","file_name":"iter.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"4045410145","text":"\"\"\"\n406. Queue Reconstruction by Height\nMedium\n\nYou are given an array of people, people, which are the attributes of some people in a queue (not necessarily in order). Each people[i] = [hi, ki] represents the ith person of height hi with exactly ki other people in front who have a height greater than or equal to hi.\n\nReconstruct and return the queue that is represented by the input array people. The returned queue should be formatted as an array queue, where queue[j] = [hj, kj] is the attributes of the jth person in the queue (queue[0] is the person at the front of the queue).\n\nExample 1:\n\nInput: people = [[7,0],[4,4],[7,1],[5,0],[6,1],[5,2]]\nOutput: [[5,0],[7,0],[5,2],[6,1],[4,4],[7,1]]\nExplanation:\nPerson 0 has height 5 with no other people taller or the same height in front.\nPerson 1 has height 7 with no other people taller or the same height in front.\nPerson 2 has height 5 with two persons taller or the same height in front, which is person 0 and 1.\nPerson 3 has height 6 with one person taller or the same height in front, which is person 1.\nPerson 4 has height 4 with four people taller or the same height in front, which are people 0, 1, 2, and 3.\nPerson 5 has height 7 with one person taller or the same height in front, which is person 1.\nHence [[5,0],[7,0],[5,2],[6,1],[4,4],[7,1]] is the reconstructed queue.\nExample 2:\n\nInput: people = [[6,0],[5,0],[4,0],[3,2],[2,2],[1,4]]\nOutput: [[4,0],[5,0],[2,2],[3,2],[1,4],[6,0]]\n\"\"\"\n\n\n\"\"\"\nO(N^2) Algorithm\n\nInitialize ans array of size len(people)\nSort the people by height from low to high, if same height: sort k from high to low.\nScan from left to right in the sorted people array, for each element [n, k], we find the (k+1)th available slot in ans array. Put the [n, k] in such slot and mark the slot as unavailable. (At beginning, all slot in ans are available)\nMore explain for step 3: Why we need to find the (k+1)th available slot? Because [n, k] means, there will be k higher people in front of me. Since the people is sorted from low to high, so I have to leave k empty slot for the future candidate (which are higher than me). Thus, the current element sits in the (k+1)th available slot. And after the element sit, you mark the slot as unavailable.\n\nclass Solution:\n def reconstructQueue(self, people: List[List[int]]) -> List[List[int]]:\n people.sort(key = lambda x: [x[0], -x[1]])\n \n ans = [None] * len(people)\n \n for h, k in people:\n cnt = 0\n for i in range(len(people)):\n if ans[i] is None:\n cnt += 1\n \n if cnt == k + 1:\n ans[i] = [h, k]\n break\n \n return ans\nO(NlogN) Algorithm\n\nNow, our ultimate problem becomes: Given an array with slots. Some are available, some are not available. What is the index of the kth available slot? for example: we have array slots = [0, 1, 1, 0, 0, 1, 1, 0], where 1 means available. The 3rd available slot is in the index: 5. If this array is static, we can keep track of information easily. However, the bad thing is: the availability of the slot are changing when a new element is added, but the good thing is: a new people will only change an available slot to unavailable (1 to 0)\n\nSo, it is time for segment tree. Build a segment tree based on the slots array. The leaf node of segment tree keeps a value 1(available) or 0(unavailable), The internal node's value means the sum of availability in the range. When a query comes, you do a binary search on the segment tree, until you find the correct leaf. 
Then you update the leaf and update the whole tree.\n\nI myself did not come up with such segment solution. I referred to someone else's idea and re-implement it from scratch.\n\nclass TreeNode:\n def __init__(self, lo, hi):\n self.val = 1\n self.left = None\n self.right = None\n self.lo = lo\n self.hi = hi\n \nclass SegmentTree:\n def __init__(self, N):\n self.root = self.build(0, N-1)\n \n def build(self, lo, hi):\n if lo == hi:\n return TreeNode(lo, hi)\n \n mid = (lo + hi) // 2\n \n node = TreeNode(lo, hi)\n \n node.left = self.build(lo, mid)\n node.right = self.build(mid+1, hi)\n \n node.val = node.left.val + node.right.val\n \n return node\n \n def query(self, node, slot):\n if node.lo == node.hi:\n node.val = 0\n return node.lo\n \n if node.left.val >= slot:\n ret = self.query(node.left, slot)\n else:\n ret = self.query(node.right, slot - node.left.val)\n \n node.val = node.left.val + node.right.val\n \n return ret\n\nclass Solution:\n def reconstructQueue(self, people: List[List[int]]) -> List[List[int]]:\n if not people:\n return []\n people.sort(key = lambda x: [x[0], -x[1]])\n N = len(people)\n tree = SegmentTree(N)\n root = tree.root\n\n ans = [None] * N\n \n for h, k in people:\n idx = tree.query(root, k+1)\n ans[idx] = [h, k]\n \n return ans\n\"\"\"\n\n\nfrom typing import List\n\n\nclass Solution:\n def reconstructQueue(self, people: List[List[int]]) -> List[List[int]]:\n people.sort(key=lambda person : (-person[0], person[1]))\n queue = []\n for p in people:\n queue.insert(p[1], p)\n return queue ","repo_name":"datpham19/leetcode","sub_path":"python/406.py","file_name":"406.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"25148567744","text":"from sys import platform\nimport pandas as pd\nimport numpy as np\nfrom numpy import log, exp, arange\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.linear_model import LinearRegression, LassoCV\nfrom sklearn.model_selection import train_test_split, RepeatedKFold, GridSearchCV\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn import tree\nfrom dtreeviz.trees import dtreeviz # will be used for tree visualization\n\n\n# dataset is from Kaggle\n# https://www.kaggle.com/austinreese/craigslist-carstrucks-data\n\nfile = '/vehicles.csv'\n\n# data load\ndata = pd.read_csv(file)\ndata = data.drop(data.columns.values[0], 1)\n\n# data summarization\nprint(data.iloc[:, 1:].describe())\n\n# variable list\nprint(data.columns)\nprint(data.iloc[0])\n\n# count unique values\nunique_val = data.nunique(axis=0).sort_values()\n\n# count na\ncount_na = data.isnull().sum().sort_values(ascending=False)\npercent_na = count_na/data.shape[0]\n\n# title uniques\nprint(data['title_status'].unique())\nprint(data['model'].unique())\n\n# select variables\nselect_var = ['price', 'year', 'odometer', 'manufacturer', 'condition', 'cylinders', 'fuel',\n 'title_status', 'transmission', 'drive', 'type', 'paint_color', 'state']\n\n# remove any missing values\nnew_data = data[select_var].dropna(how='any')\n\n# remove outliers (price > 1000 or price < 1000000 or odometer > 10000000)\nnew_data = new_data[(new_data['price']>1e+3) & (new_data['price']<1e+6)]\nnew_data = new_data[(new_data['odometer']<1e+7)]\n\n# log transformation of price and odometer to remedy skewness of the distributions\nnew_data['odometer'] = log(new_data['odometer'] + 1e-2)\nnew_data['price'] = log(new_data['price'])\n\n# plot the original and log-transformed price andodometer\nfig, ax = plt.subplots(1, 2)\nexp(new_data['price']).hist(ax=ax[0], bins=70)\nnew_data['price'].hist(ax=ax[1], bins=70)\n# ax[0].set_title('Price (original)')\nax[0].set_xlabel('Price ($)')\n# ax[1].set_title('Price (log-transformed)')\nax[1].set_xlabel('log(Price)')\nax[0].set_ylabel('Frequency')\n\nfig, ax = plt.subplots(1, 2)\nexp(new_data['odometer']).hist(ax=ax[0], bins=70)\nnew_data['odometer'].hist(ax=ax[1], bins=70)\n# ax[0].set_title('Odometer')\n# ax[1].set_title('Odometer (log-transformed)')\nax[0].set_xlabel('Odometer')\nax[1].set_xlabel('log(Odometer)')\nax[0].set_ylabel('Frequency')\n\n# linear regression\nlm_data = new_data.copy()\nlab_var = ['manufacturer', 'condition', 'cylinders', 'fuel', 'title_status', 'transmission', 'drive', 'type', 'paint_color', 'state']\nlm_data = pd.concat([lm_data.iloc[:, :3], pd.get_dummies(lm_data[lab_var], drop_first=True)], axis=1)\n\nlm_predictors = lm_data.iloc[:, 1:]\nlm_price = lm_data.iloc[:, 0]\n\n# test and train split\nX_lm_train, X_lm_test, y_lm_train, y_lm_test = train_test_split(lm_predictors, lm_price, test_size=.2)\n\nlm = LinearRegression()\n\n# fit the linear regression model\nlm.fit(X_lm_train, y_lm_train)\n\n# calculate R square for training and test data\nlm.score(X_lm_train, y_lm_train)\nlm.score(X_lm_test, y_lm_test)\n\n# calculate the predicted price\ny_pred_lm = lm.predict(X_lm_test)\n\n# calculate RMSE\nmean_squared_error(y_lm_test, y_pred_lm, squared=False)\n\n\n## LassoCV\ncv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)\n# define model\nls = LassoCV(alphas=arange(0.01, 1, 0.01), cv=cv, n_jobs=-1)\n# fit model\nls.fit(X_lm_train, y_lm_train)\n\n# R squared for 
training and test data\nls.score(X_lm_train, y_lm_train)\nls.score(X_lm_test, y_lm_test)\ny_pred_ls = ls.predict(X_lm_test)\n\n# RMSE\nmean_squared_error(y_lm_test, y_pred_ls, squared=False)\nmean_squared_error(exp(y_lm_test), exp(y_pred_ls), squared=False)\n\n# random forest\nrf_data = new_data.copy()\nlab_var = ['manufacturer', 'condition', 'cylinders', 'fuel', 'title_status', 'transmission', 'drive', 'type', 'paint_color', 'state']\n\n# label encoding for random forest categorical variables\nrf_data[lab_var] = rf_data[lab_var].apply(LabelEncoder().fit_transform)\nrf_predictors = rf_data.iloc[:, 1:]\nrf_price = rf_data.iloc[:, 0]\nX_rf_train, X_rf_test, y_rf_train, y_rf_test = train_test_split(rf_predictors, rf_price, test_size=.2)\n\nrf = RandomForestRegressor(n_estimators=100, max_depth=16, n_jobs=-1)\nrf.fit(X_rf_train, y_rf_train)\n\nimportant_feature = pd.DataFrame({'Variable':rf_predictors.columns,\n 'Importance':rf.feature_importances_}).sort_values('Importance', ascending=False)\nprint(important_feature)\n\nax = plt.bar(list(range(important_feature.shape[0])), important_feature['Importance'])\nplt.xticks(list(range(important_feature.shape[0])), important_feature['Variable'], rotation='vertical')\n\n\ny_pred_rf = rf.predict(X_rf_test)\n\nprint(rf.score(X_rf_train, y_rf_train))\nprint(rf.score(X_rf_test, y_rf_test))\n\nmean_squared_error(y_rf_test, y_pred_rf, squared=False)\nmean_squared_error(exp(y_rf_test), exp(y_pred_rf), squared=False)\n\n# plot the absolute deviation error\nax = (np.abs(exp(y_pred_rf) - exp(y_rf_test))).hist(bins=1000)\nax.set_title(\"Absolute Deviation Error\")\nax.set_xlim(0, 10000)\nax.set_xlabel(\"Price\")\nax.set_ylabel(\"Frequency\")\n\nax = (exp(y_pred_rf) - exp(y_rf_test)).hist(bins=1000)\nax.set_title(\"Deviation Error\")\nax.set_xlim(-10000, 10000)\nax.set_xlabel(\"Price\")\nax.set_ylabel(\"Frequency\")\n\n\n# Grid CV for random forest\nparam_grid = {\n 'bootstrap': [True],\n 'max_depth': [4, 6, 10, 14, 16],\n 'min_samples_leaf': [3, 4, 5],\n 'min_samples_split': [8, 10, 12]\n}\ngrid_search = GridSearchCV(estimator = rf, param_grid = param_grid,\n cv = 3, n_jobs = -1, verbose = 2)\n\ngrid_search.fit(X_rf_train, y_rf_train)\n\ngrid_search.score(X_rf_train, y_rf_train)\ngrid_search.score(X_rf_test, y_rf_test)\ngrid_search.best_params_\ngrid_search.best_index_\n\n# best CV result showed 16 depth again\n\n# plot random forest\nplt.figure(figsize=(20,20))\n_ = tree.plot_tree(rf.estimators_[0], feature_names=rf_data.columns, filled=True)\n\nviz = dtreeviz(rf.estimators_[0], X_rf_train, y_rf_train,\n feature_names=X_rf_train.columns, target_name=\"Target\")\nviz\n\n\n\n# Import tools needed for visualization\nfrom sklearn.tree import export_graphviz\nimport pydot\n\n# Pull out one tree from the forest\ntree = rf.estimators_[0]\n\n# Export the image to a dot file\nexport_graphviz(tree, out_file = 'tree.dot', feature_names = X_rf_train.columns, rounded = True, precision = 1)\n\n# Use dot file to create a graph\n(graph, ) = pydot.graph_from_dot_file('./tree.dot')\n\n# Write graph to a pdf file\ngraph.write_pdf('./tree.pdf')\n","repo_name":"wy1189/etc","sub_path":"vehicle.py","file_name":"vehicle.py","file_ext":"py","file_size_in_byte":6491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"38868888377","text":"class Solution:\n def numSquares(self, n: int) -> int:\n dp=[5 for i in range(n+1)]\n dp[0]=0\n for i in range(n):\n j=1\n while j*j+i<=n:\n dp[i+j*j]=min(dp[i+j*j],dp[i]+1)\n j=j+1\n return dp[n]\n# the transfer function for this problem is dp[i+j*j]=dp[i]+1\n#every integer can be made up of at most 4 square numbers so we can initialize the dp list with number 5","repo_name":"junone/Algorithme","sub_path":"Dynamic programming/perfect-square.py","file_name":"perfect-square.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"70787725742","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport glob, os\nimport object_detection_api as tf\nfrom libs.functions import * \nfrom libs.utils import *\nfrom libs.constants import *\nfrom libs.settings import Settings\nfrom libs.ustr import ustr\n# from PyQt5.QtCore import Qt,pyqtSlot, QSize\n# from PyQt5.QtGui import QImage, QPixmap, QPalette, QPainter , QIcon , QImageReader\nfrom PyQt5.QtPrintSupport import QPrintDialog, QPrinter\n# from PyQt5.QtWidgets import QListWidget,QLabel,QToolBar , QSizePolicy, QScrollArea, QMessageBox, QMainWindow, QMenu, QAction, \\\n# qApp, QFileDialog, QTableWidget, QTableWidgetItem, QVBoxLayout, QPushButton, QDockWidget, QWidget\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nimport json\n\nimport xlwt\n# Add internal libs\nfrom libs.settings import Settings\nfrom libs.pascal_voc_io import PascalVocReader\nfrom libs.pascal_voc_io import XML_EXT\nfrom libs.ustr import ustr\n\nclass CloneThread(QThread):\n signal = pyqtSignal('PyQt_PyObject')\n\n def __init__(self):\n QThread.__init__(self)\n self.git_url = []\n self.git_status = \"\"\n self.add=\"\"\n self.tabela=\"\"\n self.save=\"\"\n self.alerta=\"\"\n\n # run method gets called when we start the thread\n def run(self):\n self.git_status(\"Analisado 0 /\"+str(len(self.git_url)))\n i = 0\n total = 0\n vivo = 0\n paralisado = 0\n for index in range(0,len(self.git_url) , 2):\n self.tabela.setRowCount(self.tabela.rowCount()+1)\n json = tf.get_teste(self.git_url[index],self.git_url[index+1], 0.98)\n print(json)\n self.add(i,0 , json.path)\n self.add(i,1 , str(json.gabarito['total']))\n self.add(i,2 , str(json.gabarito['vivo']))\n self.add(i,3 , str(json.gabarito['paralisado']))\n total = total + int(json.gabarito['total'])\n vivo = vivo + int(json.gabarito['vivo'])\n paralisado = paralisado + int(json.gabarito['paralisado'])\n i = i + 1\n self.git_status(\"Analisado \"+str(i)+\" /\"+str(len(self.git_url[index])))\n self.tabela.setRowCount(self.tabela.rowCount()+1)\n self.add(i,0 , \"TOTAL\")\n self.add(i,1 , str(total))\n self.add(i,2 , str(vivo))\n self.add(i,3 , str(paralisado))\n self.tabela.setRowCount(self.tabela.rowCount()+1)\n # self.alerta(\"Analise finalizada, os resultados foram salvos.\")\n self.save()\n \n\nclass QImageViewer(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.git_thread = CloneThread() # This is the thread object\n # Connect the signal from the thread to the finished method\n # self.git_thread.signal.connect(self.finished)\n \n self.defaultSaveDir = None\n self.usingPascalVocFormat = True\n self.mImgList = []\n self.dirname = None\n self.labelHist = []\n self.lastOpenDir = None\n self.itemsToShapes = {}\n self.shapesToItems = {}\n self.prevLabelText = ''\n\n # Application state.\n \n self.filePath = None\n self.recentFiles = []\n \n\n self.mImgList = []\n self.dirname = None\n self.labelHist = []\n self.lastOpenDir = None\n\n self.tableWidget = QTableWidget(0,4)\n self.tableWidget.setHorizontalHeaderLabels(['Imagem', 'Encontrados', 'Movimentando', 'Paralisados'])\n self.tableWidget.horizontalHeader().setStretchLastSection(True) \n self.tableWidget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n \n # lista de arquivos\n self.fileListWidget = QListWidget()\n self.fileListWidget.itemDoubleClicked.connect(self.fileitemDoubleClicked)\n filelistLayout = QVBoxLayout()\n filelistLayout.setContentsMargins(0, 0, 0, 0)\n filelistLayout.addWidget(self.fileListWidget)\n fileListContainer = 
QWidget()\n fileListContainer.setLayout(filelistLayout)\n self.filedock = QDockWidget('Lista de imagens', self)\n self.filedock.setObjectName('files')\n self.filedock.setWidget(fileListContainer)\n \n\n listLayout = QVBoxLayout()\n listLayout.setContentsMargins(0, 0, 0, 0)\n \n\n self.labelList = QListWidget()\n listLayout.addWidget(self.labelList)\n \n\n self.createActions()\n self.createMenus()\n\n \n self.setCentralWidget(self.tableWidget)\n self.addDockWidget(Qt.RightDockWidgetArea, self.filedock)\n \n\n \n self.statusBar().showMessage('Todos os modulos foram carregados com sucesso.')\n self.statusBar().show()\n\n\n self.setWindowTitle(\"NematoNET\")\n self.resize(1200, 800) \n\n \n def savefile(self):\n # filename,_ = QFileDialog.getSaveFileName(self, 'Save File', '', \".xls(*.xls)\")\n print(self.lastOpenDir)\n filename = self.lastOpenDir + \"/resultado.xls\"\n wbk = xlwt.Workbook()\n sheet = wbk.add_sheet(\"sheet\", cell_overwrite_ok=True)\n style = xlwt.XFStyle()\n font = xlwt.Font()\n font.bold = True\n style.font = font\n model = self.tableWidget.model()\n sheet.write(0, 0, \"Imagem\")\n sheet.write(0, 1, \"Contados\")\n sheet.write(0, 2, \"Movimentando\")\n sheet.write(0, 3, \"Paralisado\")\n for r in range(model.rowCount()):\n text = model.data(model.index(r, 0))\n sheet.write(r, 0, text)\n text = model.data(model.index(r, 1))\n sheet.write(r, 1, text)\n text = model.data(model.index(r, 2))\n sheet.write(r, 2, text)\n text = model.data(model.index(r, 3))\n sheet.write(r, 3, text)\n wbk.save(filename)\n self.status(\"Resultado foi salvo na pasta.\")\n\n def scanAllImages(self, folderPath):\n extensions = ['.%s' % fmt.data().decode(\"ascii\").lower() for fmt in QImageReader.supportedImageFormats()]\n images = []\n for file in os.listdir(folderPath):\n if file.lower().endswith(tuple(extensions)):\n relativePath = file\n relativePath = os.path.join(folderPath, file)\n path = ustr(os.path.abspath(relativePath))\n images.append(path)\n\n natural_sort(images, key=lambda x: x.lower())\n return images \n \n def status(self, message, delay=5000):\n self.statusBar().showMessage(message, delay)\n\n\n def addRow(self, linha , coluna , texto):\n self.tableWidget.setItem(linha,coluna,QTableWidgetItem(texto))\n\n def analisar(self): \n self.git_thread.git_url = self.mImgList\n self.git_thread.git_status = self.status\n self.git_thread.add = self.addRow\n self.git_thread.tabela = self.tableWidget\n self.git_thread.save = self.savefile\n self.git_thread.alerta = self.alerta\n self.git_thread.start() # Finally starts the thread\n\n\n def fileitemDoubleClicked(self, item=None):\n currIndex = self.mImgList.index(ustr(item.text()))\n if currIndex < len(self.mImgList):\n filename = self.mImgList[currIndex]\n if filename:\n print(\"s\")\n\n def importDirImages(self, dirpath):\n self.lastOpenDir = dirpath\n self.dirname = dirpath\n self.filePath = None\n self.fileListWidget.clear()\n self.mImgList = self.scanAllImages(dirpath)\n if len(self.mImgList) % 2 != 0:\n self.alerta(\"Por favor, quantidade de imagens está errada.\")\n else:\n for imgPath in self.mImgList:\n item = QListWidgetItem(imgPath)\n self.fileListWidget.addItem(item)\n\n def openDir(self):\n options = QFileDialog.Options()\n my_dir = QFileDialog.getExistingDirectory(self, \"Abrir pasta\", '', QFileDialog.ShowDirsOnly )\n if my_dir:\n for x in range(0,self.tableWidget.rowCount()):\n self.tableWidget.removeRow(x)\n self.updateActions()\n self.importDirImages(my_dir)\n\n \n def createActions(self):\n self.openDirAct = QAction(\"&Abrir 
pasta\", self, shortcut=\"Ctrl+t\", triggered=self.openDir)\n self.openDirToolAct = QAction(QIcon(\"/Users/guilhermealarcao/tfObjWebrtc/resources/icons/open.png\"), \"&Abrir Pasta\", self, shortcut=\"Ctrl+t\", triggered=self.openDir)\n self.analisarAct = QAction(QIcon(\"/Users/guilhermealarcao/tfObjWebrtc/resources/icons/database.png\") , \"Analisar\", self, enabled=False, triggered=self.analisar)\n\n \n def updateActions(self):\n self.analisarAct.setEnabled(True)\n \n\n def createMenus(self):\n self.fileMenu = QMenu(\"&Arquivo\", self) \n self.fileMenu.addAction(self.openDirAct)\n self.fileMenu.addSeparator()\n self.menuBar().addMenu(self.fileMenu)\n self.toolbar = QToolBar( \"My main toolbar\")\n self.toolbar.addAction(self.openDirToolAct)\n self.toolbar.addSeparator()\n self.toolbar.addAction(self.analisarAct)\n self.toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)\n self.addToolBar(Qt.LeftToolBarArea, self.toolbar)\n\n def alerta(self, texto):\n QMessageBox.about(self, \"Alerta\", texto)\n\n def currentItem(self):\n items = self.labelList.selectedItems()\n if items:\n return items[0]\n return None\n \n \n\nif __name__ == '__main__':\n import sys\n from PyQt5.QtWidgets import QApplication\n\n app = QApplication(sys.argv)\n imageViewer = QImageViewer()\n imageViewer.show()\n sys.exit(app.exec_())\n # TODO QScrollArea support mouse","repo_name":"guilhermealarcao/nemanet","sub_path":"tela.py","file_name":"tela.py","file_ext":"py","file_size_in_byte":9636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"20803574673","text":"from kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.image import Image\nfrom kivy.clock import Clock\nfrom kivy.graphics.texture import Texture\nfrom pyzbar.pyzbar import decode\nfrom filestorage import FileStorage\nfrom kivy.core.window import Window\nimport ast\nimport cv2\nimport numpy as np\n\nclass CamApp(App):\n data = []\n\n def build(self):\n self.img1=Image()\n layout = BoxLayout()\n layout.add_widget(self.img1)\n #opencv2 stuffs\n self.capture = cv2.VideoCapture(0)\n # cv2.namedWindow(\"CV2 Image\")\n Clock.schedule_interval(self.update, 1.0/33.0)\n return layout\n\n def update(self, dt):\n fs = FileStorage()\n fs.reload()\n # display image from cam in opencv window\n ret, frame = self.capture.read()\n # convert it to texture\n buf1 = cv2.flip(frame, 0)\n buf = buf1.tobytes()\n texture1 = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr') \n #if working on RASPBERRY PI, use colorfmt='rgba' here instead, but stick with \"bgr\" in blit_buffer. \n texture1.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')\n # display image from the texture\n self.img1.texture = texture1\n self.decoder(frame)\n code = cv2.waitKey(10)\n if self.data:\n verfi = self.data[0]['id']\n fs.incr(verfi)\n fs.save()\n #App.get_running_app().stop\n #Window.close()\n elif code == ord('q'):\n App.get_running_app().stop\n Window.close()\n\n\n def decoder(self, image):\n gray_img = cv2.cvtColor(image,0)\n barcode = decode(gray_img)\n \n for obj in barcode:\n points = obj.polygon\n (x,y,w,h) = obj.rect\n pts = np.array(points, np.int32)\n pts = pts.reshape((-1, 1, 2))\n cv2.polylines(image, [pts], True, (0, 255, 0), 3)\n \n barcodeData = obj.data.decode(\"utf-8\")\n barcodeType = obj.type\n string = \"Data \" + str(barcodeData) + \" | Type \" + str(barcodeType)\n self.data.append(ast.literal_eval(barcodeData))\n\n\nif __name__ == '__main__':\n CamApp().run()\n cv2.destroyAllWindows()","repo_name":"Dav0202/attendance_system","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"38904399065","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\"\"\"\n@Project :pytestProject \n@File :oracle_operate.py.py\n@Author :李永峰\n@Date :2021/11/3 9:39 \n@Version:1.0\n@Desc:oracle链接\n\"\"\"\nimport cx_Oracle\nfrom public.read_data import ReadFileData\n\n\nclass OracleDb:\n\n def __init__(self):\n # 通过字典拆包传递配置信息,建立数据库连接\n db_conf = ReadFileData().get_oracle_config()\n self.conn = cx_Oracle.connect(\n user=db_conf.get(\"user\"),\n password=db_conf.get(\"password\"),\n dsn=db_conf.get(\"dsn\"),\n encoding=\"UTF-8\"\n )\n # 通过 cursor() 创建游标对象\n self.cur = self.conn.cursor()\n\n def __del__(self): # 对象资源被释放时触发,在对象即将被删除时的最后操作\n # 关闭游标\n self.cur.close()\n # 关闭数据库连接\n self.conn.close()\n\n def execute_db(self, sql, data_type=\"str\", num: bool = False):\n \"\"\"\n 查询数据\n @param sql: 查询sql\n @param data_type: str<默认返回一个值>、list、dict\n @param num: True 返回多个值、False 返回一个值\n @return: list\n \"\"\"\n # 检查连接是否断开,如果断开就进行重连\n self.conn.ping()\n # 使用 execute() 执行sql\n self.cur.execute(sql)\n if \"select\" not in sql.lower():\n try:\n # 提交事务\n self.conn.commit()\n return\n except cx_Oracle.DatabaseError as error:\n # 回滚所有更改\n self.conn.rollback()\n msg = f\"操作Oracle出现错误 ==>> 错误原因:{error}\"\n raise cx_Oracle.DatabaseError(msg)\n # 使用 fetchall()/fetchone() 获取查询结果\n fetchall = self.cur.fetchone() if not num and data_type == \"str\" else self.cur.fetchall()\n if data_type == \"dict\":\n date = dict()\n # self.cur.description 列名\n cols = [d[0] for d in self.cur.description]\n for row in fetchall:\n date.update(dict(zip(cols, row)))\n elif data_type == \"list\":\n date = list()\n for row in fetchall:\n if isinstance(row, tuple):\n date.append(row[0])\n else:\n date = fetchall[0]\n return date\n","repo_name":"lixiaofeng1993/pytestProject","sub_path":"public/oracle_operate.py","file_name":"oracle_operate.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"zh","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"}
+{"seq_id":"5255705630","text":"# Write a method that takes a string as an argument, and returns an Array that contains every word from the string, to which you have appended a space and the word length.\n\n# You may assume that words in the string are separated by exactly one space, and that any substring of non-space characters is a word.\n\n\n'''\nInput: string\n\nOutput: an array of word followed by length of word\n\nRules\n\n\nData structure / Algorithms\n get the array of words from the string\n create an empty array of length words\n iterate through the words with index\n get the count of words in current word\n store the word and count to the result array\n return the result array\n\n\n\n'''\n\ndef word_lengths(string):\n words = string.split()\n result_array = [''] * len(words)\n\n for index, word in enumerate(words):\n count = len(word)\n result_array[index] = f\"{word} {count}\"\n\n return result_array\n\n\n\n\nprint(word_lengths(\"cow sheep chicken\") == [\"cow 3\", \"sheep 5\", \"chicken 7\"])\n\nprint(word_lengths(\"baseball hot dogs and apple pie\") ==\n [\"baseball 8\", \"hot 3\", \"dogs 4\", \"and 3\", \"apple 5\", \"pie 3\"])\n\nprint(word_lengths(\"It ain't easy, is it?\") == [\"It 2\", \"ain't 5\", \"easy, 5\", \"is 2\", \"it? 3\"])\n\nprint(word_lengths(\"Supercalifragilisticexpialidocious\") ==\n [\"Supercalifragilisticexpialidocious 34\"])\n\nprint(word_lengths(\"\") == [])","repo_name":"steven-liou/LaunchSchool_Ruby_Main_Course","sub_path":"RB101_Programming_Foundations/small_problems/Easy9/Python/how_long_are_you.py","file_name":"how_long_are_you.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"19241795730","text":"import random\r\n\r\n# Initialize the scores, pairings, and total points\r\nscores = {}\r\npairings = {}\r\ntotal_points = {}\r\n\r\n# Set the flag variable to True\r\ngame_on = True\r\n\r\n# Get the number of people\r\nnum_people = int(input(\"Enter the number of people who are battling: \"))\r\n\r\n# Get the names of the people\r\nnames = []\r\nfor i in range(num_people):\r\n name = input(f\"Enter the name of person {i+1}: \")\r\n names.append(name)\r\n\r\n# Main game loop\r\nwhile game_on:\r\n # Sort the scores from highest to lowest\r\n sorted_scores = sorted(total_points.items(), key=lambda x: x[1], reverse=True)\r\n\r\n # Print the current scores\r\n print(\"Current scores:\")\r\n for name, score in sorted_scores:\r\n print(f\"{name}: {score} points\")\r\n\r\n # Shuffle the names\r\n random.shuffle(names)\r\n\r\n # Pair the names\r\n for i in range(0, len(names), 2):\r\n name1 = names[i]\r\n name2 = names[i+1]\r\n print(f\"{name1} will fight {name2}\")\r\n pairings[name1] = name2\r\n pairings[name2] = name1\r\n\r\n # Get the number of wins for each person\r\n for name in names:\r\n wins = int(input(f\"Enter the number of wins for {name}: \"))\r\n scores[name] = wins\r\n\r\n # Assign points based on the number of wins\r\n for name in names:\r\n wins = scores[name]\r\n if wins == 2:\r\n scores[name] = 75\r\n elif wins == 1:\r\n scores[name] = 25\r\n\r\n # Update the total points for the person\r\n if name in total_points:\r\n total_points[name] += scores[name]\r\n else:\r\n total_points[name] = scores[name]\r\n\r\n # Ask if there will be another set of rounds\r\n another_round = input(\"Do you want to play another round (y/n)? \")\r\n if another_round == \"n\":\r\n game_on = False\r\n\r\n# Sort the total points from highest to lowest\r\nsorted_total_points = sorted(total_points.items(), key=lambda x: x[1], reverse=True)\r\n\r\n# Print the top eight scorers will be gym leaders\r\nprint(\"Your top eight scorers will be gym leaders:\")\r\nfor i in range(8):\r\n name, points = sorted_total_points[i]\r\n print(f\"{name}: {points} points\")\r\n\r\n# Print all the final scores from highest to lowest\r\nprint(\"Final scores:\")\r\nfor name, points in sorted_total_points:\r\n print(f\"{name}: {points} points\")\r\n","repo_name":"animetechie/Ujimatournament","sub_path":"ujimatournamenttracker.py","file_name":"ujimatournamenttracker.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"39347579254","text":"import random\r\nmark = 0\r\n\r\nwhile (True):\r\n\r\n print (\"mark : \" +str (mark))\r\n\r\n choice = input (\"1. stone \\n2. paper \\n3. Scissors \\nChoose your option : \")\r\n choice = int (choice)\r\n\r\n if (choice == 0) :\r\n break\r\n \r\n if (choice != 1) and (choice != 2) and (choice != 3):\r\n print (\"Your input is invalid\")\r\n\r\n choicesystem = random.randint(1,3)\r\n x = \"\"\r\n\r\n if choicesystem == 1 : x = \"stone\"\r\n elif choicesystem == 2 : x = \"paper\"\r\n elif choicesystem == 3 : x = \"Scissors\"\r\n print (\"System selection : \"+x)\r\n\r\n if (choicesystem == 1 and choice == 2) or (choicesystem == 2 and choice == 3) or (choicesystem == 3 and choice == 1):\r\n print (\"You got a positive score\")\r\n mark += 1\r\n\r\n elif (choicesystem == choice):\r\n print (\"Your choice is equal to the choice of system\")\r\n\r\n else :\r\n print (\"game over\")\r\n mark -= 1\r\n\r\n print (\"\\n\") ","repo_name":"MohammadaminB/Have-fun-with-python","sub_path":"sang-kaghaz.py","file_name":"sang-kaghaz.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"30026164434","text":"\"\"\" \n+ break: thoát hoàn toàn ra khỏi vòng lặp chứa nó\n+ continue: bỏ qua các câu lệnh bên dưới nó và chuyển sang mộ lần lặp mới\n\"\"\"\n\n\"\"\" for i in range(1,21):\n if i > 5:\n break # khi mà i > 5 nó sẽ break (thoát ra) khỏi vòng lặp n.\n print(i, end = ' ') \"\"\"\n\nfor i in range(1,21):\n if i % 2 == 0:\n continue # i sẽ in ra những số lẻ thay vì số chia hết cho 2 như ở chỗ đkiện if, phần số chia hết cho 2 được lưu vào continue và đc bỏ qua.\n print(i, end = ' ')","repo_name":"herobwl201/list_part2","sub_path":"day9_forwhile/break_continue.py","file_name":"break_continue.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"29688685023","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http.response import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework import status\nfrom manager.models import ManagerDetails\nfrom manager.serializers import ManagerDetailsSerializer\n\n\n# Create your views here.\n@csrf_exempt\n# This function is responsible to fetch all manager details, add manager details and delete all manager details to and from the database.\ndef manager_list(request):\n if request.method == 'GET':\n managers = ManagerDetails.objects.all()\n managers_serializer = ManagerDetailsSerializer(managers, many=True)\n return JsonResponse(managers_serializer.data, safe=False)\n\n elif request.method == 'POST':\n managers_data = JSONParser().parse(request)\n managers_serializer = ManagerDetailsSerializer(data=managers_data)\n if managers_serializer.is_valid():\n managers_serializer.save()\n return JsonResponse(managers_serializer.data, status=status.HTTP_201_CREATED)\n return JsonResponse(managers_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n ManagerDetails.objects.all().delete()\n return HttpResponse(status=status.HTTP_204_NO_CONTENT)\n\n\n@csrf_exempt\n# This function is responsible to fetch manager details, update manager details and delete manager details to and from the database based upon the id parameter.\ndef manager_detail(request, pk):\n try:\n manager = ManagerDetails.objects.get(pk=pk)\n except ManagerDetails.DoesNotExist:\n return HttpResponse(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n manager_serializer = ManagerDetailsSerializer(manager)\n return JsonResponse(manager_serializer.data)\n\n elif request.method == 'PUT':\n manager_data = JSONParser().parse(request)\n manager_serializer = ManagerDetailsSerializer(manager, data=manager_data)\n if manager_serializer.is_valid():\n manager_serializer.save()\n return JsonResponse(manager_serializer.data)\n return JsonResponse(manager_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n manager.delete()\n return HttpResponse(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"shobhitnandi/Project-Management-Tracker","sub_path":"ProjectManagementTrackerAPI/manager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"13868184395","text":"import glob\nimport json\nimport shutil\n\nfor file in glob.glob('*.txt'):\n\twith open(file) as f:\n\t\tinfo = json.load(f)\n\t\tnfields = sum(1 for string in info.values() if len(string)>0)\n\t\tif nfields < 3:\n\t\t\t# incomplete title, go above and correct for it\n\t\t\tprint(file)\n\t\t\twith open('../' + file) as g:\n\t\t\t\tflag = 0\n\t\t\t\tpage_ctr = 0\n\t\t\t\tfor line in g:\n\t\t\t\t\tif len(line.strip()) == 0:\n\t\t\t\t\t\t# skip empty line\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif 'ENDOFPAGE' in line:\n\t\t\t\t\t\tpage_ctr += 1\n\t\t\t\t\t\tif page_ctr < 2:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif flag == 0:\n\t\t\t\t\t\t# no need to extract info yet\n\t\t\t\t\t\tif \"SOC\" in line:\n\t\t\t\t\t\t\t# print('line is {}'.format(line))\t\n\t\t\t\t\t\t\tflag = 1\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telif \"Job title\" in line or \"Joblitle\" in line or \"Jobtitle\" in line:\n\t\t\t\t\t\t\tflag = 2\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\telif flag == 1 and info['soc_code']:\n\t\t\t\t\t\t# SOC code\n\t\t\t\t\t\tinfo['soc_code'] = line.strip().split(' ')[1]\n\t\t\t\t\t\tinfo['soc_code'] = info['soc_code'].replace('\\\\u2014', '-')\n\t\t\t\t\t\tinfo['soc_code'] = info['soc_code'].replace('oo', '00')\n\t\t\t\t\t\t# print('soc_code is {}'.format(info['soc_code']))\n\t\t\t\t\t\tflag = 0 # reset flag\n\t\t\t\t\telif flag == 2 and info['job_title'] :\n\t\t\t\t\t\t# Job title\n\t\t\t\t\t\tinfo['job_title'] = line.strip()\n\t\t\t\t\t\t# print('job title is {}'.format(info['job_title']))\n\t\t\t\t\t\tflag = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tpass\n\t\t\t# print(info)\n\t\t\twith open(file + '.json', 'w') as outfile:\n\t\t\t\tjson.dump(info, outfile, sort_keys=True,\n\t\t\t indent=4, separators=(',', ': '))\n\t\t\tshutil.move(file + '.json', file)","repo_name":"patrick-llgc/pdf2txt","sub_path":"check_field_completeness.py","file_name":"check_field_completeness.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"74479341102","text":"from rdflib import Graph, Namespace, Literal\nimport json\n\n\nclass RDFBuilder:\n def __init__(self) -> None:\n self.rdf = Graph() # RDF图\n self.ns = Namespace('https://www.114514.com/')\n self.entity_list = json.load(open('data/entity_result.json', 'r', encoding='utf-8')) # data的结构是一个list\n\n def clean_text(self, text: str):\n text = text.replace(' ', '').replace('\\u3000', '')\n symbol_set = {'。', ',', '?', '!', ';', ':', '、', '(', ')', '「', '」', '“', '”', '‘', '’', '《', '》', '【', '】',\n '…', '—', '~', ' ', '.', ',', '?', '!', ';', ':', '(', ')', '\"', '\"', '\\'', '\\'', '<', '>', '[',\n ']', '...', '~', '*', '―', '•', '・', '.', '/', '+', '-', '%'}\n for symbol in symbol_set:\n text = text.replace(symbol, '_')\n return text\n\n def build(self):\n for entity in self.entity_list:\n subject = self.ns[self.clean_text(entity['title'])]\n for key in entity.keys():\n if key != 'title':\n relation = self.ns[key]\n obj = ' '.join(entity[key])\n self.rdf.add((subject, relation, Literal(obj)))\n self.rdf.serialize('data/rdf_data.rdf', format='n3')\n\n\nif __name__ == '__main__':\n builder = RDFBuilder()\n builder.build()\n","repo_name":"Mr-xiu/BUPT-KG-lab3","sub_path":"rdf_builder.py","file_name":"rdf_builder.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"10127871370","text":"from sklearn import preprocessing\nfrom sklearn.naive_bayes import GaussianNB\n\n# STEP 1 - Get data, extracted on November 13th\n\n# Quarantine days extracted from https://es.wikipedia.org/wiki/Cuarentena_por_la_pandemia_de_COVID-19#cite_note-8\nquarentine_days = [\n\t140, 14, 36,\n\t22, 205, 21,\n\t32, 28, 30,\n\t31,\t74,\t86,\n\t160, 118, 96,\n\t98, 54, 33,\n\t128, 70, 106,\n\t130, 32, 35\n]\n\n# Travel restrictions extracted from https://www.kayak.com.gt/restricciones-de-viaje\ntravel_restriction = [\n\t\"without restrictions\", \"parcial closure\",\n\t\"parcial closure\",\t\"total closure\",\n\t\"total closure\", \"without restrictions\", \n\t\"parcial closure\", \"parcial closure\", \n\t\"parcial closure\", \"parcial closure\", \n\t\"without restrictions\", \"parcial closure\", \n\t\"without restrictions\", \"parcial closure\", \n\t\"without restrictions\", \"parcial closure\", \n\t\"parcial closure\", \"without restrictions\", \n\t\"prompt reopening\", \"parcial closure\", \n\t\"parcial closure\", \"without restrictions\", \n\t\"parcial closure\", \"parcial closure\"\n]\n\n# Health investment extracted from https://datosmacro.expansion.com/estado/gasto/salud\nhealth_investment = [\n\t14.68, 19.88, 10.06, \n\t10.73, 16.05, 5.27, \n\t17.8, 15.31, 2.99, \n\t15.31, 10.26, 9.07, \n\t17.49, 15.87, 19.24, \n\t15.28, 15.47, 11.71, \n\t3.38, 13.42, 14.9, \n\t18.74, 8.78, 13.34 \n]\n\n# Population density extracted from https://datosmacro.expansion.com/demografia/poblacion\npopulation_density = [\n\t99, 233, 16, \n\t18, 16, 100, \n\t3, 106, 1093,\n\t378, 25, 146, \n\t44, 103, 316, \n\t94, 122, 85, \n\t411, 200, 25, \n\t275, 9, 48\n]\n\n# Rise in COVID 19 cases extracted from https://news.google.com/covid19/map?hl=es-419&gl=US&ceid=US%3Aes-419\nrise_in_cases = [\n\t1, 0, 0, \n\t1, 0, 1, \n\t0, 1, 1, \n\t0, 1, 0, \n\t1, 1, 0, \n\t1, 1, 0, \n\t0, 1, 0, \n\t1, 1, 1\n]\n\n\nle = preprocessing.LabelEncoder()\n\n# STEP 2 - Transform tuples\nquarentine_days_encoded = le.fit_transform(quarentine_days)\ntravel_restriction_encoded = le.fit_transform(travel_restriction)\nhealth_investment_encoded = le.fit_transform(health_investment)\npopulation_density_encoded = le.fit_transform(population_density)\nlabel = le.fit_transform(rise_in_cases)\n\n# STEP 3 - Combine attributes into single list of tuples\nfeatures = list(zip(quarentine_days_encoded, travel_restriction_encoded, health_investment_encoded, population_density_encoded))\nprint(features)\n\n# STEP 4 - Define and train model\nmodel = GaussianNB()\nmodel.fit(features, label)\n\n# STEP 5 - Data prediction for Guatemala\npredicted = model.predict([[\n\t103, 1, 17.21, 159\n]])\nprint(\"Predicted Value: \", predicted)\n","repo_name":"luisespino/Coronavirus_2020_11","sub_path":"src/201504394.py","file_name":"201504394.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"22807893757","text":"import numpy as np\r\n\r\n#upper board\r\n# Category 1: 1*(number of 1s rolled)\r\n# Category 2: 2*(number of 2s rolled)\r\n# Category 3: 2*(number of 3s rolled)\r\n# Category 4: 2*(number of 4s rolled)\r\n# Category 5: 2*(number of 5s rolled)\r\n# Category 6: 2*(number of 6s rolled)\r\n\r\n#goal 1: player is allowed 1 roll for each turn, this simulates 6 turns and calculates optimal score by checking all categories\r\n#this program aims to optimize the scores by allocating different dice rolls to different categories.\r\n#Ex: if you roll 5 6's, its better to count as category 6 (score 36) rather than category 1 (score 0)\r\n\r\ndef simulateRoll(iter):\r\n all_rolls = []\r\n for turns in range(iter):\r\n roll_outcome = np.random.randint(1,7, size = 5)\r\n all_rolls.append(roll_outcome.tolist())\r\n return all_rolls\r\n\r\ndef countScore(player_rolls):\r\n score_arr = []\r\n #roll_category_scores is keeping track of what the score would be if you marked the roll as ith category i from 1-6\r\n for roll_num in range(0,6):\r\n roll_category_scores = np.zeros(6).astype(int)\r\n for category_num in range(1, 7):\r\n count = np.count_nonzero(np.array(player_rolls[roll_num])==category_num)\r\n this_score = count*category_num\r\n roll_category_scores[category_num-1] = this_score\r\n score_arr.append(roll_category_scores.tolist())\r\n return score_arr\r\n\r\n#helper function counts minimum number of lines needed to cross out all zeroes\r\ndef crossLines(arr):\r\n # first create second array with num zeroes row-wise (positive) and col-wise (negative) for each element\r\n arr_t = np.transpose(arr)\r\n min_lines_arr = np.zeros((6, 6), dtype=int)\r\n for row in range(0, 6):\r\n row_zeroes = 6 - np.count_nonzero(arr[row])\r\n for col in range(0, 6):\r\n col_zeroes = 6 - np.count_nonzero(arr_t[col])\r\n if (col_zeroes > row_zeroes):\r\n min_lines_arr[row][col] = col_zeroes * -1\r\n else:\r\n min_lines_arr[row][col] = row_zeroes\r\n\r\n # loop through min_lines_arr and if the corresponding element is zero in original arr and unmarked (positive)\r\n # where marked is negative then look at sign of min_lines_arr and put line either h or v through row/col\r\n line_count = 0\r\n for row in range(0, 6):\r\n for col in range(0, 6):\r\n if (arr[row][col] == 0):\r\n line_count += 1\r\n if (min_lines_arr[row][col] > 0):\r\n # mark all horizontal\r\n arr[row] = np.repeat(500, 6) + arr[row]\r\n else:\r\n # mark all vertical\r\n arr = np.transpose(arr)\r\n arr[col] = np.repeat(500, 6) + arr[col]\r\n arr = np.transpose(arr)\r\n\r\n return line_count\r\n\r\n#helper function to choose optimal solution based on reduced matrix\r\ndef chooseOptimalSolution(arr):\r\n solution_arr = [-1, -1, -1, -1, -1, -1]\r\n unassigned_rows = [0,1,2,3,4,5]\r\n\r\n exist_single_zero = True\r\n while(exist_single_zero == True):\r\n num_zeroes_counter = 0\r\n for r in unassigned_rows:\r\n num_zeroes = 6 - np.count_nonzero(arr[r])\r\n if(num_zeroes == 1):\r\n num_zeroes_counter += 1\r\n unassigned_rows.remove(r)\r\n for c in range(0,6):\r\n if (arr[r][c] == 0):\r\n solution_arr[r] = c\r\n arr = np.transpose(arr)\r\n arr[c] = np.repeat(500, 6) + arr[c]\r\n arr = np.transpose(arr)\r\n if(num_zeroes_counter == 0):\r\n exist_single_zero = False\r\n\r\n # arbitrarily sets ties to first zero in row\r\n exist_multiple_zeroes = True\r\n while(exist_multiple_zeroes):\r\n num_zeroes_counter = 0\r\n for r in unassigned_rows:\r\n num_zeroes = 6 - np.count_nonzero(arr[r])\r\n if num_zeroes > 0:\r\n num_zeroes_counter +=1\r\n 
unassigned_rows.remove(r)\r\n for c in range(0,6):\r\n if(arr[r][c] == 0):\r\n solution_arr[r] = c\r\n arr = np.transpose(arr)\r\n arr[c] = np.repeat(500, 6) + arr[c]\r\n arr = np.transpose(arr)\r\n break\r\n if(num_zeroes_counter == 0):\r\n exist_multiple_zeroes = False\r\n\r\n #sets rows with no assignment\r\n for i in range(0,6):\r\n if(solution_arr[i] == -1):\r\n solution_arr[i] = unassigned_rows[0]\r\n unassigned_rows.pop()\r\n\r\n return solution_arr\r\n\r\n\r\n#uses hungarian method to assignment problem but for maximization\r\ndef optimizeScore(score_arr):\r\n #turning into maximization instead of minimization problem\r\n arr = np.asarray(score_arr).reshape(6,6)\r\n max = np.repeat(np.amax(score_arr), 36).reshape(6,6)\r\n arr = np.subtract(max,arr)\r\n\r\n #subtracting min element from each row\r\n for i in range(0,6):\r\n min_row = np.repeat(np.amin(arr[i]), 6)\r\n arr[i] = arr[i] - min_row\r\n\r\n #subtracting min element from each col\r\n arr = np.transpose(arr)\r\n for j in range(0,6):\r\n min_col = np.repeat(np.amin(arr[j]),6)\r\n arr[j] = arr[j] - min_col\r\n arr = np.transpose(arr)\r\n\r\n #using minimum number of lines to cross out zeroes\r\n line_count = crossLines(arr)\r\n\r\n # check if line_count = dimension of array\r\n while(line_count < 6):\r\n #find minimum value remaining\r\n min_remaining = 500\r\n for r in range(0,6):\r\n for c in range(0,6):\r\n if(arr[r][c] < 500 and min_remaining > arr[r][c]):\r\n min_remaining = arr[r][c]\r\n #subtracting min from remaining elements and adding to elements with 2 lines\r\n for r in range(0,6):\r\n for c in range(0,6):\r\n if(arr[r][c] < 500):\r\n arr[r][c] = arr[r][c]-min_remaining\r\n if(arr[r][c]>=1000):\r\n arr[r][c] = arr[r][c] + min_remaining\r\n\r\n #recover original array\r\n arr = np.mod(arr, np.repeat(500, 36).reshape(6,6))\r\n line_count = crossLines(arr)\r\n\r\n arr = np.mod(arr, np.repeat(500, 36).reshape(6, 6))\r\n return chooseOptimalSolution(arr)\r\n\r\ndef sum_score(score_arr, optimal_solution):\r\n sum = 0\r\n for i in range(0,6):\r\n sum += score_arr[i][optimal_solution[i]]\r\n return sum\r\n\r\ndef play():\r\n player_rolls = simulateRoll(6)\r\n score_arr = countScore(player_rolls)\r\n optimal_solution = optimizeScore(score_arr)\r\n print(sum_score(score_arr, optimal_solution))\r\n\r\n\r\n\r\nplay()\r\n","repo_name":"Aloha-Churchill/hungarian-algorithm-yahtzee","sub_path":"assignmentproblem.py","file_name":"assignmentproblem.py","file_ext":"py","file_size_in_byte":6651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"29075566774","text":"#!/usr/bin/env python\n\nfrom flask import Flask # import flask\napp = Flask(__name__) # create an app instance\n\nBATTERY_CHARGE = 100\nMIXTURE_LEVEL = 100\nBASE_POSITION = [\"x1\", \"y1\", \"z1\"]\n\n@app.route(\"/\") # at the end point /\ndef hello(): # call method hello\n return f\"Hello World! Base coordinates: {BASE_POSITION[0], BASE_POSITION[1], BASE_POSITION[2]}\" # which returns \"hello world\" and version number\nif __name__ == \"__main__\": # on running python app.py\n app.run(host=\"0.0.0.0\")","repo_name":"TheTremblingDoe/drone-sprayer","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"72388669744","text":"from __future__ import absolute_import\n\nimport os\nimport re\nimport numpy as np\n\n\ndef load_task(data_dir, task_id, only_supporting=False):\n '''Load the nth task. There are 20 tasks in total.\n\n Returns a tuple containing the training and testing data for the task.\n '''\n assert task_id > 0 and task_id < 21\n\n files = os.listdir(data_dir)\n files = [os.path.join(data_dir, f) for f in files]\n s = 'dialog-babi-task{}-'.format(task_id)\n train_file = [f for f in files if s in f and 'te-trn' in f][0]\n test_file = [f for f in files if s in f and 'te-tst' in f][0]\n train_data = get_stories(train_file, only_supporting)\n test_data = get_stories(test_file, only_supporting)\n# print(train_data)\n return train_data, test_data\n\n'''\ndef all_candidate_responses(data_dir, word_idx):\n files = os.listdir(data_dir)\n files = [os.path.join(data_dir, f) for f in files]\n q = 'dialog-babi-candidates'\n all_candidate_responses_file = [f for f in files if q in f][0]\n question_list = list()\n QC = list()\n with open(all_candidate_responses_file) as f:\n for line in f.readlines():\n if 'api_call' in line:\n line = line.strip()\n line = str.lower(line)\n nid, line = line.split(' ', 1)\n q = tokenize(line)\n question_list.append(q)\n\n for i in range(len(q)):\n if q[i] not in word_idx and q[i] is not 'api_call':\n print(q[i],\"heyyyyyyyyyyyyyyyy\")\n word_idx[q[i]] = len(word_idx)\n print(len(word_idx))\n for i in range(len(question_list)):\n y = np.zeros(len(word_idx) + 1)\n for j in range(len(question_list[i])):\n if question_list[i][j] is not 'api_call':\n y[word_idx[question_list[i][j]]] = 1\n QC.append(y)\n# for num, data in enumerate(question_list):\n# print(\"\\n\",num, data)\n return word_idx, np.array(QC), QC\n'''\n\n\ndef tokenize(sent):\n '''Return the tokens of a sentence including punctuation.\n >>> tokenize('Bob dropped the apple. 
Where is the apple?')\n ['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']\n '''\n return [x.strip() for x in re.split('(\\W+)?', sent) if x.strip()]\n\ndef get_places():\n tokens_filename_f = open('/home/danda/research/memnet/tourist_data/train_creation_files/unique_places.txt', 'r')\n places_list = list()\n for line in tokens_filename_f.readlines():\n line = line.strip()\n places_list.append(line.lower())\n places_labels_list = []\n for num, data in enumerate(places_list):\n y = np.zeros(len(places_list))\n y[num] = 1\n places_labels_list.append(y)\n places_idx = dict((c, i) for i, c in enumerate(places_list))\n return places_idx, places_labels_list\n\ndef parse_stories(lines, only_supporting=False):\n '''Parse stories provided in the bAbI tasks format\n If only_supporting is true, only the sentences that support the answer are kept.\n '''\n data = []\n story = []\n '''\n cuisines = ['cantonese', 'korean', 'japanese', 'thai',\n 'vietnamese', 'french', 'spanish', 'british', 'indian', 'italian']\n places = ['paris', 'seoul', 'tokyo', 'beijing', 'bangkok',\n 'hanoi', 'rome', 'london', 'bombay', 'madrid','api_call']\n '''\n for line in lines:\n if line == '\\n':\n continue\n line = line.strip()\n line = str.lower(line)\n nid, line = line.split(' ', 1)\n nid = int(nid)\n if nid == 1:\n story = []\n if 'api_call' in line: # question\n q, a = line.split('\\t')\n q = tokenize(q)\n a = tokenize(a)\n list_a = list()\n for word in a:\n if 'api_call' != word:\n list_a.append(word)\n a = list_a\n# print(a)\n\n # answer is one vocab word even if it's actually multiple words\n # a = [a]\n substory = None\n\n # remove question marks\n if q[-1] == \"api_call\":\n q = q[:-1]\n\n if only_supporting:\n # Only select the related substory\n supporting = map(int, supporting.split())\n substory = [story[i - 1] for i in supporting]\n else:\n # Provide all the substories\n substory = [x for x in story if x]\n data.append((substory, q, a))\n story.append('')\n else:\n # regular sentence\n # remove periods\n sent_list = line.split(\"\\t\")\n sent_user = tokenize(sent_list[0])\n sent_system = tokenize(sent_list[1])\n if sent_user[-1] == \".\":\n sent_user = sent_user[:-1]\n if sent_system[-1] == \".\":\n sent_system = sent_system[:-1]\n story.append(sent_user)\n story.append(sent_system)\n '''\n sent_user = tokenize(line)\n if sent_user[-1] == \".\":\n sent_user = sent_user[:-1]\n story.append(sent_user)\n print(story)\n '''\n return data\n\n\ndef get_stories(f, only_supporting=False):\n '''Given a file name, read the file, retrieve the stories, and then convert the sentences into a single story.\n If max_length is supplied, any stories longer than max_length tokens will be discarded.\n '''\n with open(f) as f:\n return parse_stories(f.readlines(), only_supporting=only_supporting)\n\n\ndef vectorize_data(data, word_idx, sentence_size, memory_size):\n \"\"\"\n Vectorize stories and queries.\n\n If a sentence length < sentence_size, the sentence will be padded with 0's.\n\n If a story length < memory_size, the story will be padded with empty memories.\n Empty memories are 1-D arrays of length sentence_size filled with 0's.\n\n The answer array is returned as a one-hot encoding.\n \"\"\"\n S = []\n Q = []\n A1 = []\n A2 = []\n# A3 = []\n# A4 = []\n\n places_idx, labels_places_list = get_places()\n print(places_idx)\n for story, query, answer in data:\n ss = []\n k = 0\n for i, sentence in enumerate(story, 1):\n ls = max(0, sentence_size - len(sentence))\n ss.append([word_idx[w] for w in 
sentence] + [0] * ls)\n\n # take only the most recent sentences that fit in memory\n ss = ss[::-1][:memory_size][::-1]\n\n # Make the last word of each sentence the time 'word' which\n # corresponds to vector of lookup table\n for i in range(len(ss)):\n ss[i][-1] = len(word_idx) - memory_size - i + len(ss)\n\n for i in range(len(ss)):\n if (i % 2) == 0:\n ss[i][-2] = 0\n else:\n ss[i][-2] = 1\n\n # pad to memory_size\n lm = max(0, memory_size - len(ss))\n for _ in range(lm):\n ss.append([0] * sentence_size)\n\n lq = max(0, sentence_size - len(query))\n q = [word_idx[w] for w in query] + [0] * lq\n\n y1 = np.zeros(len(places_idx))\n y2 = np.zeros(len(places_idx))\n # y1 = np.zeros(len(word_idx) + 1) # 0 is reserved for nil word\n # y2 = np.zeros(len(word_idx) + 1)\n# y3 = np.zeros(len(word_idx) + 1)\n# y4 = np.zeros(len(word_idx) + 1)\n# print(\"\\n\",answer)\n\n y1[places_idx[answer[0]]] = 1\n# y2[places_idx[answer[1]]] = 1\n# y3[word_idx[answer[2]]] = 1\n# y4[word_idx[answer[3]]] = 1\n\n S.append(ss)\n Q.append(q)\n A1.append(y1)\n# A2.append(y2)\n# A3.append(y3)\n# A4.append(y4)\n\n return np.array(S), np.array(Q), np.array(A1)\n # np.array(A2)\n #, np.array(A3), np.array(A4)\n","repo_name":"dandaprathyusha/NLP_ML_experiments","sub_path":"dialog_data_intent_extraction/intent_using_memnn/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"13621621686","text":"import unittest\nimport krpctest\n\n\nclass TestPartsResourceConverter(krpctest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.new_save()\n if cls.connect().space_center.active_vessel.name != 'PartsHarvester':\n cls.launch_vessel_from_vab('PartsHarvester')\n cls.remove_other_vessels()\n space_center = cls.connect().space_center\n cls.converter_state = space_center.ResourceConverterState\n cls.harvester_state = space_center.ResourceHarvesterState\n parts = space_center.active_vessel.parts\n cls.drill = parts.with_title(\n '\\'Drill-O-Matic\\' Mining Excavator')[0].resource_harvester\n cls.converter = parts.with_title(\n 'Convert-O-Tron 250')[0].resource_converter\n cls.infos = [\n {\n 'name': 'Lf+Ox',\n 'inputs': ['Ore', 'ElectricCharge'],\n 'outputs': ['LiquidFuel', 'Oxidizer']\n },\n {\n 'name': 'Monoprop',\n 'inputs': ['Ore', 'ElectricCharge'],\n 'outputs': ['MonoPropellant']\n },\n {\n 'name': 'LiquidFuel',\n 'inputs': ['Ore', 'ElectricCharge'],\n 'outputs': ['LiquidFuel']\n },\n {\n 'name': 'Oxidizer',\n 'inputs': ['Ore', 'ElectricCharge'],\n 'outputs': ['Oxidizer']\n }\n ]\n\n def test_properties(self):\n self.assertAlmostEqual(\n 0.66, self.converter.thermal_efficiency, delta=0.01)\n self.assertGreater(self.converter.core_temperature, 0)\n self.assertEqual(1000, self.converter.optimum_core_temperature)\n self.assertEqual(len(self.infos), self.converter.count)\n for i, info in enumerate(self.infos):\n self.assertFalse(self.converter.active(i))\n self.assertEqual(info['name'], self.converter.name(i))\n self.assertEqual(\n self.converter_state.idle, self.converter.state(i))\n self.assertEqual('Inactive', self.converter.status_info(i))\n self.assertEqual(info['inputs'], self.converter.inputs(i))\n self.assertEqual(info['outputs'], self.converter.outputs(i))\n\n def test_operate(self):\n self.drill.deployed = True\n while not self.drill.deployed:\n self.wait()\n self.drill.active = True\n while not self.drill.active:\n self.wait()\n index = 1\n self.assertFalse(self.converter.active(index))\n self.assertEqual(\n self.converter_state.idle, self.converter.state(index))\n self.assertEqual('Inactive', self.converter.status_info(index))\n self.converter.start(index)\n while self.converter.state(index) != self.converter_state.running:\n self.wait()\n self.assertTrue(self.converter.active(index))\n self.assertEqual(\n self.converter_state.running, self.converter.state(index))\n self.assertGreater(self.converter.core_temperature, 0)\n self.assertEqual(1000, self.converter.optimum_core_temperature)\n self.converter.stop(index)\n while self.converter.state(index) != self.converter_state.idle:\n self.wait()\n self.assertFalse(self.converter.active(index))\n self.assertEqual(\n self.converter_state.idle, self.converter.state(index))\n self.assertEqual('Inactive', self.converter.status_info(index))\n self.drill.deployed = False\n while self.drill.state != self.harvester_state.retracted:\n self.wait()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"krpc/krpc","sub_path":"service/SpaceCenter/test/test_parts_resource_converter.py","file_name":"test_parts_resource_converter.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":591,"dataset":"github-code","pt":"91"}
+{"seq_id":"1466434437","text":"from src.util import *\nfrom collections import Counter\n\nsections = Parser.from_file(INPUT).to_sections()\n\nstring = sections[0].strip()\norig_string = string\n# print(string)\nrewrites = {key: value for [key, value] in Parser.from_string(sections[1]).to_regex_match(f\"{WORD} -> {WORD}\")}\n# print(rewrites)\npair_rewrites = {key: (key[0] + value, value + key[1]) for [key, value] in Parser.from_string(sections[1]).to_regex_match(f\"{WORD} -> {WORD}\")}\n# print(pair_rewrites)\n\npairs = {}\nfor key in rewrites.keys():\n pairs[key] = 0\nfor i in range(len(string) - 1):\n pairs[string[i:i+2]] += 1\n# print(pairs)\nfor step in range(40):\n new_pairs = {}\n for key in rewrites.keys():\n new_pairs[key] = 0\n for pair, amount in pairs.items():\n new_ps = pair_rewrites[pair]\n for new_p in new_ps:\n new_pairs[new_p] += amount\n pairs = new_pairs\n print(step + 1, pairs)\n\n# print(pairs)\ncounter = {}\nl = list(pairs.items())\nfor pair, amount in l:\n if pair[0] not in counter:\n counter[pair[0]] = 0\n counter[pair[0]] += amount\n\ncounter[orig_string[-1]] += 1\n\n# if l[-1][0][1] not in counter:\n# counter[l[-1][0][1]] = 0\n# counter[l[-1][0][1]] += l[-1][1]\n\n\nprint(counter)\n\nmax_val = max(counter.values())\nprint(max_val)\nmin_val = min(counter.values())\nprint(min_val)\nprint(max_val - min_val)\n\n# Template: NNCB\n# After step 1: NCNBCHB\n# After step 2: NBCCNBBBCBHCB\n# After step 3: NBBBCNCCNBBNBNBBCHBHHBCHB\n# After step 4: NBBNBNBBCCNBCNCCNBBNBBNBBBNBBNBBCBHCBHHNHCBBCBHCB\n","repo_name":"ewjmulder/advent_of_code","sub_path":"src/2021/dec14/puzzle2.py","file_name":"puzzle2.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"4991528796","text":"#! python3\r\n# Project Euler, problem #12\r\n# What is the value of the first triangle number to have over five hundred divisors?\r\n\r\n# how the program should work:\r\n# 1. Iterate through a list of triangular numbers (n * (n + 1)) / 2\r\n# 2. Find all the prime numbers below the trNumber / 2 (trNumber - triangular number)\r\n# 3. Factorize the trNumber starting from the largest prime found\r\n# 4. Calculate the number of divisors\r\nimport logging\r\nlogging.basicConfig(level='DEBUG')\r\nlogging.disable()\r\ndivisorsDatabase = []\r\nnumOfDivisors = 0\r\nn = 2000 # n in (n * (n + 1)) / 2 - calculates next trNumber\r\n# for n in range(1, 1001):\r\nwhile numOfDivisors <= 500: # uncomment when an appropriate algorithm is found\r\n trNumber = int((n * (n + 1)) / 2) # calculates a prime number\r\n answer = trNumber # saving the number to display later\r\n # 2. Find all the prime numbers below the trNumber / 2\r\n potentialPrimes = list(range(2, int(trNumber / 2)))\r\n primes = [] # list of primes below trNumber / 2\r\n for i in range(len(potentialPrimes) - 1):\r\n if str(potentialPrimes[i]) == '0':\r\n continue\r\n primes.append(potentialPrimes[i])\r\n potentialPrimes[i + potentialPrimes[i]::potentialPrimes[i]] = '0' * len(potentialPrimes[i + potentialPrimes[i]::potentialPrimes[i]])\r\n # 3. Factorize the trNumber starting from the largest prime found\r\n factors = []\r\n for prime in primes:\r\n while trNumber % prime == 0:\r\n factors.append(prime)\r\n trNumber = trNumber / prime\r\n if trNumber == 1:\r\n break\r\n # 4. Calculate the number of divisors\r\n # The number of divisors is calculated as described here:\r\n # https://math.stackexchange.com/questions/433848/prime-factors-number-of-divisors\r\n # 4.1 First count occurrences of each factor in a list\r\n counts = []\r\n for factor in factors:\r\n if factor == 0:\r\n continue\r\n counts.append(factors.count(factor))\r\n factors = [x for x in factors if x != factor]\r\n counts = [x for x in counts if x != 0]\r\n # 4.2 Calculate the number of divisors\r\n numOfDivisors = 1\r\n for count in counts:\r\n numOfDivisors = numOfDivisors * (count + 1)\r\n\r\n divisorsDatabase.append(numOfDivisors)\r\n logging.debug(divisorsDatabase)\r\n\r\n n += 1\r\n\r\n\r\noutput = open('output.txt', 'w')\r\noutput.write('[')\r\nfor i in divisorsDatabase:\r\n output.write(str(i) + ', ')\r\noutput.write(']')\r\noutput.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"vnkvstnk/project_euler","sub_path":"012_highlyDivisibleTriangularNumber.py","file_name":"012_highlyDivisibleTriangularNumber.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"13842648228","text":"import re\nfrom collections import defaultdict\nfrom io import StringIO\n\nfrom lxml.html import fromstring\n\nfrom pupa.scrape import BaseBillScraper\nfrom pupa.utils import convert_pdf\nfrom pupa.scrape import Bill\n\nfrom .utils import Urls\n\n\nagenda_list = (\"http://cityclerk.cityofboise.org/city-council-meetings/\"\n \"council-agendas/2012-agendas/\")\n\n\nclass BillScraper(BaseBillScraper):\n\n def get_agenda_urls(self):\n xpath = '//a/@href'\n urls = self.urls.agenda_list.doc.xpath(xpath)\n for url in filter(re.compile('\\d+ca.pdf$', re.I).search, urls):\n yield url\n for url in filter(re.compile('\\d+sm.pdf$', re.I).search, urls):\n yield url\n\n def get_bill_ids(self):\n self.urls = Urls(dict(agenda_list=agenda_list), scraper=self)\n for agenda_url in self.get_agenda_urls():\n self.urls.add(agenda=agenda_url)\n doc = self.urls.agenda.pdf_to_lxml\n\n titles = defaultdict(StringIO)\n for url in doc.xpath('//a'):\n if 'href' not in url.attrib:\n continue\n href = url.attrib['href']\n if re.search(r'[ro]\\-\\d+\\-\\d+\\.pdf$', href):\n titles[href].write(url.text_content())\n\n for item in titles.items():\n try:\n yield from self.parse_title(item)\n except Exception:\n # Because PDF scraping is terrible.\n continue\n\n def parse_title(self, item):\n url, title = item\n chunks = title.getvalue().split('\\xa0')\n chunks = list(filter(None, chunks))\n\n # Fix problem of agenda item number connected to bill_id.\n if len(chunks[0]) > 3:\n bill_id = chunks.pop(0)\n if '.' in bill_id:\n print(bill_id)\n agenda_item, bill_id = bill_id.split('.')\n else:\n agenda_item = None\n bill_id = chunks.pop(0)\n else:\n agenda_item = chunks.pop(0)\n bill_id = chunks.pop(0)\n\n # Fix issue of different spacing betwixt bill_id and title.\n title = ' '.join(chunks)\n if len(bill_id) > 20:\n bill_id, title_start = bill_id.split(' ', 1)\n title = title_start + ' ' + title\n\n if not bill_id:\n return\n\n yield bill_id, dict(agenda_item=agenda_item, title=title, url=url)\n\n def get_type(self, bill_id):\n first = bill_id[0]\n try:\n return dict(R='resolution', O='ordinance')[first]\n except KeyError:\n raise self.ContinueScraping()\n\n def get_bill(self, bill_id, **kwargs):\n url = kwargs.pop('url')\n agenda_item = kwargs.pop('agenda_item')\n _type = self.get_type(bill_id)\n bill = Bill(bill_id, self.session, type=_type, **kwargs)\n bill.add_source(url, note='detail')\n return bill\n","repo_name":"datamade/scrapers-us-municipal","sub_path":"archive/boise/bills.py","file_name":"bills.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"91"}
+{"seq_id":"27603434843","text":"\"\"\"Adicionando email ao Usuario\n\nRevision ID: e973e1684c97\nRevises: 343246ee01cd\nCreate Date: 2023-06-03 13:18:44.049821\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e973e1684c97'\ndown_revision = '343246ee01cd'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('apps_v1_usuario', sa.Column('email', sa.String(length=256), nullable=False))\n op.create_unique_constraint(None, 'apps_v1_usuario', ['email'])\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'apps_v1_usuario', type_='unique')\n op.drop_column('apps_v1_usuario', 'email')\n # ### end Alembic commands ###\n","repo_name":"quesmues/simple-crud-backend","sub_path":"migrations/versions/e973e1684c97_adicionando_email_ao_usuario.py","file_name":"e973e1684c97_adicionando_email_ao_usuario.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"32646533703","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 18 15:37:28 2019\nLoad data via serial to micro-controller board. \n\n@author: arozenevallesp\n\"\"\"\n\nimport numpy\nimport serial\n# Manually import tools from serial to avoid errors\nimport serial.tools.list_ports \n#for exiting the script in case of exceptions\nimport sys \n\n#%% Find port of the Arduino\n\n#list ports\nlstports = list( serial.tools.list_ports.comports() )\n#create an empty port number\nport_nr = ''\n\n#find the port with arduino on its name\nfor n in lstports:\n if n.description.startswith( \"Arduino\" ):\n # port name of the arduino prort\n port_nr = n.device\n\n#if no ports are found, exit the program\nif port_nr == '':\n sys.exit(\"device not connected or not recognized\")\n# alternative way of declaring\n# pp = [lstports[i].device for i in range(len(lstports)) if lstports[i].description.startswith(\"Arduino\")]\n\n#%% Create serial object\ntry:\n serialPort = serial.Serial(port_nr, 9600, timeout=0, rtscts=True)\nexcept serial.serialutil.SerialException:\n serialPort.close()\n serialPort.open()\n\n#%% Generate random data to send to serial. Adapt it to protocol\n\n# seed \nnumpy.random.seed(1)\n# Array with random numbers\nvalues = numpy.random.rand(100)\nvalues = numpy.around(values, decimals=2)\nvalues = values.astype('|S5')\nvalues = [x+b'\\n' for x in values]\n\n#%% Send data to serial port\n\ntry:\n serialPort.close() \nexcept serial.serialutil.SerialException:\n serialPort.open()\n serialPort.close()\n\nfor x in range(len(values)): \n serialPort.open()\n serialPort.write(values[x])\n serialPort.close()","repo_name":"alerozene/WienerFilters_AudioWifi","sub_path":"python/emitter_serial.py","file_name":"emitter_serial.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7938768194","text":"## Again we import the necessary socket python module\nimport socket\nUDP_IP_ADDRESS = \"127.0.0.1\"\nUDP_PORT_NO = 6789\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n\nwhile True:\n msg = input(\"Enter your value: \")\n s.sendto(str.encode(msg), (UDP_IP_ADDRESS, UDP_PORT_NO))\n if msg == \"exit\":\n break\n data, address = s.recvfrom(4096)\n print(\"Client received : \", data.decode('utf-8'))\ns.close()\n","repo_name":"Eduardoserpa/distribuidos-22.2","sub_path":"HW1/Calculadora-Yago/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"42400912170","text":"import sys\n\nfrom Fundamentals.Accumulator import Accumulator, VisualAccumulator\nfrom Fundamentals.Bag import Bag\nfrom Fundamentals.UF import UF\nfrom stdlib import stdrandom\nfrom stdlib.StdIn import InStream\n\n\ndef test_bag():\n for line in sys.stdin:\n bag = Bag()\n for item in line.split():\n bag.add(item) # add\n print(\"size of bag: {}\".format(bag.size())) # size\n print(\"items:\")\n for item in bag: # iterator\n print(item)\n import ipdb\n\n ipdb.set_trace()\n print(\"bag:\", bag) # str\n\n\ndef test_accumulator():\n n = int(sys.argv[1])\n accumulator = Accumulator()\n for i in range(n):\n num = stdrandom.uniformFloat(0, 1)\n accumulator.addDataValue(num)\n print(accumulator)\n\n\ndef test_visual_accumulator():\n n = int(sys.argv[1])\n accumulator = VisualAccumulator(n, 1.0)\n for i in range(n):\n num = stdrandom.uniformFloat(0, 1)\n accumulator.addDataValue(num)\n print(accumulator)\n\n\ndef test_uf():\n instream = InStream()\n N = instream.readInt()\n uf = UF(N)\n while not instream.isEmpty():\n p = instream.readInt()\n q = instream.readInt()\n if uf.connected(p, q):\n continue\n uf.union(p, q)\n print(p, \" \", q)\n print(uf.count(), \"components\")\n\n\nif __name__ == \"__main__\":\n # test_bag()\n # test_accumulator()\n # test_visual_accumulator()\n test_uf()\n","repo_name":"sywh/algorithms","sub_path":"tests/test_fundamentals.py","file_name":"test_fundamentals.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"42295442649","text":"from models.gp import GaussianProcess\nimport json\nimport os\nimport pickle as pkl\nimport torch\nimport wandb\nimport torch\n# import utils.logger as logger\nimport torch.nn as nn\nfrom models import convnet\nfrom collections import defaultdict, namedtuple\nimport segmentation_models_pytorch as smp\nimport numpy as np\nfrom sustainbench import get_dataset\nfrom pathlib import Path\n\nfrom sustainbench.common.data_loaders import get_eval_loader\nimport torchvision.transforms as transforms\n# from models.unet import unet\nfrom segmentation_models_pytorch.encoders import get_preprocessing_fn\nfrom sklearn.metrics import f1_score, accuracy_score, precision_recall_fscore_support\nfrom models.loss import l1_l2_loss\n\ndecimal_precision = 5\n\n\n\n\ndef analyze_results(true, pred, pred_gp):\n \"\"\"Calculate ME and RMSE\"\"\"\n # true = true.numpy().flatten()\n true=np.asarray(true).flatten()\n pred = np.asarray(pred).flatten()\n rmse = np.sqrt(np.mean((true - pred) ** 2))\n me = np.mean(true - pred)\n\n print(f\"Without GP: RMSE: {rmse}, ME: {me}\")\n\n if pred_gp is not None:\n rmse_gp = np.sqrt(np.mean((true - pred_gp) ** 2))\n me_gp = np.mean(true - pred_gp)\n print(f\"With GP: RMSE: {rmse_gp}, ME: {me_gp}\")\n return rmse, me, rmse_gp, me_gp\n return rmse, me\n\ndropout=0.5\nsavedir=Path(\"../\")\ncheckpoint_path = '../model_weights/'\n\ndense_features=None\ntrain_steps=25000\nbatch_size=32\nstarter_learning_rate=1e-3\nweight_decay=1\nl1_weight=1\npatience=10\nuse_gp=False\nsigma=1\nr_loc=0.5\nr_year=1.5\ntimes=32\nsigma_e=0.32\nsigma_b=0.01\nin_channels=3\ndevice=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nis_cuda=True\nmodel = convnet.ConvModel(\n in_channels=13,\n dropout=dropout,\n dense_features=dense_features,\n savedir=savedir,\n use_gp=use_gp,\n sigma=sigma,\n r_loc=r_loc,\n r_year=r_year,\n sigma_e=sigma_e,\n sigma_b=sigma_b,\n device=device,\n).model\n# best_epoch=94\n# checkpoint = torch.load(os.path.join(checkpoint_path, f\"crop_yield_best_val(fluent-sun-33).pth.tar\"))\nmodel.load_state_dict(torch.load('/home/parichya/Documents/model_weights/crop_yield_best_val(fluent-sun-33).pth.tar'))\n\nif is_cuda:\n model = model.cuda()\nmodel=model.float()\n\ndataset = get_dataset(dataset='crop_yield', split_scheme=\"cauvery\", root_dir='data')\ntrain_data = dataset.get_subset('train')\nval_data=dataset.get_subset('val')\ntest_data=dataset.get_subset('test')\nbatch_size=32\n# Prepare the standard data loader\n# train_loader = get_train_loader('standard', train_data, batch_size=batch_size)\nval_loader = get_eval_loader('standard', val_data, batch_size=batch_size)\ntest_loader = get_eval_loader('standard', test_data, batch_size=batch_size)\n\nrunning_val_scores = defaultdict(list)\nfor (val_x, val_y,) in val_loader:\n # print(val_x.min())\n with torch.no_grad():\n model.eval()\n if is_cuda:\n val_x = val_x.to(\"cuda\")\n val_y = val_y.to(\"cuda\")\n\n val_x = val_x.float()\n val_y = val_y.float()\n val_x = torch.permute(val_x, (0, 3, 1, 2))\n val_pred_y = model(val_x)\n # print(val_pred_y.shape)\n val_pred_y = np.squeeze(val_pred_y)\n val_loss, running_val_scores = l1_l2_loss(\n val_pred_y, val_y, l1_weight, running_val_scores\n )\nprint(\"Validation RMSE:\", np.array(running_val_scores[\"RMSE\"]).mean())\n\nrunning_val_scores = defaultdict(list)\nfor (val_x, val_y,) in test_loader:\n # print(val_x.min())\n with torch.no_grad():\n model.eval()\n if is_cuda:\n val_x = val_x.to(\"cuda\")\n val_y = val_y.to(\"cuda\")\n\n val_x = 
val_x.float()\n val_y = val_y.float()\n val_x = torch.permute(val_x, (0, 3, 1, 2))\n val_pred_y = model(val_x)\n # print(val_pred_y.shape)\n val_pred_y = np.squeeze(val_pred_y)\n val_loss, running_val_scores = l1_l2_loss(\n val_pred_y, val_y, l1_weight, running_val_scores\n )\nprint(\"Test RMSE:\", np.array(running_val_scores[\"RMSE\"]).mean())\n\nexit()\n\nresults = defaultdict(list)\n# gp = GaussianProcess(sigma, r_loc, r_year, sigma_e, sigma_b)\ngp = None\n# with torch.no_grad():\n # for train_im, train_yield, train_loc, train_idx, train_year in train_dataloader:\n # model_output = model(\n # train_im, return_last_dense=True if (gp is not None) else False\n # )\n # if gp is not None:\n # pred, feat = model_output\n # if feat.device != \"cpu\":\n # feat = feat.cpu()\n # results[\"train_feat\"].append(feat.numpy())\n # else:\n # pred = model_output\n # results[\"train_pred\"].extend(pred.squeeze(1).tolist())\n # results[\"train_real\"].extend(train_yield.squeeze(1).tolist())\n # results[\"train_loc\"].append(train_loc.numpy())\n # results[\"train_indices\"].append(train_idx.numpy())\n # results[\"train_years\"].extend(train_year.tolist())\n\npreds=[]\ntr=[]\nrunning_val_scores = defaultdict(list)\nfor test_im, test_yield in val_loader:\n with torch.no_grad():\n\n if is_cuda:\n test_im=test_im.to(\"cuda\")\n test_yield=test_yield.to(\"cuda\")\n test_im=torch.permute(test_im, (0,3,1,2))\n test_im=test_im.float()\n test_yield=test_yield.float()\n model_output = model(\n test_im#, return_last_dense=True if (gp is not None) else False\n )\n val_pred_y = np.squeeze(model_output)\n val_loss, running_val_scores = l1_l2_loss(\n val_pred_y, test_yield, 1, running_val_scores\n )\n # if gp is not None:\n # pred, feat = model_output\n # if feat.device != \"cpu\":\n # feat = feat.cpu()\n # results[\"test_feat\"].append(feat.numpy())\n # else:\n # pred = model_output\n # results[\"test_pred\"].extend(pred.squeeze(1).tolist())\n # results[\"test_real\"].extend(test_yield.tolist())\n # results[\"test_loc\"].append(test_loc.numpy())\n # results[\"test_indices\"].append(test_idx.numpy())\n # results[\"test_years\"].extend(test_year.tolist())\n\n# for key in results:\n# if key in [\n# \"train_feat\",\n# \"test_feat\",\n# \"train_loc\",\n# \"test_loc\",\n# \"train_indices\",\n# \"test_indices\",\n# ]:\n# results[key] = np.concatenate(results[key], axis=0)\n# else:\n# results[key] = np.array(results[key])\n \n \n# model_information = {\n# \"state_dict\": model.state_dict()\n# }\n# for key in results:\n# model_information[key] = results[key]\n#\n# # finally, get the relevant weights for the Gaussian Process\n# model_weight = model.state_dict()[model_weight]\n# model_bias = model.state_dict()[model_bias]\n#\n# if model.state_dict()[model_weight].device != \"cpu\":\n# model_weight, model_bias = model_weight.cpu(), model_bias.cpu()\n#\n# model_information[\"model_weight\"] = model_weight.numpy()\n# model_information[\"model_bias\"] = model_bias.numpy()\n#\n# if gp is not None:\n# print(\"Running Gaussian Process!\")\n# gp_pred = gp.run(\n# model_information[\"train_feat\"],\n# model_information[\"test_feat\"],\n# model_information[\"train_loc\"],\n# model_information[\"test_loc\"],\n# model_information[\"train_years\"],\n# model_information[\"test_years\"],\n# model_information[\"train_real\"],\n# model_information[\"model_weight\"],\n# model_information[\"model_bias\"],\n# )\n# model_information[\"test_pred_gp\"] = gp_pred.squeeze(1)\n\n# filename = f'{predict_year}_{run_number}_{time}_{\"gp\" if (gp is not None) 
else \"\"}.pth.tar'\n# torch.save(model_information, savedir / filename)\nprint(np.array(running_val_scores[\"RMSE\"]).mean())\n\nresults = defaultdict(list)\ngp = None\npreds = []\ntr = []\nfor test_im, test_yield in test_loader:\n with torch.no_grad():\n\n if is_cuda:\n test_im = test_im.to(\"cuda\")\n test_yield = test_yield.to(\"cuda\")\n test_im = torch.permute(test_im, (0, 3, 1, 2))\n test_im = test_im.float()\n test_yield = test_yield.float()\n model_output = model(\n test_im # , return_last_dense=True if (gp is not None) else False\n )\n # if gp is not None:\n # pred, feat = model_output\n # if feat.device != \"cpu\":\n # feat = feat.cpu()\n # results[\"test_feat\"].append(feat.numpy())\n # else:\n pred = model_output\n results[\"test_pred\"].extend(pred.squeeze(1).tolist())\n results[\"test_real\"].extend(test_yield.tolist())\n\nprint(analyze_results(\n results[\"test_real\"],\n results[\"test_pred\"],\n model_information[\"test_pred_gp\"] if gp is not None else None,\n))\n","repo_name":"Malonje/sustain-ben","sub_path":"test_yield_predict.py","file_name":"test_yield_predict.py","file_ext":"py","file_size_in_byte":8699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7564451840","text":"multiples = [i for i in range(30) if i%3==0]\nprint(multiples)\n\nsquared = []\nfor x in range(10):\n squared.append(x**2)\nprint(squared)\n\nsquared = [x**2 for x in range(10)]\nprint(squared)\n\n\n\n\n# dict comprehensions\n\nmcase = {'a':10, 'b':34, 'A':7,'Z':3}\n\nmcase_frequency = {\n k.lower(): mcase.get(k.lower(),0) + mcase.get(k.upper(),0)\n for k in mcase.keys()\n}\n\n#print(mcase_frequency)\n\nprint({v: k for k,v in mcase_frequency.items()})\n\n\nmultiples_gen = (i for i in range(30) if i%3 ==0)\nprint(multiples_gen)\nfor x in multiples_gen:\n print(x)\n","repo_name":"sj43/Code-Storage","sub_path":"PythonTips/comprehensions.py","file_name":"comprehensions.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"1823351623","text":"class Solution:\n def maxProduct(self, nums: List[int]) -> int:\n \n # Initialize left and result to 0 and 1 respectively.\n '''left=0\n result=1\n \n # Initialize max_result to 0, which will store the maximum product encountered so far.\n max_result=0\n \n # Loop through each element of the input list using the range function.\n for right in range(len(nums)):\n \n # Calculate the product of the current element and the running product stored in the result variable. \n result*=nums[right]\n \n # Update the max_result variable with the maximum value between the current running product \n # and the previously stored maximum result.\n max_result=max(result,max_result)\n \n \n # If the current max_result is negative, we need to remove elements from the left end \n # of the subarray to make the product positive.\n while max_result<0:\n \n # Divide the current result by the leftmost element of the subarray and move the left pointer to the right.\n result/=nums[left]\n left+=1\n \n \n # Move the right pointer to the right at the end of each iteration \n right+=1\n \n \n \n # Return the final maximum product encountered. \n return max_result'''\n \n \n \n max_prod = nums[0]\n r_min =nums[0]\n r_max = nums[0]\n \n for i in nums[1:]:\n \n \n r_min,r_max = min(r_min * i, i,r_max*i ),max(r_max * i, i,r_min*i )\n \n \n max_prod = max(max_prod, r_max)\n \n return max_prod\n '''\n \n maxi = mini = res = nums[0]\n for n in nums[1:]:\n maxi, mini = max(maxi*n, mini*n, n), min(mini*n, maxi*n, n)\n res = max(res, maxi)\n \n return res'''\n\n\n","repo_name":"AbrhamWendmeneh/-competetive-programming-","sub_path":"152-maximum-product-subarray/152-maximum-product-subarray.py","file_name":"152-maximum-product-subarray.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"14921112348","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC ### Ingest circuits.csv file\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ##### Step 1 - Read the CSV file using the spark dataframe reader\n\n# COMMAND ----------\n\ndisplay(dbutils.fs.mounts())\n\n# COMMAND ----------\n\ndisplay(dbutils.fs.ls('/mnt/formula1dl9801/raw'))\n\n# COMMAND ----------\n\ncircuits_df = spark.read \\\n .option(\"header\", True) \\\n .option(\"inferSchema\", True) \\\n .csv('dbfs:/mnt/formula1dl9801/raw/circuits.csv')\n\n# COMMAND ----------\n\ntype(circuits_df)\n\n# COMMAND ----------\n\ncircuits_df.show()\n\n# COMMAND ----------\n\ndisplay(circuits_df)\n\n# COMMAND ----------\n\ncircuits_df.printSchema()\n\n# COMMAND ----------\n\ndisplay(circuits_df.describe())\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ######Better way to read the data - memory wise\n\n# COMMAND ----------\n\nfrom pyspark.sql.types import StructType, StructField, IntegerType, StringType, DoubleType\n\n# COMMAND ----------\n\ncircuits_schema = StructType(fields=[StructField(\"circuitId\", IntegerType(), False),\n StructField(\"circuitRef\", StringType(), True),\n StructField(\"name\", StringType(), True),\n StructField(\"location\", StringType(), True),\n StructField(\"country\", StringType(), True),\n StructField(\"lat\", DoubleType(), True),\n StructField(\"lng\", DoubleType(), True),\n StructField(\"alt\", IntegerType(), True),\n StructField(\"url\", StringType(), True)\n])\n\n# COMMAND ----------\n\ncircuits_df = spark.read \\\n.option(\"header\", True) \\\n.schema(circuits_schema) \\\n.csv(\"dbfs:/mnt/formula1dl9801/raw/circuits.csv\")\n\n# COMMAND ----------\n\ncircuits_df.printSchema()\n\n# COMMAND ----------\n\ndisplay(circuits_df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ##### Step 2 - Select only the required columns\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import col\n\n# COMMAND ----------\n\ncircuits_selected_df = circuits_df.select(col(\"circuitId\"), col(\"circuitRef\"), col(\"name\"), col(\"location\"), col(\"country\"), col(\"lat\"), col(\"lng\"), col(\"alt\"))\n\n# COMMAND ----------\n\ndisplay(circuits_selected_df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ##### Step 3 - Rename the columns as required\n\n# COMMAND ----------\n\ncircuits_renamed_df = circuits_selected_df.withColumnRenamed(\"circuitId\", \"circuit_id\") \\\n.withColumnRenamed(\"circuitRef\", \"circuit_ref\") \\\n.withColumnRenamed(\"lat\", \"latitude\") \\\n.withColumnRenamed(\"lng\", \"longitude\") \\\n.withColumnRenamed(\"alt\", \"altitude\") \n\n# COMMAND ----------\n\ndisplay(circuits_renamed_df)\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC ##### Step 4 - Add ingestion date to the dataframe\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import current_timestamp\n\n# COMMAND ----------\n\ncircuits_final_df = circuits_renamed_df.withColumn(\"ingestion_date\", current_timestamp()) \n\n# from pyspark.sql.functions import lit\n# circuits_final_df = circuits_renamed_df.withColumn(\"ingestion_date\", current_timestamp()).withColumn(\"env\", lit(\"Production\"))\n# lit is required as withColumn can only add objects\n\n# COMMAND ----------\n\ndisplay(circuits_final_df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ##### Step 5 - Write data to datalake as parquet\n\n# COMMAND ----------\n\ncircuits_final_df.write.mode(\"overwrite\").parquet(\"/mnt/formula1dl9801/processed/circuits\")\n\n# COMMAND ----------\n\ndisplay(spark.read.parquet(\"/mnt/formula1dl9801/processed/circuits\"))\n\n# 
COMMAND ----------\n\n","repo_name":"aman9801/Formula1-Analysis-Pyspark-Azure-Databricks","sub_path":"ingestion/1.ingest_circuits_file.py","file_name":"1.ingest_circuits_file.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7045962580","text":"\"\"\"\nGiven an unsorted array A of size N of non-negative integers, \nfind a continuous sub-array which adds to a given number.\n\"\"\"\ndef sub_sum(array, check_sum):\n startIdx = 0 \n endIdx = 0 \n counter = 0\n s = 0 \n arr_size = len(array)\n while(counter < arr_size):\n s += array[counter]\n endIdx = counter \n while (s > check_sum):\n s -= array[startIdx]\n startIdx +=1\n if (s == check_sum):\n break\n counter +=1\n return startIdx, endIdx, s \n\nif __name__ =='__main__':\n T = int(input())\n for i in range(T):\n l = input().rstrip().split(' ')\n n,s = int(l[0]),int(l[1])\n arr = input().rstrip().split(' ')\n arr = [int(arr[j]) for j in range(len(arr))]\n startIdx, endIdx, r = sub_sum(arr,s)\n if (r==s):\n print(\"{} {}\".format(startIdx+1, endIdx+1))\n else:\n print(\"-1\")\n","repo_name":"vmthanh/coding-revise","sub_path":"subarray-with-given-sum.py","file_name":"subarray-with-given-sum.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"44132545876","text":"import mysql.connector\n\ndb_config = {\n 'user': 'natanhmc',\n 'password': '1q2w3e4r5t',\n 'host': 'db4free.net',\n 'database': 'linkedin123',\n 'port': 3306\n}\n\ndef criar_banco():\n conn = mysql.connector.connect(**db_config)\n cursor = conn.cursor()\n \n cursor.execute('''\n CREATE TABLE IF NOT EXISTS contatos (\n id INT PRIMARY KEY AUTO_INCREMENT,\n nome VARCHAR(50),\n perfil_linkedin VARCHAR(50)\n ) \n ''')\n \n cursor.execute(''' \n CREATE TABLE IF NOT EXISTS conexoes (\n id INT PRIMARY KEY AUTO_INCREMENT,\n contato1_id INT,\n contato2_id INT,\n FOREIGN KEY (contato1_id) REFERENCES contatos (id),\n FOREIGN KEY (contato2_id) REFERENCES contatos (id)\n )\n ''')\n \n conn.commit()\n conn.close()\n \ndef adicionar_contato(nome, perfil_linkedin):\n try:\n conn = mysql.connector.connect(**db_config)\n cursor = conn.cursor()\n\n cursor.execute(\"SELECT id FROM contatos WHERE nome = %s AND perfil_linkedin = %s\", (nome, perfil_linkedin))\n contato_id = cursor.fetchone()\n\n if contato_id:\n contato_id = contato_id[0]\n print(\"Perfil já cadastrado !!\")\n else:\n cursor.execute(\"INSERT INTO contatos (nome, perfil_linkedin) VALUES (%s, %s)\", (nome, perfil_linkedin))\n\n conn.commit()\n print(\"Contato criado com sucesso!!!\")\n except mysql.connector.Error as err:\n print(f\"Erro ao adicionar contato: {err}\")\n finally:\n if 'conn' in locals():\n conn.close()\n \ndef listar_contatos():\n conn = mysql.connector.connect(**db_config)\n cursor = conn.cursor()\n \n cursor.execute('SELECT * FROM contatos')\n contatos = cursor.fetchall()\n \n conn.close()\n return contatos\n\ndef excluir_contato(nome, perfil_linkedin):\n conn = mysql.connector.connect(**db_config)\n cursor = conn.cursor()\n\n cursor.execute(\"SELECT id FROM contatos WHERE nome = %s AND perfil_linkedin = %s\", (nome, perfil_linkedin))\n contato_id = cursor.fetchone()\n\n if contato_id:\n contato_id = contato_id[0]\n\n cursor.execute(\"DELETE FROM conexoes WHERE contato1_id = %s OR contato2_id = %s\", (contato_id, contato_id))\n\n cursor.execute(\"DELETE FROM contatos WHERE id = %s\", (contato_id,))\n\n conn.commit()\n print(\"Perfil Excluído !!\")\n else:\n print(\"Perfil não encontrado !!\")\n \n\ndef adicionar_conexao(contato1_id, contato2_id):\n conn = mysql.connector.connect(**db_config)\n cursor = conn.cursor()\n \n cursor.execute('BEGIN')\n \n try:\n cursor.execute('SELECT * FROM conexoes WHERE (contato1_id = %s AND contato2_id = %s) OR (contato1_id = %s AND contato2_id = %s)',\n (contato1_id, contato2_id, contato2_id, contato1_id))\n\n if cursor.fetchone() is None:\n cursor.execute('INSERT INTO conexoes (contato1_id, contato2_id) VALUES (%s, %s)',\n (contato1_id, contato2_id))\n print(\"Conexão criada com sucesso!!\")\n else:\n print(\"A conexão entre esses contatos ja existe\")\n \n conn.commit()\n except mysql.connector.Error as err:\n print(f\"Erro ao adicionar conexão: {err}\")\n conn.rollback()\n finally:\n if 'conn' in locals():\n conn.close()\n \ndef excluir_conexao(id1, id2):\n conn = mysql.connector.connect(**db_config)\n cursor = conn.cursor()\n cursor.execute('DELETE FROM conexoes WHERE (contato1_id = %s AND contato2_id = %s) OR (contato1_id = %s AND contato2_id = %s)', (id1, id2, id2, id1))\n\n conn.commit()\n conn.close()\n \ndef listar_conexoes(contato_id):\n conn = mysql.connector.connect(**db_config)\n\n cursor = conn.cursor()\n \n cursor.execute('''\n SELECT contatos.nome\n FROM contatos\n JOIN conexoes ON contatos.id = CASE \n WHEN conexoes.contato1_id = %s THEN 
conexoes.contato2_id\n ELSE conexoes.contato1_id\n END\n WHERE conexoes.contato1_id = %s OR conexoes.contato2_id = %s\n ''', (contato_id, contato_id, contato_id))\n \n conexoes = cursor.fetchall()\n conn.close()\n return conexoes\n\n# def carregar\n\ndef menu():\n while True:\n print(\"\\n1. Adicionar Contato\")\n print(\"2. Listar Contatos\")\n print(\"3. Adicionar Conexão\")\n print(\"4. Listar Conexões de um Contato\")\n print(\"5. Excluir Contatos e Conexões\")\n print(\"6. Excluir Conexão\")\n\n print(\"0. Sair\")\n \n escolha = input(\"Escolha uma opção: \")\n \n if escolha == \"1\":\n nome = input(\"Nome do Contato: \")\n perfil_linkedin = input(\"Perfil do contato: \")\n adicionar_contato(nome, perfil_linkedin)\n elif escolha == \"2\":\n contatos = listar_contatos()\n for contato in contatos:\n print(f\"ID : {contato[0]}\\n,Nome: {contato[1]}\\n, Perfil Linkedin: {contato[2]}\")\n elif escolha == \"3\":\n contato1_id = int(input(\"ID do primeiro contato: \"))\n contato2_id = int(input(\"ID do segundo contato: \"))\n adicionar_conexao(contato1_id, contato2_id)\n elif escolha == \"4\":\n contato_id = int(input(\"ID do contato: \"))\n conexoes = listar_conexoes(contato_id)\n print(\"Conexões do contato: \")\n for conexao in conexoes:\n print(conexao[0])\n elif escolha == \"5\":\n nome = input(\"Nome do Contato: \")\n perfil_linkedin = input(\"Perfil do contato: \")\n excluir_contato(nome, perfil_linkedin)\n elif escolha == \"6\":\n contato1 = int(input(\"ID do primeiro contato: \"))\n contato2 = int(input(\"ID do segundo contato: \"))\n excluir_conexao(contato1, contato2)\n elif escolha == \"0\":\n break\n else:\n print(\"Opção inválida. Tente novamente\")\n \nif __name__ == \"__main__\":\n criar_banco()\n menu() \n","repo_name":"natanhmc/4_semestre_ADS","sub_path":"Estrutura_de_Dados/Aula_13/linkediin.py","file_name":"linkediin.py","file_ext":"py","file_size_in_byte":6135,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"71988340463","text":"hora = int(input(\"Quantas Horas o Funcionário Trabalhou No Mês?\"))\nvalor = int(input(\"Qual o valor da hora trabalhada?\"))\npercentual = int(input(\"Qual o valor do percentual de desconto?\"))\n\nsalarioBruto = hora*valor\nprint(f\"Salário Bruto = {salarioBruto}\")\n\ndesconto = (percentual*salarioBruto)/100\nprint(f\"Total do Desconto = {desconto}\")\n\nsalarioLiquido = salarioBruto - percentual\nprint(f\"Salário Líquido = {salarioLiquido}\")","repo_name":"MurilloFagundesAS/Exercicios-ProgramacaoII-FATEC-2020-1","sub_path":"Cálculo de salário/Cálculo de salário.py","file_name":"Cálculo de salário.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"441191959","text":"# API configuration\nOPENAI_API_KEY = 'YOUR_API_KEY'\nCHATGPT_MODEL = 'gpt-3.5-turbo-0613'\nCHATGPT_CONTEXT_FRAME = 8129 # Tokens\nCHATGPT_MODEL_EXTENDED = 'gpt-3.5-turbo-16k'\n\n# How long to idle until the machine closes (in seconds) (default: 120)\nIDLE_TIMEOUT = 120\n\n# Whether to let chatGPT generate its own environments based on context provided by user\n# SELF_TRAIN = False \n\n# Used in SELF_TRAIN mode, to determine if, once reaching the idle state, the environment should be reset or not\n# PERSISTENT_ENVIRONMENTS = False\n\n# TOKEN_LIMIT: Maximum number of tokens to use in one run of SELF_TRAIN mode\n# If the number of tokens exceeds this limit, the machine will memorize its output and close.\n# This is not a hard limit, but rather a soft one, as the machine will try to finish its current task set before closing.\n# TOKEN_LIMIT = 100000\n\n# Number of retries if a completion fails (eg. wrong/broken format) (default: 3)\nMAX_RETRIES=3\n\n# Maximum context timespan (in seconds) (default: 120)\nMAX_TIMESPAN=120\n\n# Path to prompt files (default: /gpt_controller/chat_gpt_interface/*)\nPROMPT_PATH = './prompts/'\n","repo_name":"andrei-calin-dragomir/gpt-controller","sub_path":"gpt_controller/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"18775820557","text":"from typing import Optional\n\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\ndef deleteDuplicates(head: Optional[ListNode]) -> Optional[ListNode]:\n if not head:\n return None\n\n cur = head.val\n start = head\n end = head\n\n while head:\n if head.val != cur:\n cur = head.val\n end.next = head\n end = head\n head = head.next\n end.next = None\n return start\n","repo_name":"wieceslaw/leet-code-problems","sub_path":"easy/remove-duplicates-from-sorted-list/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"36774346934","text":"import numpy as np\nfrom os import urandom\n\ndef WORD_SIZE():\n return 24\n\ndef ALPHA():\n return 8\n\ndef BETA():\n return 3\n\nMASK_VAL = 2**WORD_SIZE() - 1\n\ndef rol(x, k):\n x = x & MASK_VAL\n return (((x << k) & MASK_VAL) | (x >> (WORD_SIZE() - k)))\n\ndef ror(x, k):\n x = x & MASK_VAL\n return ((x >> k) | ((x << (WORD_SIZE() - k)) & MASK_VAL))\n\ndef enc_one_round(p, k):\n c0, c1 = p[0], p[1]\n c0 = ror(c0, ALPHA())\n c0 = (c0 + c1) & MASK_VAL\n c0 = c0 ^ k\n c1 = rol(c1, BETA())\n c1 = c1 ^ c0\n return (c0,c1)\n\ndef dec_one_round(c,k):\n c0, c1 = c[0], c[1]\n c1 = c1 ^ c0\n c1 = ror(c1, BETA())\n c0 = c0 ^ k\n c0 = (c0 - c1) & MASK_VAL\n c0 = rol(c0, ALPHA())\n return (c0, c1)\n\n\ndef expand_key(k, t):\n ks = [0 for i in range(t)]\n ks[0] = k[len(k) - 1]\n l = list(reversed(k[:len(k) - 1]))\n tmp = len(l)\n for i in range(t - 1):\n l[i % tmp], ks[i + 1] = enc_one_round((l[i % tmp], ks[i]), i)\n return ks\n\n\ndef encrypt(p, ks):\n x, y = p[0], p[1]\n for k in ks:\n x, y = enc_one_round((x,y), k)\n return (x, y)\n\n\ndef decrypt(c, ks):\n x, y = c[0], c[1]\n for k in reversed(ks):\n x, y = dec_one_round((x,y), k)\n return (x,y)\n\n\ndef check_testvector():\n key = (0x121110, 0x0a0908, 0x020100)\n pt = (0x20796c, 0x6c6172)\n ks = expand_key(key, 22)\n ct = encrypt(pt, ks)\n if ct == (0xc049a5, 0x385adc):\n print('Testvector for speck48/72 verified.')\n else:\n print('Testvector for speck48/72 not verified.')\n return False\n\n key = (0x1a1918, 0x121110, 0x0a0908, 0x020100)\n pt = (0x6d2073, 0x696874)\n ks = expand_key(key, 23)\n ct = encrypt(pt, ks)\n if ct == (0x735e10, 0xb6445d):\n print('Testvector for speck48/96 verified.')\n else:\n print('Testvector for speck48/96 not verified.')\n return False\n\n n = 10**6\n pt = np.frombuffer(urandom(8 * n), dtype=np.uint32).reshape(2, n) & MASK_VAL\n key = np.frombuffer(urandom(12 * n), dtype=np.uint32).reshape(3, n) & MASK_VAL\n ks = expand_key(key, 22)\n ct = encrypt(pt, ks)\n pt_tmp = decrypt(ct, ks)\n if np.sum(pt[0] == pt_tmp[0]) == n and np.sum(pt[1] == pt_tmp[1]) == n:\n print('Testdecryption verified.')\n else:\n print('Testdecryption not verified.')\n return False\n\n return True\n\n\ndef convert_to_binary(arr):\n X = np.zeros((len(arr) * WORD_SIZE(), len(arr[0])), dtype=np.uint8)\n for i in range(len(arr) * WORD_SIZE()):\n index = i // WORD_SIZE()\n offset = WORD_SIZE() - (i % WORD_SIZE()) - 1\n X[i] = (arr[index] >> offset) & 1\n X = X.transpose()\n return X\n\n\nif __name__ == '__main__':\n check_testvector()","repo_name":"AI-Lab-Y/DLA_search_and_partition_tree","sub_path":"MITM_search/more_applications/Speck48/speck.py","file_name":"speck.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"38881789695","text":"from rest_framework import serializers\nfrom rest_framework_gis.serializers import GeoFeatureModelSerializer\nfrom forager.models import Record, ImageRecord, Species, ImageSpecies\n\n\nclass ImageRecordSerializer(serializers.ModelSerializer):\n class Meta:\n model = ImageRecord\n fields = (\"id\", \"image\", \"default\")\n\n\nclass RecordSerializer(GeoFeatureModelSerializer):\n record_images = ImageRecordSerializer(many=True)\n species = serializers.StringRelatedField()\n user = serializers.ReadOnlyField(source=\"user.email\")\n\n class Meta:\n model = Record\n geo_field = \"location\"\n\n fields = (\"id\", \"species\", \"user\", \"date\", \"notes\", \"record_images\")\n\n\nclass ImageSpeciesSerializer(serializers.ModelSerializer):\n class Meta:\n model = ImageSpecies\n fields = (\"id\", \"image\", \"caption\", \"default\")\n\n\nclass SpeciesSerializer(serializers.ModelSerializer):\n species_images = ImageSpeciesSerializer(many=True)\n\n class Meta:\n model = Species\n fields = (\n \"id\",\n \"common_name\",\n \"scientific_name\",\n \"description\",\n \"start\",\n \"end\",\n \"species_images\",\n )\n","repo_name":"joekbullard/bristol-forager","sub_path":"app/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"12601544090","text":"\n# coding: utf-8\n\n# In[4]:\n\nimport numpy as np #linear algebra\nimport pandas as pd #data processing\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nwarnings.simplefilter(action = \"ignore\", category = FutureWarning)\n\nmatches=pd.read_csv('matches.csv')\nmatches['type']=\"pre-qualifier\"\nfor year in range(2008,2016):\n final_match_index=matches[matches['season']==year][-1:].index.values[0]\n matches=matches.set_value(final_match_index,\"type\",\"final\")\n matches=matches.set_value(final_match_index-1,\"type\",\"qualifier-2\")\n matches=matches.set_value(final_match_index-2,\"type\",\"eliminator\")\n matches=matches.set_value(final_match_index-3,\"type\",\"qualifier-1\")\nmatches.groupby(['type'])[\"id\"].count()\nmatches.head(290)\n\n\n# In[6]:\n\ndeliveries=pd.read_csv(\"deliveries.csv\")\ndeliveries.head()\n\n\n# In[10]:\n\nteam_score = deliveries.groupby(['match_id', 'inning'])['total_runs'].sum().unstack().reset_index()\nteam_score.columns = ['match_id', 'Team1_score', 'Team2_score', 'Team1_superover_score', 'Team2_superover_score']\nmatches_agg = pd.merge(matches, team_score, left_on = 'id', right_on = 'match_id', how = 'outer')\n\nteam_extras = deliveries.groupby(['match_id', 'inning'])['extra_runs'].sum().unstack().reset_index()\nteam_extras.columns = ['match_id', 'Team1_extras', 'Team2_extras', 'Team1_superover_extras', 'Team2_superover_extras']\nmatches_agg = pd.merge(matches_agg, team_extras, on = 'match_id', how = 'outer')\n\n#Reorder the columns to make the data more readable\ncols = ['match_id', 'season','city','date','team1','team2', 'toss_winner', 'toss_decision', 'result', 'dl_applied', 'winner', 'Team1_score','Team2_score', 'win_by_runs', 'win_by_wickets', 'Team1_extras', 'Team2_extras', 'Team1_superover_score', 'Team2_superover_score', 'Team1_superover_extras', 'Team2_superover_extras', 'player_of_match', 'type', 'venue', 'umpire1', 'umpire2', 'umpire3']\nmatches_agg = matches_agg[cols]\nmatches_agg.head(2)\n\n\n# In[7]:\n\nbatsman_grp = deliveries.groupby([\"match_id\", \"inning\", \"batting_team\", \"batsman\"])\nbatsmen = batsman_grp[\"batsman_runs\"].sum().reset_index()\n\n# Ignore the wide balls.\nballs_faced = deliveries[deliveries[\"wide_runs\"] == 0]\nballs_faced = balls_faced.groupby([\"match_id\", \"inning\", \"batsman\"])[\"batsman_runs\"].count().reset_index()\nballs_faced.columns = [\"match_id\", \"inning\", \"batsman\", \"balls_faced\"]\nbatsmen = batsmen.merge(balls_faced, left_on=[\"match_id\", \"inning\", \"batsman\"], \n right_on=[\"match_id\", \"inning\", \"batsman\"], how=\"left\")\n\nfours = deliveries[ deliveries[\"batsman_runs\"] == 4]\nsixes = deliveries[ deliveries[\"batsman_runs\"] == 6]\n\nfours_per_batsman = fours.groupby([\"match_id\", \"inning\", \"batsman\"])[\"batsman_runs\"].count().reset_index()\nsixes_per_batsman = sixes.groupby([\"match_id\", \"inning\", \"batsman\"])[\"batsman_runs\"].count().reset_index()\n\nfours_per_batsman.columns = [\"match_id\", \"inning\", \"batsman\", \"4s\"]\nsixes_per_batsman.columns = [\"match_id\", \"inning\", \"batsman\", \"6s\"]\n\nbatsmen = batsmen.merge(fours_per_batsman, left_on=[\"match_id\", \"inning\", \"batsman\"], \n right_on=[\"match_id\", \"inning\", \"batsman\"], how=\"left\")\nbatsmen = batsmen.merge(sixes_per_batsman, left_on=[\"match_id\", \"inning\", \"batsman\"], \n right_on=[\"match_id\", \"inning\", \"batsman\"], how=\"left\")\nbatsmen['SR'] = np.round(batsmen['batsman_runs'] / batsmen['balls_faced'] * 100, 2)\n\nfor col 
in [\"batsman_runs\", \"4s\", \"6s\", \"balls_faced\", \"SR\"]:\n batsmen[col] = batsmen[col].fillna(0)\n\ndismissals = deliveries[ pd.notnull(deliveries[\"player_dismissed\"])]\ndismissals = dismissals[[\"match_id\", \"inning\", \"player_dismissed\", \"dismissal_kind\", \"fielder\"]]\ndismissals.rename(columns={\"player_dismissed\": \"batsman\"}, inplace=True)\nbatsmen = batsmen.merge(dismissals, left_on=[\"match_id\", \"inning\", \"batsman\"], \n right_on=[\"match_id\", \"inning\", \"batsman\"], how=\"left\")\n\nbatsmen = matches[['id','season']].merge(batsmen, left_on = 'id', right_on = 'match_id', how = 'left').drop('id', axis = 1)\nbatsmen.head(2)\n\n\n# In[8]:\n\nbowler_grp = deliveries.groupby([\"match_id\", \"inning\", \"bowling_team\", \"bowler\", \"over\"])\nbowlers = bowler_grp[\"total_runs\", \"wide_runs\", \"bye_runs\", \"legbye_runs\", \"noball_runs\"].sum().reset_index()\n\nbowlers[\"runs\"] = bowlers[\"total_runs\"] - (bowlers[\"bye_runs\"] + bowlers[\"legbye_runs\"])\nbowlers[\"extras\"] = bowlers[\"wide_runs\"] + bowlers[\"noball_runs\"]\n\ndel( bowlers[\"bye_runs\"])\ndel( bowlers[\"legbye_runs\"])\ndel( bowlers[\"total_runs\"])\n\ndismissal_kinds_for_bowler = [\"bowled\", \"caught\", \"lbw\", \"stumped\", \"caught and bowled\", \"hit wicket\"]\ndismissals = deliveries[deliveries[\"dismissal_kind\"].isin(dismissal_kinds_for_bowler)]\ndismissals = dismissals.groupby([\"match_id\", \"inning\", \"bowling_team\", \"bowler\", \"over\"])[\"dismissal_kind\"].count().reset_index()\ndismissals.rename(columns={\"dismissal_kind\": \"wickets\"}, inplace=True)\n\nbowlers = bowlers.merge(dismissals, left_on=[\"match_id\", \"inning\", \"bowling_team\", \"bowler\", \"over\"], \n right_on=[\"match_id\", \"inning\", \"bowling_team\", \"bowler\", \"over\"], how=\"left\")\nbowlers[\"wickets\"] = bowlers[\"wickets\"].fillna(0)\n\nbowlers_over = bowlers.groupby(['match_id', 'inning', 'bowling_team', 'bowler'])['over'].count().reset_index()\nbowlers = bowlers.groupby(['match_id', 'inning', 'bowling_team', 'bowler']).sum().reset_index().drop('over', 1)\nbowlers = bowlers_over.merge(bowlers, on=[\"match_id\", \"inning\", \"bowling_team\", \"bowler\"], how = 'left')\nbowlers['Econ'] = np.round(bowlers['runs'] / bowlers['over'] , 2)\nbowlers = matches[['id','season']].merge(bowlers, left_on = 'id', right_on = 'match_id', how = 'left').drop('id', axis = 1)\n\nbowlers.head(2)\n\n\n# In[15]:\n\nx,y=2008,2017\nwhile x 1):\n model_G = nn.DataParallel(model_G, list(range(ngpu)))\n model_G.apply(weights_init)\nmodel_G.to(device)\n\nmodel_G = torch.load(\"./sat2mapGen_v1.3.pth\")\nmodel_G.apply(weights_init)\ntest_imgs,_ = next(iter(dataloader_val))\n\nsatellite = test_imgs[:,:,:,:256].to(device)\nmaps = test_imgs[:,:,:,256:].to(device)\n\ngen = model_G(satellite)\n#gen = gen[0]\n\nsatellite = satellite.detach().cpu()\ngen = gen.detach().cpu()\nmaps = maps.detach().cpu()\n\nshow_image(torchvision.utils.make_grid(satellite, padding=10), title=\"Satellite\", figsize=(50,50))\nshow_image(torchvision.utils.make_grid(gen, padding=10), title=\"Generated\", figsize=(50,50))\nshow_image(torchvision.utils.make_grid(maps, padding=10), title=\"Expected Output\", figsize=(50,50))","repo_name":"Shamoonmohd/Gans-For-Image-Generation","sub_path":"Pix2Pix/pix2pix/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"34344841078","text":"import numpy as np\r\nfrom easydict import EasyDict as edict\r\nimport yaml\r\n\r\n\r\nconfig = edict()\r\n\r\n# network related params\r\nconfig.PIXEL_MEANS = np.array([50])\r\nconfig.IMAGE_STRIDE = 0\r\nconfig.RPN_FEAT_STRIDE = 8\r\nconfig.RCNN_FEAT_STRIDE = 8\r\nconfig.FIXED_PARAMS = []\r\nconfig.FIXED_PARAMS_SHARED = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5']\r\n\r\n# dataset related params\r\nconfig.NUM_CLASSES = 2\r\n# config.SCALES = [(512, 512)] # first is scale (the shorter side); second is max size. replaced by SCALE and MAX_SIZE by Ke\r\nconfig.ANCHOR_SCALES = (8, 16, 32)\r\nconfig.ANCHOR_RATIOS = (0.5, 1, 2)\r\nconfig.NUM_ANCHORS = len(config.ANCHOR_SCALES) * len(config.ANCHOR_RATIOS)\r\n\r\nconfig.TRAIN = edict()\r\n\r\n# R-CNN and RPN\r\nconfig.TRAIN.SAMPLES_PER_BATCH = 1\r\n# e2e changes behavior of anchor loader and metric\r\nconfig.TRAIN.END2END = True\r\n# group images with similar aspect ratio\r\nconfig.TRAIN.ASPECT_GROUPING = False\r\n\r\n# R-CNN\r\n# rcnn rois batch size\r\nconfig.TRAIN.BATCH_ROIS = 128\r\n# rcnn rois sampling params\r\nconfig.TRAIN.FG_FRACTION = 0.25\r\nconfig.TRAIN.FG_THRESH = 0.5\r\nconfig.TRAIN.BG_THRESH_HI = 0.5\r\nconfig.TRAIN.BG_THRESH_LO = 0.0\r\n# rcnn bounding box regression params\r\nconfig.TRAIN.BBOX_REGRESSION_THRESH = 0.5\r\nconfig.TRAIN.BBOX_WEIGHTS = np.array([1.0, 1.0, 1.0, 1.0])\r\n\r\n# RPN anchor loader\r\n# rpn anchors batch size\r\nconfig.TRAIN.RPN_BATCH_SIZE = 256\r\n# rpn anchors sampling params\r\nconfig.TRAIN.RPN_FG_FRACTION = 0.5\r\nconfig.TRAIN.RPN_POSITIVE_OVERLAP = 0.7\r\nconfig.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3\r\nconfig.TRAIN.RPN_CLOBBER_POSITIVES = False\r\n# rpn bounding box regression params\r\nconfig.TRAIN.RPN_BBOX_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\r\nconfig.TRAIN.RPN_POSITIVE_WEIGHT = -1.0\r\n\r\n# used for end2end training\r\n# RPN proposal\r\nconfig.TRAIN.CXX_PROPOSAL = True\r\nconfig.TRAIN.RPN_NMS_THRESH = 0.7\r\nconfig.TRAIN.RPN_PRE_NMS_TOP_N = 12000\r\nconfig.TRAIN.RPN_POST_NMS_TOP_N = 2000\r\nconfig.TRAIN.RPN_MIN_SIZE = config.RPN_FEAT_STRIDE\r\n# approximate bounding box regression\r\nconfig.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED = True\r\nconfig.TRAIN.BBOX_MEANS = (0.0, 0.0, 0.0, 0.0)\r\nconfig.TRAIN.BBOX_STDS = (0.1, 0.1, 0.2, 0.2)\r\n\r\n# ===============Ke added====================\r\nconfig.GT_MARGIN = 0 # add a margin around the ground-truth box. 
generally not used\r\nconfig.MAX_SIZE = 512\r\nconfig.SCALE = 512\r\nconfig.NORM_SPACING = -1\r\nconfig.SLICE_INTV = 2\r\nconfig.WINDOWING = [-1024, 3071]\r\nconfig.TRAIN.RCNN_POS_UPSAMPLE = False\r\n\r\nconfig.TEST = edict()\r\n\r\n# R-CNN testing\r\n# use rpn to generate proposal\r\nconfig.TEST.HAS_RPN = False\r\n# size of images for each device\r\nconfig.TEST.SAMPLES_PER_BATCH = 1\r\n\r\n# RPN proposal\r\nconfig.TEST.CXX_PROPOSAL = True\r\nconfig.TEST.RPN_NMS_THRESH = 0.7\r\nconfig.TEST.RPN_PRE_NMS_TOP_N = 6000\r\nconfig.TEST.RPN_POST_NMS_TOP_N = 300\r\nconfig.TEST.RPN_MIN_SIZE = config.RPN_FEAT_STRIDE\r\n\r\n# RPN generate proposal\r\nconfig.TEST.PROPOSAL_NMS_THRESH = 0.7\r\nconfig.TEST.PROPOSAL_PRE_NMS_TOP_N = 20000\r\nconfig.TEST.PROPOSAL_POST_NMS_TOP_N = 2000\r\nconfig.TEST.PROPOSAL_MIN_SIZE = config.RPN_FEAT_STRIDE\r\n\r\n# RCNN nms\r\nconfig.TEST.NMS = 0.3\r\n\r\n# default settings\r\ndefault = edict()\r\n\r\n# default network\r\ndefault.network = 'vgg'\r\ndefault.pretrained = '/home/yk/ct/data/imagenet_models/MXNet/vgg16'\r\ndefault.pretrained_epoch = 0\r\ndefault.base_lr = 0.001\r\n\r\ndefault.dataset = 'DeepLesion'\r\ndefault.image_set = 'train'\r\n# default.root_path = '/home/yk/ct/data/'\r\ndefault.dataset_path = ''\r\n\r\n# default training\r\ndefault.frequent = 20\r\ndefault.kvstore = 'device'\r\n# default e2e\r\ndefault.e2e_prefix = 'model/e2e'\r\ndefault.e2e_epoch = 10\r\ndefault.e2e_lr = default.base_lr\r\ndefault.e2e_lr_step = '7'\r\n# # default rpn\r\n# default.rpn_prefix = 'model/rpn'\r\n# default.rpn_epoch = 8\r\n# default.rpn_lr = default.base_lr\r\n# default.rpn_lr_step = '6'\r\n# # default rcnn\r\n# default.rcnn_prefix = 'model/rcnn'\r\n# default.rcnn_epoch = 8\r\n# default.rcnn_lr = default.base_lr\r\n# default.rcnn_lr_step = '6'\r\n\r\n# ===============Ke added====================\r\ndefault.gpus = '0'\r\ndefault.val_gpu = default.gpus\r\ndefault.val_image_set = 'val'\r\ndefault.val_vis = False\r\ndefault.val_shuffle = False\r\ndefault.val_has_rpn = True\r\ndefault.proposal = 'rpn'\r\ndefault.val_max_box = 5\r\ndefault.val_iou_th = .5\r\ndefault.val_thresh = 0\r\ndefault.weight_decay = .0005\r\ndefault.groundtruth_file = 'DL_info.csv'\r\ndefault.image_path = ''\r\ndefault.validate_at_begin = True\r\ndefault.testing = False\r\n\r\ndefault.flip = False\r\ndefault.shuffle = True\r\ndefault.work_load_list = None\r\ndefault.resume = False # resume from previous epoch\r\ndefault.begin_epoch = 0\r\ndefault.show_avg_loss = 100 # 1: show exact loss of each batch. >1: smooth the shown loss\r\n\r\ndef merge_a_into_b(a, b):\r\n \"\"\"Merge config dictionary a into config dictionary b\r\n \"\"\"\r\n if type(a) is not edict:\r\n return\r\n\r\n for k, v in a.iteritems():\r\n # recursively merge dicts\r\n if type(v) is edict:\r\n merge_a_into_b(a[k], b[k])\r\n else:\r\n b[k] = v\r\n\r\n\r\ndef cfg_from_file(filename):\r\n \"\"\"Load a config file and merge it into the default options.\"\"\"\r\n with open(filename, 'r') as f: # not valid grammar in Python 2.5\r\n yaml_cfg = edict(yaml.load(f))\r\n return yaml_cfg\r\n","repo_name":"rsummers11/CADLab","sub_path":"lesion_detector_3DCE/rcnn/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","stars":417,"dataset":"github-code","pt":"91"}
+{"seq_id":"26860225341","text":"#Tanımlamalar\r\nimport pygame \r\nimport sys\r\npygame.init()\r\nimg = pygame.image.load('projeiçinfoto.png')\r\n\r\n# Renkler\r\nsiyah = (0,0,0)\r\nbeyaz = (255,255,255)\r\nkirmizi = (255,0,0)\r\nyesil = (0,255,0)\r\nmavi = (0,0,255)\r\ngray = (176,176,176)\r\n\r\n#Ekran ve kadının konumlandırılması\r\nw , h = 1000 , 700\r\nscreen = pygame.display.set_mode((w, h))\r\nscreen.fill(gray)\r\nrect = img.get_rect()\r\nrect.center = w//5, h //2\r\nscreen.blit(img, rect)\r\n\r\n#Exit tuşunun konumlandırılması\r\nscreen_en = screen.get_width()\r\nscreen_boy = screen.get_height()\r\nyazi_tipi = pygame.font.SysFont('lucida' , 25 )\r\nyazi = yazi_tipi.render('EXİT', True , siyah)\r\nyazi_1 = yazi_tipi.render('EXİT', True ,beyaz)\r\n\r\n#Önce kadını oynatıp daha sonra exit tuşu daha içte olacak şekilde oynatıyoruz. \r\npygame.display.update()\r\nwhile Warning:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n Warning = False\r\n\r\n#Tuşun son işlemleri\r\n while True:\r\n for event in pygame.event.get():\r\n mouse = pygame.mouse.get_pos()\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if( screen_en //20) <= mouse[0] <= ( screen_en //20 ) + 50 and (screen_boy//23) <= mouse[1] <= (screen_boy//23) + 30 :\r\n pygame.quit()\r\n pygame.draw.rect( screen, beyaz ,[int(screen_en//20), int(screen_boy//23) ,50,30] )\r\n screen.blit(yazi,[(screen_en//20) + 5, (screen_boy//23) + 3])\r\n mouse = pygame.mouse.get_pos()\r\n if ( screen_en //20) <= mouse[0] <= ( screen_en //20 ) + 50 and (screen_boy//23) <= mouse[1] <= (screen_boy//23) + 30 :\r\n pygame.draw.rect( screen,kirmizi ,[ int(screen_en//20), int(screen_boy//23) ,50,30] )\r\n screen.blit(yazi_1,[(screen_en/20) + 5, (screen_boy/23) + 3])\r\n pygame.display.flip()\r\n\r\n","repo_name":"melekaylin/Proje","sub_path":"proje deneme.py","file_name":"proje deneme.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"70212911983","text":"\nfrom numpy import *\n\n# we use a class because it's easier to save the weights, rules and everything\nclass AdaBoost:\n\t# when we intantiate the object, we need the training data set already\n def __init__(self, training_set):\n self.training_set = training_set\n self.N = len(self.training_set)\n self.weights = ones(self.N)/self.N\n self.RULES = []\n self.ALPHA = []\n\n # add_hypothesis is the main method that follows the whole adaboost algorithm\n def add_hypothesis(self, func, test=False):\n \t# first we calculate the errors\n errors = array([t[1]!=func(t[0]) for t in self.training_set])\n e = (errors*self.weights).sum()\n # and with that error we can compute the alpha \n alpha = 0.5 * log((1-e)/e)\n print(\"error of the hypothesis: \" + str(e))\n print(\"alpha (weight of the hypothesis): \" + str(alpha))\n\n # we will now determine the new weights\n # we first instantiate the empty array\n w = zeros(self.N)\n for i in range(self.N):\n if errors[i] == 1: w[i] = self.weights[i] * exp(alpha)\n else: w[i] = self.weights[i] * exp(-alpha)\n # we make sure to normalize the weights\n self.weights = w / w.sum()\n self.RULES.append(func)\n self.ALPHA.append(alpha)\n\n def evaluate(self):\n NR = len(self.RULES)\n for (x,l) in self.training_set:\n hx = [self.ALPHA[i]*self.RULES[i](x) for i in range(NR)]\n if(sign(l) == sign(sum(hx))): print(x)\n\n# creating the dataset\ndata = []\ndata.append(((11,3), -1))\ndata.append(((10,1), -1))\ndata.append(((4,4), -1))\ndata.append(((12,10), +1))\ndata.append(((2,4), -1))\ndata.append(((10,5), +1))\ndata.append(((8,8), -1))\ndata.append(((6,5), +1))\ndata.append(((7,7), +1))\ndata.append(((7,8), +1))\n\n# function used to print the weights \nprint(data)\ndef print_tab(tab):\n\tfor i in range(len(tab)):\n\t\tprint(str(i+1) + \" | \" + str(tab[i]))\n\n# creating the object adaboost with the dataset created\nm = AdaBoost(data)\n# we had new rules, and print the weights each time to see the evolution\nprint_tab(m.weights)\nm.add_hypothesis(lambda x: 2*(x[1] > 4)-1)\nprint_tab(m.weights)\nm.add_hypothesis(lambda x: 2*(x[0]%2 != 0)-1)\nprint_tab(m.weights)\nm.add_hypothesis(lambda x: 2*(x[0] > x[1] and x[1] >= 5)-1)\nprint_tab(m.weights)\n\n# finally we can evaluate again\nm.evaluate()","repo_name":"Cokral/ml_wayne_state","sub_path":"assignment_5/original_hwk5.py","file_name":"original_hwk5.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"37124221782","text":"from itertools import count\nfrom itertools import islice\nimport math\nfrom pydoc import doc\nimport requests\nimport time\nimport csv\nimport os\nfrom bs4 import BeautifulSoup\nfrom typing import Counter\nfrom asyncio.windows_events import NULL\nfrom webcrawler.page import Page\n\n\nclass Crawler:\n def __init__(self) -> None:\n self.report_info = []\n self.disallowed_url_arr = []\n self.seed_count = 0\n self.word_count = {}\n self.page_rank_dic = {}\n self.soup = None\n self.domain_url = ''\n self.depth = 0\n # list of all the pages crawled\n self.pages = []\n\n # For indexing\n self.indexed_words_dict = {}\n self.document_length_dict = {}\n\n # Check if the repository folder exists, if it doesnt make it\n self.savePath = os.path.dirname(\n os.path.abspath(__file__)) + \"\\\\repository\\\\\"\n if not os.path.exists(self.savePath):\n os.makedirs(self.savePath)\n\n def initialize(self, domain_url, depth):\n self.domain_url = domain_url\n self.depth = int(depth)\n\n def crawl(self):\n debug = True\n depth = 0\n visited = []\n\n # Check robots.txt for any restricted pages\n # Add url to the queue\n queue = []\n\n # delete all current files in repository\n for file in os.listdir(self.savePath):\n os.remove(self.savePath + file)\n\n domain = self.domain_url.split(\"/\")[2]\n queue.append(self.domain_url)\n\n session = requests.Session()\n session.headers.update({'Host': domain,\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Connection': 'keep-alive',\n 'Pragma': 'no-cache',\n 'Cache-Control': 'no-cache'})\n\n while((depth < self.depth) or (len(queue) == 0)):\n depth += 1\n num_outLinks = 0\n page_name = str(depth) + \".html\"\n currentUrl = queue.pop(0)\n\n if(debug):\n print(\"requesting: \" + currentUrl)\n visited.append(currentUrl)\n\n try:\n # get the current page's html\n page = session.get(currentUrl, timeout=5)\n # save the current page's html to the repositroy folder\n completePath = os.path.normpath(\n self.savePath + page_name)\n with open(completePath, 'w', encoding=\"utf-8\") as file:\n file.write(page.text)\n\n except requests.exceptions.Timeout:\n num_try = 0\n while(num_try < 5):\n time.sleep(5)\n page = session.get(currentUrl, timeout=10)\n completePath = os.path.normpath(\n self.savePath + page_name)\n with open(completePath, 'w', encoding=\"utf-8\") as file:\n file.write(page.text)\n\n if(page is not NULL):\n break\n num_try += 1\n except requests.exceptions.TooManyRedirects:\n print('Bad url')\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)\n\n soup = BeautifulSoup(page.text, 'html.parser')\n outlinks = soup.find_all(\"a\", href=True)\n\n # call split on link for # and only check first half\n # ex https://docs.python-requests.org/en/latest/#the-contributor-guide\n # ignore #the-contributor-guide and just go to https://docs.python-requests.org/en/latest/\n\n _outlinks = list()\n for tag in outlinks:\n link = tag[\"href\"]\n # if the tag doesnt have an href skip it\n if not link:\n pass\n # If the tag just has a comment instead of an actual link skip it\n elif link[0] == \"#\":\n #print(\"skipping \" + link[\"href\"])\n pass\n # ex of split: link.split(\"/\") = ['https:', '', 'www.cpp.edu', 'index.shtml']\n elif link[0:4] == \"http\":\n num_outLinks += 1\n if(domain == link.split(\"/\")[2]):\n #print(\"adding \" + link[\"href\"])\n _outlinks.append(link)\n if((link not in visited) and (link not in queue)):\n queue.append(link)\n\n else:\n #print(\"appending then adding \" + 
link[\"href\"])\n if(link.split(\":\")[0] == \"mailto\"):\n # Skip any links that are just email addresses\n continue\n # Link in this case is not a direct link, looks something like this /blog_portal/category/fashion/ranking/\n # domain would just be ameblo.jp\n # sometimes need to add the starting /\n if(link[0] != \"/\"):\n link = \"/\" + link\n newLink = \"https://\" + domain + link\n num_outLinks += 1\n if((newLink not in visited) and (newLink not in queue)):\n queue.append(newLink)\n _outlinks.append(newLink)\n self.pages.append(\n Page(page_name, currentUrl, _outlinks))\n print('done with crawl...')\n\n # returns a list of pages that link to page\n def pages_linking_to(self, page):\n pages = []\n for _page in self.pages:\n if _page.links_to(page):\n pages.append(_page)\n return pages\n\n def calc_page_rank(self, page):\n page_rank = 0\n # loop through all pages linking to this page and add their page_rank\n for page_linking in self.pages_linking_to(page):\n page_rank += page_linking.get_page_rank()\n\n return page_rank\n\n # give initial pagerank value to all pages\n def set_initial_pagerank_values(self):\n for page in self.pages:\n page.set_page_rank(1 / len(self.pages))\n\n def page_rank(self):\n self.set_initial_pagerank_values()\n\n is_delta_greater = True\n epsilon = 0.2\n delta = 100\n iteration = 0\n info = ''\n # while delta is greater for any page; continue calculating page rank (convergence)\n while(is_delta_greater == True and iteration < 10):\n is_delta_greater = False\n info += f'\\nIteration: {str(iteration)}'\n for i, page in enumerate(self.pages):\n page_rank_prev = page.page_rank\n page_rank_new = self.calc_page_rank(page)\n delta = abs(page_rank_new - page_rank_prev)\n info += f'\\npagerank:: {page_rank_new}\\tdelta: {delta}'\n if delta > epsilon:\n is_delta_greater = True\n iteration += 1\n path = os.path.dirname(os.path.abspath(__file__)) + \"\\\\info.txt\"\n with open(path, 'w', encoding=\"utf-8\") as file:\n file.write(info)\n\n def cal_avg_docs_length(self):\n num_docs = len(self.document_length_dict.keys())\n total_word_count = 0\n\n for val in self.document_length_dict.values():\n total_word_count += val\n\n return (total_word_count / num_docs)\n\n def get_ni(self, word):\n try:\n return len(self.indexed_words_dict[word].keys())\n except:\n return 0\n\n def take(self, n, iterable):\n return list(islice(iterable, n))\n\n def calculate_BMI(self, search_phrase_words):\n ri = 0\n R = 0\n k1 = 1.2\n k2 = float(100)\n b = 0.75\n # total number of documents\n N = len(self.document_length_dict.keys())\n\n # calculate average document length\n avdl = self.cal_avg_docs_length()\n\n # get n for each term. The number of times each term appears accross all documents.\n # each list index corresponds to same index in search_phrase_words\n # documents_list is a list of sets. each set has the pages, word i appears in.\n n_list = list()\n documents_list = list()\n for word in search_phrase_words:\n word = word.lower()\n try:\n n_list.append(len(self.indexed_words_dict[word].keys()))\n documents_list.append(\n set(self.indexed_words_dict[word].keys()))\n except:\n n_list.append(0)\n\n # create a set which is an intersection of all pages where all terms in search phrase appear.\n # we want to see which pages have all words in the search phrase\n pages_set = set()\n for i, item in enumerate(documents_list):\n if i == 0:\n pages_set = item\n else:\n pages_set = pages_set.intersection(item)\n\n # calculate BMI of each page that has the search phrase (i.e. 
contains all words in search phrase).\n # we return the ranked pages as a list of (page_name, BMI score) tuples\n # we assume ri and R to be zero and qfi to be 1\n bmi_results = dict()\n k_cap = 0.0\n for page in pages_set:\n bmi = 0\n # calculate K for each doc\n dl = float(self.document_length_dict[page])\n k_cap = k1 * ((1 - b) + b * (dl / avdl))\n for i, word in enumerate(search_phrase_words):\n word = word.lower()\n try:\n fi = self.indexed_words_dict[word][page]\n except:\n print(f'zero times appearing in {page}')\n fi = 0\n ni = self.get_ni(word)\n bmi += math.log10(((0.5)/(0.5)) / ((ni + 0.5) / (N - ni + 0.5))) * \\\n (((k1 + 1) * fi) / (k_cap + fi)) * \\\n (((k2 + 1) * 1) / (k2 + 1))\n bmi_results[page] = bmi\n\n # rank pages by BMI score, highest first, and keep at most the top 10\n results = sorted(bmi_results.items(), key=lambda item: item[1], reverse=True)[:10]\n return results\n\n def index_webpages(self):\n # Create empty list for words that need to be cleaned\n word_list = []\n\n # loop through all .html files in repository folder. index the words and their frequencies\n path = os.path.dirname(os.path.abspath(__file__)) + \"\\\\repository\\\\\"\n files_list = os.listdir(path)\n\n for file_name in files_list:\n completePath = os.path.normpath(path + \"\\\\\" + file_name)\n with open(completePath, 'r', encoding=\"utf-8\") as file:\n soup = BeautifulSoup(file, 'html.parser')\n tags = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p']\n # Get text from the page\n for tag in soup.findAll(tags):\n text = tag.text\n words = text.lower().split()\n # Append it to the wordlist and then clean the words of all symbols\n for word in words:\n word_list.append(word)\n self.clean_index_words(word_list, file_name)\n word_list.clear()\n\n def clean_index_words(self, words_list, file_name):\n cleaned_words_list = []\n\n # Clean the words from any symbols\n for word in words_list:\n symbols = '!@#$%^&*()_-+={[}]|\\;:\"<>?/., '\n for i in range(0, len(symbols)):\n word = word.replace(symbols[i], '')\n if len(word) > 0:\n cleaned_words_list.append(word)\n\n # add length of this document to the global dictionary\n self.document_length_dict[file_name] = len(cleaned_words_list)\n\n # index the words in this document\n for word in cleaned_words_list:\n # if word is there check if current page is recorded, if yes; increment it. 
If not, add the current page name\n            # and set the count to 1\n            if word in self.indexed_words_dict:\n                if file_name in self.indexed_words_dict[word].keys():\n                    self.indexed_words_dict[word][file_name] += 1\n                else:\n                    self.indexed_words_dict[word][file_name] = 1\n            # if word is not indexed\n            else:\n                new_dict = dict()\n                new_dict[file_name] = 1\n                self.indexed_words_dict[word] = new_dict\n\n    def init_robot_info(self, link):\n        self.disallowed_url_arr.clear()\n        url = link + 'robots.txt'\n        robot_txt = requests.get(url, timeout=5).text\n\n        robot_txt_lines = robot_txt.split('\\n')\n        if(len(robot_txt_lines) == 0):\n            return\n\n        for line in robot_txt_lines:\n            line_arr = line.split(' ')\n            if(len(line_arr) > 1):\n                # record each non-empty Disallow path\n                if((line_arr[0] == 'Disallow:') and (line_arr[1] != '')):\n                    self.disallowed_url_arr.append(line_arr[1])\n\n    def isAllowed(self, link):\n        for text in self.disallowed_url_arr:\n            if(text in link):\n                return False\n        return True\n\n    def RetrievePhrase(self, phrase):\n        phrase_arr = phrase.split(' ')\n        bmi_results = self.calculate_BMI(phrase_arr)\n\n        # loop through the bmi_results (dict with key=page_name and value=bmi score),\n        # for each result, find page (with same name)\n        # multiply the page's page_rank with the bmi score\n        final_result_dic = dict()\n        for result in bmi_results:\n            for page in self.pages:\n                if page.name == result[0]:\n                    final_result_dic[page.url] = page.page_rank * result[1]\n        return final_result_dic\n","repo_name":"bobby-shaj/webcrawler","sub_path":"webcrawler/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":13886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
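calculate_BMI in the record above is a BM25-style scorer with the relevance counts ri and R fixed at 0 and the query-term frequency fixed at 1, as its comments state. Pulled out of the class, the per-term weight it accumulates is easier to check in isolation; a minimal sketch (the function name and the sample numbers are illustrative, not from the repository):

    import math

    def bm25_term_weight(fi, ni, N, dl, avdl, k1=1.2, k2=100.0, b=0.75, qfi=1):
        # with ri = R = 0 the idf-like part reduces to log10((N - ni + 0.5) / (ni + 0.5))
        idf = math.log10((0.5 / 0.5) / ((ni + 0.5) / (N - ni + 0.5)))
        K = k1 * ((1 - b) + b * (dl / avdl))    # document-length normalisation
        tf = ((k1 + 1) * fi) / (K + fi)         # term-frequency saturation
        qf = ((k2 + 1) * qfi) / (k2 + qfi)      # query-term weight; equals 1 when qfi = 1
        return idf * tf * qf

    # e.g. a term in 3 of 100 docs, appearing twice in a document of average length:
    # bm25_term_weight(fi=2, ni=3, N=100, dl=120, avdl=120) ≈ 1.99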
+{"seq_id":"12021311174","text":"from ..symbol import *\nfrom ..fermion import *\nfrom ..spin import *\nfrom ..state import *\n\n\ndef test_symbol_with_op():\n a = Symbol(\"a\")\n b = Symbol(\"b\")\n assert (\n (\n np.conj(np.array([c1, c2]))\n @ np.array([[a, b], [np.conj(b), a]])\n @ np.array([c1, c2])\n ).evaluate({\"a\": 1, \"b\": 1j})\n == c1.D * c1 + c2.D * c2 + 1j * c1.D * c2 - 1j * c2.D * c1\n )\n\n\ndef test_evaluate_mos():\n a = Symbol(\"a\")\n assert (3 * c1.D * a * c2).evaluate({\"a\": 2}) == -6 * c2 * c1.D\n U = Symbol(\"U\")\n assert (\n Sf(\"12\").D | U * (np.array([c1.D, c2.D]) @ np.array([c1, c2])) ** 2 | Sf(\"12\")\n == 4 * U\n )\n\n\ndef test_spin_symbol():\n a = Symbol(\"a\")\n assert Ss(\"1\").D | a * s1.z | Ss(\"1\") == 0.5 * a\n","repo_name":"refraction-ray/qop","sub_path":"qop/tests/test_mix_string.py","file_name":"test_mix_string.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"41964309037","text":"# -*- coding:utf-8 -*-\n\nfrom tkinter import *\nimport time\n\n\ndef onGo():\n print(\"0000\")\n t.delete('1.0', 'end')\n for i in range(50):\n t.insert(INSERT, 'a_' + str(i))\n time.sleep(0.1)\n\n\n\nroot = Tk()\nt = Text(root)\nt.pack()\ngoBtn = Button(text=\"Go!\", command=onGo)\ngoBtn.pack()\nroot.mainloop()","repo_name":"RenYanan0813/PythonTest","sub_path":"test/scriptDemo联调脚本/服务器监控/d1.py","file_name":"d1.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"19414695559","text":"workDays = [ 19, 21, 22, 21, 20, 22]\nmonths = ['I','II','III','IV','V','VI']\n\nmonthsDays = dict(zip(months,workDays)) #powstał słownik\nprint(monthsDays)\n\n\n'''\nprzy takim zapisie pojawi się bład: not enough values to unpack (expected 2, got 1)\nz uwagi na to, że element monthsDays składa się z jednego pola {xx:xx}\nfor key,value in monthsDays: \n print('Key is',key,'value is', value)\n'''\n\nfor key in monthsDays:\n print('Key is',key,'value is', monthsDays[key])\n\n#lepiej list, bo w starszych pythonah nie koniecznie wartości będę zwrócone w kolejności\nfor value in monthsDays.values():\n print('the value is',value)\n\n\n","repo_name":"sineczek/PythonSrednioZaawansowany","sub_path":"Zmienne_i_kod/09_iteracja_po_slowniku.py","file_name":"09_iteracja_po_slowniku.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"29929542239","text":"from flask import Flask, render_template, request\nimport flask\nfrom flask_restful import Api, Resource\nimport json\nimport pymongo\nimport os\n\nipBD = os.environ['ipBD']\n\nclient = pymongo.MongoClient(\"mongodb://sabrina:mongolito@{0}/tarefas\".format(ipBD)) # defaults to port 27017\n\ndb = client.tarefas\n\ntarefas = db.tarefas\n\ndictglobal = {}\nglobal id_adicionado\nid_adicionado = 0\ndef add_tarefa(tarefa, idee):\n\tt = tarefa.tempo\n\td = tarefa.dificuldade\n\tdic = {\"tempo\": t, \"dificuldade\": d}\n\tmydict = {\"_id\" : idee, \"tempo\": t, \"dificuldade\": d}\n\ttarefas.insert_one(mydict)\n\treturn idee\n\ndef update_tarefa(tarefa, id_tarefa):\n\tt = tarefa.tempo\n\td = tarefa.dificuldade\n\tdic = {\"tempo\": t, \"dificuldade\": d}\n\tif dictglobal == {}:\n\t\tid_adicionado = 0\n\telse:\n\t\tid_adicionado = (list(dictglobal.keys())[id_tarefa])\n\tmyquery = { \"_id\": id_tarefa }\n\tnewvalues = { \"$set\": dic }\n\n\ttarefas.update_one(myquery, newvalues)\n\treturn id_adicionado\n\ndef remove_tarefa(id_tarefa):\n\tmyquery = { \"_id\": id_tarefa }\n\n\ttarefas.delete_one(myquery)\n\n\n# • Crie uma classe denominada Tarefas. Defina pelo menos uns 2 atributos.\n\nclass Tarefas():\n\tdef __init__(self, tempo=0, dificuldade=0):\n\t\tself.tempo = tempo\n\t\tself.dificuldade = dificuldade\n\n\n\n# • Crie um dicionário global no programa que irá acomodar as tarefas. Defina que a “chave primária” seria\n# \t\tum id autoincremental de tarefas.\n\n\n\n\n# • Crie um webserver em Python usando o Flask.\n\napp = flask.Flask('your_flask_env')\n\n\n\n# • Defina 6 endpoints para expor no Flask (trocando informações via JSON):\n\n# – /Tarefa/ - GET: lista todas as tarefas do dicionário.\n# – /Tarefa/ - POST: Adiciona uma tarefa.\n\n@app.route('/Tarefa', methods=['GET', 'POST'])\n# @app.route(\"/\")\ndef Tarefa():\n\tglobal id_adicionado\n\tid_adicionado = 0\n\tif flask.request.method == 'POST':\n\t\t# adiciona uma tarefa\n\t\tvai = []\n\t\tfor x in tarefas.find():\n\t\t\tvai.append(x)\n\t\tif vai != []:\n\t\t\tfor x in tarefas.find().sort(\"_id\",-1):\n\t\t\t\tiii = int(x['_id']) + 1\n\t\t\t\tbreak\n\t\telse:\n\t\t\tiii = 0\n\t\tadd_tarefa(Tarefas(request.get_json()['tempo'], request.get_json()['dificuldade']), iii)\n\t\tid_adicionado += 1\n\t\treturn (\"ok\", 200)\n\telse:\n\t\t# lista todas as tarefas do dicionario\n\t\tvai = []\n\t\tfor x in tarefas.find():\n\t\t\tvai.append(x)\n\t\treturn(str(vai), 200)\n\n\n\n# – /Tarefa/ - GET: lista a tarefa com o determinado id.\n# – /Tarefa/ - PUT: atualiza uma tarefa com o determinado id.\n# – /Tarefa/ - DELETE: apaga a tarefa com o determinado id.\n\n@app.route('/Tarefa/', methods=['GET', 'PUT', 'DELETE'])\ndef TarefaID(id_task):\n\tif flask.request.method == 'DELETE':\n\t\tremove_tarefa(id_task)\n\t\treturn (\"ok\", 200)\n\telif flask.request.method == 'PUT':\n\t\tprint(request.get_json())\n\t\tupdate_tarefa(Tarefas(request.get_json()['tempo'], request.get_json()['dificuldade']), id_task)\n\t\treturn (\"ok\", 200) #dictglobal[numero]\n\telse:\n\t\tvai = []\n\t\tfor x in tarefas.find({ \"_id\": id_task}):\n\t\t\tvai.append(x)\n\t\treturn(str(vai), 200)#render_template(\"index.html\")#.format(dictglobal[numero])\n\n\n# – /healthcheck/ - Retorna o código 200 sem texto.\n\n@app.route('/healthcheck')\ndef healthy():\n\treturn (\"\", 200)\n\n\n# • Teste o serviço acima em localhost, olhando como o dicionário é alterado. 
Atenção na hora em que serializar\n# um objeto da sua classe para o formato\n\n\nif __name__ == '__main__':\n\tapp.run(debug=True, host='0.0.0.0', port=5000)\n\n","repo_name":"SabrinaMB/Cloud-APSs","sub_path":"aps1.py","file_name":"aps1.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
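The comments in the record above enumerate the six endpoints; a quick smoke test against a local run (port 5000, as passed to app.run) might look like the sketch below. The requests library and the localhost URL are assumptions for illustration; the payload keys tempo and dificuldade are the ones the handlers read via request.get_json().

    import requests

    base = "http://localhost:5000"

    # create a task, then list, update, fetch and delete it
    requests.post(f"{base}/Tarefa", json={"tempo": 30, "dificuldade": 2})
    print(requests.get(f"{base}/Tarefa").text)        # list all tasks
    requests.put(f"{base}/Tarefa/0", json={"tempo": 45, "dificuldade": 3})
    print(requests.get(f"{base}/Tarefa/0").text)      # task with id 0
    requests.delete(f"{base}/Tarefa/0")
    assert requests.get(f"{base}/healthcheck").status_code == 200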
+{"seq_id":"4096942480","text":"import pandas as pd\nimport numpy as np \nimport matplotlib.pyplot as plt \nimport pickle\nfrom sklearn import svm\nfrom sklearn.ensemble import RandomForestRegressor\n\n#Read data from table\ndata = pd.read_csv(\"salesweekly.csv\")\ndata.drop(0)\ndata.drop(\"datum\",axis=1,inplace=True)\nmed1 = data[[\"M01AB\"]]\nmed2 = data[[\"M01AE\"]]\nmed3 = data[[\"N02BA\"]]\nmed4 = data[[\"N02BE\"]]\nmed5 = data[[\"N05B\"]]\nmed6 = data[[\"N05C\"]]\nmed7 = data[[\"R03\"]]\nmed8 = data[[\"R06\"]]\n\n#----------------------------------------------------------------------------------------------------\n\n#Take inputs\nproduct = ['#M01AB', '#M01AE', '#N02BA', '#N02BE', '#N05B', '#N05C', '#R03', '#R06']\ndf = pd.read_csv(r'lead_time.csv', index_col=0)\nindex = int(input(\"Enter index=\"))\nprint(\"Enter sales\")\ncurrentsales = float(input())\nprint(\"Finding data for \"+product[index]+\"....\")\n\n#----------------------------------------------------------------------------------------------------\n\n#Choose model based on index\nif index == 0:\n x = np.array(med1.loc[len(med1)-9:])\n x = np.append(x,currentsales).reshape(1,10)\n filename = 'model1.sav'\n accuracy = 0.82\n\nif index == 1:\n x = np.array(med2.loc[len(med2)-9:])\n x = np.append(x,currentsales).reshape(1,10)\n filename = 'model2.sav'\n accuracy = 0.77\n\nif index == 2:\n x = np.array(med3.loc[len(med3)-9:])\n x = np.append(x,currentsales).reshape(1,10)\n filename = 'model3.sav'\n accuracy = 0.86\n\nif index == 3:\n x = np.array(med4.loc[len(med4)-9:])\n x = np.append(x,currentsales).reshape(1,10)\n filename = 'model4.sav'\n accuracy = 0.94\n\nif index == 4:\n x = np.array(med5.loc[len(med5)-9:])\n x = np.append(x,currentsales).reshape(1,10)\n filename = 'model5.sav'\n accuracy = 0.86\n\nif index == 5:\n x = np.array(med6.loc[len(med6)-9:])\n x = np.append(x,currentsales).reshape(1,10)\n filename = 'model6.sav'\n accuracy = 0.77\n\nif index == 6:\n x = np.array(med7.loc[len(med7)-9:])\n x = np.append(x,currentsales).reshape(1,10)\n filename = 'model7.sav'\n accuracy = 0.87\n\nif index == 7:\n x = np.array(med8.loc[len(med8)-9:])\n x = np.append(x,currentsales).reshape(1,10)\n filename = 'model8.sav'\n accuracy = 0.93\n\n#----------------------------------------------------------------------------------------------------\n\n#Calculate Demand\nmodel = pickle.load(open(filename, 'rb'))\ndemand = model.predict(x)\nprint(\"The predicted demand is: \")\nprint(demand[0])\nmean1 = np.mean(x)\nstd1 = np.std(x)\n\n#Inventory\ndef lt_mean(index):\n #print(df['Actual Lead Time'][str(product[index])])\n return df['Actual Lead Time'][str(product[index])].mean()\n\ndef lt_std(index):\n return df['Actual Lead Time'][str(product[index])].std()\n\ndef ss_cal(z,prod_mean,prod_std,mean1,std1):\n ss = (z*std1*np.sqrt(prod_mean))+(z*mean1*prod_std)\n return ss\n\ndef qor_cal(demand, accuracy, ss):\n qor = demand+((1-accuracy)*demand)+ss\n return qor\n\nprod_mean=lt_mean(index)\nprod_std=lt_std(index)\n\nz=1.28\n\nsafety_stock= np.ceil(ss_cal(z,prod_mean,prod_std,mean1,std1))\nprint(\"Required Safety Stock=\")\nprint(safety_stock)\n\nqor = np.ceil(qor_cal(demand, accuracy, safety_stock))\nprint(\"Required Quantity of Reorder=\")\nprint(qor[0])","repo_name":"Abhimanyu512/AI_Inventory_Management","sub_path":"inputvalue.py","file_name":"inputvalue.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"22051024513","text":"def knapsack_dp(capacity, weights, values, n):\n \"\"\"takes two lists of weight and values and tries to optimize based on the capasity, capasity referes to the weight\n\n Args:\n capacity (int): weight capacity\n weights (list): how much the item of index x is weighted\n values (list): the value of item of index x\n n (int): number of item indexes\n\n Returns:\n _type_: optimal solution\n \"\"\"\n grid = [[0 for x in range(capacity + 1)]\n for x in range(n + 1)]\n\n for item in range(n + 1):\n for cap in range(capacity + 1):\n\n if item == 0 or cap == 0:\n grid[item][cap] = 0\n\n elif weights[item - 1] <= cap:\n grid[item][cap] = max(values[item - 1] +\n grid[item - 1][cap - weights[item - 1]],\n grid[item - 1][cap])\n \n else:\n grid[item][cap] = grid[item - 1][cap]\n\n return grid[n][capacity]\n\nitem_val = [200, 150, 100, 50]\nitem_wt = [40, 32, 16, 8]\ntotal_cap = 64\nn_items = len(item_val)\n\nprint('Max value to put in kanpsack of capacity W:', total_cap)\nprint(knapsack_dp(total_cap, item_wt, item_val, n_items), '$')","repo_name":"VincentKleis/PublicProjects","sub_path":"python_programing/INFO135/lectures/knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"42957677776","text":"# 9079번 동전 게임\n# https://www.acmicpc.net/problem/9079\n\nimport numpy as np\nfrom copy import deepcopy\n\n\ndef turn(x):\n if x:\n return 0\n else:\n return 1\n\n\ndef isAnswer(arr, g):\n global result\n\n # 동전 뒤집기\n for idx in arr:\n for ny, nx in turning[idx]:\n g[ny][nx] = turn(g[ny][nx])\n # print(np.array(g))\n # 현재 상태의 완성 여부 확인\n total = 0\n for ny in range(3):\n total += sum(g[ny])\n\n if total == 0 or total == 9:\n result = min(result, len(arr))\n return True\n else:\n return False\n\n\ndef recur(node, arr):\n # print(arr)\n if isAnswer(arr, deepcopy(graph)):\n return\n\n for nxt in range(node+1, 8):\n # 동전 뒤집기\n arr.append(nxt)\n recur(nxt, arr)\n arr.pop()\n # 동전 뒤집지 않기\n recur(nxt, arr)\n\n\nT = int(input())\nfor tc in range(1, T+1):\n graph = []\n # H = 0 / T = 1 로 변환\n for _ in range(3):\n input_data = input().replace(\"H\", \"0\").replace(\"T\", \"1\")\n graph.append(list(map(int, input_data.split())))\n\n # 최대로 뒤집는 경우의 수 = 8\n result = 9\n\n # 뒤집는 경우의 수\n turning = []\n for i in range(3):\n arr1 = []\n arr2 = []\n for j in range(3):\n arr1.append([i, j])\n arr2.append([j, i])\n turning.append(arr1)\n turning.append(arr2)\n\n arr3 = []\n arr4 = []\n for i in range(3):\n arr3.append([i, i])\n arr4.append([i, 2-i])\n turning.append(arr3)\n turning.append(arr4)\n\n recur(-1, [])\n if result == 9:\n print(-1)\n else:\n print(result)\n\n\n\n","repo_name":"Johyonghoon/algorithm","sub_path":"backjoon/brute_force/9079.py","file_name":"9079.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"17673307838","text":"from freelancersdk.session import Session\nfrom freelancersdk.resources.projects.projects import update_track\nfrom freelancersdk.resources.projects.exceptions import \\\n TrackNotUpdatedException\n\nimport os\n\n\ndef sample_update_track(track_id, latitude, longitude, stop_tracking):\n url = os.environ.get('FLN_URL')\n oauth_token = os.environ.get('FLN_OAUTH_TOKEN')\n session = Session(oauth_token=oauth_token, url=url)\n\n try:\n track = update_track(session, track_id, latitude, longitude, stop_tracking)\n except TrackNotUpdatedException as e:\n print('Error message: {}'.format(e.message))\n print('Server response: {}'.format(e.error_code))\n return None\n else:\n return track\n\n\n\ntrack_id = 82\nlatitude = 14.5\nlongitude = 15.5\nstop_tracking = False\nresult = sample_update_track(track_id, latitude, longitude, stop_tracking)\n\nif result:\n print('Updated track number {}! Details: {}'.format(track_id, result))\n","repo_name":"freelancer/freelancer-sdk-python","sub_path":"examples/update_track.py","file_name":"update_track.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"91"}
+{"seq_id":"6618785119","text":"'''\nYou are climbing a stair case. It takes n steps to reach to the top.\n\nEach time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?\n\nNote: Given n will be a positive integer.\n'''\n\n'''\nVariable a tells you the number of ways to reach the current step, and b tells you the number of ways to reach the next step. So for the situation one step further up, the old b becomes the new a, and the new b is the old a+b, since that new step can be reached by climbing 1 step from what b represented or 2 steps from what a represented.\n'''\n\ndef climbStairs(n):\n a = b = 1\n for _ in range(n):\n a, b = b, a + b\n return a\n\nprint(climbStairs(4))","repo_name":"hyc121110/LeetCodeProblems","sub_path":"Others/climbStairs.py","file_name":"climbStairs.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"25192850529","text":"from setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nfrom codecs import open\nfrom os import path\nimport sys\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n\nclass PyTest(TestCommand):\n def run_tests(self):\n self.pytest_args = ['-vvv', '-l']\n import pytest\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(name='fdns',\n version='0.1.0',\n description='Forwarding DNS',\n long_description=long_description,\n packages=find_packages(),\n author='Shengjing Zhu',\n author_email='zsj950618@gmail.com',\n url='https://github.com/zhsj/fdns',\n license='MIT',\n install_requires=['aiohttp', 'dnslib'],\n tests_require=['pytest'],\n cmdclass={'test': PyTest},\n entry_points={\n 'console_scripts': [\n 'fdns=fdns.app:main',\n\n ]\n }\n )\n","repo_name":"zhsj/fdns","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"18063495025","text":"import hashlib\nimport os\nimport discord\nfrom discord.ext import commands, tasks\nfrom time import time\nfrom helper_functions import simple_embed\nimport json\nimport config\nfrom bot import is_bot_dev, on_command_error\nfrom discord import app_commands\nfrom typing import List, Optional\n\nfrom PyPDF2 import PdfReader\nimport re\nfrom datetime import datetime\nimport locale\ndiscord_timestamp = \" ()\"\n\nclass Uni(commands.Cog):\n \"\"\"Commands zum debuggen\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.data = get_data()\n self.update_assignments.start()\n \n def cog_unload(self):\n self.update_assignments.cancel()\n\n @staticmethod\n def is_in_uni_server():\n async def predicate(ctx):\n possible_member = ctx.author\n guild = ctx.bot.get_guild(config.UNI_GUILD)\n if possible_member in guild.members:\n return True\n return \"students\" in get_data().keys() and ctx.author.id in get_data()[\"students\"]\n\n return commands.check(predicate)\n \n @staticmethod\n def is_in_uni_server_interaction_check():\n async def predicate(interaction: discord.Interaction) -> bool:\n guild = interaction.client.get_guild(config.UNI_GUILD)\n if interaction.user in guild.members:\n return True\n elif \"students\" in get_data().keys() and interaction.user.id in get_data()[\"students\"]:\n return True\n else:\n e = simple_embed(interaction.user, \"Du hast keine Berechtigung diesen Command auszuführen.\", color=discord.Color.red())\n await interaction.response.send_message(embede=e, ephemeral=True)\n return False \n \n return app_commands.check(predicate)\n\n \n \n @app_commands.command(name=\"vorlesungsstand\", description=\"Zeigt den momentanen Vorlesungsstand an\")\n async def get_vorlesungsstand_nosync(self, interaction: discord.Interaction):\n if \"subjects\" not in self.data.keys():\n e = simple_embed(interaction.user, \"Es stehen keine Daten zur Verfügung\", color=discord.Color.red())\n await interaction.response.send_message(embed=e, ephemeral=True)\n return\n\n e = discord.Embed(title=\"Vorlesungsstand\", color=discord.Color.blurple())\n description = \"\"\n for subject in self.data[\"subjects\"]:\n current = self.data['subjects'][subject]['current']\n timestring = datetime.fromtimestamp(current[1]).strftime('%d.%m.%Y') # %H:%MUhr\n description += f\"**{subject}**\\n{current[0]} - (Stand {timestring})\\n\\n\"\n e.description = description\n await interaction.response.send_message(embed=e, ephemeral=True)\n\n\n async def update_subject_autocomplete(self,interaction: discord.Interaction,current: str,) -> List[app_commands.Choice[str]]:\n choices = [x for x in self.data[\"subjects\"] if not self.data[\"subjects\"][x][\"inactive\"]]\n return [\n app_commands.Choice(name=choice, value=choice) for choice in choices if current.lower() in choice.lower()\n ]\n \n\n @is_in_uni_server_interaction_check()\n @app_commands.command(name=\"update_subject\", description=\"Aktualisiert den Stand eines angegebenen Fachs\")\n @app_commands.describe(\n subject=\"Fach, welches aktualisiert werden soll\",\n new_state=\"Neuer Stand des Faches\", \n timestamp=\"Zeitpunkt der Aktualisierung im Format dd.mm.yyyy\"\n )\n @app_commands.autocomplete(subject=update_subject_autocomplete)\n @app_commands.rename(new_state=\"neuer_stand\", subject=\"fach\", timestamp=\"zeitpunkt\")\n async def update_subject(self, interaction: discord.Interaction, subject: str, new_state: str, timestamp: Optional[str]):\n if \"subjects\" not in self.data.keys():\n e = simple_embed(interaction.user, \"Es 
stehen keine Fächer zur Verfügung, die aktualisiert werden können\", color=discord.Color.red())\n await interaction.response.send_message(embed=e, ephemeral=True)\n return\n if subject not in self.data[\"subjects\"]:\n e = simple_embed(interaction.user, f\"Das Fach `{subject}` existiert nicht\", color=discord.Color.red())\n await interaction.response.send_message(embed=e, ephemeral=True)\n return\n if self.data[\"subjects\"][subject][\"inactive\"]:\n e = simple_embed(interaction.user, f\"Das Fach `{subject}` ist nicht aktiv\", color=discord.Color.red())\n await interaction.response.send_message(embed=e, ephemeral=True)\n return\n if(timestamp):\n try:\n timestamp = datetime.strptime(timestamp, '%d.%m.%Y').timestamp()\n except ValueError as ex:\n e = simple_embed(interaction.user, ex.args[0], color=discord.Color.red())\n await interaction.response.send_message(embed=e, ephemeral=True)\n return\n else:\n timestamp = time()\n \n start = self.findLastEnd(subject)\n \n self.data[\"subjects\"][subject][\"current\"] = (new_state, timestamp)\n self.data[\"subjects\"][subject][\"history\"].append(\n {\n \"time\": timestamp,\n \"start\": start,\n \"end\": new_state\n }\n )\n await self.update_message()\n update_data(self.data)\n \n e = simple_embed(interaction.user, f\"Das Fach {subject} wurde erfolgreich aktualisiert\", color=discord.Color.green())\n await interaction.response.send_message(embed=e, ephemeral=True)\n \n\n @is_in_uni_server()\n @commands.command(aliases=[\"vls\"])\n async def vorlesungsstand(self, ctx, *args):\n \"\"\"Aktualisiert den Vorlesungsstand eines angegebenen Faches.\n Beispiel: `vls LA1 3.2.2 3.3` setzt den aktuellen Stand auf 3.3 und speichert, dass heute 3.2.2 bis 3.3 behandelt wurden.\n Möglichkeiten, den Befehl anzuwenden: \\n``vls LA1 3.3``\\n``vls LA1 3.2.2 3.3``\\n``vls LA1 3.2.2 3.3 28.11.2022`\n \"\"\"\n \n if not 2 <= len(args) <= 4:\n await ctx.send(embed=simple_embed(\n ctx.author,\n \"Es müssen genau 2-4 Argumente angegeben werden\",\n \"Beispiel:\\n`vls LA1 3.3`\\n`vls LA1 3.2.2 3.3`\\n`vls LA1 3.2.2 3.3 28.11.2022`\",\n color=discord.Color.red()\n ))\n return\n \n timestamp = time()\n \n if len(args) == 2:\n (subject, end) = args\n start = self.findLastEnd(subject)\n elif len(args) == 3:\n (subject, start, end) = args\n elif len(args) == 4:\n (subject, start, end, timestr) = args\n timestamp = datetime.strptime(timestr, '%d.%m.%Y').timestamp()\n \n if \"subjects\" not in self.data.keys() or subject not in self.data[\"subjects\"]:\n await ctx.send(embed=simple_embed(ctx.author, \"Ein Fehler ist aufgetreten\", f\"Das Fach ``{subject}`` ist nicht vorhanden\", color=discord.Color.red()))\n return\n\n self.data[\"subjects\"][subject][\"current\"] = (end, timestamp)\n self.data[\"subjects\"][subject][\"history\"].append(\n {\n \"time\": timestamp,\n \"start\": start,\n \"end\": end\n }\n )\n await self.update_message()\n update_data(self.data)\n await ctx.message.add_reaction(\"\\N{White Heavy Check Mark}\")\n \n\n def findLastEnd(self, subject):\n if \"subjects\" not in self.data.keys() or subject not in self.data[\"subjects\"]:\n return \"0.0\"\n \n if len(self.data[\"subjects\"][subject][\"history\"]) == 0:\n return \"0.0\"\n \n current = self.data[\"subjects\"][subject][\"history\"][0]\n for h in self.data[\"subjects\"][subject][\"history\"]:\n if h[\"time\"] > current[\"time\"]:\n current = h\n \n return current[\"end\"] \n \n \n async def update_message(self):\n if \"channel_id\" not in self.data.keys() or \"message_id\" not in self.data.keys():\n return\n 
msg = await self.bot.get_channel(self.data[\"channel_id\"]).fetch_message(self.data[\"message_id\"])\n\n        if \"subjects\" not in self.data.keys():\n            return\n\n        e = discord.Embed(title=\"Vorlesungsstand\", color=discord.Color.blurple())\n        description = \"\"\n        for subject in self.data[\"subjects\"]:\n            if self.data['subjects'][subject]['inactive']:\n                continue\n            current = self.data['subjects'][subject]['current']\n            timestring = datetime.fromtimestamp(current[1]).strftime('%d.%m.%Y') # %H:%MUhr\n            description += f\"**{subject}**\\n{current[0]} - (Stand {timestring})\\n\\n\"\n        e.description = description\n        await msg.edit(embed=e)\n\n    @is_bot_dev()\n    @commands.command(aliases=[\"vlsadd\"])\n    async def addSubject(self, ctx, subject):\n        \"\"\"Fügt ein Fach der Vorlesungsstandsliste hinzu.\"\"\"\n\n        if \"subjects\" not in self.data.keys():\n            self.data[\"subjects\"] = {}\n        if subject in self.data[\"subjects\"]:\n            await ctx.send(embed=simple_embed(ctx.author, \"Ein Fehler ist aufgetreten\", f\"Das Fach ``{subject}`` existiert bereits\", color=discord.Color.red()))\n            return\n\n        self.data[\"subjects\"][subject] = {\n            \"current\": (\"0.0\", time()),\n            \"history\": [],\n            \"inactive\": False\n        }\n        update_data(self.data)\n        await self.update_message()\n        await ctx.send(embed=simple_embed(ctx.author, f\"Das Fach ``{subject}`` wurde erfolgreich hinzugefügt.\", color=discord.Color.green()))\n\n\n    @is_bot_dev()\n    @commands.command(aliases=[\"vlsdeactivate\"])\n    async def deactivateSubject(self, ctx, subject):\n        \"\"\"Deaktiviert ein Fach.\"\"\"\n\n        if \"subjects\" not in self.data.keys():\n            self.data[\"subjects\"] = {}\n        if subject not in self.data[\"subjects\"]:\n            await ctx.send(embed=simple_embed(ctx.author, \"Ein Fehler ist aufgetreten\", f\"Das Fach ``{subject}`` existiert nicht\", color=discord.Color.red()))\n            return\n\n        self.data['subjects'][subject]['inactive'] = True\n        update_data(self.data)\n        await self.update_message()\n        await ctx.send(embed=simple_embed(ctx.author, f\"Das Fach ``{subject}`` wurde erfolgreich deaktiviert.\", color=discord.Color.green()))\n\n\n\n    @is_bot_dev()\n    @commands.command(aliases=[\"addstudent\"])\n    async def addStudent(self, ctx, student: discord.User):\n        \"\"\"Gibt einem Nutzer Berechtigungen, Unicommands zu nutzen\"\"\"\n        if \"students\" not in self.data.keys():\n            self.data[\"students\"] = []\n\n        # the list stores user ids, so compare against student.id\n        if student.id in self.data[\"students\"]:\n            await ctx.send(embed=simple_embed(ctx.author, \"Ein Fehler ist aufgetreten\", f\"Der Nutzer ``{student.name}`` hat bereits Berechtigungen\", color=discord.Color.red()))\n            return\n\n        self.data[\"students\"].append(student.id)\n        update_data(self.data)\n        await self.update_message()\n        await ctx.send(embed=simple_embed(ctx.author, f\"Der Nutzer ``{student.name}`` wurde erfolgreich hinzugefügt.\", color=discord.Color.green()))\n\n\n    @is_in_uni_server()\n    @commands.command(aliases=[\"vlsmsg\"])\n    async def vlsInformation(self, ctx):\n        \"\"\"Setzt den aktuellen Kanal als Vorlesungsstand-Informations-Kanal.\"\"\"\n        self.data[\"channel_id\"] = ctx.channel.id\n        msg = await ctx.send(embed=simple_embed(ctx.author, \"Vorlesungsstand\", color=discord.Color.green()))\n        self.data[\"message_id\"] = msg.id\n        update_data(self.data)\n        await self.update_message()\n\n\n    async def send_to_channel(self, file, date, channel_id, ver=1):\n        filename = file.split(\"/\")[-1].split(\"\\\\\")[-1]\n        channel = self.bot.get_channel(channel_id)\n        f = discord.File(file)\n        if ver > 1:\n            await channel.send(f\"``{filename}`` wurde aktualisiert. 
Version: ``{ver}``, Abgabedatum {date}\", file=f)\n return\n await channel.send(f\"Neues Übungsblatt: ``{filename}``, Abgabe am {date}\", file=f)\n\n\n @tasks.loop(hours=2)\n async def update_assignments(self):\n # load files (https://github.com/Garmelon/PFERD)\n os.chdir(config.path)\n os.popen(\"sh ../assignment-data/loadAssignments.sh\").read()\n change = False\n with open(config.path + \"/json/assignments.json\", \"r\", encoding='utf-8') as f:\n data = json.load(f)[\"assignments\"]\n\n for subject in data[\"subjects\"].keys():\n path = data[\"subjects\"][subject][\"path\"] + os.sep\n # iterate over pdf files in assignment folder\n for _, _, files in os.walk(path):\n for file in files:\n \n if not file.endswith(\".pdf\"):\n continue\n \n # check whether file is already in data\n if file not in data[\"subjects\"][subject][\"assignments\"].keys():\n date = self.get_due_date(\n path + file, \n data[\"subjects\"][subject][\"pattern\"],\n data[\"subjects\"][subject][\"datetime_pattern\"]\n )\n \n with open(path + file, \"rb\") as f:\n filehash = hashlib.sha1(f.read()).hexdigest()\n\n data[\"subjects\"][subject][\"assignments\"][file] = {\n \"version\": 1, \n \"last_change\": datetime.now().timestamp(), \n \"hash\": filehash\n }\n await self.send_to_channel(path + file, date, data[\"subjects\"][subject][\"channel_id\"])\n change = True\n\n else:\n # # check if file hash has changed\n with open(path + file, \"rb\") as f:\n filehash = hashlib.sha1(f.read()).hexdigest()\n\n if filehash != data[\"subjects\"][subject][\"assignments\"][file][\"hash\"]:\n date = self.get_due_date(\n path + file, \n data[\"subjects\"][subject][\"pattern\"],\n data[\"subjects\"][subject][\"datetime_pattern\"]\n )\n data[\"subjects\"][subject][\"assignments\"][file][\"version\"] += 1\n data[\"subjects\"][subject][\"assignments\"][file][\"last_change\"] = datetime.now().timestamp()\n data[\"subjects\"][subject][\"assignments\"][file][\"hash\"] = filehash\n\n await self.send_to_channel(\n path + file,\n date,\n data[\"subjects\"][subject][\"channel_id\"],\n data[\"subjects\"][subject][\"assignments\"][file][\"version\"] \n )\n change = True\n\n # update data file\n if change:\n with open(config.path + \"/json/assignments.json\", \"w\", encoding='utf-8') as f:\n new_data = {\"assignments\": data}\n json.dump(new_data, f, indent=4)\n\n @update_assignments.before_loop\n async def before_printer(self):\n await self.bot.wait_until_ready()\n \n \n def get_due_date(self, path, time_pattern, datetime_pattern):\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n pdf_reader = PdfReader(path)\n \n for page in pdf_reader.pages:\n lines = page.extract_text().splitlines()\n for line in lines:\n if re.match(time_pattern, line):\n date = re.match(time_pattern, line).group(1)\n time = re.match(time_pattern, line).group(2)\n actual_date = datetime.strptime(date + \" \" + time, datetime_pattern)\n # set year if none is specified\n if actual_date.year < datetime.now().year:\n actual_date = actual_date.replace(year=datetime.now().year)\n # fix year if date is in the next year (e.g. 
1.1.20xx)\n if actual_date.timestamp() < datetime.now().timestamp():\n actual_date = actual_date.replace(year=datetime.now().year + 1)\n return discord_timestamp.format(timestamp=int(actual_date.timestamp()))\n \ndef update_data(data):\n with open(config.path + '/json/uniVL.json', 'w') as myfile:\n json.dump(data, myfile, indent=4)\n\n\ndef get_data():\n try:\n with open(config.path + '/json/uniVL.json', 'r') as myfile:\n return json.loads(myfile.read())\n except FileNotFoundError:\n return {}\n\n\nasync def setup(bot):\n if not os.path.exists(config.path + \"/json/assignments.json\"):\n with open(config.path + \"/json/assignments.json\", \"w\") as f:\n assignment_base = {\n \"assignments\":{\n \"subjects\" : {\n # \"dummy\" : {\n # \"path\" : \"\",\n # \"pattern\": \"\",\n # \"datetime_pattern\": \"\",\n # \"channel_id\": 0\n # \"assignments\" : {\n # \"B1\": {\n # \"hash\": \"\",\n # \"version\": 0,\n # \"last_change\": 0\n # }\n # }\n # }\n }\n }\n }\n f.write(json.dumps(assignment_base, indent=4))\n \n if(config.GUILDS):\n await bot.add_cog(Uni(bot), guilds=config.GUILDS)\n else:\n await bot.add_cog(Uni(bot))\n","repo_name":"Florik3ks/Norman","sub_path":"cogs/uni.py","file_name":"uni.py","file_ext":"py","file_size_in_byte":18017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
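get_due_date in the record above expects each subject's pattern to capture a date in group 1 and a time in group 2, and datetime_pattern to parse their concatenation; the JSON template in setup leaves both blank. A hypothetical pair for a German assignment sheet (both values are invented for illustration, not taken from the repository):

    import locale
    import re
    from datetime import datetime

    time_pattern = r"Abgabe bis \w+, den (\d{1,2}\. \w+) um (\d{1,2}:\d{2})"
    datetime_pattern = "%d. %B %H:%M"   # %B needs the de_DE locale, as set in get_due_date

    locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
    m = re.match(time_pattern, "Abgabe bis Montag, den 28. November um 18:00")
    due = datetime.strptime(m.group(1) + " " + m.group(2), datetime_pattern)
    # the year defaults to 1900 here; get_due_date then patches it to the current or next year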
+{"seq_id":"36626410225","text":"import io\nfrom PIL import Image\nimport logging\nimport os\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\nfrom demo_dash import header_v3 as header\nfrom database import query_database, query_reid\nfrom urllib.parse import parse_qs, urlencode\nimport base64\nimport sys\nimport pandas as pd\nfrom datetime import datetime, timedelta\nsys.path.append('../')\nsys.path.append(f'../reid')\nfrom inference import reid_inference\nfrom utils import to_sqlite\n\n_reid_db_path = None\n_reid = None\n\nexternal_stylesheets = [\n dbc.themes.COSMO,\n 'https://codepen.io/chriddyp/pen/bWLwgP.css',\n \"https://use.fontawesome.com/releases/v5.7.2/css/all.css\",\n]\n\napp = dash.Dash(\n __name__, title=\"RE-ID Dash\",\n external_stylesheets=external_stylesheets,\n suppress_callback_exceptions=True,\n meta_tags=[{\n 'name': 'viewport',\n 'content': 'width=device-width, initial-scale=1.0'\n }])\n\napp.layout = dbc.Container(\n id='app-layout',\n children=[\n dcc.Location(id='url', refresh=False),\n header.title_block,\n header.subtitle,\n html.Hr(),\n dbc.Container(id='page-content', fluid=True),\n ],\n fluid=True,\n)\n\n@app.callback(\n Output(component_id='page-content', component_property='children'),\n Input(component_id='url', component_property='pathname'),\n Input(component_id='url', component_property='search'),\n)\ndef display_page(pathname, search):\n params = extract_results_search_params(pathname, search)\n layout_page = []\n try:\n if params is not None and pathname[1:] == 'results':\n layout_page.append(results_page_content(params))\n else:\n layout_page.append(home_page_content())\n except Exception as ex:\n logging.error(ex)\n return layout_page # , title\n\n\nSIDEBAR_STYLE = {\n \"position\": \"static\",\n \"top\": 0,\n \"left\": 0,\n \"bottom\": 0,\n \"width\": \"20rem\",\n \"padding\": \"2rem 1rem\",\n \"background-color\": \"#f8f9fa\",\n 'height': '100%',\n}\n\n\ndef home_page_content():\n global _reid_db_path\n _reid_db_path = None\n headerColWidth=2\n content = dbc.Col(\n id='home-page',\n children=[\n # Select Database for use\n dbc.Card(dbc.CardBody([\n dbc.Row([\n dbc.Col(html.P('Select Database', style={\n 'font-weight': 'bold'}), width=headerColWidth),\n dbc.Col(dcc.Dropdown(id='database-id',\n options=get_database_options()), width=True),\n ]),\n ])),\n html.Br(),\n dbc.Card(dbc.CardBody([\n dbc.Row([\n dbc.Col(html.P('Start Date & Time', style={\n 'font-weight': 'bold'}), width=headerColWidth),\n dbc.Col([\n dbc.Row([\n dbc.Col(html.P('Date:', style={'font-weight': 'bold'}), width='auto', align='center'),\n dbc.Col(dcc.DatePickerSingle(id='db-date-start-id',display_format='DD-MM-YYYY'), width='auto'),\n dbc.Col(html.P('Hour (0 ~ 23):', style={'font-weight': 'bold'}), width='auto', align='center'),\n dbc.Col(dbc.Input(id='db-time-start-hr-id', type='number', min=0, max=23), width=1),\n dbc.Col(html.P('Minute (0 ~ 59):', style={'font-weight': 'bold'}), width='auto', align='center'),\n dbc.Col(dbc.Input(id='db-time-start-min-id', type='number', min=0, max=59), width=1),\n ]),\n #dash_datetimepicker.DashDatetimepicker(id='datetime-range-id'),\n ],\n width=True\n ),\n ]),\n dbc.Row([\n dbc.Col(html.P('End Date & Time', style={\n 'font-weight': 'bold'}), width=headerColWidth),\n dbc.Col([\n dbc.Row([\n dbc.Col(html.P('Date:', style={'font-weight': 'bold'}), width='auto', align='center'),\n 
dbc.Col(dcc.DatePickerSingle(id='db-date-end-id',display_format='DD-MM-YYYY'), width='auto'),\n dbc.Col(html.P('Hour (0 ~ 23):', style={'font-weight': 'bold'}), width='auto', align='center'),\n dbc.Col(dbc.Input(id='db-time-end-hr-id', type='number', min=0, max=23), width=1),\n dbc.Col(html.P('Minute (0 ~ 59):', style={'font-weight': 'bold'}), width='auto', align='center'),\n dbc.Col(dbc.Input(id='db-time-end-min-id', type='number', min=0, max=59), width=1),\n ]),\n ],\n width=True\n ),\n ]),\n html.Br(),\n dbc.Row([\n dbc.Col(html.P('Select Camera ID', style={\n 'font-weight': 'bold'}), width=headerColWidth),\n dbc.Col(dcc.Dropdown(id='camera-id'), width=True),\n ]),\n\n # Line separator\n dbc.Row([\n dbc.Col(html.Hr(), align='center'),\n dbc.Col(\n html.P(\"or\", style={'font-weight': 'bold'}), align='center', width='auto'),\n dbc.Col(html.Hr(), align='center'),\n ],\n align='start',\n ),\n\n dbc.Row([\n dbc.Col(html.P('Upload an image', style={\n 'font-weight': 'bold'}), width=headerColWidth),\n dbc.Col(\n dcc.Upload(\n id='upload-image',\n accept='image/*',\n multiple=True,\n children=[\n dbc.Button('Click to upload',\n id='upload-image-button',\n color='primary',\n block=True,\n size=\"lg\",\n style={'word-wrap': 'normal'})\n ],\n ),\n width='auto',\n ),\n ]),\n html.P('Picture Requirement:', style={'font-size': 'small'}),\n html.P('• Best with aspect ratio of 1:2 i.e. 128W, 256H',\n style={'font-size': 'small'}),\n html.P('• Full body image from head to toe',\n style={'font-size': 'small'}),\n ])),\n html.Br(),\n # Upload image\n dbc.Card(dbc.CardBody([\n\n dbc.Row([\n dbc.Col([\n html.P('Select Human Image', style={'font-weight': 'bold'}),\n html.P('(Narrow the search by date & time)', style={'font-size': 'small', 'font-style': 'italic'}),\n ],\n width=headerColWidth),\n dbc.Col(\n id='display-col',\n children=[\n dbc.Spinner(dbc.Row(\n id='view-db-images',\n form=True,\n style={\n 'display': 'flex',\n 'flex-wrap': 'wrap',\n 'overflow': 'auto',\n },\n #no_gutters=True,\n #fluid=True,\n )),\n ],\n width=True,\n align='stretch'\n ),\n ]),\n ])),\n ],\n width=True,\n )\n\n return dbc.Row(children=[\n content,\n ])\n\n\ndef results_page_content(params):\n if 'database' in params:\n path_db = params['database']\n else:\n path_db = None\n if 'image_id' in params:\n img_id = params['image_id']\n else:\n img_id = None\n if 'image' in params:\n img = params['image']\n else:\n img = None\n if 'image_filename' in params:\n img_name = params['image_filename']\n else:\n img_name = None\n\n sidebar_contents = []\n\n # Show selected image\n if path_db is not None and os.path.exists(path_db):\n dbquery = query_database.DbQuery(path_db)\n minDate, maxDate = dbquery.get_date_range()\n details_row = []\n image=None\n if img_id is not None:\n df = dbquery.get_images(img_id=img_id)\n row = df.iloc[0]\n encoded_image = base64.b64encode(row.img)\n image = 'data:image/png;base64,{}'.format(encoded_image.decode())\n if row.img_id is not None:\n details_row.append(dbc.Row(\n [\n html.B('Image ID:', style={'margin-right': '5px'}),\n html.P(row.img_id),\n ],\n #className=\"card-text\",\n ))\n if row.timestamp is not None:\n details_row.append(dbc.Row(\n [\n html.B('Date/Time:', style={'margin-right': '5px'}),\n html.P(row.timestamp),\n ],\n #className=\"card-text\",\n ))\n if row.cam_id is not None:\n details_row.append(dbc.Row(\n [\n html.B('Camera ID:', style={'margin-right': '5px'}),\n html.P(row.cam_id),\n ],\n #className=\"card-text\",\n ))\n if \"loc\" in df.columns and row[\"loc\"] is not None:\n 
details_row.append(dbc.Row(\n [\n html.B('Location:', style={'margin-right': '5px'}),\n html.P(row[\"loc\"]),\n ],\n #className=\"card-text\",\n ))\n elif img is not None:\n image = img\n if img_name is not None:\n details_row.append(dbc.Row(\n [\n html.B('File Name:', style={'margin-right': '5px'}),\n html.P(img_name),\n ],\n ))\n if image is not None:\n sidebar_contents.append(\n dbc.Card(\n children=[\n dbc.CardImg(\n id='results-sidebar-image',\n src=image,\n style={\n 'width': '8vw',\n 'object-fit': 'contain',\n },\n ),\n dbc.CardBody(details_row),\n ],\n style={\n 'padding': '5%',\n },\n )\n )\n\n # filter\n sidebar_contents.append(\n dbc.Card([\n dbc.CardBody([\n html.H6('Search Filter', style={\n 'font-weight': 'bold', 'color': '#007fcf',}),\n #html.Br(),\n dbc.Col([\n html.P('Select Start Date & Time', style={'font-weight': 'bold'}),\n dbc.Col([\n dbc.Row([\n dbc.Col(html.P('Date:'), width='auto', align='center'),\n dbc.Col(\n dcc.DatePickerSingle(\n id='results-filter-date-start-id',\n display_format='DD-MM-YYYY',\n min_date_allowed=minDate.strftime('%Y-%m-%d'),\n max_date_allowed=maxDate.strftime('%Y-%m-%d')),\n width=True),\n ]),\n dbc.Row([\n dbc.Col(html.P('Hour (0 ~ 23):'), width='auto', align='center'),\n dbc.Col(dbc.Input(id='results-filter-time-start-hr-id', type='number', min=0, max=23), width=True),\n ]),\n dbc.Row([\n dbc.Col(html.P('Minute (0 ~ 59):'), width='auto', align='center'),\n dbc.Col(dbc.Input(id='results-filter-time-start-min-id', type='number', min=0, max=59), width=True),\n ]),\n ]),\n #dash_datetimepicker.DashDatetimepicker(id='results-filter-datetime'),\n ], style={'padding': '1%'}),\n dbc.Col([\n html.P('Select End Date & Time', style={'font-weight': 'bold'}),\n dbc.Col([\n dbc.Row([\n dbc.Col(html.P('Date:'), width='auto', align='center'),\n dbc.Col(\n dcc.DatePickerSingle(\n id='results-filter-date-end-id',\n display_format='DD-MM-YYYY',\n min_date_allowed=minDate.strftime('%Y-%m-%d'),\n max_date_allowed=maxDate.strftime('%Y-%m-%d')),\n width=True),\n ]),\n dbc.Row([\n dbc.Col(html.P('Hour (0 ~ 23):'), width='auto', align='center'),\n dbc.Col(dbc.Input(id='results-filter-time-end-hr-id', type='number', min=0, max=23), width=True),\n ]),\n dbc.Row([\n dbc.Col(html.P('Minute (0 ~ 59):'), width='auto', align='center'),\n dbc.Col(dbc.Input(id='results-filter-time-end-min-id', type='number', min=0, max=59), width=True),\n ]),\n ]),\n #dash_datetimepicker.DashDatetimepicker(id='results-filter-datetime'),\n ], style={'padding': '1%'}),\n dbc.Col([\n html.P('Camera ID', style={'font-weight': 'bold'}),\n dcc.Dropdown(id='results-filter-cam-id',\n options=dbquery.get_cam_id_options()),\n ], style={'padding': '1%'}),\n dbc.Col([\n html.P(children='Threshold (Default is 0.60)',\n style={'font-weight': 'bold'}),\n dbc.Input(id='results-filter-threshold',type='number', step=0.01, value=0.6),\n ],\n style={'padding': '1%'}),\n html.Br(),\n dbc.Button(children=\"Filter\", id='results-filter-button', color=\"primary\",\n block=True, size='lg'),\n ]),\n ])\n )\n\n return dbc.Row(children=[\n dbc.Col(\n id='results-page-sidebar',\n children=sidebar_contents,\n width=3,\n style=SIDEBAR_STYLE,\n ),\n dbc.Col(dbc.Spinner(\n id='display-results-col',\n #width=True,\n ),width=True,),\n ])\n\n\n@app.callback(\n Output(component_id='camera-id', component_property='options'),\n Input(component_id='database-id', component_property='value'),\n)\ndef update_camera_ids(path_db):\n if path_db is not None:\n dbquery = query_database.DbQuery(path_db)\n return 
dbquery.get_cam_id_options()\n    else:\n        return []\n\n\n@app.callback(\n    Output(component_id='upload-image-button', component_property='disabled'),\n    Input(component_id='database-id', component_property='value'),\n)\ndef update_upload_button_disabled(path_db):\n    # disable the upload button until a database has been selected\n    return path_db is None\n\n@app.callback(\n    Output(component_id='db-date-start-id', component_property='min_date_allowed'),\n    Output(component_id='db-date-end-id', component_property='max_date_allowed'),\n    Input(component_id='database-id', component_property='value'),\n)\ndef update_db_start_date_min_end_date_max(path_db):\n    if path_db is not None:\n        dbquery = query_database.DbQuery(path_db)\n        minDate, maxDate = dbquery.get_date_range()\n        return minDate.strftime('%Y-%m-%d'), maxDate.strftime('%Y-%m-%d')\n    else:\n        return None, None\n\n@app.callback(\n    Output(component_id='db-date-start-id', component_property='max_date_allowed'),\n    Input(component_id='db-date-end-id', component_property='date'),\n    Input(component_id='db-date-end-id', component_property='max_date_allowed'),\n)\ndef update_db_start_date_max(end_date, end_max_date):\n    if end_date is not None:\n        return end_date\n    else:\n        return end_max_date\n\n\n@app.callback(\n    Output(component_id='db-date-end-id', component_property='min_date_allowed'),\n    Input(component_id='db-date-start-id', component_property='date'),\n    Input(component_id='db-date-start-id', component_property='min_date_allowed'),\n)\ndef update_db_end_date_min(start_date, start_min_date):\n    if start_date is not None:\n        return start_date\n    else:\n        return start_min_date\n\n\n@app.callback(\n    Output(component_id='results-filter-date-start-id', component_property='max_date_allowed'),\n    Input(component_id='results-filter-date-end-id', component_property='date'),\n    Input(component_id='results-filter-date-end-id', component_property='max_date_allowed'),\n)\ndef update_results_start_date_max(end_date, end_max_date):\n    if end_date is not None:\n        return end_date\n    else:\n        return end_max_date\n\n\n@app.callback(\n    Output(component_id='results-filter-date-end-id', component_property='min_date_allowed'),\n    Input(component_id='results-filter-date-start-id', component_property='date'),\n    Input(component_id='results-filter-date-start-id', component_property='min_date_allowed'),\n)\ndef update_results_end_date_min(start_date, start_min_date):\n    if start_date is not None:\n        return start_date\n    else:\n        return start_min_date\n\n\n@app.callback(\n    Output(component_id='view-db-images', component_property='children'),\n    Input(component_id='database-id', component_property='value'),\n    Input(component_id='db-date-start-id', component_property='date'),\n    Input(component_id='db-time-start-hr-id', component_property='value'),\n    Input(component_id='db-time-start-min-id', component_property='value'),\n    Input(component_id='db-date-end-id', component_property='date'),\n    Input(component_id='db-time-end-hr-id', component_property='value'),\n    Input(component_id='db-time-end-min-id', component_property='value'),\n    Input(component_id='camera-id', component_property='value'),\n    Input(component_id='upload-image', component_property='contents'),\n    State(component_id='upload-image', component_property='filename'),\n)\ndef show_database_images(path_db, start_date, start_hour, start_minute, end_date, end_hour, end_minute, cam_id, upload_img, upload_filename):\n    dict_trig = get_callback_trigger()\n    if 'upload-image' in dict_trig:\n        images_col = []\n        for img, filename in zip(upload_img, upload_filename):\n            tooltip_msg = f\"File name: {filename}\"\n            images_col.append(\n                
dbc.Card([\n dbc.CardLink(\n dbc.CardImg(\n src=img,\n title=tooltip_msg.strip(),\n style={\n 'width': '8vw',\n 'object-fit': 'contain'\n },\n ),\n key=filename,\n # f'{urlResults}?{urlencode(url_dict)}'\n href=get_results_href(\n path_db, img=img, img_filename=filename)\n ),\n ])\n )\n return images_col\n elif path_db is not None and cam_id is not None:\n dbimage = query_database.DbQuery(path_db)\n start_datetime = compile_start_datetime(start_date, start_hour, start_minute)\n end_datetime = compile_end_datetime(end_date, end_hour, end_minute)\n df_images = dbimage.get_images(\n cam_id=cam_id, start_datetime=start_datetime, end_datetime=end_datetime)\n images_col = []\n for _, row in df_images.iterrows():\n encoded_image = base64.b64encode(row.img)\n components = [\n #html.P(f'Camera {row.cam_id}', style={'text-overflow': 'ellipsis', 'width': '8vw', 'margin': '0'})\n ]\n\n timestamp = row.timestamp\n if timestamp is not None:\n if type(timestamp) == str:\n timestamp = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')\n components.extend([\n html.P(timestamp.date(),\n style={'text-overflow': 'ellipsis', 'width': '8vw', 'margin': '0'}),\n html.P(timestamp.strftime(\"%X\"),\n style={'text-overflow': 'ellipsis', 'width': '8vw', 'margin': '0'}),\n ])\n\n components.append(\n html.Img(\n src='data:image/png;base64,{}'.format(encoded_image.decode()),\n title=row.img_id,\n style={\n 'width': '8vw',\n 'object-fit': 'contain'\n }\n ))\n\n tooltip_msg = \"\"\n if (row.img_id is not None):\n tooltip_msg += f\"Image ID: {row.img_id}\\r\\n\"\n if (timestamp is not None):\n tooltip_msg += f\"Datetime: {timestamp}\\r\\n\"\n if (row.cam_id is not None):\n tooltip_msg += f\"Camera ID: {row.cam_id}\\r\\n\"\n if \"loc\" in df_images.columns:\n tooltip_msg += f'Location: {row[\"loc\"]}\\r\\n'\n images_col.append(\n dbc.Card([\n dbc.CardLink(\n dbc.CardImg(\n src='data:image/png;base64,{}'.format(encoded_image.decode()),\n title=tooltip_msg.strip(),\n style={\n 'width': '8vw',\n 'object-fit': 'contain'\n },\n ),\n key=row.img_id,\n href=get_results_href(path_db, img_id=row.img_id)#f'{urlResults}?{urlencode(url_dict)}'\n ),\n ])\n )\n return images_col\n else:\n return None\n\n\n@app.callback(\n Output(component_id='results-filter-button', component_property='href'),\n Input(component_id='results-filter-date-start-id', component_property='date'),\n Input(component_id='results-filter-time-start-hr-id', component_property='value'),\n Input(component_id='results-filter-time-start-min-id', component_property='value'),\n Input(component_id='results-filter-date-end-id', component_property='date'),\n Input(component_id='results-filter-time-end-hr-id', component_property='value'),\n Input(component_id='results-filter-time-end-min-id', component_property='value'),\n Input(component_id='results-filter-cam-id', component_property='value'),\n Input(component_id='results-filter-threshold', component_property='value'),\n State(component_id='url', component_property='pathname'),\n State(component_id='url', component_property='search'),\n)\ndef update_filter_link(start_date, start_hour, start_minute, end_date, end_hour, end_minute, filter_cam_id, filter_threshold, pathname, search):\n path_db, img_id, img, img_filename, start_datetime, end_datetime, cam_id, threshold = decode_results_search_params(pathname, search)\n\n #dict_trig = get_callback_trigger()\n # date=None\n # hour=None\n # minute=None\n # if start_datetime is not None:\n # date = start_datetime.date()\n # hour = start_datetime.hour\n # minute = 
start_datetime.minute\n # if 'results-filter-date-start-id' in dict_trig:\n # date = start_date\n # if 'results-filter-time-start-hr-id' in dict_trig:\n # hour = start_hour\n # if 'results-filter-time-start-min-id' in dict_trig:\n # minute = start_minute\n # start_datetime = compile_datetime(date, hour, minute)\n start_datetime = compile_datetime(start_date, start_hour, start_minute)\n\n # date = None\n # hour = None\n # minute = None\n # if end_datetime is not None:\n # date = end_datetime.date()\n # hour = end_datetime.hour\n # minute = end_datetime.minute\n # if 'results-filter-date-end-id' in dict_trig:\n # date = end_date\n # if 'results-filter-time-end-hr-id' in dict_trig:\n # hour = end_hour\n # if 'results-filter-time-end-min-id' in dict_trig:\n # minute = end_minute\n # end_datetime = compile_datetime(date, hour, minute)\n end_datetime = compile_datetime(end_date, end_hour, end_minute)\n\n #if 'results-filter-cam-id' in dict_trig:\n cam_id = filter_cam_id\n #if 'results-filter-threshold' in dict_trig:\n threshold = filter_threshold\n\n return get_results_href(path_db, img_id=img_id, img=img,\n img_filename=img_filename, start_datetime=start_datetime,\n end_datetime=end_datetime, cam_id=cam_id, threshold=threshold)\n\n\n@app.callback(\n Output(component_id='display-results-col', component_property='children'),\n Input(component_id='url', component_property='pathname'),\n Input(component_id='url', component_property='search'),\n # Input(component_id='results-filter-button', component_property='n_clicks'),\n # State(component_id='results-filter-date-start-id', component_property='date'),\n # State(component_id='results-filter-time-start-hr-id', component_property='value'),\n # State(component_id='results-filter-time-start-min-id', component_property='value'),\n # State(component_id='results-filter-date-end-id', component_property='date'),\n # State(component_id='results-filter-time-end-hr-id', component_property='value'),\n # State(component_id='results-filter-time-end-min-id', component_property='value'),\n # State(component_id='results-filter-cam-id', component_property='value'),\n # State(component_id='results-filter-threshold', component_property='value'),\n)\ndef show_results_images(pathname, search):#, n_clicks, start_date, start_hour, start_minute, end_date, end_hour, end_minute, cam_id, threshold):\n # dict_trig = get_callback_trigger()\n\n # if 'url' in dict_trig or 'results-filter-button' not in dict_trig:\n # start_date = None\n # end_date = None\n # cam_id = None\n # threshold = None\n\n # params = extract_results_search_params(pathname, search)\n # if params is not None:\n # if 'database' in params:\n # path_db = params['database']\n # else:\n # path_db = None\n # if 'image_id' in params:\n # img_id = params['image_id']\n # else:\n # img_id = None\n # if 'image' in params:\n # img = params['image']\n # else:\n # img = None\n # if 'image_filename' in params:\n # img_name = params['image_filename']\n # else:\n # img_name = None\n # else:\n # return\n\n path_db, img_id, img, img_filename, start_datetime, end_datetime, cam_id, threshold = decode_results_search_params(pathname, search)\n\n if threshold is None:\n threshold = 0.6\n\n row_images = []\n if path_db is not None and (img_id is not None or img is not None) and os.path.exists(path_db):\n dbquery = query_database.DbQuery(path_db)\n if img_id is not None:\n df = dbquery.get_images(img_id=img_id)\n row = df.iloc[0]\n image = row.img\n else:\n image = img\n\n result = run_reid(image, path_db, threshold)\n\n dict_result = 
{}\n for item in result:\n for key in item:\n if key in dict_result:\n dict_result[key].append(item[key])\n else:\n dict_result[key] = [item[key]]\n\n if len(dict_result) > 0:\n df = pd.DataFrame.from_dict(dict_result)\n list_cams = sorted(df.cam_id.unique().tolist())\n if cam_id is not None:\n if cam_id in list_cams:\n list_cams = [cam_id]\n else:\n list_cams = None\n\n # start_datetime = compile_start_datetime(start_date, start_hour, start_minute)\n # end_datetime = compile_end_datetime(end_date, end_hour, end_minute)\n if list_cams is not None and len(list_cams) > 0:\n for cam_id in list_cams:\n cam_images=[]\n header = f'Camera {cam_id}'\n for _, row_cam in df[df.cam_id == cam_id].iterrows():\n db_reid = query_reid.DbQuery(path_db)\n df_query = db_reid.get_images(row_cam.img_id)\n df_query.timestamp = pd.to_datetime(df_query.timestamp)\n if start_datetime is not None:\n df_query = df_query[df_query.timestamp >= start_datetime]\n if end_datetime is not None:\n df_query = df_query[df_query.timestamp < end_datetime]\n if len(df_query) > 0:\n if \"loc\" in df_query.columns:\n header = f'Camera {cam_id} {df_query[df_query.cam_id == cam_id][\"loc\"].iloc[0]}'\n for idx_query, row_query in df_query.iterrows():\n encoded_image = base64.b64encode(row_query.img)\n id_tag = f'result-img-id-{row_query.img_id}'\n tooltip = []\n if row_query.img_id is not None:\n tooltip.extend([\n html.B('Image ID:'),\n html.Br(),\n html.Span(row_query.img_id),\n html.Br(),\n ])\n if row_query.timestamp is not None:\n tooltip.extend([\n html.B('Date time detected:'),\n html.Br(),\n html.Span(row_query.timestamp),\n html.Br(),\n ])\n if row_cam.dist is not None:\n tooltip.extend([\n html.B('Similarity: '),\n #html.Br(),\n html.Span(round(row_cam.dist,4)),\n ])\n cam_images.append(\n dbc.Card(\n id=id_tag,\n children=[\n dbc.CardImg(\n src='data:image/png;base64,{}'.format(encoded_image.decode()),\n #id=id_tag,\n #title=tooltip_msg.strip(),\n style={\n 'width': '8vw',\n 'object-fit': 'contain',\n #'margin':'5%',\n },\n ),\n dbc.Tooltip([\n html.P(tooltip, style={'text-align': 'left'}),\n dbc.Button(\n html.B('Query this'),\n id=f'query-img-id-{row_query.img_id}',\n size=\"md\",\n href=get_results_href(path_db, img_id=row_query.img_id),\n ),\n ],\n target=id_tag,\n autohide=False,\n style={'font-size': 'small'},\n )\n ]\n ))\n\n if len(cam_images) > 0:\n row_images.append(\n dbc.Card([\n dbc.CardHeader(header, style={'font-weight': 'bold'}),\n dbc.CardBody(dbc.Row(cam_images),style={'margin': '1%'},),\n ]))\n\n if len(row_images) <= 0:\n row_images.append(html.P('No results found!'))\n\n return row_images\n\n\n@app.callback(\n Output(component_id='results-filter-date-start-id', component_property='date'),\n Input(component_id='url', component_property='pathname'),\n Input(component_id='url', component_property='search'),\n State(component_id='results-filter-date-start-id', component_property='date'),\n)\ndef UpdateStartDateFilter(pathname, search, start_date):\n _, _, _, _, start_datetime, _, _, _ = decode_results_search_params(pathname, search)\n if start_datetime is None:\n return start_date\n return start_datetime.strftime('%Y-%m-%d')\n\n@app.callback(\n Output(component_id='results-filter-time-start-hr-id', component_property='value'),\n Input(component_id='url', component_property='pathname'),\n Input(component_id='url', component_property='search'),\n State(component_id='results-filter-time-start-hr-id', component_property='value'),\n)\ndef UpdateStartHourFilter(pathname, search, start_hour):\n _, _, _, _, 
start_datetime, _, _, _ = decode_results_search_params(pathname, search)\n if start_datetime is None:\n return start_hour\n return start_datetime.hour\n\n@app.callback(\n Output(component_id='results-filter-time-start-min-id',component_property='value'),\n Input(component_id='url', component_property='pathname'),\n Input(component_id='url', component_property='search'),\n State(component_id='results-filter-time-start-min-id',component_property='value'),\n)\ndef UpdateStartMinuteFilter(pathname, search, start_minute):\n _, _, _, _, start_datetime, _, _, _ = decode_results_search_params(pathname, search)\n if start_datetime is None:\n return start_minute\n return start_datetime.minute\n\n@app.callback(\n Output(component_id='results-filter-date-end-id', component_property='date'),\n Input(component_id='url', component_property='pathname'),\n Input(component_id='url', component_property='search'),\n State(component_id='results-filter-date-end-id', component_property='date'),\n)\ndef UpdateEndDateFilter(pathname, search, end_date):\n _, _, _, _, _, end_datetime, _, _ = decode_results_search_params(pathname, search)\n if end_datetime is None:\n return end_date\n return end_datetime.strftime('%Y-%m-%d')\n\n@app.callback(\n Output(component_id='results-filter-time-end-hr-id', component_property='value'),\n Input(component_id='url', component_property='pathname'),\n Input(component_id='url', component_property='search'),\n State(component_id='results-filter-time-end-hr-id', component_property='value'),\n)\ndef UpdateEndHourFilter(pathname, search, end_hour):\n _, _, _, _, _, end_datetime, _, _ = decode_results_search_params(pathname, search)\n if end_datetime is None:\n return end_hour\n return end_datetime.hour\n\n@app.callback(\n Output(component_id='results-filter-time-end-min-id', component_property='value'),\n Input(component_id='url', component_property='pathname'),\n Input(component_id='url', component_property='search'),\n State(component_id='results-filter-time-end-min-id', component_property='value'),\n)\ndef UpdateEndMinuteFilter(pathname, search, end_minute):\n _, _, _, _, _, end_datetime, _, _ = decode_results_search_params(pathname, search)\n if end_datetime is None:\n return end_minute\n return end_datetime.minute\n\n@app.callback(\n Output(component_id='results-filter-cam-id', component_property='value'),\n Input(component_id='url', component_property='pathname'),\n Input(component_id='url', component_property='search'),\n State(component_id='results-filter-cam-id', component_property='value'),\n)\ndef UpdateCamIdFilter(pathname, search, filter_cam_id):\n _, _, _, _, _, _, cam_id, _ = decode_results_search_params(pathname, search)\n if cam_id is None:\n return filter_cam_id\n return cam_id\n\n@app.callback(\n Output(component_id='results-filter-threshold', component_property='value'),\n Input(component_id='url', component_property='pathname'),\n Input(component_id='url', component_property='search'),\n State(component_id='results-filter-threshold', component_property='value'),\n)\ndef UpdateThresholdFilter(pathname, search, filter_threshold):\n _, _, _, _, _, _, _, threshold = decode_results_search_params(pathname, search)\n if threshold is None:\n return filter_threshold\n return threshold\n\ndef get_database_options():\n path_folder = f'../reid/archive'\n options = []\n for file in sorted(os.listdir(path_folder)):\n filePath = os.path.join(path_folder, file)\n if is_database_valid(filePath):\n fileName, fileExt = os.path.splitext(file)\n if os.path.isfile(filePath) and 
fileExt.lower() == '.db':\n options.append({'label': fileName, 'value': filePath})\n\n return options\n\n\ndef is_database_valid(path_db):\n try:\n dbTemp = query_database.DbQuery(path_db)\n tableName = 'vectorkb_table'\n if tableName in dbTemp.get_table_list():\n return True#'cam_id' in dbTemp.get_columns_list(tableName)\n else:\n return False\n except:\n return False\n\n\ndef extract_results_search_params(pathname, search):\n if search is not None:\n queries = parse_qs(search[1:])\n logging.info(queries)\n else:\n queries = None\n\n try:\n if pathname is not None:\n if queries is not None and pathname[1:] == 'results':\n params={}\n for name in queries:\n params[name] = queries[name][0]\n return params\n except Exception as ex:\n logging.error(ex)\n return None\n\n\ndef get_callback_trigger():\n ctx = dash.callback_context\n dictTrigger={}\n for trig in ctx.triggered:\n splitTxt = trig['prop_id'].split('.')\n if len(splitTxt) == 2 and len(splitTxt[0]) > 0:\n if splitTxt[0] in dictTrigger:\n dictTrigger[splitTxt[0]].append(splitTxt[1])\n else:\n dictTrigger[splitTxt[0]]=[splitTxt[1]]\n\n return dictTrigger\n\n\n# reference /human_tracker/reid_inference.py\ndef run_reid(img, db_path, threshold=0.6):\n init_reid(db_path)\n global _reid\n #to_sqlite.db_path = db_path\n if isinstance(img, bytes):\n pil_img = to_sqlite.convertBlobtoIMG(img)\n elif isinstance(img,str):\n encoded_image = img.split(\",\")[1]\n decoded_image = base64.b64decode(encoded_image)\n bytes_image = io.BytesIO(decoded_image)\n pil_img = Image.open(bytes_image).convert('RGB')\n\n query_feat = _reid.to_query_feat(pil_img)\n return _reid.infer(query_feat, thres=threshold)\n\n\ndef init_reid(db_path):\n global _reid, _reid_db_path\n if _reid is None or _reid_db_path is None or _reid_db_path != db_path:\n _reid = reid_inference(db_path)\n _reid_db_path = db_path\n\n\ndef get_results_href(path_db, img_id=None, img=None, img_filename=None, start_datetime=None, end_datetime=None, cam_id=None, threshold=None):\n urlResults = '/results'\n url_dict = {'database': path_db}\n if img_id is not None:\n url_dict['image_id'] = img_id\n if img is not None:\n url_dict['image'] = img\n if img_filename is not None:\n url_dict['image_filename'] = img_filename\n if start_datetime is not None:\n url_dict['start'] = start_datetime\n if end_datetime is not None:\n url_dict['end'] = end_datetime\n if cam_id is not None:\n url_dict['camera'] = cam_id\n if threshold is not None:\n url_dict['threshold'] = threshold\n\n return f'{urlResults}?{urlencode(url_dict)}'\n\n\ndef decode_results_search_params(pathname, search):\n path_db = None\n img_id = None\n img = None\n img_filename = None\n start_datetime = None\n end_datetime = None\n cam_id = None\n threshold = None\n params = extract_results_search_params(pathname, search)\n if params is not None:\n if 'database' in params:\n path_db = params['database']\n if 'image_id' in params:\n img_id = params['image_id']\n if 'image' in params:\n img = params['image']\n if 'image_filename' in params:\n img_filename = params['image_filename']\n if 'start' in params:\n start_datetime = extract_datetime(params['start'])\n if 'end' in params:\n end_datetime = extract_datetime(params['end'])\n if 'camera' in params:\n cam_id = int(params['camera'])\n if 'threshold' in params:\n threshold = float(params['threshold'])\n\n return path_db, img_id, img, img_filename, start_datetime, end_datetime, cam_id, threshold\n\n\ndef compile_start_datetime(start_date, start_hour, start_minute):\n return compile_datetime(start_date, 
start_hour, start_minute)\n\n\ndef compile_end_datetime(end_date, end_hour, end_minute):\n if end_date is None:\n return None\n if end_hour is None or end_hour > 23:\n return compile_datetime(end_date, 0, 0) + timedelta(days=1)\n elif end_minute is None or end_minute > 59:\n return compile_datetime(end_date, end_hour, 0) + timedelta(hours=1)\n else:\n return compile_datetime(end_date, end_hour, end_minute) + timedelta(minutes=1)\n\n\ndef compile_datetime(date, hour, minute):\n if date is None:\n return None\n if hour is None:\n hour = 0\n if minute is None:\n minute = 0\n date_string = f\"{date} {hour}:{minute}\"\n return datetime.strptime(date_string, \"%Y-%m-%d %H:%M\")\n\n\ndef extract_datetime(datetime_str):\n return datetime.strptime(datetime_str, \"%Y-%m-%d %H:%M:%S\")\n","repo_name":"Mokky-ISS/IS02_PT_Capstone_Human_Re-ID","sub_path":"src/demo_dash/dash_app_v3.py","file_name":"dash_app_v3.py","file_ext":"py","file_size_in_byte":43530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
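The get_results_href / decode_results_search_params pair above round-trips the filter state through the URL query string. A minimal standalone sketch of that pattern, using only the standard library (parameter names are illustrative):

from urllib.parse import urlencode, parse_qs

params = {'database': 'reid.db', 'camera': 3, 'threshold': 0.6}
href = '/results?' + urlencode(params)
# parse_qs returns every value as a list of strings, so cast on the way out
decoded = parse_qs(href.split('?', 1)[1])
assert int(decoded['camera'][0]) == 3
assert float(decoded['threshold'][0]) == 0.6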
+{"seq_id":"3625675994","text":"Import('*')\n\nenv = env.Clone()\n\nenv.CBDefine('HAVE_CONFIG_H')\n\nif env.get('compiler_mode', '') == 'gnu':\n env.Append(CFLAGS = ['-Wno-unused-value', '-Wno-int-to-pointer-cast',\n '-Wno-implicit-function-declaration'])\n env.Append(CPPDEFINES = ['_POSIX_C_SOURCE=200809L']) # For clang\n\nif not env.GetOption('clean'):\n env.Append(CPPPATH = ['#/src/libyaml/src'])\n\n# Build lib\nsrc = Glob('src/*.c')\nlib = env.Library('#/lib/yaml', src)\n\n# Install headers\nhdrs = 'src/yaml.h'.split()\nhdrs = env.Install(dir = '#/include/', source = hdrs)\nDepends(lib, hdrs)\n\n# Return\nReturn('lib')\n","repo_name":"CauldronDevelopmentLLC/cbang","sub_path":"src/libyaml/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"91"}
+{"seq_id":"13189263362","text":"import numpy as np\nimport json\nimport cv2\nfrom pyniryo import *\nimport math\nimport matplotlib.pyplot as plt\n\nimport speech_recognition as sr\nimport pyaudio\nimport wave\nimport base64\n\nfrom new_objects import take_workspace_img, k_means\n\ndef threshold_hls(img, list_min_hsv, list_max_hsv):\n frame_hsl = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n return cv2.inRange(frame_hsl, tuple(list_min_hsv), tuple(list_max_hsv))\n\ndef fill_holes(img):\n # fill holes in a mask\n im_floodfill = img.copy()\n h, w = img.shape[:2]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(im_floodfill, mask, (0, 0), 255)\n im_floodfill_inv = cv2.bitwise_not(im_floodfill)\n img = img | im_floodfill_inv\n return img\n\ndef objs_mask(img):\n # calculate a mask\n color_hls = [[0, 0, 0], [180, 150, 255]]\n\n mask = threshold_hls(img, *color_hls)\n\n kernel3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n kernel5 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\n kernel7 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\n kernel11 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))\n\n # erode workspace markers\n mask[:15, :] = cv2.erode(mask[:15, :], kernel7, iterations=5)\n mask[-15:, :] = cv2.erode(mask[-15:, :], kernel7, iterations=5)\n mask[:, :15] = cv2.erode(mask[:, :15], kernel7, iterations=5)\n mask[:, -15:] = cv2.erode(mask[:, -15:], kernel7, iterations=5)\n\n mask = fill_holes(mask)\n\n mask = cv2.dilate(mask, kernel3, iterations=1)\n mask = cv2.erode(mask, kernel5, iterations=1)\n mask = cv2.dilate(mask, kernel11, iterations=1)\n\n mask = fill_holes(mask)\n\n mask = cv2.erode(mask, kernel7, iterations=1)\n\n return mask\n\ndef extract_objs(mask):\n # calculate the coordonate of the detected object on the workspace\n cnts, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n if cnts is not None:\n for cnt in cnts:\n cx, cy = get_contour_barycenter(cnt)\n try:\n angle = (get_contour_angle(cnt)*180)/math.pi\n except NiryoRobotException:\n angle = 0\n \n return cx, cy, angle\n\ndef test_color(dict_color, texte, centers, pos):\n # check the link between recorded data from training and data from actual picture for the color\n color_detect = False\n error_color = False\n for i_color in dict_color.keys():\n if i_color in texte:\n color_detect = True\n sum_diff = 0\n for j_color in range(len(centers[pos])):\n sum_diff = sum_diff + abs(dict_color[i_color][j_color] - centers[pos][j_color])\n if abs(sum_diff) < 100: # gap until 100 is accepted\n print(\"Couleur détectée !\")\n else:\n print(\"Pas d'objet de couleur\", i_color, \"sur le workplace !\")\n error_color = True\n if color_detect == False:\n print(\"Couleur non-détectée dans la base de données !\")\n error_color = True\n return error_color\n \ndef test_shape(dict_shape, texte, nb_pixels_centroid):\n # check the link between recorded data from training and data from actual picture for the shape\n shape_detect = False\n error_shape = False\n for i_shape in dict_shape.keys():\n if i_shape in texte:\n shape_detect = True\n if abs(nb_pixels_centroid - dict_shape[i_shape]) < 200: # gap until 200 pixels is accepted\n print(\"Forme détectée !\")\n else:\n print(\"Pas d'objet de forme\", i_shape, \"sur le workplace !\")\n error_shape = True\n if shape_detect == False:\n print(\"Forme non-détectée dans la base de données !\")\n error_shape = True\n return error_shape\n\ndef text_read():\n # collect the audio speech of the user and transcrit it into text\n FORMAT = 
pyaudio.paInt16\n CHANNELS = 1\n RATE = 16000\n CHUNK = 3200\n RECORD_SECONDS = 5\n WAVE_OUTPUT_FILENAME = \"voc.wav\"\n MIC_INDEX = 11\n\n audio = pyaudio.PyAudio()\n\n # Start Recording\n stream = audio.open(format=FORMAT, channels=CHANNELS, input_device_index=MIC_INDEX,\n rate=RATE, input=True,\n frames_per_buffer=CHUNK)\n print (\"Donner la désignation de l'objet que vous souhaitez attraper !\\n\")\n\n frames = []\n\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK, exception_on_overflow = False)\n frames.append(data)\n\n print (\"Enregistrement terminé, analyse en cours...\\nPréparez-vous au déplacement de l'objet.\")\n\n # Stop Recording\n stream.stop_stream()\n stream.close()\n audio.terminate()\n\n waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n waveFile.setnchannels(CHANNELS)\n waveFile.setsampwidth(audio.get_sample_size(FORMAT))\n waveFile.setframerate(RATE)\n waveFile.writeframes(b''.join(frames))\n waveFile.close()\n\n rec_vocale = sr.Recognizer()\n fichier = \"../data/voc.wav\", \"fr-FR\"\n\n with sr.AudioFile(fichier[0]) as src: # open file\n audio = rec_vocale.record(src)\n texte = rec_vocale.recognize_google(audio, language=fichier[1]) # translate speech into text in French\n print(texte)\n \n return texte\n\ndef main_detection(dict_color, dict_shape, client, workspace, drop_pose, observation_pose):\n # main loop for object detection and object pick and place\n image = take_workspace_img(client)\n centers, nb_pixels_centroid, pos = k_means(image)\n texte = text_read()\n #texte = \"cercle vert\" # in case of test whithout using vocal option\n mask = objs_mask(image)\n plt.imshow(mask) # show the result of the object detection\n plt.show()\n x, y, angle = extract_objs(mask)\n error_color = test_color(dict_color, texte, centers, pos)\n error_shape = test_shape(dict_shape, texte, nb_pixels_centroid)\n if error_color == False and error_shape == False: # object must not be picked if the wrong object is on the workspace\n z_offset = 0.01 # offset for the vacuum pump\n obj_ = client.get_target_pose_from_rel(workspace, z_offset, (x / 200), (y / 200),\n angle)\n print(\"Position objet : \", obj_)\n client.pick_from_pose(obj_) # take the object\n client.place_from_pose(*drop_pose.to_list()) # place it in the chosen drop zone\n #client.close_gripper() # in case of use of the gripper instead of vacuum pump\n client.move_pose(*observation_pose.to_list()) # robot is again in observation pose for a new round","repo_name":"INSA-FIPMIK/ColaBot","sub_path":"src/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":6565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
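fill_holes in detection.py relies on the classic flood-fill inversion trick: flood the background from a corner, invert the result, and OR it back into the mask. A self-contained sketch, assuming only OpenCV and NumPy:

import numpy as np
import cv2

mask = np.zeros((9, 9), np.uint8)
cv2.rectangle(mask, (1, 1), (7, 7), 255, thickness=1)  # a ring with a hollow centre

flood = mask.copy()
ff_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), np.uint8)  # floodFill needs a 2-px border
cv2.floodFill(flood, ff_mask, (0, 0), 255)  # fill everything reachable from the corner
filled = mask | cv2.bitwise_not(flood)      # pixels the fill never reached are the holes

assert filled[4, 4] == 255 and mask[4, 4] == 0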
+{"seq_id":"35566899926","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path(\"\" , views.inicio , name=\"inicio\"),\n path(\"ver_cursos\" , views.ver_cursos),\n #path(\"alta_curso//\" , views.alta_cursos),\n path(\"profesores\" , views.profesores , name=\"profesores\"),\n path(\"alumnos\" , views.alumnos , name=\"alumnos\"),\n path(\"alta_cursos\", views.curso_formulario),\n path(\"buscar_curso\" , views.buscar_curso),\n path(\"buscar\" , views.buscar),\n]\n\n\n","repo_name":"Mon86/Proyecto1-main","sub_path":"AppCoder/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"13383481366","text":"'''\nВводится число n, после него вводится еще n чисел.\nНайти сореднее арифметическое\nмаксимум\n'''\n\nimport sys\n\nn = int(input(\"enter number\"))\nsumm = 0\nmaxn = -sys.maxsize -1\n\nfor i in range(n):\n x = int(input())\n if x > maxn:\n maxn = x\n summ +=x\nprint(summ/x,maxn)\n","repo_name":"rekhert/Python3","sub_path":"az3.py","file_name":"az3.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"10756952072","text":"import random\nimport json\nimport torch\nfrom chatbot.calc import get_num\nfrom chatbot.converter import converter\nfrom chatbot.translator import translator\nfrom chatbot.spellcheck import spell\nfrom chatbot.webs import find_source, scrape\nimport re\nimport numpy as np\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nimport torch.nn as nn\n\nnltk.download('maxent_ne_chunker')\nnltk.download('omw-1.4')\nnltk.download('wordnet')\nnltk.download('stopwords')\nnltk.download('large_grammars')\nnltk.download('snowball_data')\nnltk.download('averaged_perceptron_tagger')\nnltk.download('punkt')\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nwith open('./chatbot/chat.json', 'r') as json_data:\n chats = json.load(json_data)\n\nFILE = \"./chatbot/data.pth\"\ndata = torch.load(FILE)\nans = \"\"\ncho = \"\"\n\ninput_size = data[\"input_size\"]\nhidden_size = data[\"hidden_size\"]\noutput_size = data[\"output_size\"]\nall_words = data['all_words']\ntags = data['tags']\nmodel_state = data[\"model_state\"]\n\n\nclass NeuralNet(nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(NeuralNet, self).__init__()\n self.l1 = nn.Linear(input_size, hidden_size)\n self.l2 = nn.Linear(hidden_size, hidden_size)\n self.l3 = nn.Linear(hidden_size, num_classes)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n out = self.l1(x)\n out = self.relu(out)\n out = self.l2(out)\n out = self.relu(out)\n out = self.l3(out)\n return out\n\n\nmodel = NeuralNet(input_size, hidden_size, output_size).to(device)\nmodel.load_state_dict(model_state)\nmodel.eval()\n\nwml = WordNetLemmatizer()\n\n\ndef tokenizer(sentence):\n return nltk.word_tokenize(sentence)\n\n\ndef lemmatize(words):\n return wml.lemmatize(words.lower())\n\n\ndef bag_of_words(tokenized_sentence, words):\n sentence_words = [lemmatize(word) for word in tokenized_sentence]\n bag = np.zeros(len(words), dtype=np.float32)\n for idx, w in enumerate(words):\n if w in sentence_words:\n bag[idx] = 1\n return bag\n\n\ndef chatbot(sentence):\n query = sentence\n\n sentence = tokenizer(sentence)\n X = bag_of_words(sentence, all_words)\n X = X.reshape(1, X.shape[0])\n X = torch.from_numpy(X).to(device)\n\n output = model(X)\n _, predicted = torch.max(output, dim=1)\n\n tag = tags[predicted.item()]\n probs = torch.softmax(output, dim=1)\n prob = probs[0][predicted.item()]\n if prob.item() > 0.85:\n for c in chats['chat']:\n if tag == c[\"tag\"] and tag != \"calculator\" and tag != \"spell\" and tag != \"converter\" and tag != \"translator\" and tag != \"classiojrfn\" and tag != \"generalques\":\n return random.choice(c['responses'])\n else:\n ans = scrape(query)\n return (ans)\n\n if tag == \"calculator\" or re.search(r\"(\\d+)\\s*([+*/-])\\s*(\\d+)\", query):\n ans = get_num(query)\n return str(ans)\n\n if tag == \"converter\":\n user_input = query\n try:\n parts = user_input.split()\n value = float(parts[1])\n unit_from = parts[2].lower()\n unit_to = parts[4].lower()\n result = converter.convert(unit_from, unit_to, value)\n ans = (f\"{value} {unit_from} is equal to {result} {unit_to}\")\n return (ans)\n except (ValueError, IndexError):\n return (\"Invalid input format\")\n\n if tag == \"translator\":\n words = query.split()\n new_string = \" \".join(words[1:-2])\n dest = words[-1]\n ans = translator(new_string, dest)\n return ans\n\n if tag == \"classiojrfn\":\n return (\"I can help you with the following : \\n...Your doubts regarding Classio \\n...Calculator [ 
<+,-,*,/,**,%,sin,cos,tan,root> ] \\n...Converter[ to ] \\n...Translator[translate to ] \\n...Spell Checker[spell check ]\")\n\n if tag == \"spell\":\n ans = spell(query)\n return (ans)\n \n if tag == \"generalques\":\n ans = scrape(query)\n return (ans)\n","repo_name":"jonanmathew/classio-message-server","sub_path":"chatbot/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"11138404761","text":"from django.urls import path\nfrom . import views\n#from .views import admin_page, Create_user\n\nurlpatterns = [\n #path('Create_user', Create_user.as_view(), name=\"Create_user\"),\n #path('admin_page', admin_page.as_view(), name=\"admin_page\"),\n path('Create_user', views.Create_user, name=\"Create_user\"),\n path('admin_page', views.admin_page, name=\"admin_page\"),\n path('logout_user', views.logout_user, name=\"logout_user\"),\n path('admin_profile', views.admin_profile, name=\"admin_profile\"),\n path('login',views.login, name=\"login\"),\n path('search_page',views.search_page, name=\"search_page\"),\n path('A_notification',views.A_notification, name=\"A_notification\"),\n path('A_repository',views.A_repository, name=\"A_repository\"),\n # path('change_pass', views.change_pass, name=\"change_pass\"),\n]\n","repo_name":"Sajib-Hossain/Document_Digitization","sub_path":"The Document Digitization Project/Web/Document_digitization/admin_page/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"12590719533","text":"\"\"\"remove space\"\"\"\r\n# str1=\"hello world\"\r\n# print(str1.replace(\" \",\"\"))\r\n\r\n\"\"\"reversing a list\"\"\"\r\n# list1=[1,2,3,4,5]\r\n# print(list1[::-1])\r\n# print(list(reversed(list1)))\r\n\r\n\"\"\"list to string and string to list\"\"\"\r\n# print(str1.split(\"\"))\r\n\r\n\"\"\"removing of duplicates from string\"\"\"\r\n# list1=[1,2,3,1,2,3,5]\r\n# a=list(set(list1))\r\n# print(a)\r\n# b=set(list1)\r\n# print(b)\r\n# print(list1.pop())\r\n\r\na1 = ['C', 'I', 'G', 'N', 'A', 'I', 'L', 'C', 'A','I',]\r\ns1 = 'CIGNA'\r\n\r\n\r\ndef f(a, s):\r\n j = 0\r\n for i in range(0, len(a)-1):\r\n if a1[i] == s1[j] and i >= j:\r\n print(a[i], i)\r\n j += 1\r\n if j == len(s1):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\nprint(f(a1, s1))\r\n\r\n","repo_name":"deepthi5497/python_assignment","sub_path":"mock.py","file_name":"mock.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"19646888204","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport Crypto\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto import Random\n\nclass RsaManager(object):\n\n\t# Init object and create keys\n\tdef __init__(self):\n\t\tself.key = None\n\t\tself.pubkey = None\n\t\tself.friend_key = None\n\t\tself.key_size = 1024\n\t\tself.gen_key()\n\n\t# Generate public and private keys\n\tdef gen_key(self):\n\t\trandom_generator = Random.new().read\n\t\tself.key = RSA.generate(self.key_size, random_generator)\n\t\tself.pubkey = self.key.publickey()\n\n\t# Encrypt message with friend's key\n\tdef _encrypt(self,msg):\n\t\tcipher = PKCS1_OAEP.new(self.friend_key.publickey())\n\t\tciphertext = cipher.encrypt(msg.encode())\n\t\tprint(\"RSA::Encrypt:\")\n\t\tprint(msg,\"->\",ciphertext)\n\t\treturn ciphertext\n\n\t# Export public key (pem format)\n\tdef export_key_pem(self):\n\t\tstr_key = self.pubkey.exportKey('PEM').decode('utf-8')\n\t\treturn str_key\n\n\t# Import friend's key (pem format)\n\tdef import_friend_key(self, pem_key_bytes):\n\t\tself.friend_key = RSA.importKey(pem_key_bytes)\n\n\t# Decrypt friend's message \n\tdef decrypt_msg(self, ciphertext):\n\t\tcipher = PKCS1_OAEP.new(self.key)\n\t\tmessage = cipher.decrypt(ciphertext)\n\t\treturn message.decode('utf-8')\n\n\nif __name__ == '__main__':\n\n\timport zlib, base64\n\tdef unformat_zlib_64( msg):\n\t\treturn zlib.decompress(base64.b64decode(msg))\n\t\t\n\n\t# Compresse et encode\n\tdef format_zlib_64( msg, key=False):\n\t\tif(key):\n\t\t\treturn base64.b64encode(zlib.compress(msg)).decode(\"utf-8\")\n\t\treturn base64.b64encode(zlib.compress(msg.encode())).decode(\"utf-8\")\n\n\tprint(\"test :\")\n\n\t# Create entities\n\talice = RsaManager()\n\tbob = RsaManager()\n\n\t# Import each other's key\n\tbob.import_friend_key(alice.export_key_pem())\n\talice.import_friend_key(bob.export_key_pem())\n\n\t# Print Pem keys\n\tprint(\"Alice :\")\n\tprint(alice.export_key_pem())\n\tprint(\"Bob :\")\t\n\tprint(bob.export_key_pem())\n\n\t# Alice ---> Bob\n\tcipher = alice._encrypt(\"Coucou gfhgfhdfghdfghdfghdfghdfghdfghdfghgfhfghdfghdfghdfghbob ça roule ?\")\n\tciphercode = format_zlib_64(cipher, True)\n\tprint(\"Alice envoie :\")\n\tprint(ciphercode)\n\tprint(\"Bob dechiffre :\")\n\t\n\tmsgdecode= unformat_zlib_64(ciphercode)\n\tmsg = bob.decrypt_msg(msgdecode)\n\n\tprint(msg)\n\n\t# Bob ---> Alice\n\tcipher = bob._encrypt(\"ça va très bien !\")\n\tprint(\"Alice dechiffre :\")\n\tmsg = alice.decrypt_msg(cipher)\n\tprint(msg)","repo_name":"masterccc/IRCrypt","sub_path":"RsaManager.py","file_name":"RsaManager.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"27755618739","text":"import logging\nimport os\n\nfrom PySide2 import QtWidgets, QtGui, QtCore\nfrom hyo2.grids.grids_manager import GridsManager\nfrom hyo2.qc.qctools.gui_settings import GuiSettings\n\nlogger = logging.getLogger(__name__)\n\n\nclass InputsTab(QtWidgets.QMainWindow):\n here = os.path.abspath(os.path.dirname(__file__))\n\n def __init__(self, parent_win, prj):\n QtWidgets.QMainWindow.__init__(self)\n # Enable dragging and dropping onto the GUI\n self.setAcceptDrops(True)\n\n # store a project reference\n self.prj = prj\n self.parent_win = parent_win\n\n # ui\n self.panel = QtWidgets.QFrame()\n self.setCentralWidget(self.panel)\n self.vbox = QtWidgets.QVBoxLayout()\n self.panel.setLayout(self.vbox)\n\n self.loadData = QtWidgets.QGroupBox(\"Data inputs [drag-and-drop to add, right click to drop files]\")\n # self.loadData.setStyleSheet(\"QGroupBox::title { color: rgb(155, 155, 155); }\")\n self.vbox.addWidget(self.loadData)\n\n vbox = QtWidgets.QVBoxLayout()\n self.loadData.setLayout(vbox)\n\n # add grids\n hbox = QtWidgets.QHBoxLayout()\n vbox.addLayout(hbox)\n text_add_grids = QtWidgets.QLabel(\"Grid files:\")\n hbox.addWidget(text_add_grids)\n # text_add_grids.setFixedHeight(GuiSettings.single_line_height())\n text_add_grids.setMinimumWidth(64)\n self.input_grids = QtWidgets.QListWidget()\n hbox.addWidget(self.input_grids)\n self.input_grids.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)\n self.input_grids.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n # noinspection PyUnresolvedReferences\n self.input_grids.customContextMenuRequested.connect(self.make_grids_context_menu)\n self.input_grids.setAlternatingRowColors(True)\n vbox_buttons = QtWidgets.QVBoxLayout()\n hbox.addLayout(vbox_buttons)\n vbox_buttons.addStretch()\n button_add_file_grids = QtWidgets.QPushButton()\n vbox_buttons.addWidget(button_add_file_grids)\n button_add_file_grids.setFixedHeight(36)\n button_add_file_grids.setFixedWidth(36)\n button_add_file_grids.setIcon(QtGui.QIcon(os.path.join(self.parent_win.media, 'add_files.png')))\n button_add_file_grids.setToolTip('Add (or drag-and-drop) BAG and CSAR files')\n # noinspection PyUnresolvedReferences\n button_add_file_grids.clicked.connect(self.click_add_file_grids)\n button_add_folder_grids = QtWidgets.QPushButton()\n vbox_buttons.addWidget(button_add_folder_grids)\n button_add_folder_grids.setFixedHeight(36)\n button_add_folder_grids.setFixedWidth(36)\n button_add_folder_grids.setIcon(QtGui.QIcon(os.path.join(self.parent_win.media, 'add_folder.png')))\n button_add_folder_grids.setToolTip('Add (or drag-and-drop) a Kluster Grid folder')\n button_add_folder_grids.setEnabled(GridsManager.kluster_grid_supported())\n # noinspection PyUnresolvedReferences\n button_add_folder_grids.clicked.connect(self.click_add_folder_grids)\n vbox_buttons.addStretch()\n\n # add s57\n hbox = QtWidgets.QHBoxLayout()\n vbox.addLayout(hbox)\n text_add_s57 = QtWidgets.QLabel(\"S57 files:\")\n hbox.addWidget(text_add_s57)\n text_add_s57.setFixedHeight(GuiSettings.single_line_height())\n text_add_s57.setMinimumWidth(64)\n self.input_s57 = QtWidgets.QListWidget()\n hbox.addWidget(self.input_s57)\n self.input_s57.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)\n self.input_s57.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n # noinspection PyUnresolvedReferences\n self.input_s57.customContextMenuRequested.connect(self.make_s57_context_menu)\n self.input_s57.setAlternatingRowColors(True)\n button_add_s57 = QtWidgets.QPushButton()\n 
hbox.addWidget(button_add_s57)\n button_add_s57.setFixedHeight(GuiSettings.single_line_height())\n button_add_s57.setFixedWidth(GuiSettings.single_line_height())\n button_add_s57.setFixedHeight(36)\n button_add_s57.setFixedWidth(36)\n button_add_s57.setIcon(QtGui.QIcon(os.path.join(self.parent_win.media, 'add_files.png')))\n button_add_s57.setToolTip('Add (or drag-and-drop) S57 feature files')\n # noinspection PyUnresolvedReferences\n button_add_s57.clicked.connect(self.click_add_s57)\n\n vbox.addSpacing(12)\n\n # clear data\n hbox = QtWidgets.QHBoxLayout()\n vbox.addLayout(hbox)\n hbox.addStretch()\n button_clear_data = QtWidgets.QPushButton()\n hbox.addWidget(button_clear_data)\n button_clear_data.setFixedHeight(GuiSettings.single_line_height())\n # button_clear_data.setFixedWidth(GuiSettings.single_line_height())\n button_clear_data.setText(\"Clear data\")\n button_clear_data.setToolTip('Clear all data loaded')\n # noinspection PyUnresolvedReferences\n button_clear_data.clicked.connect(self.click_clear_data)\n hbox.addStretch()\n\n self.vbox.addStretch()\n self.vbox.addStretch()\n\n # data outputs\n self.savedData = QtWidgets.QGroupBox(\"Data outputs [drag-and-drop the desired output folder]\")\n self.savedData.setStyleSheet(\"QGroupBox::title { color: rgb(155, 155, 155); }\")\n self.savedData.setMaximumHeight(GuiSettings.single_line_height() * 8)\n self.vbox.addWidget(self.savedData)\n\n vbox = QtWidgets.QVBoxLayout()\n self.savedData.setLayout(vbox)\n\n # set optional formats\n hbox = QtWidgets.QHBoxLayout()\n vbox.addLayout(hbox)\n text_set_formats = QtWidgets.QLabel(\"Formats:\")\n hbox.addWidget(text_set_formats)\n text_set_formats.setFixedHeight(GuiSettings.single_line_height())\n text_set_formats.setMinimumWidth(64)\n self.output_pdf = QtWidgets.QCheckBox(\"PDF\")\n self.output_pdf.setChecked(True)\n self.output_pdf.setDisabled(True)\n hbox.addWidget(self.output_pdf)\n self.output_s57 = QtWidgets.QCheckBox(\"S57\")\n self.output_s57.setChecked(True)\n self.output_s57.setDisabled(True)\n hbox.addWidget(self.output_s57)\n self.output_shp = QtWidgets.QCheckBox(\"Shapefile\")\n self.output_shp.setToolTip('Activate/deactivate the creation of Shapefiles in output')\n self.output_shp.setChecked(self.prj.output_shp)\n # noinspection PyUnresolvedReferences\n self.output_shp.clicked.connect(self.click_output_shp)\n hbox.addWidget(self.output_shp)\n self.output_kml = QtWidgets.QCheckBox(\"KML\")\n self.output_kml.setToolTip('Activate/deactivate the creation of KML files in output')\n self.output_kml.setChecked(self.prj.output_kml)\n # noinspection PyUnresolvedReferences\n self.output_kml.clicked.connect(self.click_output_kml)\n hbox.addWidget(self.output_kml)\n\n hbox.addSpacing(36)\n\n text_set_prj_folder = QtWidgets.QLabel(\"Create project folder: \")\n hbox.addWidget(text_set_prj_folder)\n text_set_prj_folder.setFixedHeight(GuiSettings.single_line_height())\n self.output_prj_folder = QtWidgets.QCheckBox(\"\")\n self.output_prj_folder.setToolTip('Create a sub-folder with project name')\n self.output_prj_folder.setChecked(self.prj.output_project_folder)\n # noinspection PyUnresolvedReferences\n self.output_prj_folder.clicked.connect(self.click_output_project_folder)\n hbox.addWidget(self.output_prj_folder)\n\n text_set_subfolders = QtWidgets.QLabel(\"Per-tool sub-folders: \")\n hbox.addWidget(text_set_subfolders)\n text_set_subfolders.setFixedHeight(GuiSettings.single_line_height())\n self.output_subfolders = QtWidgets.QCheckBox(\"\")\n self.output_subfolders.setToolTip('Create a 
sub-folder for each tool')\n self.output_subfolders.setChecked(self.prj.output_subfolders)\n # noinspection PyUnresolvedReferences\n self.output_subfolders.clicked.connect(self.click_output_subfolders)\n hbox.addWidget(self.output_subfolders)\n\n hbox.addStretch()\n\n # add folder\n hbox = QtWidgets.QHBoxLayout()\n vbox.addLayout(hbox)\n text_add_folder = QtWidgets.QLabel(\"Folder:\")\n hbox.addWidget(text_add_folder)\n text_add_folder.setMinimumWidth(64)\n self.output_folder = QtWidgets.QListWidget()\n hbox.addWidget(self.output_folder)\n self.output_folder.setMinimumHeight(GuiSettings.single_line_height())\n self.output_folder.setMaximumHeight(GuiSettings.single_line_height() * 2)\n self.output_folder.clear()\n new_item = QtWidgets.QListWidgetItem()\n new_item.setIcon(QtGui.QIcon(os.path.join(self.parent_win.media, 'folder.png')))\n new_item.setText(\"%s\" % os.path.abspath(self.prj.output_folder).replace(\"\\\\\", \"/\"))\n new_item.setFont(GuiSettings.console_font())\n new_item.setForeground(GuiSettings.console_fg_color())\n self.output_folder.addItem(new_item)\n button_add_folder = QtWidgets.QPushButton()\n hbox.addWidget(button_add_folder)\n button_add_folder.setFixedHeight(36)\n button_add_folder.setFixedWidth(36)\n button_add_folder.setText(\" ... \")\n button_add_folder.setToolTip('Add (or drag-and-drop) output folder')\n # noinspection PyUnresolvedReferences\n button_add_folder.clicked.connect(self.click_add_folder)\n\n # open folder\n hbox = QtWidgets.QHBoxLayout()\n vbox.addLayout(hbox)\n hbox.addStretch()\n\n button_default_output = QtWidgets.QPushButton()\n hbox.addWidget(button_default_output)\n button_default_output.setFixedHeight(GuiSettings.single_line_height())\n # button_open_output.setFixedWidth(GuiSettings.single_line_height())\n button_default_output.setText(\"Use default\")\n button_default_output.setToolTip('Use the default output folder')\n # noinspection PyUnresolvedReferences\n button_default_output.clicked.connect(self.click_default_output)\n\n button_open_output = QtWidgets.QPushButton()\n hbox.addWidget(button_open_output)\n button_open_output.setFixedHeight(GuiSettings.single_line_height())\n # button_open_output.setFixedWidth(GuiSettings.single_line_height())\n button_open_output.setText(\"Open folder\")\n button_open_output.setToolTip('Open the output folder')\n # noinspection PyUnresolvedReferences\n button_open_output.clicked.connect(self.click_open_output)\n\n hbox.addStretch()\n\n def dragEnterEvent(self, e):\n if e.mimeData().hasUrls:\n e.accept()\n else:\n e.ignore()\n\n def dragMoveEvent(self, e):\n if e.mimeData().hasUrls:\n e.accept()\n else:\n e.ignore()\n\n def dropEvent(self, e):\n \"\"\"Drop files directly onto the widget\"\"\"\n if e.mimeData().hasUrls:\n\n e.setDropAction(QtCore.Qt.CopyAction)\n e.accept()\n # Workaround for OSx dragging and dropping\n for url in e.mimeData().urls():\n dropped_path = str(url.toLocalFile())\n dropped_path = os.path.abspath(dropped_path).replace(\"\\\\\", \"/\")\n\n logger.debug(\"dropped path: %s\" % dropped_path)\n if os.path.isdir(dropped_path):\n if GridsManager.is_kluster_path(dropped_path):\n if not GridsManager.kluster_grid_supported():\n msg = \"Kluster Grid folders are currently unsupported in this Python environment.\"\n # noinspection PyCallByClass\n QtWidgets.QMessageBox.critical(self, \"Drag-and-drop Error\", msg, QtWidgets.QMessageBox.Ok)\n else:\n self._add_grids(selection=dropped_path)\n else:\n self._add_folder(selection=dropped_path)\n\n elif os.path.splitext(dropped_path)[-1] in (\".bag\", 
\".csar\"):\n self._add_grids(selection=dropped_path)\n\n elif os.path.splitext(dropped_path)[-1] in (\".000\",):\n self._add_s57(selection=dropped_path)\n\n else:\n msg = 'Drag-and-drop is only possible with a single folder or the following file extensions:\\n' \\\n '- grid files: .csar or .bag\\n' \\\n '- feature files: .000\\n\\n' \\\n 'Dropped path:\\n' \\\n '%s' % dropped_path\n # noinspection PyCallByClass\n QtWidgets.QMessageBox.critical(self, \"Drag-and-drop Error\", msg, QtWidgets.QMessageBox.Ok)\n else:\n e.ignore()\n\n def click_add_file_grids(self):\n \"\"\" Read the grids provided by the user\"\"\"\n logger.debug('adding grids from file ...')\n\n # ask the file path to the user\n # selections, _ = QtWidgets.QFileDialog.getOpenFileNames(self,\n # \"Add grids\", QtCore.QSettings().value(\"survey_import_folder\"),\n # \"BAG file (*.bag);;CSAR file (*.csar);;All files (*.*)\")\n # noinspection PyCallByClass\n selections, _ = QtWidgets.QFileDialog.getOpenFileNames(self, \"Add grids\",\n QtCore.QSettings().value(\"survey_import_folder\"),\n \"Supported grids (*.bag *.csar);;BAG file (*.bag);;\"\n \"CSAR file (*.csar);;All files (*.*)\")\n if len(selections) == 0:\n logger.debug('adding grids: aborted')\n return\n last_open_folder = os.path.dirname(selections[0])\n if os.path.exists(last_open_folder):\n QtCore.QSettings().setValue(\"survey_import_folder\", last_open_folder)\n\n for selection in selections:\n self._add_grids(selection=os.path.abspath(selection).replace(\"\\\\\", \"/\"))\n\n def click_add_folder_grids(self):\n \"\"\" Read the grids provided by the user\"\"\"\n logger.debug('adding grids from folder ...')\n\n # ask the folder path to the user\n # noinspection PyCallByClass\n selection = QtWidgets.QFileDialog.getExistingDirectory(self, \"Add Kluster Grid folder\",\n QtCore.QSettings().value(\"survey_import_folder\"),\n QtWidgets.QFileDialog.ShowDirsOnly)\n if selection == str():\n logger.debug('adding grids: aborted')\n return\n\n if not GridsManager.is_kluster_path(selection):\n msg = \"The folder %s is not a Kluster Grid\" % selection\n # noinspection PyCallByClass\n QtWidgets.QMessageBox.critical(self, \"Data Reading Error\", msg, QtWidgets.QMessageBox.Ok)\n logger.debug('folder NOT added: %s' % selection)\n return\n\n last_open_folder = os.path.dirname(selection)\n if os.path.exists(last_open_folder):\n QtCore.QSettings().setValue(\"survey_import_folder\", last_open_folder)\n\n self._add_grids(selection=os.path.abspath(selection).replace(\"\\\\\", \"/\"))\n\n def _add_grids(self, selection):\n\n # attempt to read the data\n try:\n self.parent_win.prj.add_to_grid_list(selection)\n\n except Exception as e: # more general case that catches all the exceptions\n msg = 'Error reading \\\"%s\\\".' % selection\n msg += '
%s' % e\n # noinspection PyCallByClass\n QtWidgets.QMessageBox.critical(self, \"Data Reading Error\", msg, QtWidgets.QMessageBox.Ok)\n logger.debug('surface NOT added: %s' % selection)\n return\n\n self._update_input_grid_list()\n self.parent_win.grids_loaded()\n\n def _update_input_grid_list(self):\n \"\"\" update the grid list widget \"\"\"\n grid_list = self.parent_win.prj.grid_list\n self.input_grids.clear()\n for grid in grid_list:\n new_item = QtWidgets.QListWidgetItem()\n if os.path.splitext(grid)[-1] == \".bag\":\n new_item.setIcon(QtGui.QIcon(os.path.join(self.parent_win.media, 'bag.png')))\n elif os.path.splitext(grid)[-1] == \".csar\":\n new_item.setIcon(QtGui.QIcon(os.path.join(self.parent_win.media, 'csar.png')))\n elif GridsManager.is_kluster_path(grid):\n new_item.setIcon(QtGui.QIcon(os.path.join(self.parent_win.media, 'kluster.png')))\n new_item.setText(grid)\n new_item.setFont(GuiSettings.console_font())\n new_item.setForeground(GuiSettings.console_fg_color())\n self.input_grids.addItem(new_item)\n\n def make_grids_context_menu(self, pos):\n logger.debug('context menu')\n\n # check if any selection\n sel = self.input_grids.selectedItems()\n if len(sel) == 0:\n # noinspection PyCallByClass\n QtWidgets.QMessageBox.information(self, \"Grid list\", \"You need to first add and select one or more files!\")\n return\n\n remove_act = QtWidgets.QAction(\"Remove\", self, statusTip=\"Remove the selected grid files\",\n triggered=self.remove_grid_files)\n\n menu = QtWidgets.QMenu(parent=self)\n menu.addAction(remove_act)\n menu.exec_(self.input_grids.mapToGlobal(pos))\n\n def remove_grid_files(self):\n logger.debug(\"user want to remove grid files\")\n\n # remove all the selected files from the list\n selections = self.input_grids.selectedItems()\n for selection in selections:\n self.prj.remove_from_grid_list(selection.text())\n\n self._update_input_grid_list()\n if len(self.parent_win.prj.grid_list) == 0:\n self.parent_win.grids_unloaded()\n else:\n self.parent_win.grids_loaded()\n\n def click_add_s57(self):\n \"\"\" Read the S57 files provided by the user\"\"\"\n logger.debug('adding s57 features from file ...')\n\n # ask the file path to the user\n # noinspection PyCallByClass\n selections, _ = QtWidgets.QFileDialog.getOpenFileNames(self, \"Add S57 features\",\n QtCore.QSettings().value(\"survey_import_folder\"),\n \"S57 file (*.000);;All files (*.*)\")\n if len(selections) == 0:\n logger.debug('adding s57: aborted')\n return\n last_open_folder = os.path.dirname(selections[0])\n if os.path.exists(last_open_folder):\n QtCore.QSettings().setValue(\"survey_import_folder\", last_open_folder)\n\n for selection in selections:\n selection = os.path.abspath(selection).replace(\"\\\\\", \"/\")\n self._add_s57(selection=selection)\n\n def _add_s57(self, selection):\n\n # attempt to read the data\n try:\n self.parent_win.prj.add_to_s57_list(selection)\n\n except Exception as e: # more general case that catches all the exceptions\n msg = 'Error reading \\\"%s\\\".' % selection\n msg += '
%s' % e\n # noinspection PyCallByClass\n QtWidgets.QMessageBox.critical(self, \"Data Reading Error\", msg, QtWidgets.QMessageBox.Ok)\n logger.debug('s57 file NOT added: %s' % selection)\n return\n\n self._update_input_s57_list()\n self.parent_win.s57_loaded()\n\n def _update_input_s57_list(self):\n \"\"\" update the s57 list widget \"\"\"\n s57_list = self.parent_win.prj.s57_list\n self.input_s57.clear()\n for s57 in s57_list:\n new_item = QtWidgets.QListWidgetItem()\n if os.path.splitext(s57)[-1] == \".000\":\n new_item.setIcon(QtGui.QIcon(os.path.join(self.parent_win.media, 's57.png')))\n new_item.setText(s57)\n new_item.setFont(GuiSettings.console_font())\n new_item.setForeground(GuiSettings.console_fg_color())\n self.input_s57.addItem(new_item)\n\n def make_s57_context_menu(self, pos):\n logger.debug('context menu')\n\n # check if any selection\n sel = self.input_s57.selectedItems()\n if len(sel) == 0:\n # noinspection PyCallByClass\n QtWidgets.QMessageBox.information(self, \"S57 list\", \"You need to first add and select one or more files!\")\n return\n\n remove_act = QtWidgets.QAction(\"Remove\", self, statusTip=\"Remove the selected S57 files\",\n triggered=self.remove_s57_files)\n\n menu = QtWidgets.QMenu(parent=self)\n menu.addAction(remove_act)\n menu.exec_(self.input_s57.mapToGlobal(pos))\n\n def remove_s57_files(self):\n logger.debug(\"user want to remove S57 files\")\n\n # remove all the selected files from the list\n selections = self.input_s57.selectedItems()\n for selection in selections:\n selection = os.path.abspath(selection.text()).replace(\"\\\\\", \"/\")\n self.prj.remove_from_s57_list(selection)\n\n self._update_input_s57_list()\n if len(self.parent_win.prj.s57_list) == 0:\n self.parent_win.s57_unloaded()\n else:\n self.parent_win.s57_loaded()\n\n def click_clear_data(self):\n \"\"\" Clear all the read data\"\"\"\n logger.debug('clear data')\n self.parent_win.prj.clear_data()\n self.input_grids.clear()\n self.parent_win.grids_unloaded()\n self.input_s57.clear()\n self.parent_win.s57_unloaded()\n\n def click_output_kml(self):\n \"\"\" Set the KML output\"\"\"\n self.prj.output_kml = self.output_kml.isChecked()\n QtCore.QSettings().setValue(\"survey_export_kml\", self.prj.output_kml)\n\n def click_output_shp(self):\n \"\"\" Set the Shapefile output\"\"\"\n self.prj.output_shp = self.output_shp.isChecked()\n QtCore.QSettings().setValue(\"survey_export_shp\", self.prj.output_shp)\n\n def click_output_project_folder(self):\n \"\"\" Set the output project folder\"\"\"\n self.prj.output_project_folder = self.output_prj_folder.isChecked()\n QtCore.QSettings().setValue(\"survey_export_project_folder\", self.prj.output_project_folder)\n\n def click_output_subfolders(self):\n \"\"\" Set the output in sub-folders\"\"\"\n self.prj.output_subfolders = self.output_subfolders.isChecked()\n QtCore.QSettings().setValue(\"survey_export_subfolders\", self.prj.output_subfolders)\n\n def click_add_folder(self):\n \"\"\" Read the grids provided by the user\"\"\"\n logger.debug('set output folder ...')\n\n # ask the output folder\n # noinspection PyCallByClass\n selection = QtWidgets.QFileDialog.getExistingDirectory(self, \"Set output folder\",\n QtCore.QSettings().value(\"survey_export_folder\"), )\n if selection == \"\":\n logger.debug('setting output folder: aborted')\n return\n logger.debug(\"selected path: %s\" % selection)\n\n self._add_folder(os.path.abspath(selection).replace(\"\\\\\", \"/\"))\n\n def _add_folder(self, selection):\n\n path_len = len(selection)\n 
logger.debug(\"folder path length: %d\" % path_len)\n if path_len > 140:\n\n msg = 'The selected path is %d characters long. ' \\\n 'This may trigger the filename truncation of generated outputs (max allowed path length: 260).\\n\\n' \\\n 'Do you really want to use: %s?' % (path_len, selection)\n msg_box = QtWidgets.QMessageBox(self)\n msg_box.setWindowTitle(\"Output folder\")\n msg_box.setText(msg)\n msg_box.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n msg_box.setDefaultButton(QtWidgets.QMessageBox.No)\n reply = msg_box.exec_()\n\n if reply == QtWidgets.QMessageBox.No:\n return\n\n # attempt to read the data\n try:\n self.prj.output_folder = selection\n\n except Exception as e: # more general case that catches all the exceptions\n msg = 'Error setting the output folder to \\\"%s\\\".' % selection\n msg += '
%s' % e\n # noinspection PyCallByClass\n QtWidgets.QMessageBox.critical(self, \"Output Folder Error\", msg, QtWidgets.QMessageBox.Ok)\n logger.debug('output folder NOT set: %s' % selection)\n return\n\n self.output_folder.clear()\n new_item = QtWidgets.QListWidgetItem()\n new_item.setIcon(QtGui.QIcon(os.path.join(self.parent_win.media, 'folder.png')))\n new_item.setText(\"%s\" % self.prj.output_folder)\n new_item.setFont(GuiSettings.console_font())\n new_item.setForeground(GuiSettings.console_fg_color())\n self.output_folder.addItem(new_item)\n\n QtCore.QSettings().setValue(\"survey_export_folder\", self.prj.output_folder)\n\n logger.debug(\"new output folder: %s\" % self.prj.output_folder)\n\n def click_default_output(self):\n \"\"\" Set default output data folder \"\"\"\n self._add_folder(selection=self.prj.default_output_folder())\n\n def click_open_output(self):\n \"\"\" Open output data folder \"\"\"\n logger.debug('open output folder: %s' % self.prj.output_folder)\n self.prj.open_output_folder()\n","repo_name":"hydroffice/hyo2_qc","sub_path":"hyo2/qc/qctools/widgets/survey/inputs_tab.py","file_name":"inputs_tab.py","file_ext":"py","file_size_in_byte":25618,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"}
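dropEvent in InputsTab dispatches purely on the dropped path's type and extension. A minimal sketch of that routing (the helper name is illustrative, not part of the tab):

import os

def route_dropped_path(path):
    if os.path.isdir(path):
        return 'folder'
    ext = os.path.splitext(path)[-1].lower()
    if ext in ('.bag', '.csar'):
        return 'grid'
    if ext == '.000':
        return 's57'
    return 'unsupported'

assert route_dropped_path('survey.bag') == 'grid'
assert route_dropped_path('chart.000') == 's57'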
+{"seq_id":"20572993417","text":"\nfrom datetime import datetime\nfrom sys import exec_prefix\nfrom typing import List\n\nfrom django.contrib.auth.models import User\nfrom arbeitsstunden.models import customHours\nfrom arbeitsstunden.management.commands.utils.data import member\nfrom arbeitsstunden.models import account, costCenter, project, season, work\nfrom utils.member import newMember\nimport arbeitsstunden.management.commands.utils.data as interfaces\n\ndef getStatus(Status: str)-> int:\n if(Status == \"Anwärter\"): return 1\n if(Status == \"Aktives Mitglied\"): return 2\n if(Status == \"Inaktives Mitglied\"): return 3\n if(Status == \"Alter Herr\"): return 4\n if(Status == \"Außerordentliches Mitglied\"): return 5\n if(Status == \"Ehrenmitglied\"): return 6\n return 1\n\ndef Nutzerliste(Liste: List[interfaces.Nutzer]):\n \n import requests\n requests.packages.urllib3.disable_warnings() \n \n from arbeitsstunden.management.commands.utils.csv import bcolors\n import sys\n for i in Liste:\n print(\"Loading -> \" + i.Vorname + \" \" + i.Nachname)\n try:\n current_user_status = getStatus(i.Status)\n current_user_eintrittsdatum = i.Eintrittsdatum.split(\".\")[2] + \"-\" + i.Eintrittsdatum.split(\".\")[1] + \"-\" + i.Eintrittsdatum.split(\".\")[0]\n \n newMember(i.Vorname, i.Nachname, \"Deutschland\", \"Aachen\", i.E_Mail, eintrittsdatum=datetime.fromisoformat(current_user_eintrittsdatum), status = current_user_status)\n \n sys.stdout.write(\"\\033[F\") # Cursor up one line\n except Exception as inst:\n print(bcolors.FAIL + \"[FAIL]\" + bcolors.ENDC + i.Nachname + \" couldn't be imported: \" + str(inst))\n print(bcolors.OKBLUE + \"[INFO]\" + bcolors.ENDC + \" Trying to Update\")\n \n import member.models as memberModel\n try:\n user = User.objects.all().filter(\n last_name = i.Nachname,\n first_name = i.Vorname\n )\n tempMember = memberModel.profile.objects.get(user=user)\n tempMember.status = i.status\n except: \n pass\n\n\ndef Arbeitsstunden(\n Array_user: List[interfaces.user], \n Array_project: List[interfaces.project], \n Array_project_item: List[interfaces.project_item], \n Array_project_item_hour: List[interfaces.project_item_hour], \n Array_season: List[interfaces.season], \n Array_member: List[interfaces.member],\n Array_reduction: List[interfaces.reduction]\n ):\n \n # season\n for i in Array_season:\n try:\n temp, _ = season.objects.get_or_create(\n year = i.year,\n hours = int(i.obligatory_minutes)/60\n )\n except:\n temp = season.objects.get(year = i.year)\n temp.hours = int(i.obligatory_minutes)\n temp.save()\n\n newCostcenter = costCenter(name=\"import\", description=\"----\")\n newCostcenter.save()\n \n for thing in Array_reduction:\n if thing.reduction == 0:\n continue\n \n currentSeason, _ = season.objects.get_or_create(year = thing.season_id)\n employeeIndex = next((index for (index, d) in enumerate(Array_member) if d.id == thing.member_id), None)\n currentAccount, _ = account.objects.get_or_create(\n name = Array_member[employeeIndex].first_name + \" \" + Array_member[employeeIndex].last_name\n )\n \n try:\n \n currentReduction, _ = customHours.objects.get_or_create(\n customHours = int(thing.reduction) / 60,\n season = currentSeason,\n used_account = currentAccount,\n status = next((index for (index, d) in enumerate(customHours.status_info) if d[1] == thing.status))\n )\n except:\n pass\n \n import sys \n \n for i in Array_project:\n print(\"Loading -> \" + i.name)\n sys.stdout.write(\"\\033[F\") # Cursor up one line\n currentCenter, _ = 
costCenter.objects.get_or_create(\n name = i.name,\n description = i.description,\n )\n \n currentProjects: List[interfaces.project_item] = []\n for x in Array_project_item:\n if x.project_id == i.id: \n currentProjects.append(x)\n Array_project_item.remove(x)\n\n for projects_items in currentProjects:\n currentProject, _ = project.objects.get_or_create(\n name = projects_items.title,\n season = season.objects.get_or_create(year = int(projects_items.season))[0],\n description = projects_items.description,\n costCenter = currentCenter,\n )\n \n workingParts: List[interfaces.project_item_hour] = []\n for y in Array_project_item_hour:\n if y.project_item_id == projects_items.id:\n workingParts.append(y)\n Array_project_item_hour.remove(y)\n \n for works in workingParts:\n currentWork, _ = work.objects.get_or_create(\n name = works.id,\n hours = int(works.duration)/60,\n startDate = projects_items.date\n )\n \n # add current work to Project\n currentProject.parts.add(currentWork)\n \n # employye Namen suchen\n employeeIndex = next((index for (index, d) in enumerate(Array_member) if d.id == works.member_id), None)\n \n # Employye account suchen und hinzufügen\n if(employeeIndex is not None):\n temp, _ = account.objects.get_or_create(\n name = Array_member[employeeIndex].first_name + \" \" + Array_member[employeeIndex].last_name\n )\n currentWork.employee.add(temp)\n \n pass\n\n","repo_name":"ASV-Aachen/Website","sub_path":"Webpage/arbeitsstunden/management/commands/utils/import_functions.py","file_name":"import_functions.py","file_ext":"py","file_size_in_byte":6009,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
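Note that the project-item loops above call list.remove inside a for loop over the same list, which silently skips every element after a removal. A safe sketch that partitions with comprehensions instead:

items = [{'project_id': 1}, {'project_id': 2}, {'project_id': 1}]
current = [x for x in items if x['project_id'] == 1]
items[:] = [x for x in items if x['project_id'] != 1]  # rebind contents in place
assert len(current) == 2 and items == [{'project_id': 2}]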
+{"seq_id":"6300835189","text":"import argparse\n\nimport random\nimport json\nfrom json import JSONEncoder\n\n\nclass CommonEncoder(JSONEncoder):\n def default(self, o):\n if isinstance(o, Vector3):\n return [o.x, o.y, o.z]\n return o.__dict__\n\n\nclass Vector3:\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n\n def __add__(self, v):\n return Vector3(self.x + v.x, self.y + v.y, self.z + v.z)\n\n def __sub__(self, v):\n return Vector3(self.x - v.x, self.y - v.y, self.z - v.z)\n\n def __mul__(self, n):\n return Vector3(self.x * n, self.y * n, self.z * n)\n\n def __truediv__(self, n):\n n = 1.0 if n == 0 else float(n)\n return Vector3(self.x / n, self.y / n, self.z / n)\n\n def cross(self, v):\n return Vector3(self.y * v.z - self.z * v.y,\n self.z * v.x - self.x * v.z,\n self.x * v.y - self.y * v.z)\n\n def normalize(self):\n self.x = self.x / self.length\n self.y = self.y / self.length\n self.z = self.z / self.length\n\n def dot(self, v):\n return (self.x * v.x + self.y * v.y + self.z * v.z)\n\n @property\n def length(self):\n return (self.x ** 2 + self.y ** 2 + self.z ** 2)**(1.0/2)\n\n def get_as_dict(self, keys=[\"x\", \"y\", \"z\"]):\n return {keys[0]: self.x, keys[1]: self.y, keys[2]: self.z}\n\n @staticmethod\n def get_random(min_val=0, max_val=1):\n return Vector3(random.uniform(min_val, max_val),\n random.uniform(min_val, max_val),\n random.uniform(min_val, max_val))\n\n\nclass Scene(object):\n def __init__(self, background_color, global_ambient):\n self.backgroundColor = background_color\n self.globalAmbient = global_ambient\n\n\nclass Camera(object):\n def __init__(self, position, direction, z_near, z_far, povy):\n self.position = position\n self.direction = direction\n self.z_near = z_near\n self.z_far = z_far\n self.povy = povy\n\n\nclass Material(object):\n def __init__(self, ambient, specular, diffuse):\n self.ambient = ambient\n self.specular = specular\n self.diffuse = diffuse\n\n\nclass Sphere(object):\n def __init__(self, position, radius, material):\n self.center = position\n self.radius = radius\n self.material = material\n\n\nclass Light(object):\n def __init__(self, position, ambient, specular, diffuse):\n self.position = position\n self.material = Material(ambient, specular, diffuse)\n\n\nclass Triangle(object):\n def __init__(self, point_a, point_b, point_c):\n self.pointA = point_a\n self.pointB = point_b\n self.pointC = point_c\n\n vec_ab = point_b - point_a\n vec_ac = point_c - point_a\n normal = vec_ab.cross(vec_ac)\n normal.normalize()\n\n self.normalA = normal\n self.normalB = normal\n self.normalC = normal\n self.material = Material(Vector3.get_random(),\n Vector3.get_random(),\n Vector3.get_random())\n\n\nclass Pyramid(object):\n def __init__(self, base_a, base_b, base_c, top):\n self.base_a = base_a\n self.base_b = base_b\n self.base_c = base_c\n self.top = top\n\n def get_triangle_list(self):\n result = []\n result.append(Triangle(self.top, self.base_a, self.base_b))\n result.append(Triangle(self.top, self.base_b, self.base_c))\n result.append(Triangle(self.top, self.base_c, self.base_a))\n result.append(Triangle(self.base_a, self.base_b, self.base_c))\n\n return result\n\n def __copy__(self):\n return type(self)(self.base_a, self.base_b,\n self.base_c, self.top)\n\n\ndef get_sierpinski_pyramid(level, pyramid):\n result = []\n if level == 0:\n result.append(pyramid)\n return result\n top_pyramid = Pyramid(pyramid.base_a + (pyramid.top - pyramid.base_a)/2,\n pyramid.base_b + (pyramid.top - pyramid.base_b)/2,\n pyramid.base_c + 
(pyramid.top - pyramid.base_c)/2,\n pyramid.top)\n a_pyramid = Pyramid(pyramid.base_a,\n pyramid.base_b + (pyramid.base_a - pyramid.base_b)/2,\n pyramid.base_c + (pyramid.base_a - pyramid.base_c)/2,\n pyramid.top + (pyramid.base_a - pyramid.top)/2)\n b_pyramid = Pyramid(pyramid.base_a + (pyramid.base_b - pyramid.base_a)/2,\n pyramid.base_b,\n pyramid.base_c + (pyramid.base_b - pyramid.base_c)/2,\n pyramid.top + (pyramid.base_b - pyramid.top)/2)\n c_pyramid = Pyramid(pyramid.base_a + (pyramid.base_c - pyramid.base_a)/2,\n pyramid.base_b + (pyramid.base_c - pyramid.base_b)/2,\n pyramid.base_c,\n pyramid.top + (pyramid.base_c - pyramid.top)/2)\n result += get_sierpinski_pyramid(level - 1, top_pyramid)\n result += get_sierpinski_pyramid(level - 1, a_pyramid)\n result += get_sierpinski_pyramid(level - 1, b_pyramid)\n result += get_sierpinski_pyramid(level - 1, c_pyramid)\n return result\n\n\ndef get_args():\n\n main_parser = argparse.ArgumentParser(description=\"Scene Generator\")\n main_parser.add_argument(\"--lights\", type=int, help=\"Number of lights \"\n \"(default: %(default)s)\", default=3)\n\n subparsers = main_parser.add_subparsers(dest=\"parser_name\")\n subparsers.required = True\n\n parser_random = subparsers.add_parser(\"random\",\n help=\"Generates random scene\")\n parser_random.add_argument(\"--spheres\", type=int,\n help=\"Number of spheres (default: %(default)s)\",\n default=10)\n parser_random.add_argument(\"--triangles\", type=int,\n help=\"Number of triangles \"\n \"(default: %(default)s)\", default=5)\n\n parser_sierpinski = subparsers.add_parser(\"sierpinski\", help=\"Generates \"\n \"Sierpinski's pyramid\")\n parser_sierpinski.add_argument(\"--depth\", type=int,\n help=\"Depth of recursion \"\n \"(default: %(default)s)\", default=3)\n\n parser_cube_spheres = subparsers.add_parser(\"cube_of_spheres\",\n help=\"Generates cube \"\n \"of random Spheres\")\n parser_cube_spheres.add_argument(\"--side-length\", type=int,\n help=\"Number of spheres per side \"\n \"(default: %(default)s)\", default=5)\n\n return main_parser.parse_args()\n\n\ndef generate_sierpinski_pyramid(depth):\n output = {\"Triangle\": []}\n\n pyramid = Pyramid(Vector3(-4, 0, 0),\n Vector3(4, 0, 0),\n Vector3(0, 0, -2*1.71),\n Vector3(0, 4, 1/3 * 2*1.71))\n pyramid = get_sierpinski_pyramid(depth, pyramid)\n for x in pyramid:\n output[\"Triangle\"] += x.get_triangle_list()\n\n return output\n\n\ndef generate_random_scene(n_spheres, n_triangles):\n output = {\"Triangle\": [],\n \"Sphere\": []}\n\n for _ in range(n_triangles):\n tri = Triangle(Vector3.get_random(-5, 5),\n Vector3.get_random(-5, 5),\n Vector3.get_random(-5, 5))\n output[\"Triangle\"].append(tri)\n\n for _ in range(n_spheres):\n sphere = Sphere(Vector3.get_random(-10, 10),\n random.uniform(0, 5),\n Material(Vector3.get_random(),\n Vector3.get_random(),\n Vector3.get_random()))\n output[\"Sphere\"].append(sphere)\n\n return output\n\n\ndef get_camera():\n return Camera(Vector3(0, 0, 0),\n Vector3(0, 0, -1),\n 1, 10, 90)\n\n\ndef get_lights(n_lights):\n lights = []\n for _ in range(n_lights):\n light = Light(Vector3.get_random(-10, 10),\n Vector3.get_random(),\n Vector3.get_random(),\n Vector3.get_random())\n lights.append(light)\n\n return lights\n\n\ndef generate_cube_of_spheres(length):\n output = {\"Sphere\": []}\n radius = 1\n distance = 2.5\n\n start = -(length-1) * distance/2\n pos_x = pos_y = pos_z = start\n\n for x in range(length):\n for y in range(length):\n for z in range(length):\n sphere = Sphere(Vector3(pos_x, pos_y, pos_z),\n 
radius,\n Material(Vector3.get_random(),\n Vector3.get_random(),\n Vector3.get_random()))\n output[\"Sphere\"].append(sphere)\n pos_z += distance\n pos_z = start\n pos_y += distance\n pos_y = start\n pos_x += distance\n\n return output\n\n\ndef get_scene_config():\n return Scene(Vector3.get_random(),\n Vector3.get_random())\n\n\ndef main():\n\n output = dict()\n args = get_args()\n # output[\"Camera\"] = [get_camera()]\n # output[\"Scene\"] = get_scene_config()\n output[\"Light\"] = get_lights(args.lights)\n\n options = {\"random\":\n lambda: generate_random_scene(args.spheres, args.triangles),\n \"sierpinski\":\n lambda: generate_sierpinski_pyramid(args.depth),\n \"cube_of_spheres\":\n lambda: generate_cube_of_spheres(args.side_length)}\n\n output.update(options[args.parser_name]())\n\n with open('scene.json', 'w') as outfile:\n json.dump(output, outfile, cls=CommonEncoder)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MGniew/raytracer-opencl","sub_path":"scripts/scene_generator.py","file_name":"scene_generator.py","file_ext":"py","file_size_in_byte":9880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"10216232328","text":"from flask import json\nfrom typing import Any\n\nimport bot.utils.Cards as Cards\nimport bot.utils.User as User\nimport bot.utils.storage.Storage as Storage\nfrom bot.utils.Logger import logger\nfrom bot.utils.Weekdays import Weekdays\n\nNO_ANSWER = \"🤔 Sorry, I don't have an answer for that.\"\n\n\ndef handle_event(event, user: User, space: str, is_room: bool) -> Any:\n if 'slashCommand' in event['message']:\n command = event['message']['slashCommand']['commandId']\n logger.debug(f\"Slash command {command}\")\n # /add_team team_name\n if command == '1':\n return add_team(event)\n # /teams\n if command == '3':\n return get_teams()\n # /join_team\n if command == '4':\n return join_team(is_room)\n # /users [team_name]\n if command == '5':\n return get_users(event)\n # /standup\n if command == '6':\n return trigger_standup(user, is_room)\n # /enable_schedule or /disable_schedule\n if command == '7' or command == '8':\n return enable_schedule(user, is_room)\n # /change_schedule_time day time\n if command == '9':\n return change_schedule_time(event, user, is_room)\n # /schedules\n if command == '10':\n return get_schedules(user, is_room)\n # /leave_team\n if command == '11':\n return leave_team(user, space, is_room)\n # /remove_team\n if command == '12':\n return remove_team()\n # /questions\n if command == '13':\n return get_questions(user)\n # /add_question QUESTION\n if command == '14':\n return add_question(event, user)\n # /remove_question\n if command == '15':\n return remove_question(user)\n # /reorder_questions\n if command == '16':\n return reorder_questions(user)\n # Handle standup answers and generic requests.\n else:\n return generic_input(event, user, is_room)\n\n\ndef add_team(event) -> Any:\n team_name = ''\n if 'argumentText' in event['message']:\n team_name = event['message']['argumentText'].strip(' \"\\'')\n if team_name and Storage.add_team(team_name=team_name):\n text = f\"I successfully added the new team '{team_name}'.\"\n else:\n text = f\"🤕 Sorry, I couldn't add the new team '{team_name}'.\"\n return json.jsonify({'text': text})\n\n\ndef get_teams() -> Any:\n teams = Storage.get_teams()\n return json.jsonify(Cards.get_team_list_card(teams))\n\n\ndef join_team(is_room: bool) -> Any:\n teams = Storage.get_teams()\n return json.jsonify(Cards.get_team_selection_card(teams, is_room, False))\n\n\ndef get_users(event) -> Any:\n team_name = ''\n if 'argumentText' in event['message']:\n team_name = event['message']['argumentText'].strip(' \"\\'')\n users = Storage.get_users(team_name=team_name)\n return json.jsonify(Cards.get_user_list_card(users))\n\n\ndef trigger_standup(user: User, is_room: bool) -> Any:\n if is_room:\n text = \"🤕 Sorry, but this command has no effect in a room.\"\n else:\n Storage.reset_standup(google_id=user.google_id)\n next_question = Storage.get_current_question(google_id=user.google_id)\n if next_question is None:\n text = \"🤕 Sorry, I could not find a standup question. 
\" \\\n \"Add new questions with `/add_question QUESTION`.\"\n else:\n text = f\"*Hi {user.name}!*\\nYou requested to do the standup.\\n\\n\" \\\n f\"_{next_question.question}_\"\n return json.jsonify({'text': text})\n\n\ndef enable_schedule(user: User, is_room: bool) -> Any:\n if is_room:\n text = \"🤕 Sorry, but this command has no effect in a room.\"\n return json.jsonify({'text': text})\n else:\n schedules = Storage.get_schedules(google_id=user.google_id)\n return json.jsonify(Cards.get_schedule_enable_card(schedules, False))\n\n\ndef change_schedule_time(event, user: User, is_room: bool) -> Any:\n if is_room:\n text = \"🤕 Sorry, but this command has no effect in a room.\"\n else:\n schedule_day = ''\n schedule_time = ''\n if 'argumentText' in event['message']:\n argument = event['message']['argumentText'].strip(' \"\\'')\n arguments = argument.rsplit(' ')\n if len(arguments) == 2:\n schedule_day = arguments[0].strip(' \"\\'').capitalize()\n schedule_time = arguments[1].strip(' \"\\'')\n if schedule_time and schedule_day and schedule_day in Weekdays \\\n and Storage.update_schedule_time(google_id=user.google_id, day=schedule_day,\n time=schedule_time):\n text = f\"Your standup schedule time for '{schedule_day}' is now '{schedule_time}'.\"\n else:\n text = f\"🤕 Sorry, I couldn't change your standup schedule time '{schedule_time}' \" \\\n f\"for '{schedule_day}'. Use e.g. `/change_schedule_time monday 09:00:00`\"\n return json.jsonify({'text': text})\n\n\ndef get_schedules(user: User, is_room: bool) -> Any:\n if is_room:\n text = \"🤕 Sorry, but this command has no effect in a room.\"\n return json.jsonify({'text': text})\n else:\n schedules = Storage.get_schedules(google_id=user.google_id)\n return json.jsonify(Cards.get_schedule_list_card(schedules))\n\n\ndef leave_team(user: User, space: str, is_room: bool) -> Any:\n if is_room:\n Storage.leave_team_with_room(space=space)\n text = \"The room is no longer part of a team. Run `/join_team` to join the room to another team.\"\n else:\n Storage.leave_team(google_id=user.google_id)\n text = \"You left the team. Run `/join_team` to join another team.\"\n return json.jsonify({'text': text})\n\n\ndef remove_team() -> Any:\n teams = Storage.get_teams()\n return json.jsonify(Cards.get_team_remove_card(teams, False))\n\n\ndef get_questions(user: User) -> Any:\n questions = Storage.get_questions(google_id=user.google_id)\n if questions:\n return json.jsonify(Cards.get_question_list_card(questions))\n else:\n text = \"🤕 Sorry, I couldn't find any questions for you. \" \\\n \"Make sure you joined a team with `/join_team` and/or your team as questions. \" \\\n \"Use `/add_question QUESTION` to add a new question for your team.\"\n json.jsonify({'text': text})\n\n\ndef add_question(event, user: User) -> Any:\n question = ''\n if 'argumentText' in event['message']:\n question = event['message']['argumentText'].strip(' \"\\'')\n if question and Storage.add_question(google_id=user.google_id, question=question):\n text = f\"I successfully added the new question '{question}'.\"\n else:\n text = f\"🤕 Sorry, I couldn't add the new question '{question}'. 
\" \\\n f\"Make sure you joined a team with `/join_team`.\"\n return json.jsonify({'text': text})\n\n\ndef remove_question(user: User) -> Any:\n questions = Storage.get_questions(google_id=user.google_id)\n return json.jsonify(Cards.get_question_remove_card(questions, False))\n\n\ndef reorder_questions(user) -> Any:\n questions = Storage.get_questions(google_id=user.google_id)\n if questions:\n return json.jsonify(Cards.get_question_reorder_card(questions, 1))\n else:\n text = \"🤕 Sorry, I couldn't find any questions of your team.\"\n return json.jsonify({'text': text})\n\n\ndef generic_input(event, user: User, is_room) -> Any:\n text = NO_ANSWER\n if not is_room:\n previous_question = Storage.get_previous_question(google_id=user.google_id)\n if previous_question:\n logger.debug(f\"Previous question: {previous_question.id_}, {previous_question.question}, \"\n f\"{previous_question.order}\")\n current_question = Storage.get_current_question(google_id=user.google_id,\n previous_question=previous_question)\n if current_question:\n logger.debug(f\"Current question: {current_question.id_}, {current_question.question}, \"\n f\"{current_question.order}\")\n answer = event['message']['text']\n Storage.add_standup_answer(google_id=user.google_id, answer=answer,\n current_question=current_question)\n next_question = Storage.get_current_question(google_id=user.google_id,\n previous_question=current_question)\n logger.debug(f\"Next question: {next_question}\")\n if next_question is None:\n answers = Storage.get_standup_answers(google_id=user.google_id)\n card = Cards.get_standup_card(user, answers, True)\n return json.jsonify({'cards': [card]})\n else:\n text = f\"_{next_question.question}_\"\n return json.jsonify({'text': text})\n","repo_name":"samuelba/google-chat-standup-bot","sub_path":"bot/events/Message.py","file_name":"Message.py","file_ext":"py","file_size_in_byte":9047,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}
+{"seq_id":"33414431859","text":"from django.contrib.auth import get_user_model\r\nfrom django.utils.datastructures import MultiValueDictKeyError\r\nfrom djoser.serializers import UserCreateSerializer\r\nfrom drf_extra_fields.fields import LowercaseEmailField\r\nfrom rest_framework import serializers\r\n\r\nfrom api.utils import get_recipe_serializer\r\n\r\nUser = get_user_model()\r\n\r\n\r\nclass CustomUserCreateSerializer(UserCreateSerializer):\r\n email = LowercaseEmailField()\r\n\r\n class Meta:\r\n model = User\r\n fields = (\r\n 'id',\r\n 'username',\r\n 'first_name',\r\n 'last_name',\r\n 'email',\r\n 'password',\r\n )\r\n\r\n\r\nclass UserBaseSerializer(serializers.BaseSerializer):\r\n def to_representation(self, instance):\r\n user = self.context['request'].user\r\n is_subscribed = (\r\n False\r\n if user.is_anonymous\r\n else instance.following.filter(user_id=user.id).exists()\r\n )\r\n return {\r\n 'id': instance.id,\r\n 'username': instance.username,\r\n 'first_name': instance.first_name,\r\n 'last_name': instance.last_name,\r\n 'email': instance.email,\r\n 'is_subscribed': is_subscribed,\r\n }\r\n\r\n\r\nclass SubscriptionSerializer(UserBaseSerializer):\r\n def to_representation(self, instance, user=None):\r\n data = super(SubscriptionSerializer, self).to_representation(instance)\r\n data['recipes_count'] = instance.recipes.count()\r\n author_recipes = instance.recipes.all()\r\n try:\r\n recipes_limit = self.context.get('request').GET['recipes_limit']\r\n author_recipes = author_recipes[: int(recipes_limit)]\r\n except (MultiValueDictKeyError, ValueError):\r\n pass\r\n\r\n recipes = get_recipe_serializer()(\r\n author_recipes,\r\n many=True,\r\n )\r\n data['recipes'] = recipes.data\r\n\r\n return data\r\n","repo_name":"bretton-test/foodgram_2","sub_path":"backend/users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"33625144934","text":"# Import all of your extensions first\nfrom tempfile import TemporaryDirectory\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nimport mimetypes\nfrom pathlib import Path\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport os\nimport smtplib\n\n# Define your signature. It will be attached to the email.\nSIGNATURE = (\n '
Preston Pope
\\n'\n \"
EOG Resources, Inc.
\\n\"\n\n \"
Cell: (832) 465-0621
\\n\"\n \"
preston_pope@eogresources.com
\\n\"\n)\n\n\nclass EmailMsg:\n def __init__(self, recipient_list, sender, subject, signature=SIGNATURE):\n self.sender = sender\n self.subject = subject\n self.recipient_list = recipient_list\n self.signature = signature\n self.attachments = []\n self.images = []\n self.text = []\n\n def send(self):\n msg = self._construct_msg()\n with smtplib.SMTP(\"smtp.eogresources.com\") as mailer:\n server_response = smtplib.SMTP.ehlo(mailer)\n mailer.sendmail(msg[\"From\"], self.recipient_list, msg.as_string())\n\n def _construct_msg(self):\n \"\"\"This constructs the email\"\"\"\n msg = MIMEMultipart()\n msg[\"Subject\"] = self.subject\n msg[\"From\"] = self.sender\n for attachment in self.attachments:\n msg.attach(attachment)\n for display in self.images:\n msg.attach(display)\n all_text = \" \".join(self.text)\n all_text += \" \" + self.signature\n msg.attach(MIMEText(all_text, \"html\"))\n return msg\n\n def convert_plots_to_attachment(self, figure_name, figures):\n \"\"\"This function is used when you want to add multiple (or list of) plots to an email\"\"\"\n with TemporaryDirectory() as temp_dir:\n dir_path = Path(str(temp_dir))\n file = dir_path / \"plots.pdf\"\n with PdfPages(file) as pdf_file:\n for figure in figures:\n pdf_file.savefig(figure, dpi=300, bbox_inches=\"tight\")\n self.attach_file(figure_name, file, \"pdf\")\n\n def convert_summary_plots_to_attachment(self, figure_name, figures):\n \"\"\"This function is used when you only want to add one plot to an email\"\"\"\n with TemporaryDirectory() as temp_dir:\n dir_path = Path(str(temp_dir))\n file = dir_path / \"plots.pdf\"\n with PdfPages(file) as pdf_file:\n pdf_file.savefig(figures, dpi=300, bbox_inches=\"tight\")\n self.attach_file(figure_name, file, \"pdf\")\n\n def attach_file(self, attachment_name, file, file_type):\n \"\"\"Attaches the file to the email\"\"\"\n with open(file, \"rb\") as f:\n attachment = MIMEApplication(f.read(), _subtype=file_type)\n attachment.add_header(\n \"Content-Disposition\", \"attachment\", filename=attachment_name\n )\n self.attachments.append(attachment)\n\n def add_image(self, image_name, image):\n \"\"\"Use this to add the plot image to the body of the email\"\"\"\n c_type, encoding = mimetypes.guess_type(image)\n main_type, sub_type = c_type.split(\"/\", 1)\n attachment = MIMEImage(image, _subtype=sub_type)\n attachment.add_header(\n \"Content-Disposition\", \"attachment\", image_name=image_name\n )\n self.images.append(attachment)\n self.add_text(f'
')\n\n def add_text(self, text):\n \"\"\"This will allow you to add text in the body of the email you send.\"\"\"\n self.text.append(text)\n","repo_name":"Splintered-Glass-Solutions/learn_python","sub_path":"weather_report/_email.py","file_name":"_email.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18141868239","text":"r, c = map(int, input().split())\nA = []\nfor line in range(r):\n A.append(list(map(int, input().split())))\n\n[A[i].append(sum(A[i])) for i in range(r)]\n\ntrans = []\nfor i in range(c+1):\n trans.append([A[j][i] for j in range(r)])\n trans[i].append(sum(trans[i]))\n\nret = []\nfor i in range(r+1):\n ret.append([trans[j][i] for j in range(c+1)])\n print(' '.join(map(str, ret[i])))","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02413/s221044871.py","file_name":"s221044871.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"73129749738","text":"class Solution(object):\n def findJudge(self, n, trust):\n t=[0 for _ in range(n)]\n for i in trust:\n t[i[1]-1]+=1\n t[i[0]-1]-=1\n print(t)\n for i in range(len(t)):\n if t[i]==n-1:\n return i+1\n return -1\n","repo_name":"0-shubham-0/Python-Practise","sub_path":"Leetcode/leet997.py","file_name":"leet997.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"42663741303","text":"from rest_framework import serializers\nfrom crm.models import EvaluateCompany, EvaluateCompanyEvaluations\n\n\nclass EvaluationSerializer(serializers.ModelSerializer):\n class Meta:\n model = EvaluateCompanyEvaluations\n fields = \"__all__\"\n read_only_fields = [\"id\", \"estate\", \"created_at\", \"created_by\"]\n\n def to_representation(self, instance):\n rep = super(EvaluationSerializer, self).to_representation(instance)\n evaluate_company = EvaluateCompany.objects.get(id=instance.evaluate_company_id)\n rep[\"name\"] = evaluate_company.name\n rep[\"total_valuation_fee\"] = (\n instance.estate_valuation_fee + instance.land_valuation_fee\n )\n return rep\n","repo_name":"atsushiL/blue-green","sub_path":"crm/serializers/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"23102230928","text":"import os,sys\nimport pandas as pd\nfrom src.exception import JobRecException\nfrom src.logger import logging\nimport tensorflow as tf\nimport tensorflow_recommenders as tfrs\nfrom src.model.jobrecommender import JobsRecommenderModel\n\nclass ModelCreator:\n def __init__(self,):\n try:\n pass\n except Exception as e:\n raise JobRecException(e,sys)\n \n def create_model(candidatedf,querydf):\n try:\n\n jobs = pd.DataFrame(candidatedf[\"Title\"].unique(), columns=[\"Title\"])\n jobs_tf = tf.data.Dataset.prefetch(tf.data.Dataset.from_tensor_slices(dict(jobs)),buffer_size=tf.data.AUTOTUNE)\n jobs_map = jobs_tf.map(lambda x: {\"job_title\": x[\"Title\"]})\n job_titles_vocabulary = tf.keras.layers.StringLookup(mask_token=None)\n job_titles_vocabulary.adapt(jobs_map.map(lambda x: x[\"job_title\"]))\n job_model = tf.keras.Sequential([job_titles_vocabulary,\\\n tf.keras.layers.Embedding(job_titles_vocabulary.vocabulary_size(), 64)])\n \n users_tf = tf.data.Dataset.prefetch(tf.data.Dataset.from_tensor_slices(dict(querydf)), buffer_size=tf.data.AUTOTUNE)\n users_map = users_tf.map(lambda x: {\"user_id\": x[\"UserID\"]})\n user_ids_vocabulary = tf.keras.layers.StringLookup(mask_token=None)\n user_ids_vocabulary.adapt(users_map.map(lambda x: x[\"user_id\"]))\n user_model = tf.keras.Sequential([user_ids_vocabulary,\\\n tf.keras.layers.Embedding(user_ids_vocabulary.vocabulary_size(), 64)])\n \n jobs_x = jobs_tf.map(lambda x: x[\"Title\"])\n # Define your objectives.\n task = tfrs.tasks.Retrieval(metrics=tfrs.metrics.FactorizedTopK(jobs_x.batch(128).map(job_model)))\n model = JobsRecommenderModel(user_model, job_model, task)\n\n return model\n\n except Exception as e:\n raise JobRecException(e,sys)","repo_name":"bsb4018/jobrecsys","sub_path":"src/model/model_creator.py","file_name":"model_creator.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18414203839","text":"import sys\ninput = sys.stdin.readline\n\ndef divisors(n):\n i = 1\n table = set()\n while i * i <= n:\n if not n % i:\n table.add(i)\n table.add(n//i)\n i += 1\n table = list(table)\n return table\n\nN = int(input())\nA = list(map(int, input().split()))\n\nD = divisors(A[0]) + divisors(A[1])\nD.sort(reverse=True)\nfor d in D:\n cnt = 0\n for a in A:\n if a%d:\n cnt += 1\n if cnt <= 1:\n print(d)\n exit()\n\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03061/s664179030.py","file_name":"s664179030.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18048735057","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom openpyxl import Workbook\nfrom openpyxl.utils.dataframe import dataframe_to_rows\n\nOURDATASET_ROOT = \"/media/ourDataset/v1.0\"\nGROUP_MIN_LAST_FRAME = 100 # last_time = last_frame * fps(10)\nSAVE_ON = 1\nlabelInfo_output_path = \"./runs\"\n\ndef main():\n dayExps = os.listdir(OURDATASET_ROOT)\n dayExps.sort()\n df_dayExp_groupId_frameId = pd.DataFrame({\n 'dayExp': [],\n 'groupId': [],\n 'frameId': [],\n 'path': []\n })\n for dayExp in dayExps:\n dayExp_dataset_folder = os.path.join(OURDATASET_ROOT, dayExp, 'Dataset')\n dayExp_groupId_frameId_folders = os.listdir(dayExp_dataset_folder)\n dayExp_groupId_frameId_folders.sort()\n temp = pd.DataFrame({\n 'dayExp': [dayExp for item in dayExp_groupId_frameId_folders],\n 'groupId': [int(item[5:9]) for item in dayExp_groupId_frameId_folders],\n 'frameId': [int(item[15:19]) for item in dayExp_groupId_frameId_folders],\n 'path': [os.path.join(dayExp_dataset_folder, item) for item in dayExp_groupId_frameId_folders]\n })\n\n df_dayExp_groupId_frameId = pd.concat([df_dayExp_groupId_frameId, temp], axis=0, ignore_index=True)\n\n\n cnt = df_dayExp_groupId_frameId.groupby(['dayExp', \"groupId\"]).agg(\"count\")\n if SAVE_ON:\n df_save = pd.DataFrame({\n \"dayExp\": [\"{}\".format(item[0]) for item in cnt.frameId.keys().to_list()],\n \"groupId\": [\"{}_group{:0>4d}\".format(item[0], int(item[1])) for item in cnt.frameId.keys().to_list()],\n \"cnt\": cnt.frameId.values.tolist()\n })\n wb = Workbook()\n ws = wb.active\n for r in dataframe_to_rows(df_save, index=False, header=True):\n ws.append(r)\n if not os.path.exists(labelInfo_output_path):\n os.mkdir(labelInfo_output_path)\n df_label_save_path = os.path.join(labelInfo_output_path, 'cnt_frames.xlsx')\n wb.save(df_label_save_path)\n # if not os.path.exists(df_label_save_path):\n # wb.save(df_label_save_path)\n\n\n # data = cnt[\"frameId\"]\n # bin_width = 10\n # data_bins = [x for x in range(50-bin_width//2, data.max()+bin_width//2, bin_width)]\n # res = plt.hist(data, bins=data_bins)\n # plt.xlabel(\"cnt_frames_in_group\")\n # plt.xticks([x for x in range(50, data.max()+bin_width//2, bin_width)], rotation=\"vertical\")\n # plt.ylabel(\"cnt_groups\")\n # for i in range(len(res[0])):\n # plt.text(res[1][i], res[0][i]+2, str(int(res[0][i])))\n # plt.show()\n\n data = cnt[cnt[\"frameId\"]>=GROUP_MIN_LAST_FRAME][\"frameId\"]\n bin_width = 10\n data_bins = [x for x in range(GROUP_MIN_LAST_FRAME - bin_width // 2, data.max() + bin_width // 2, bin_width)]\n res = plt.hist(data, bins=data_bins)\n plt.xlabel(\"cnt_frames_in_group\")\n plt.xticks([x for x in range(GROUP_MIN_LAST_FRAME, data.max() + bin_width // 2, bin_width)], rotation=\"vertical\")\n plt.ylabel(\"cnt_groups\")\n for i in range(len(res[0])):\n plt.text(res[1][i], res[0][i] + 2, str(int(res[0][i])))\n plt.show()\n\n print(\"Sum_groups={}, Sum_frames={}\".format(len(data), data.sum()))\n\n print(\"done\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Aiuan/ourDataset_v1_postprocess","sub_path":"python/select_groups_by_people/statistic_summary.py","file_name":"statistic_summary.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"}
+{"seq_id":"4716562357","text":"s_lst = list(input().split())\nmin_cnt = 51\nif len(s_lst[0]) > len(s_lst[1]):\n max_len = len(s_lst[0])\n min_len = len(s_lst[1])\n max_lst = s_lst[0]\n min_lst = s_lst[1]\nelse:\n max_len = len(s_lst[1])\n min_len = len(s_lst[0])\n max_lst = s_lst[1]\n min_lst = s_lst[0]\nfor i in range(0, max_len - min_len + 1):\n cnt = 0\n lst = max_lst[i:min_len + i]\n for j in range(min_len):\n if lst[j] != min_lst[j]:\n cnt += 1\n if min_cnt > cnt:\n min_cnt = cnt\nprint(min_cnt)\n","repo_name":"ambosing/PlayGround","sub_path":"Python/Problem Solving/BOJ/boj1120.py","file_name":"boj1120.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18501693399","text":"H, W = map(int, input().split())\na = [input() for _ in range(H)]\n\nclm = [True] * H\nfor hi in range(H):\n ai = a[hi]\n for c in ai:\n if c == '#':\n clm[hi] = False\n break\n \nrow = [True] * W\nfor wi in range(W):\n for ci in range(H):\n if a[ci][wi] == '#':\n row[wi] = False\n break\n \nfor hi in range(H):\n if clm[hi] == True:\n continue\n ans = ''\n ai = a[hi]\n for wi in range(W):\n if row[wi] == True:\n continue\n ans += ai[wi]\n print(ans)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03273/s078503488.py","file_name":"s078503488.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"39757728925","text":"import ipaddress\nimport json\nimport requests\n\n# https://docs.cloud.oracle.com/en-us/iaas/tools/public_ip_ranges.json\n\ndef update():\n\tresults = []\n\n\tresponse = requests.get('https://docs.cloud.oracle.com/en-us/iaas/tools/public_ip_ranges.json')\n\n\tif response.status_code != 200:\n\t\treturn False\n\n\tdata = json.loads(response.text)\n\n\tfor region_info in data['regions']:\n\t\tregion = region_info['region']\n\n\t\tfor cidr_info in region_info['cidrs']:\n\t\t\tcidr = cidr_info['cidr']\n\t\t\tservice = \"/\".join(cidr_info['tags'])\n\n\t\t\tresults.append(\"%s %s %s %s\" % (cidr, 'oracle', service, region))\n\n\n\t# Write results to file\n\twith open('data/oracle.txt', 'w') as f:\n\t\tf.write(\"\\n\".join(results))\n\t\tf.close()\n\n\treturn len(results)","repo_name":"oldrho/ip2provider","sub_path":"lists/oracle.py","file_name":"oracle.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"90"}
+{"seq_id":"31909231594","text":"\"\"\"\nThis module defines the players of the game\n\"\"\"\n\nfrom functools import reduce\nfrom deck import Card\n\nclass Player():\n \"\"\"\n This is the Player base class\n \"\"\"\n hand = []\n round_total = 0\n round_bet = 0\n out_of_round = False\n\n def __init__(self, name='Dealer', bankroll=0):\n self.name = name\n self.bankroll = bankroll\n self.hand = list()\n\n def __str__(self):\n return f'Player name: {self.name}\\nPlayer bankroll: ${self.bankroll}\\nPlayer current hand: {reduce(lambda x, y: x.rank + \" of \" + x.suit + \" and \" + y.rank + \" of \" + y.suit, self.hand)}\\nPlayer current bet: ${self.round_bet}'\n\n def __del__(self):\n print('\\nPlayer deleted.')\n\n def bet(self):\n return self.name, self.round_bet\n\n def hit(self, deck):\n \"\"\"\n This method lets the player get a new card to their hand\n \"\"\"\n try:\n self.hand.append(deck.pop(0))\n except IndexError:\n print('There are no more cards in the deck!')\n\n def stand(self):\n \"\"\"\n This method lets the player stand on their card amount\n \"\"\"\n while True:\n choice = input(\n f'\\n{self.name}, would you like to stand? Enter Yes or No: ')\n if choice.lower() in ('yes', 'y', 'no', 'n'):\n break\n else:\n print('Please enter a valid value')\n continue\n return choice.lower() in ('yes', 'y')\n\n\nclass Human(Player):\n \"\"\"\n This is the Human class to simulate a human\n \"\"\"\n\n def bet(self):\n \"\"\"\n This method lets the human player bet an amount of money\n \"\"\"\n while True:\n try:\n self.round_bet = float(\n input(f'{self.name}, please enter an amount to bet for this round: '))\n if self.round_bet > self.bankroll:\n print('You have bet more than you have!')\n continue\n if self.round_bet <= 0:\n self.out_of_round = True\n else:\n self.bankroll -= self.round_bet\n break\n except TypeError:\n print('Please enter in a valid bet!')\n continue\n except ValueError:\n print('Please enter in a valid bet!')\n return self.name, self.round_bet\n\n\nclass Dealer(Player):\n \"\"\"\n This is the Dealer class to simulate the computer\n \"\"\"\n hidden_card_value = Card()\n\n def __str__(self):\n return f'Player name: {self.name}\\nPlayer current hand: {reduce(lambda x, y: x.rank + \" of \" + x.suit + \" and \" + y.rank + \" of \" + y.suit, self.hand)}'\n\n def hide_card(self):\n \"\"\"\n This method hides the card value of one of the cards in the hand\n \"\"\"\n try:\n self.hidden_card_value = self.hand[1]\n self.hand[1] = Card()\n except IndexError:\n print('The dealer does not have enough cards!')\n\n def reveal_card(self):\n \"\"\"\n This method reveals the hidden card value of the dealer's hand\n \"\"\"\n self.hand[1] = self.hidden_card_value\n self.hidden_card_value = Card()\n","repo_name":"tnydg99/practice_python","sub_path":"blackjack/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"3799538667","text":"import requests\n\nfrom random import randint\n\nimport pyfiglet\n\nfrom termcolor import colored\n\ndef art_name(name, color):\n asciii = pyfiglet.figlet_format(name)\n colored_name = colored(asciii, color=color)\n print(colored_name)\n\n\nart_name(\"Dad Jokes 3000\", \"blue\")\n\njoke_theme = input(\"What's the joke theme?\")\n\nurl = \"https://icanhazdadjoke.com/search\"\n\nresponse = requests.get(\n url, \n headers={\"Accept\":\"application/json\"},\n params={\"term\": joke_theme}\n)\nteste = response.json()\n\ntotal_jokes = teste[\"total_jokes\"]\n\nhandling = teste['results']\n\njoke_number = randint(0, total_jokes)\n\ntry:\n print(handling[joke_number].get(\"joke\"))\nexcept IndexError:\n print(\"There's no joke about this topic...\")\n\n\n","repo_name":"Jacques-Drumond/DadJokesAPI","sub_path":"DadJokes3000.py","file_name":"DadJokes3000.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"72240535018","text":"import random\n\nRight = False\n\nandy = random.randint(1,100)\n\ncount = 0\n\n\nwhile Right !=True:\n while True:\n try:\n gucci = int(input(\"I be your manager lil gangsta, guess your cut from the latest album, playa(0-100): \"))\n count +=1\n break\n except:\n print(\"Sorry, homie, but now you're thinking in Spanish\")\n continue\n \n if (gucci==andy):\n print(\"Good job, muchacho!\")\n print(\"Your record label cut guesses took this many attempts: \" + str(count))\n Right = True\n else:\n if(gucciandy):\n print(\"You gotchyo head in the clouds, balla\")\n \n \n \n\n","repo_name":"DandyDaniel/2019ProgPort","sub_path":"NumberGuesser/numguess/numguess.py","file_name":"numguess.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18204679739","text":"N = int(input())\nl = []\nfor _ in range(N):\n A, B = map(int, input().split())\n l.append((A, B))\n\nt = N//2\ntl = sorted(l)\ntr = sorted(l, key=lambda x:-x[1])\n\nif N%2:\n print(tr[t][1]-tl[t][0]+1)\nelse:\n a1, a2 = tl[t-1][0], tr[t][1]\n a3, a4 = tl[t][0], tr[t-1][1]\n print(a4-a3+a2-a1+1)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02661/s031312527.py","file_name":"s031312527.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18659200970","text":"import torch.nn as nn\nimport selection.layers as layers\nfrom selection.models import utils\nfrom selection.models import train\nfrom torch.utils.data import DataLoader\n\n\nclass MLP(nn.Module):\n '''\n Multilayer perceptron (MLP) model.\n\n Args:\n input_size: number of inputs.\n output_size: number of outputs.\n hidden: list of hidden layer widths.\n activation: nonlinearity between hidden layers.\n output_activation: nonlinearity at output.\n '''\n def __init__(self,\n input_size,\n output_size,\n hidden,\n activation,\n output_activation=None,\n batch_norm=False):\n super().__init__()\n\n # Fully connected layers.\n self.input_size = input_size\n self.output_size = output_size\n fc_layers = [nn.Linear(d_in, d_out) for d_in, d_out in\n zip([input_size] + hidden, hidden + [output_size])]\n self.fc = nn.ModuleList(fc_layers)\n\n # Activation functions.\n self.activation = utils.get_activation(activation)\n self.output_activation = utils.get_activation(output_activation)\n\n # Set up batch norm.\n if batch_norm:\n layer_normalizers = [nn.BatchNorm1d(d) for d in hidden]\n else:\n layer_normalizers = [nn.Identity() for d in hidden]\n self.layer_normalizers = nn.ModuleList(layer_normalizers)\n\n # Set up training.\n self.learn = train.Training(self)\n\n def forward(self, x):\n for fc, norm in zip(self.fc, self.layer_normalizers):\n x = fc(x)\n x = self.activation(x)\n x = norm(x)\n\n return self.output_activation(self.fc[-1](x))\n\n def evaluate(self, dataset, loss_fn, mbsize=None):\n training = self.training\n self.eval()\n mbsize = mbsize if mbsize else len(dataset)\n loader = DataLoader(dataset, batch_size=mbsize)\n loss = utils.validate(self, loader, loss_fn)\n if training:\n self.train()\n return loss\n\n def extra_repr(self):\n return 'hidden={}'.format([fc.in_features for fc in self.fc[1:]])\n\n\nclass SelectorMLP(nn.Module):\n '''MLP with input layer selection.\n\n Args:\n input_layer: input layer type (e.g., 'concrete_gates').\n input_size: number of inputs.\n output_size: number of outputs.\n hidden: list of hidden layer widths.\n activation: nonlinearity between hidden layers.\n output_activation: nonlinearity at output.\n kwargs: additional arguments (e.g., k, init, append). 
Some are optional,\n but k is required for ConcreteMask and ConcreteGates.\n '''\n def __init__(self,\n input_layer,\n input_size,\n output_size,\n hidden,\n activation,\n output_activation=None,\n batch_norm=False,\n **kwargs):\n super().__init__()\n\n # Set up input layer.\n if input_layer == 'concrete_mask':\n k = kwargs.get('k')\n append = kwargs.get('append', True)\n kwargs['append'] = append\n mlp_input_size = 2 * input_size if append else input_size\n self.input_layer = layers.ConcreteMask(input_size, **kwargs)\n elif input_layer == 'concrete_selector':\n k = kwargs.get('k')\n mlp_input_size = k\n self.input_layer = layers.ConcreteSelector(input_size, **kwargs)\n elif input_layer == 'concrete_gates':\n append = kwargs.get('append', True)\n kwargs['append'] = append\n mlp_input_size = 2 * input_size if append else input_size\n self.input_layer = layers.ConcreteGates(input_size, **kwargs)\n elif input_layer == 'concrete_max':\n append = kwargs.get('append', True)\n kwargs['append'] = append\n mlp_input_size = 2 * input_size if append else input_size\n self.input_layer = layers.ConcreteMax(input_size, **kwargs)\n else:\n raise ValueError('unsupported input layer: {}'.format(input_layer))\n\n # Set up MLP.\n self.mlp = MLP(mlp_input_size, output_size, hidden, activation,\n output_activation, batch_norm)\n\n # Set up training.\n self.learn = train.AnnealedTemperatureTraining(self)\n\n def forward(self, x, **kwargs):\n return_mask = kwargs.get('return_mask', False)\n if return_mask:\n assert (\n isinstance(self.input_layer, layers.ConcreteMask) or\n isinstance(self.input_layer, layers.ConcreteGates))\n x, m = self.input_layer(x, **kwargs)\n return self.mlp(x), m\n else:\n return self.mlp(self.input_layer(x, **kwargs))\n\n def evaluate(self, dataset, loss_fn, mbsize=None, **kwargs):\n training = self.training\n self.eval()\n mbsize = mbsize if mbsize else len(dataset)\n loader = DataLoader(dataset, batch_size=mbsize)\n loss = utils.validate_input_layer(self, loader, loss_fn, **kwargs)\n if training:\n self.train()\n return loss\n\n def get_inds(self, **kwargs):\n return self.input_layer.get_inds(**kwargs)\n","repo_name":"iancovert/dl-selection","sub_path":"selection/models/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"90"}
+{"seq_id":"41104098205","text":"def check_response(response):\n for key, value in response.items():\n print(key, value)\n print('')\n \n \ndef save_response(response, name, path):\n # path = os.environ['ROOT']'\n \n # save the timeline\n with open(f'{path}/_data/timelines/{name.lower()}.yml', 'w') as f:\n f.write(response['timeline'])\n \n # create the .md file\n with open(f'{path}/src/name.md.temp', 'r') as f:\n md_template = f.readlines()\n \n updated_md = []\n for line in md_template:\n if line.startswith('name:'):\n updated_md.append(f'name: {name}\\n')\n elif line.startswith('character:'):\n updated_md.append(f'character: {name.lower()}\\n')\n elif line.startswith('description:'):\n updated_md.append(f\"description: {response['basic_intro']}\\n\")\n elif line.startswith('url:'):\n updated_md.append(f\"url: /characters/{name.lower()}\\n\")\n elif line.startswith('image:'):\n updated_md.append(f\"image: {name.lower()}.png\\n\")\n else:\n updated_md.append(line)\n \n updated_md.append(response[\"description\"])\n \n with open(f'{path}/_characters/{name.lower()}.md', 'w') as f:\n f.writelines(updated_md)\n\n \n ","repo_name":"wooginawunan/gpt-web-notes","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"16947946551","text":"class Solution:\n def canConstruct(self, ransomNote: str, magazine: str) -> bool:\n hashset = {}\n\n for i in ransomNote:\n if hashset.get(i):\n hashset[i] += 1\n else:\n hashset[i] = 1\n\n for i in magazine:\n if hashset.get(i):\n hashset[i] -= 1\n\n for i in hashset:\n if hashset[i] != 0:\n return False\n return True\n\n def canConstruct2(self, ransomNote: str, magazine: str) -> bool:\n # This is slower as hashset grows\n\n hashset = {}\n\n for i in magazine:\n if hashset.get(i):\n hashset[i] += 1\n else:\n hashset[i] = 1\n\n for i in ransomNote:\n if hashset.get(i):\n hashset[i] -= 1\n else:\n return False\n return True\n\n\nransomNote = \"a\"; magazine = \"b\"\nransomNote = \"aa\"; magazine = \"aab\"\nransomNote = \"aaaa\"; magazine = \"aaab\"\n\ns = Solution()\nprint(s.canConstruct2(ransomNote, magazine))\n\n\n","repo_name":"iamsuman/algorithms","sub_path":"iv/Leetcode/easy/383_ransom_note_string.py","file_name":"383_ransom_note_string.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
+{"seq_id":"34401524662","text":"# -*- coding: utf-8 -*-\n\nimport unittest\n\nfrom cwr.parser.decoder.dictionary import \\\n InterestedPartyForAgreementDictionaryDecoder\n\n\"\"\"\nDictionary to Message decoding tests.\n\nThe following cases are tested:\n\"\"\"\n\n__author__ = 'Bernardo Martínez Garrido'\n__license__ = 'MIT'\n__status__ = 'Development'\n\n\nclass TestInterestedPartyForAgreementDictionaryEncoding(unittest.TestCase):\n def setUp(self):\n self._decoder = InterestedPartyForAgreementDictionaryDecoder()\n\n def test_encoded(self):\n data = {}\n\n data['record_type'] = 'IPA'\n data['transaction_sequence_n'] = 3\n data['record_sequence_n'] = 15\n data['ip_n'] = 'IP123'\n data['ip_last_name'] = 'LAST NAME'\n data['agreement_role_code'] = 'AS'\n data['ip_writer_first_name'] = 'FIRST NAME'\n data['ipi_name_n'] = 250165006\n data['ipi_base_n'] = 'I-000000229-7'\n data['pr_society'] = 1\n data['pr_share'] = 50.1\n data['mr_society'] = 2\n data['mr_share'] = 50.2\n data['sr_society'] = 3\n data['sr_share'] = 50.3\n\n record = self._decoder.decode(data)\n\n self.assertEqual('IPA', record.record_type)\n self.assertEqual(3, record.transaction_sequence_n)\n self.assertEqual(15, record.record_sequence_n)\n self.assertEqual('IP123', record.ip_n)\n self.assertEqual('LAST NAME', record.ip_last_name)\n self.assertEqual('AS', record.agreement_role_code)\n self.assertEqual('FIRST NAME', record.ip_writer_first_name)\n self.assertEqual(250165006, record.ipi_name_n)\n self.assertEqual(1, record.pr_society)\n self.assertEqual(50.1, record.pr_share)\n self.assertEqual(2, record.mr_society)\n self.assertEqual(50.2, record.mr_share)\n self.assertEqual(3, record.sr_society)\n self.assertEqual(50.3, record.sr_share)\n\n self.assertEqual('I-000000229-7', record.ipi_base_n)\n","repo_name":"weso/CWR-DataApi","sub_path":"tests/parser/dictionary/decoder/record/test_interested_party_for_agreement.py","file_name":"test_interested_party_for_agreement.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"90"}
+{"seq_id":"74942183335","text":"import csv\nimport pandas as pd\n\nrez = {}\nright = 0\nfalse = 0\nwith open('try.csv') as f:\n\twith open('data/temp_new.csv') as f2:\n\t\trez = csv.reader(f)\n\t\trez = [i for i in rez]\n\t\tval = csv.reader(f2, delimiter='\\t')\n\t\tval = [i for i in val]\n\t\tfor i in range(len(rez)):\n\t\t\tif int(rez[i][1]) == int(val[i][-1]):\n\t\t\t\tright += 1\n\t\t\telse:\n\t\t\t\tfalse += 1\n\t\tprint('r=', right, 'f=', false)\n\n\n# rez = pd.read_csv('try.csv')\n\n# val = pd.read_csv('data/temp_new.csv', delimiter='\\t')\n\n# i = 1\n# while i <= 6299:\n# \trow = next(rez.iterrows())[i]\n# \tprint(row)\n# \ti += 1","repo_name":"Isterikus/fck_kr_nn","sub_path":"comp.py","file_name":"comp.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18536703749","text":"def get_root(s):\n if s != root[s]:\n root[s] = get_root(root[s])\n return root[s]\n return s\n\ndef unite(s, t):\n root_s = get_root(s)\n root_t = get_root(t)\n if not root_s == root_t:\n if rank[s] == rank[t]:\n root[root_t] = root_s\n rank[root_s] += 1\n elif rank[s] > rank[t]:\n root[root_t] = root_s\n else:\n root[root_s] = root_t\n\ndef same(s, t):\n if get_root(s) == get_root(t):\n return True\n else:\n return False\n\nn, m = map(int, input().split())\np = list(map(int, input().split()))\nroot = [i for i in range(n)]\nrank = [1 for _ in range(n)]\nfor _ in range(m):\n x, y = map(int, input().split())\n unite(x - 1, y - 1)\nans = 0\nfor i in range(n):\n if get_root(i) == get_root(p[i] - 1):\n ans += 1\nprint(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03354/s499080112.py","file_name":"s499080112.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"11579521697","text":"import scapy.contrib.http2 as h2\nimport json\nglobal cnt\ncnt = 1\n# 后续补充其他类型帧\ndef extract_type(frame_name):\n if frame_name.startswith('= 50:\r\n end_time = mid_time - 1\r\n else:\r\n start_time = mid_time + 1\r\n return start_time\r\n\r\n\r\n\r\ndef main():\r\n stdin = open('./input.txt', 'r')\r\n n = int(stdin.readline())\r\n\r\n total_server_computer = 0\r\n server_room = []\r\n for _ in range(n):\r\n row = list(map(int, stdin.readline().split()))\r\n server_room.append(row)\r\n total_server_computer += sum(row)\r\n\r\n if total_server_computer == 0:\r\n print(0)\r\n return\r\n\r\n time = binary_search(server_room, total_server_computer)\r\n print(time)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"ag502/algorithm","sub_path":"Problem/BOJ_17245_서버실/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"37886979183","text":"from odoo import models, fields, api, _\nfrom odoo.tools.misc import formatLang, format_date\n\nclass AccountPayment(models.Model):\n _inherit = 'account.payment'\n\n collection_receipt_id = fields.Many2one('account.collection.receipt', string='Collection Receipt')\n acknowledgement_receipt_id = fields.Many2one('account.acknowledgement.receipt', string='Acknowledgement Receipt')\n # vendor_bill_description = fields.Text(compute='_get_bill_details')\n\n # CUSTOMER PAYMENT DETAILS\n cp_cash = fields.Float(string='Cash')\n cp_check_no = fields.Char(string='Check No.')\n cp_check_date = fields.Date(string='Check Date')\n cp_bank_id = fields.Many2one('res.bank', string='Bank / Branch')\n cp_amount = fields.Monetary(string='Amount')\n\n @api.multi\n def action_generate_collection_receipt(self):\n for record in self:\n cr_id = self.env['account.collection.receipt'].create({})\n record.collection_receipt_id = cr_id.id\n\n @api.multi\n def action_print_collection_receipt(self):\n # return self.env['report'].get_action(self, 'globpak.report_account_collection_receipt')\n return self.env.ref('globpak.account_collection_receipt').report_action(self)\n\n @api.multi\n def action_generate_acknowledgement_receipt(self):\n for record in self:\n cr_id = self.env['account.acknowledgement.receipt'].create({})\n record.acknowledgement_receipt_id = cr_id.id\n\n @api.multi\n def action_print_acknowledgement_receipt(self):\n # return self.env['report'].get_action(self, 'globpak.report_account_acknowledgement_receipt')\n return self.env.ref('globpak.account_acknowledgement_receipt').report_action(self)\n\n # @api.multi\n # def _get_bill_details(self):\n # for record in self:\n # description = ''\n # for invoice in record.invoice_ids:\n # if invoice.x_description:\n # description += invoice.x_description\n # description += \" \\n\"\n # record.vendor_bill_description = description\n\n def make_stub_line(self, invoice):\n \"\"\" Return the dict used to display an invoice/refund in the stub\n \"\"\"\n # Find the account.partial.reconcile which are common to the invoice and the payment\n if invoice.type in ['in_invoice', 'out_refund']:\n invoice_sign = 1\n invoice_payment_reconcile = invoice.move_id.line_ids.mapped('matched_debit_ids').filtered(lambda r: r.debit_move_id in self.move_line_ids)\n else:\n invoice_sign = -1\n invoice_payment_reconcile = invoice.move_id.line_ids.mapped('matched_credit_ids').filtered(lambda r: r.credit_move_id in self.move_line_ids)\n\n if self.currency_id != self.journal_id.company_id.currency_id:\n amount_paid = abs(sum(invoice_payment_reconcile.mapped('amount_currency')))\n else:\n amount_paid = abs(sum(invoice_payment_reconcile.mapped('amount')))\n\n amount_residual = invoice_sign * invoice.residual\n\n description = invoice.x_description\n # if not description:\n # description = invoice.reference and invoice.number + ' - ' + invoice.reference or invoice.number\n\n return {\n 'due_date': format_date(self.env, invoice.date_due),\n # 'number': invoice.reference and invoice.number + ' - ' + invoice.reference or invoice.number,\n 'number': description,\n 'amount_total': formatLang(self.env, invoice_sign * invoice.amount_total, currency_obj=invoice.currency_id),\n 'amount_residual': formatLang(self.env, amount_residual, currency_obj=invoice.currency_id) if amount_residual*10**4 != 0 else '-',\n 'amount_paid': formatLang(self.env, invoice_sign * amount_paid, currency_obj=invoice.currency_id),\n 'currency': invoice.currency_id,\n 
}\n","repo_name":"Jeisonpernia/globpak","sub_path":"models/account_payment.py","file_name":"account_payment.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"28900716835","text":"from typing import Any, Dict, Tuple\nfrom airflow.triggers.base import BaseTrigger, TriggerEvent\n\nfrom hooks.redis_hook_async import RedisHookAsync\nfrom hooks.sidekiq_hook_async import SidekiqHookAsync\nfrom hooks.sidekiq_waiter import JobStatusWaiter\n\nclass SidekiqJobTrigger(BaseTrigger):\n def __init__(\n self,\n jid: str,\n http_conn_id: str = \"le_default\",\n redis_conn_id: str = \"le_redis_conn\",\n job_params: Dict[str, str] = {}\n ) -> None:\n super().__init__()\n self.jid = jid\n self.http_conn_id = http_conn_id\n self.redis_conn_id = redis_conn_id\n self.job_params = job_params\n\n def serialize(self) -> Tuple[str, Dict[str, Any]]:\n return (\n \"triggers.sidekiq_job_trigger.SidekiqJobTrigger\",\n {\n \"jid\": self.jid,\n \"redis_conn_id\": self.redis_conn_id,\n \"http_conn_id\": self.http_conn_id,\n \"job_params\": self.job_params\n }\n )\n\n async def run(self) -> None:\n # Subscribe to `sidekiq:job:{self.jid}` channel, then close connection after use\n async with RedisHookAsync(redis_conn_id=self.redis_conn_id).get_client() as client:\n channel = client.pubsub()\n await channel.subscribe(f\"sidekiq:job:{self.jid}\")\n self.log.info(f'Subscribed to channel \"sidekiq:job:{self.jid}\"')\n\n # Submit job\n hook = SidekiqHookAsync(http_conn_id=self.http_conn_id)\n await hook.submit_job(**self.job_params)\n self.log.info(f'Sidekiq job \"{self.jid}\" is submitted successfully !')\n\n # Wait for job status\n waiter = JobStatusWaiter(sub=channel)\n response = await waiter.wait()\n await channel.unsubscribe()\n yield TriggerEvent(response)\n","repo_name":"khoaanguyenn/airflow-research","sub_path":"airflow/plugins/triggers/sidekiq_job_trigger.py","file_name":"sidekiq_job_trigger.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"13882896066","text":"#! /usr/bin/env python\n\nimport rospy\n\nimport actionlib\n\nimport exercise3.msg\n\n\nclass FactorialAction(object):\n # create messages that are used to publish feedback/result\n _feedback = exercise3.msg.FactorialFeedback()\n _result = exercise3.msg.FactorialResult()\n\n def __init__(self, name):\n self._action_name = name\n self._as = actionlib.SimpleActionServer(\n self._action_name, exercise3.msg.FactorialAction, execute_cb=self.execute_cb, auto_start=False)\n self._as.start()\n\n def execute_cb(self, goal):\n # helper variables\n r = rospy.Rate(1)\n success = True\n\n # append the seeds for the fibonacci sequence\n self._feedback.sequence = []\n self._feedback.sequence.append(1)\n self._feedback.sequence.append(2)\n\n # publish info to the console for the user\n rospy.loginfo('%s: Executing, creating factorial of order %i' % (\n self._action_name, goal.order))\n\n # start executing the action\n while(self._feedback.sequence[1] <= goal.order):\n # check that preempt has not been requested by the client\n if self._as.is_preempt_requested():\n rospy.loginfo('%s: Preempted' % self._action_name)\n self._as.set_preempted()\n success = False\n break\n\n self._feedback.sequence[0] = self._feedback.sequence[0] * \\\n self._feedback.sequence[1]\n self._feedback.sequence[1] += 1\n # publish the feedback\n self._as.publish_feedback(self._feedback)\n # this step is not necessary, the sequence is computed at 1 Hz for demonstration purposes\n r.sleep()\n\n if success:\n self._result.sequence = self._feedback.sequence\n rospy.loginfo('%s: Succeeded' % self._action_name)\n self._as.set_succeeded(self._result)\n\n\nif __name__ == '__main__':\n rospy.init_node('factorial')\n server = FactorialAction(rospy.get_name())\n rospy.spin()\n","repo_name":"adivijay04/ROS_exercise3","sub_path":"src/exercise3/scripts/factorial_server.py","file_name":"factorial_server.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"26753260243","text":"def __init__(self, config):\n self.config = config\n pass\n\n\n# start initial request\ndef search(self, query):\n print('INFO: Searching FMovies for \"' + query + '\"...')\n url = str(self.config['indexer fmovies']['url'])\n headers = {\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n }\n params = (\n ('keyword', query),\n )\n\n\n\n\n## to get cookies and return ehader\ndef get_cookie(self, url):\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',\n }\n\n response = requests.get(url, headers=headers, verify=False)\n cookie = '; '.join([x.name + '=' + x.value for x in response.cookies])\n\n return cookie\n\n\n\n","repo_name":"elithaxxor/RANDOM_PY_SCRIPTS_OLD","sub_path":"xxSCRAPER.py","file_name":"xxSCRAPER.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"17008600750","text":"'''https://py.checkio.org/en/mission/roman-numerals/'''\r\n\r\n\r\nLOOKUP = [\r\n (1000, 'M'),\r\n (900, 'CM'),\r\n (500, 'D'),\r\n (400, 'CD'),\r\n (100, 'C'),\r\n (90, 'XC'),\r\n (50, 'L'),\r\n (40, 'XL'),\r\n (10, 'X'),\r\n (9, 'IX'),\r\n (5, 'V'),\r\n (4, 'IV'),\r\n (1, 'I'),\r\n]\r\n\r\n\r\ndef checkio(num: int) -> str:\r\n res = []\r\n for n, roman in LOOKUP:\r\n d, num = divmod(num, n)\r\n res.append(roman * d)\r\n return ''.join(res)\r\n\r\n\r\nif __name__ == '__main__':\r\n assert checkio(6) == 'VI', '6'\r\n assert checkio(76) == 'LXXVI', '76'\r\n assert checkio(499) == 'CDXCIX', '499'\r\n assert checkio(3888) == 'MMMDCCCLXXXVIII', '3888'\r\n print('OK')\r\n","repo_name":"siimveske/checkio.org","sub_path":"7. ice base/014_roman_numerals.py","file_name":"014_roman_numerals.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"16317510087","text":"from time import time\n\nfrom flask import *\n\nfrom model.playlist import *\nfrom model.song import *\nfrom model.user import *\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef login_page():\n if request.method == 'POST':\n if 'delete' in request.form:\n username = request.cookies.get('username')\n userManager = UserManager()\n userManager.delete_user(username)\n return render_template('login.html')\n\n\n@app.route('/home', methods=['POST', 'GET'])\ndef home_page():\n userManager = UserManager()\n playlistManager = PlaylistManager()\n if 'login' in request.form:\n username = request.form['username']\n user = userManager.get_user(username)\n if user is None:\n user = User(username)\n message = username + \" has been created\"\n userManager.insert_user(user)\n else:\n message = \"Hello \" + username\n\n #\n # elif 'userRating' in request.form:\n # userRating = request.form['userRating']\n else:\n username = request.cookies.get('username')\n user = User(username)\n message = \"Hello \" + username\n genres = userManager.get_user_genre(user)\n songManager = SongManager()\n songList = songManager.get_songs()\n\n songList=songManager.get_songs()\n playlists = playlistManager.get_user_playlists(username)\n listofgenres = playlistManager.genre_list(playlists, username)\n popular_songs = songManager.song_in_every_playlist()\n resp = make_response(\n render_template(\"index.html\", message=message, genres=genres, songList=songList, playlists=playlists,\n genrelists=listofgenres, popular_songs=popular_songs))\n resp.set_cookie('username', username)\n return resp\n\n\n@app.route('/addSong', methods=['POST'])\ndef add_song_to_playlist():\n username = request.cookies.get('username')\n name = request.cookies.get('playlist_name')\n addSong = request.form['addSong']\n playlistManager = PlaylistManager()\n playlistManager.insert_song_in_playlist(name, username, addSong)\n\n songManager = SongManager()\n songList = songManager.get_songs()\n\n playlistSong = playlistManager.get_songs_in_playlist(name, username)\n\n resp = make_response(render_template(\"playlist.html\", songList=songList, playlistSong=playlistSong))\n return resp\n\n\n@app.route('/createPlaylist', methods=['POST'])\ndef create_playlist_button():\n username = request.cookies.get('username')\n name = request.form['plName']\n playlistManager = PlaylistManager()\n if not playlistManager.is_playlist_in_user(name, username):\n if request.form['visibility'] == \"private\":\n password = request.form['plPassword']\n playlistManager.insert_private_playlist(PrivatePlaylist(name, username, time(), password))\n else:\n playlistManager.insert_public_playlist(PublicPlaylist(name, username, time()))\n return redirect('/home#playlist')\n\n\n@app.route('/viewplaylistlogin', methods=['POST'])\ndef view_playlist_button():\n username = request.cookies.get('username')\n name = request.form['plName']\n playlistManager = PlaylistManager()\n songManager = SongManager()\n songList=songManager.get_songs()\n if playlistManager.is_private(name,username)==True:\n pass\n password = request.form['plPassword']\n if playlistManager.password_check(name,username, password) == True:\n playlistSong = playlistManager.get_songs_in_playlist(name,username)\n resp = make_response(render_template(\"playlist.html\",songList=songList,playlistSong=playlistSong))\n resp.set_cookie('playlist_name', name)\n return resp\n else:\n return redirect('/home#playlist')\n else:\n playlistSong = 
playlistManager.get_songs_in_playlist(name,username)\n resp = make_response(render_template(\"playlist.html\",songList=songList,playlistSong=playlistSong))\n resp.set_cookie('playlist_name', name)\n return resp\n\n@app.route('/genreButton', methods=['POST'])\ndef genre_button():\n username = request.cookies.get('username')\n genre = request.form['genre']\n user = User(username)\n userManager = UserManager()\n if userManager.is_genre_in_user_genre(user, genre):\n userManager.delete_user_genre(user, genre)\n else:\n userManager.insert_user_genre(user, genre)\n return redirect('/home#settings')\n\n\n@app.route('/deleteUser', methods=['POST'])\ndef delete_user_button():\n username = request.cookies.get('username')\n userManager = UserManager()\n userManager.delete_user(username)\n return redirect('/')\n\n\n@app.route('/updateUser', methods=['POST'])\ndef update_username_button():\n username = request.cookies.get('username')\n userManager = UserManager()\n new_username = request.form['username']\n userManager.update_username(username, new_username)\n return redirect('/')\n\n\n@app.route('/homeGenre', )\ndef home_genre():\n userManager = UserManager()\n username = request.cookies.get('username')\n user = User(username)\n genres = userManager.get_user_genre(user)\n return render_template('genre.html', genres=genres)\n\n\n@app.route('/homePlaylist')\ndef home_playlist():\n playlistManager = PlaylistManager()\n username = request.cookies.get('username')\n playlists = playlistManager.get_user_playlists(username)\n return render_template('playlist.html', playlists=playlists)\n\n\n@app.route('/playlist', methods=['POST', 'GET'])\ndef playlist_page():\n if request.method == 'POST':\n username = request.cookies.get('username')\n playlistManager = PlaylistManager()\n if 'playlistButton' in request.form:\n playlistName = request.form['playlist']\n if playlistManager.is_playlist_in_user(playlistName, username):\n playlistManager.delete_playlist(playlistName, username)\n else:\n playlist = Playlist(playlistName, username, 0)\n playlistManager.insert_playlist(playlist)\n playlists = playlistManager.get_user_playlists(username)\n resp = make_response(render_template(\"index.html\", playlists=playlists))\n return resp\n\n\n@app.route('/songs', methods=['POST', 'GET'])\ndef songs_page():\n songManager = SongManager()\n songList = songManager.get_songs()\n resp = make_response(render_template(\"index.html\", songList=songList))\n return resp\n\n\n@app.route('/settings', methods=['POST', 'GET'])\ndef settings_page():\n if request.method == 'POST':\n userManager = UserManager()\n username = request.cookies.get('username')\n user = User(username)\n if 'genreButton' in request.form:\n genre = request.form['genre']\n if userManager.is_genre_in_user_genre(user, genre):\n userManager.delete_user_genre(user, genre)\n else:\n userManager.insert_user_genre(user, genre)\n genres = userManager.get_user_genre(user)\n resp = make_response(render_template(\"settings.html\", genres=genres))\n return resp\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Tooo/Playlist-Database","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7117,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"}
+{"seq_id":"18033568839","text":"x,y=map(int,input().split())\n\nif x*y<0:\n ans=abs(abs(y)-abs(x))+1\nelif x==0:\n ans=abs(y)\n if y<0:\n ans+=1\nelif y==0:\n ans=abs(x)\n if x>0:\n ans+=1\nelse:\n ans=abs(abs(y)-abs(x))\n if x>y:\n ans+=2\n\nprint(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03838/s158155280.py","file_name":"s158155280.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"70458442538","text":"import os\nimport pickle\nimport re\n\nDATA_FILE = \"./data.pickle\"\n\n\ndef check_pickle(ext: str):\n \"\"\"Makes sure the file exists and the dict for the extension is set\n\n Args:\n ext (str): Name of manga extension\n \"\"\"\n\n # creates pickle file if it doesn't exist and stores an empty dict in it\n if not os.path.exists(DATA_FILE):\n f = open(DATA_FILE, \"wb\")\n pickle.dump({}, f)\n f.close()\n\n with open(DATA_FILE, \"rb\") as f:\n data = pickle.load(f)\n f.close()\n\n if not ext in data:\n data[ext] = {}\n\n with open(DATA_FILE, \"wb\") as f:\n pickle.dump(data, f)\n f.close()\n# end_check_pickle\n\n\ndef write_pickle(ext: str, key: str, value):\n \"\"\"Writes pickle data for extension\n\n Args:\n ext (str): Name of manga extension\n key (str): Key for data\n value: Data to be saved\n \"\"\"\n\n check_pickle(ext)\n\n with open(DATA_FILE, \"rb\") as f:\n data = pickle.load(f)\n f.close()\n\n data[ext][key] = value\n\n with open(DATA_FILE, \"wb\") as f:\n pickle.dump(data, f)\n f.close()\n# end_write_pickle\n\n\ndef read_pickle(ext: str, key: str):\n \"\"\"Reads pickle data for extension\n\n Args:\n ext (str): Name of manga extension\n key (str): Key for data\n\n Returns:\n str: Data that was saved\n \"\"\"\n\n check_pickle(ext)\n\n with open(DATA_FILE, \"rb\") as f:\n data = pickle.load(f)\n f.close()\n\n if key in data[ext]:\n return data[ext][key]\n\n else:\n return None\n# end_read_pickle\n\n\ndef delete_pickle(ext: str, key: str = \"\") -> bool:\n \"\"\"Deletes either extension or key from pickle\n\n Args:\n ext (str): Name of manga extension\n key (str, optional): Key for data. Defaults to \"\".\n\n Returns:\n bool: False if not found, True if deleted successfully\n \"\"\"\n\n if not os.path.exists(DATA_FILE):\n return False\n\n with open(DATA_FILE, \"rb\") as f:\n data = pickle.load(f)\n\n try:\n if key == \"\":\n del data[ext]\n\n else:\n del data[ext][key]\n\n except:\n return False\n\n with open(DATA_FILE, \"wb\") as f:\n pickle.dump(data, f)\n f.close()\n\n return True\n # end_delete_pickle\n\n\ndef is_url(url: str) -> bool:\n \"\"\"Checks whether string parameter is a url\n\n Args:\n url (str): String to check whether it is a url\n\n Returns:\n bool: Boolean value whether string is a url\n \"\"\"\n regex = re.compile(\n r'^(?:http)s?://' # http:// or https://\n # domain...\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n r'localhost|' # localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' # optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\n return not re.match(regex, url) == None\n# end_is_url\n\n\ndef is_valid_download_range(to_download: str) -> bool:\n \"\"\"Checks whether download range from manga_info is valid\n\n Args:\n num (str): Range of chapters to download. Eg (\"1-10\", \"1-10.5\", \"1,2,3,5-7\", \"1.5,3-8\")\n\n Returns:\n bool: _description_\n \"\"\"\n res = True\n pattern = re.compile(\"^\\d+(\\.?\\d+)?$\")\n dl_spl = to_download.split(\",\")\n num_list = []\n\n for i in range(len(dl_spl)):\n num_list += dl_spl[i].split(\"-\")\n\n for number in num_list:\n if not bool(re.search(pattern, number.strip())):\n res = False\n\n return res\n# end_is_digit\n","repo_name":"Benjababe/GenericMangoDownloader","sub_path":"core/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
+{"seq_id":"42826346421","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 17 11:58:07 2019\r\n\r\n@author: sivan\r\n\"\"\"\r\n\r\nimport sys\r\nli=[]\r\nfor p in sys.argv[1:]:\r\n li+=eval(p)\r\nli.sort()\r\nprint(li)\r\n ","repo_name":"sivanidwarampudi/Local","sub_path":"untitled30.py","file_name":"untitled30.py","file_ext":"py","file_size_in_byte":178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"71131128616","text":"import sys\r\n\r\nimport time\r\nfrom PyQt5.QtCore import pyqtSlot\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import QAction\r\nfrom PyQt5.QtWidgets import QApplication, QWidget\r\nfrom PyQt5.QtWidgets import QComboBox\r\nfrom PyQt5.QtWidgets import QLineEdit\r\nfrom PyQt5.QtWidgets import QMainWindow\r\nfrom PyQt5.QtWidgets import QMessageBox\r\nfrom PyQt5.QtWidgets import QPushButton\r\nfrom PyQt5.QtWidgets import QTableWidget\r\nfrom PyQt5.QtWidgets import QTableWidgetItem\r\n\r\napp = QApplication(sys.argv)\r\n\r\nw = QMainWindow()\r\nw.resize(950, 450)\r\nw.move(500, 300)\r\nw.setWindowTitle('Simple')\r\nmenu = w.menuBar()\r\nmenu.setNativeMenuBar(False)\r\nfileMenu = menu.addMenu('&File')\r\n@pyqtSlot()\r\ndef on_press():\r\n print('[', time.strftime('%X'), ']', \" Button is press\")\r\n@pyqtSlot()\r\ndef on_click():\r\n print('[', time.strftime('%X'), ']', 'Button is clicked')\r\nQMessageBox.about(w, 'Write', 'Would you like write? Send you main on: mongolzzz21@gmail.com')\r\nresult = QMessageBox.question(w, 'Start', 'Do you like Python?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\r\n\r\nif (result == QMessageBox.Yes):\r\n\r\n m_but = QAction('Exit', w)\r\n m_but.setShortcut('Ctrl+Q')\r\n m_but.setStatusTip('Exit app')\r\n m_but.triggered.connect(w.close)\r\n fileMenu.addAction(m_but)\r\n textbox = QLineEdit(w)\r\n textbox.move(10, 60)\r\n textbox.resize(100, 20)\r\n butt = QPushButton(\"\", w);\r\n butt.setToolTip('Click to quite');\r\n butt.resize(butt.sizeHint())\r\n butt.move(10, 35)\r\n butt.clicked.connect(on_click);\r\n butt.pressed.connect(on_press);\r\n\r\n table = QTableWidget(w)\r\n tableItem = QTableWidgetItem()\r\n\r\n table.move(10, 120)\r\n table.setRowCount(3)\r\n table.setColumnCount(2)\r\n table.resize(216, 134)\r\n\r\n table.setItem(0, 0, QTableWidgetItem(\"Item 1\"))\r\n table.setItem(0, 1, QTableWidgetItem(\"Item 2\"))\r\n table.setItem(1, 0, QTableWidgetItem(\"Item 3\"))\r\n table.setItem(1, 1, QTableWidgetItem(\"Item 4\"))\r\n table.setItem(2, 0, QTableWidgetItem(\"Item 5\"))\r\n table.setItem(2, 1, QTableWidgetItem(\"Item 6\"))\r\n\r\n combbox = QComboBox(w)\r\n combbox.addItem(\"1\")\r\n combbox.addItem(\"2\")\r\n combbox.addItem(\"3\")\r\n combbox.addItem(\"4\")\r\n combbox.addItem(\"5\")\r\n combbox.move(10, 90)\r\n\r\n w.show()\r\n # m.show()\r\nelif (result == QMessageBox.No):\r\n QMessageBox.critical(w, 'CRERROR', 'CRITICAL ERROR! finished with exit code -1073740791 (0xC0000409)')\r\n exit()\r\nsys.exit(app.exec_())","repo_name":"JCodePeace/U_LB_Python","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"43338073351","text":"import time\nimport sys\n\n\nclass Profiler:\n def __init__(self, enabled=True):\n self.profile = {}\n self.enabled = enabled\n self.out = sys.stdout\n\n def start(self, fname):\n if fname not in self.profile:\n self.profile[fname] = {\n 'current': None,\n 'total': 0,\n 'calls': 0\n }\n self.profile[fname]['current'] = time.time()\n self.profile[fname]['calls'] += 1\n\n def finish(self, fname):\n self.profile[fname]['total'] += time.time() - self.profile[fname]['current']\n\n def print_profile(self):\n self.out.write(\"=\" * 85 + \"\\n\")\n self.out.write(\"=\" + \" \" * 35 + \"Time Profiles\" + \" \" * 35 + \"=\\n\")\n self.out.write(\"=\" * 85 + \"\\n\")\n self.out.write(f\"{'Function name':40s} {'# of calls':15s} {'time per call':15s} {'total time':15s}\\n\")\n self.out.write(\"-\" * 85 + \"\\n\")\n for k in self.profile:\n self.out.write(f\"{k:40s} {self.profile[k]['calls']:<15d} \")\n self.out.write(f\"{self.profile[k]['total']/self.profile[k]['calls']:<15.2f} \")\n self.out.write(f\"{self.profile[k]['total']:<15.2f}\\n\")\n self.out.write(\"=\" * 85 + \"\\n\")\n\n\nglobal_profiler = Profiler(False)\n\n\ndef profile(func, profiler=global_profiler):\n \"\"\"Wraps specified functions of an object with start and finish\"\"\"\n if not profiler.enabled:\n return func\n\n def wrap(*args, **kwargs):\n profiler.start(func.__name__)\n result = func(*args, **kwargs)\n profiler.finish(func.__name__)\n return result\n\n wrap.__name__ = func.__name__\n return wrap\n","repo_name":"Rahgooy/MDFT","sub_path":"mdft_nn/helpers/profiling.py","file_name":"profiling.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
+{"seq_id":"18014146883","text":"# ---\n# jupyter:\n# jupytext:\n# comment_magics: false\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.2'\n# jupytext_version: 1.1.5\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %%\nimport pandas as pd\n\nfrom nltk.classify import NaiveBayesClassifier\nfrom nltk.sentiment import SentimentAnalyzer\nfrom nltk.sentiment.util import *\n\n# Useful references:\n# https://towardsdatascience.com/basic-binary-sentiment-analysis-using-nltk-c94ba17ae386\n\n# NLTK provides for some other sorts of features\n# from nltk.sentiment.util import (mark_negation, extract_unigram_feats)\n# mark_negation(): Append _NEG suffix to words that appear in the scope between\n# a negation and a punctuation mark. extract_unigram_feats():\n# Populate a dictionary of unigram features, reflecting the presence/absence\n# in the document of each of the tokens in unigrams.\n\n# Data\nPROC_DIR = 'data/'\nTRAIN = PROC_DIR + 'train.csv'\nDEV = PROC_DIR + 'dev.csv'\n# In a previous step, I tokenized and pre-processed data and written\n# out to a csv file.\n\ndf_train = pd.read_csv(TRAIN)\ndf_dev = pd.read_csv(DEV)\n\n# %%\ndf_train = pd.DataFrame(df_train,columns=['id','label','text'])\n\n# %%\n# Feature extraction\ndf_pos_train = df_train[df_train['label'] == 'positive']\npos_tweets = df_pos_train['text'].tolist()\n\ndf_neg_train = df_train[df_train['label'] == 'negative']\nneg_tweets = df_neg_train['text'].tolist()\n\ndf_neutral_train = df_train[df_train['label'] == 'neutral']\nneutral_tweets = df_neutral_train['text'].tolist()\n\n# %%\n# how balanced is this training set?\nlen(df_pos_train)\n\n# %%\nlen(df_neg_train)\n\n# %%\nlen(df_neutral_train)\n\n\n# %%\ndef features(sentence):\n words = sentence.lower().split()\n return dict(('contains(%s)' % w, True) for w in words)\n\npositive_featuresets = [(features(tweet),'positive') for tweet in pos_tweets]\nnegative_featuresets = [(features(tweet),'negative') for tweet in neg_tweets]\nneutral_featuresets = [(features(tweet),'neutral') for tweet in neutral_tweets]\ntraining_features = positive_featuresets + negative_featuresets + neutral_featuresets\n\n# %%\nlen(training_features)\n\n# %%\nsentiment_analyzer = SentimentAnalyzer()\ntrainer = NaiveBayesClassifier.train\nclassifier = sentiment_analyzer.train(trainer, training_features)\n\n# %%\n# Create evaluation data\n\n#df_dev = pd.DataFrame(df_dev,columns=['id','label','text'])\ntruth_list = list(df_dev[['text', 'label']].itertuples(index=False, name=None))\nlen(truth_list)\n\n# %%\n# sanity check to make sure we manipulated the dataframe properly\ntruth_list[100]\n\n# %%\n# The evaluation method needs the feature extractor that was run to train the classifier\n# Specifically, it wants a list of tuples (features,truth), where features is a dict\nfor i, (text, expected) in enumerate(truth_list):\n text_feats = features(text)\n truth_list[i] = (text_feats, expected)\ntruth_list[100]\n\n# %%\n# evaluate and print out all metrics\nsentiment_analyzer.evaluate(truth_list,classifier)\n\n# %%\n# example of how to get to individual metrics\nfor key,value in sorted(sentiment_analyzer.evaluate(truth_list).items()):\n print('{0}: {1}'.format(key, value))\n","repo_name":"ANLY580/lectures","sub_path":"5-bayes-sentiment/sentiment_nltk_naivebayes.py","file_name":"sentiment_nltk_naivebayes.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"}
+{"seq_id":"3384643756","text":"import unittest\nimport tempfile\nimport os.path\n\nimport pyredditdl.config\n\nclass ProcessorCommonTest(unittest.TestCase):\n def setUp(self):\n self.reddit_object = {\n 'kind': 't3',\n 'data': {\n 'subreddit': 'example',\n 'title': \"Example Subreddit object\",\n 'url': 'http://loremflickr.com/320/240/building.jpg',\n }\n }\n\n self.resdir = tempfile.mkdtemp('pyredditdl-test')\n pyredditdl.config.config['dir'] = self.resdir\n","repo_name":"art-solopov/pyRedditDL","sub_path":"tests/processor_common_test.py","file_name":"processor_common_test.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"37093310897","text":"'''a* game'''\n\n\n\ndef astar(start, end):\n '''astar algorithm'''\n openlist = []\n closedlist = []\n camefrom = []\n openlist.append(start)\n start.g = 0\n while len(openlist) != 0:\n openlist.sort(key=lambda x: x.f)\n current = openlist[0]\n openlist.remove(current)\n closedlist.append(current)\n if current == end:\n camefrom = retrace(start, end)\n return camefrom\n for node in current.adjacents:\n if node in closedlist:\n continue\n set_gscore(current, node)\n tentative_g = current.g + set_gscore(current, node)\n if node not in openlist:\n openlist.append(node)\n elif tentative_g >= node.g:\n continue\n node.parent = current\n node.g = tentative_g\n node.h = Manhattan(node, end)\n node.f = node.g + node.h\n\n\n\ndef Manhattan(start, end):\n '''calculate manhattan distance'''\n xtotal = abs(end.posx - start.posx)\n ytotal = abs(end.posy - start.posy)\n return (xtotal + ytotal) * 10\n\n\ndef set_gscore(current, adjacent):\n '''sets gscore for node'''\n return 10 if adjacent.posx == current.posx or adjacent.posy == current.posy else 14\n\n\ndef retrace(start, end):\n '''retraces the path'''\n path = []\n i = end\n while i is not start:\n path.append(i)\n i = i.parent\n return path\n","repo_name":"regireed89/IntroToPython","sub_path":"Python Projects/Astar.py","file_name":"Astar.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"33235953116","text":"import setuptools\r\n\r\n__author__ = 'WangHao'\r\n__version__ = '0.0.1'\r\n\r\n\r\ndef get_description():\r\n return (\"Provide a simple way to get backtesting results without\\\r\n need to care about the specifications\")\r\n\r\nwith open(\"README.md\",\"r\") as fh:\r\n long_description=fh.read()\r\n\r\n\r\nsetuptools.setup(\r\n name=\"ptbt\",\r\n version=__version__,\r\n author=\"WangHao\",\r\n author_email=\"wanghao0524@outlook.com\",\r\n description=get_description(),\r\n long_description=long_description,\r\n long_description_content_type=\"text/markdown\",\r\n license=\"BSD\",\r\n keywords=\"Portfolio backtesting\",\r\n url=\"https://github.com/WilliamWang1996/ptbt\",\r\n include_package_data=True,\r\n packages=setuptools.find_packages(),\r\n platforms=['any'],\r\n classifiers=[\r\n \"Programming Language :: Python :: 3.6\",\r\n \"Natural Language :: English\",\r\n \"Intended Audience :: Developers\",\r\n \"Operating System :: OS Independent\",\r\n \"Development Status :: 3 - Alpha\",\r\n \"Topic :: Utilities\",\r\n \"License :: OSI Approved :: BSD License\",\r\n ],\r\n python_requires='>=3.6',\r\n)\r\n","repo_name":"WilliamWang1996/ptbt","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18108582179","text":"N = int(input())\nA = [int(input()) for i in range(N)]\n\ndef insertion_sort(A, N, diff, cnt):\n for i in range(diff, N):\n tmp_num = A[i]\n j = i - diff\n while j >= 0 and A[j] > tmp_num:\n A[j+diff] = A[j]\n j = j - diff\n cnt += 1\n A[j+diff] = tmp_num\n return cnt\n\nif __name__ == \"__main__\":\n cnt = 0\n divide_cnt = 0\n diffs = []\n if N == 1:\n diffs.append(1)\n divide_cnt += 1\n else:\n quotient = N\n while quotient != 1:\n quotient = quotient // 2\n diffs.append(quotient)\n divide_cnt += 1\n for diff in diffs:\n cnt = insertion_sort(A, N, diff, cnt)\n print(divide_cnt)\n print(\" \".join(map(str, diffs)))\n print(cnt)\n for num in A:\n print(num)\n\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02262/s945109907.py","file_name":"s945109907.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"11690260850","text":"from config import *\n\n\nclass Ant(pg.sprite.Sprite):\n def __init__(self, draw_surf, home, pher_type=1):\n super().__init__()\n self.draw_surf = draw_surf\n self.home = home\n self.image = pg.Surface((12, 21)).convert()\n self.image.set_colorkey(0)\n color_brown = (80, 42, 42)\n self.pher_type = pher_type\n\n pg.draw.ellipse(self.image, color_brown, [4, 6, 4, 9])\n\n self.original_image = pg.transform.rotate(self.image.copy(), -90)\n self.rect = self.image.get_rect(center=self.home)\n self.angle = randint(0, 360)\n self.desire_direction = pg.Vector2(cos(radians(self.angle)), sin(radians(self.angle)))\n self.position = pg.Vector2(self.rect.center)\n self.velocity = pg.Vector2(0, 0)\n self.last_pheromone = home\n\n def update(self, delta_time, **kwargs):\n pheromones = kwargs[\"pheromones\"]\n cur_w, cur_h = self.draw_surf.get_size()\n mid_sensor_result = left_sensor_result = right_sensor_result = (0, 0, 0)\n random_angle = randint(0, 360)\n acceleration = pg.Vector2(0, 0)\n wander_strength = 0.15\n max_speed = 12\n steer_strength = 3\n\n if self.position.distance_to(self.last_pheromone) > 24:\n pheromones.add(Trail(self.position, self.pher_type))\n self.last_pheromone = pg.Vector2(self.rect.center)\n\n mid_sensor_left = self.vint(self.position + pg.Vector2(21, -3).rotate(self.angle))\n mid_sensor_right = self.vint(self.position + pg.Vector2(21, 3).rotate(self.angle))\n left_sensor_1 = self.vint(self.position + pg.Vector2(18, -14).rotate(self.angle))\n left_sensor_2 = self.vint(self.position + pg.Vector2(16, -21).rotate(self.angle))\n right_sensor_1 = self.vint(self.position + pg.Vector2(18, 14).rotate(self.angle))\n right_sensor_2 = self.vint(self.position + pg.Vector2(16, 21).rotate(self.angle))\n\n # Checks if the sensor points are inside a rect and if they are grab the pixel values to the l/r of each sensor\n # and check the max value of either the left/right sensor for each R G B value\n if self.draw_surf.get_rect().collidepoint(mid_sensor_left) and self.draw_surf.get_rect().collidepoint(mid_sensor_right):\n mid_sensor_left_right = self.draw_surf.get_at(mid_sensor_left)[:3]\n mid_sensor_right_right = self.draw_surf.get_at(mid_sensor_right)[:3]\n mid_sensor_result = (max(mid_sensor_left_right[0], mid_sensor_right_right[0]),\n max(mid_sensor_left_right[1], mid_sensor_right_right[1]),\n max(mid_sensor_left_right[2], mid_sensor_right_right[2]))\n\n if self.draw_surf.get_rect().collidepoint(left_sensor_1) and self.draw_surf.get_rect().collidepoint(left_sensor_2):\n left_sensor_right_1 = self.draw_surf.get_at(left_sensor_1)[:3]\n left_sensor_right_2 = self.draw_surf.get_at(left_sensor_2)[:3]\n left_sensor_result = (max(left_sensor_right_1[0], left_sensor_right_2[0]),\n max(left_sensor_right_1[1], left_sensor_right_2[1]),\n max(left_sensor_right_1[2], left_sensor_right_2[2]))\n\n if self.draw_surf.get_rect().collidepoint(right_sensor_1) and self.draw_surf.get_rect().collidepoint(right_sensor_2):\n right_sensor_right_1 = self.draw_surf.get_at(right_sensor_1)[:3]\n right_sensor_right_2 = self.draw_surf.get_at(right_sensor_2)[:3]\n right_sensor_result = (max(right_sensor_right_1[0], right_sensor_right_2[0]),\n max(right_sensor_right_1[1], right_sensor_right_2[1]),\n max(right_sensor_right_1[2], right_sensor_right_2[2]))\n\n # If the max sensor value (index 2 blue pixel) is gt the max value of either the left sensor,\n # right. And the mid sensor green / blue is == 0 Then set the desired direction to forward. 
And set wander to 0\n if mid_sensor_result[2] > max(left_sensor_result[2], right_sensor_result[2]) and mid_sensor_result[:2] == (0, 0):\n self.desire_direction = pg.Vector2(1, 0).rotate(self.angle).normalize() # Set dir to forwards\n wander_strength = 0\n\n elif left_sensor_result[2] > right_sensor_result[2] and left_sensor_result[:2] == (0, 0):\n self.desire_direction = pg.Vector2(1, -2).rotate(self.angle).normalize() # Set dir to left (0,-1)\n wander_strength = 0\n\n elif right_sensor_result[2] > left_sensor_result[2] and right_sensor_result[:2] == (0, 0):\n self.desire_direction = pg.Vector2(1, 2).rotate(self.angle).normalize() # Set dir to right (0, 1)\n wander_strength = 0\n\n # Avoid edges\n if not self.draw_surf.get_rect().collidepoint(left_sensor_2) and self.draw_surf.get_rect().collidepoint(right_sensor_2):\n self.desire_direction += pg.Vector2(0, 1).rotate(self.angle)\n wander_strength = 0\n steer_strength = 4\n\n elif not self.draw_surf.get_rect().collidepoint(right_sensor_2) and self.draw_surf.get_rect().collidepoint(left_sensor_2):\n self.desire_direction += pg.Vector2(0, -1).rotate(self.angle)\n wander_strength = 0\n steer_strength = 4\n\n elif not self.draw_surf.get_rect().collidepoint(self.vint(self.position + pg.Vector2(21, 0).rotate(self.angle))):\n self.desire_direction += pg.Vector2(-1, 0).rotate(self.angle)\n wander_strength = 0\n steer_strength = 5\n\n random_direction = pg.Vector2(cos(radians(random_angle)), sin(radians(random_angle)))\n\n if self.desire_direction != (0, 0) and wander_strength != 0:\n self.desire_direction = pg.Vector2(self.desire_direction + random_direction * wander_strength).normalize()\n\n final_desire_direction = self.desire_direction * max_speed\n desire_direction_with_steer = (final_desire_direction - self.velocity) * steer_strength\n acceleration = desire_direction_with_steer if pg.Vector2(desire_direction_with_steer).magnitude() <= steer_strength else pg.Vector2(desire_direction_with_steer.normalize() * steer_strength)\n final_velocity = self.velocity + acceleration * delta_time\n\n self.velocity = final_velocity if pg.Vector2(final_velocity).magnitude() <= max_speed else pg.Vector2(final_velocity.normalize() * max_speed)\n self.position += self.velocity * delta_time\n self.angle = degrees(atan2(self.velocity[1], self.velocity[0]))\n self.image = pg.transform.rotate(self.original_image, -self.angle)\n self.rect = self.image.get_rect(center=self.rect.center)\n self.rect.center = self.position\n\n @staticmethod\n def vint(vector2):\n return int(vector2[0]), int(vector2[1])\n\n\nclass Trail(pg.sprite.Sprite):\n def __init__(self, position, pheromone_type):\n super().__init__()\n self.type = pheromone_type\n self.image = pg.Surface((8, 8))\n self.image.fill(0)\n self.image.set_colorkey(0)\n self.rect = self.image.get_rect(center=position)\n self.strength = 500\n\n def update(self, delta_time):\n self.strength -= ((delta_time / 10) * FPS) * (60/FPS)\n if self.strength < 0:\n return self.kill()\n trail_strength = self.strength / 500\n self.image.fill(0)\n if self.type == 1:\n pg.draw.circle(self.image, [0, 0, 90 * trail_strength + 10], [4, 4], 4)\n\n if self.type == 2:\n pg.draw.circle(self.image, [0, 90 * trail_strength + 10, 0], [4, 4], 4)\n\n\n","repo_name":"RakLord/Ant","sub_path":"ant.py","file_name":"ant.py","file_ext":"py","file_size_in_byte":7612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"22346771175","text":"\"\"\"Unit testing for pysetl.storage.connector module.\"\"\"\nfrom tempfile import TemporaryDirectory\nimport pytest\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType, StructField, StringType\nfrom pyarrow.fs import LocalFileSystem, S3FileSystem, FileType\nfrom pysetl.enums import FileStorage\nfrom pysetl.utils.exceptions import InvalidConfigException, BuilderException\nfrom pysetl.config import (\n FileConfig, AwsCredentials, CsvConfig, JsonConfig, ParquetConfig\n)\nfrom pysetl.storage.connector import (\n FileConnector, CsvConnector, ParquetConnector, JsonConnector,\n ConnectorBuilder\n)\n\n\ndef test_file_connector_s3():\n file_config = FileConfig(\n storage=FileStorage.CSV,\n path=\"s3://my-bucket/data\",\n aws_credentials=AwsCredentials(\n access_key=\"\",\n secret_key=\"\",\n session_token=\"\",\n credentials_provider=\"\"\n )\n )\n s3_file_connector = FileConnector(file_config)\n\n assert isinstance(s3_file_connector.filesystem, S3FileSystem)\n\n\ndef test_file_connector_s3_bad_aws_config():\n file_config = FileConfig(\n storage=FileStorage.CSV,\n path=\"s3://my-bucket/data\"\n )\n s3_file_connector = FileConnector(file_config)\n\n with pytest.raises(InvalidConfigException) as error:\n _ = s3_file_connector.filesystem\n\n assert str(error.value) == \"No S3 credentials provided\"\n\n\ndef test_file_connector_paths():\n config = FileConfig(\n storage=FileStorage.CSV,\n path=\"/ruta/al/archivo\"\n )\n connector = FileConnector(config)\n\n assert connector.absolute_path == \"/ruta/al/archivo\"\n assert connector.base_path == \"/ruta/al\"\n assert connector.uri.scheme == \"\"\n assert not connector.has_wildcard\n\n\ndef test_file_connector_write_on_wildcard():\n # Create a SparkSession\n spark = SparkSession.builder.getOrCreate()\n\n # Create a DataFrame\n columns = [\"language\", \"users_count\"]\n data = [(\"Java\", \"20000\"), (\"Python\", \"100000\"), (\"Scala\", \"3000\")]\n dataframe = spark.sparkContext.parallelize(data).toDF(columns)\n\n config = FileConfig(\n storage=FileStorage.CSV,\n path=\"/ruta/al/archivo/*\"\n )\n connector = FileConnector(config)\n\n with pytest.raises(InvalidConfigException) as error:\n connector.write(dataframe)\n\n assert str(error.value) == \"Can't write to wildcard path\"\n\n\ndef test_file_connector():\n # Create a SparkSession\n spark = SparkSession.builder.getOrCreate()\n\n # Create a DataFrame\n columns = [\"language\", \"users_count\"]\n data = [(\"Java\", \"20000\"), (\"Python\", \"100000\"), (\"Scala\", \"3000\")]\n dataframe = spark.sparkContext.parallelize(data).toDF(columns)\n\n # Create FileConfig pointing to a tempdir\n with TemporaryDirectory() as path:\n config = FileConfig(\n storage=FileStorage.CSV,\n path=path,\n header=\"true\"\n )\n\n # Instantitate file Connector\n file_connector = FileConnector(config)\n local_fs = file_connector.filesystem\n\n assert file_connector\n assert isinstance(local_fs, LocalFileSystem)\n\n # Write dataframe\n file_connector.write(dataframe)\n output_type = local_fs.get_file_info(file_connector.config.path).type\n assert output_type == FileType.Directory\n\n # Read dataframe\n data_from_fs = file_connector.read()\n\n # Compare with original dataframe\n assert dataframe.exceptAll(data_from_fs).count() == 0\n\n # Remove data\n file_connector.drop()\n type_after_drop = local_fs.get_file_info(file_connector.absolute_path)\n assert type_after_drop.type == FileType.NotFound\n\n\ndef test_file_connector_with_partitions():\n # Create a SparkSession\n 
spark = SparkSession.builder.getOrCreate()\n\n # Create a DataFrame\n columns = [\"language\", \"users_count\"]\n data = [(\"Java\", \"20000\"), (\"Python\", \"100000\"), (\"Scala\", \"3000\")]\n dataframe = spark.sparkContext.parallelize(data).toDF(columns)\n\n # Create FileConfig pointing to a tempdir\n with TemporaryDirectory() as path:\n config = FileConfig(\n storage=FileStorage.CSV,\n path=path,\n header=\"true\",\n partition_by=[\"language\"],\n data_schema=StructType([\n StructField(\"language\", StringType(), False),\n StructField(\"users_count\", StringType(), False),\n ])\n )\n\n # Instantitate file Connector\n file_connector = FileConnector(config)\n local_fs = file_connector.filesystem\n\n assert file_connector\n assert isinstance(local_fs, LocalFileSystem)\n\n # Write dataframe\n file_connector.write(dataframe)\n\n # Validate partitions\n partitions = file_connector.list_partitions()\n output_type = local_fs.get_file_info(file_connector.config.path).type\n assert output_type == FileType.Directory\n\n assert all([\n local_fs.get_file_info(p).type == FileType.Directory\n for p\n in partitions\n ])\n assert set([\n p.replace(file_connector.absolute_path, \"\")\n for p\n in partitions\n ]) == set([\"/language=Scala\", \"/language=Java\", \"/language=Python\"])\n\n # Read dataframe\n data_from_fs = file_connector.read_partitions(partitions)\n\n # Compare with original dataframe\n assert (\n dataframe\n .select(\"users_count\", \"language\")\n .exceptAll(data_from_fs)\n .count()\n ) == 0\n\n # Remove data\n file_connector.drop()\n type_after_drop = local_fs.get_file_info(file_connector.absolute_path)\n assert type_after_drop.type == FileType.NotFound\n\n\ndef test_file_connector_bad_partition_config():\n # Create a SparkSession\n spark = SparkSession.builder.getOrCreate()\n\n # Create a DataFrame\n columns = [\"language\", \"users_count\"]\n data = [(\"Java\", \"20000\"), (\"Python\", \"100000\"), (\"Scala\", \"3000\")]\n dataframe = spark.sparkContext.parallelize(data).toDF(columns)\n\n # Create FileConfig pointing to a tempdir\n with TemporaryDirectory() as path:\n config = FileConfig(\n storage=FileStorage.CSV,\n path=path,\n header=\"true\",\n partition_by=[\"non_existent\"]\n )\n\n # Instantitate file Connector\n file_connector = FileConnector(config)\n local_fs = file_connector.filesystem\n\n assert file_connector\n assert isinstance(local_fs, LocalFileSystem)\n\n # Write dataframe\n with pytest.raises(InvalidConfigException) as error:\n file_connector.write(dataframe)\n\n assert str(error.value) == \"Partition columns in configuration not in data\"\n\n\ndef test_csv_connector():\n config = CsvConfig(path=\"/ruta/al/archivo\")\n\n assert CsvConnector(config)\n\n\ndef test_csv_connector_bad_config():\n config = ParquetConfig(path=\"/ruta/al/archivo\")\n\n with pytest.raises(InvalidConfigException) as error:\n _ = CsvConnector(config)\n\n assert str(error.value) == \"Not a CsvConfig for a CsvConnector\"\n\n\ndef test_json_connector():\n config = JsonConfig(path=\"/ruta/al/archivo\")\n\n assert JsonConnector(config)\n\n\ndef test_json_connector_bad_config():\n config = ParquetConfig(path=\"/ruta/al/archivo\")\n\n with pytest.raises(InvalidConfigException) as error:\n _ = JsonConnector(config)\n\n assert str(error.value) == \"Not a JsonConfig for a JsonConnector\"\n\n\ndef test_parquet_connector():\n config = ParquetConfig(path=\"/ruta/al/archivo\")\n\n assert ParquetConnector(config)\n\n\ndef test_parquet_connector_bad_config():\n config = 
CsvConfig(path=\"/ruta/al/archivo\")\n\n with pytest.raises(InvalidConfigException) as error:\n _ = ParquetConnector(config)\n\n assert str(error.value) == \"Not a ParquetConfig for a ParquetConnector\"\n\n\ndef test_connector_builder_csv():\n config = FileConfig(\n storage=FileStorage.CSV,\n path=\"/ruta/al/archivo\",\n header=\"true\"\n )\n\n connector = ConnectorBuilder(config).build().get()\n\n assert isinstance(connector, CsvConnector)\n\n\ndef test_connector_builder_json():\n config = FileConfig(\n storage=FileStorage.JSON,\n path=\"/ruta/al/archivo\"\n )\n\n connector = ConnectorBuilder(config).build().get()\n\n assert isinstance(connector, JsonConnector)\n\n\ndef test_connector_builder_parquet():\n config = FileConfig(\n storage=FileStorage.PARQUET,\n path=\"/ruta/al/archivo\"\n )\n\n connector = ConnectorBuilder(config).build().get()\n\n assert isinstance(connector, ParquetConnector)\n\n\ndef test_connector_builder_not_built():\n config = FileConfig(\n storage=FileStorage.CSV,\n path=\"/ruta/al/archivo\"\n )\n\n with pytest.raises(BuilderException) as error:\n _ = ConnectorBuilder(config).get()\n\n assert str(error.value) == \"Connector is not defined\"\n","repo_name":"JhossePaul/pysetl","sub_path":"tests/test_connector.py","file_name":"test_connector.py","file_ext":"py","file_size_in_byte":8671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"40356913126","text":"hrs = float(input(\"Enter Hours:\"))\r\n#h = float(hrs)\r\nif (hrs<= 0):\r\n quit()\r\nrate = float(input(\"Enter hour rate:\"))\r\n#r = float(rate)\r\nif(hrs>0) and (hrs<=40):\r\n pay = hrs * rate\r\nelse:\r\n hrs_extra = hrs - 40\r\n extra_pay = float(hrs_extra * 1.5*rate)\r\n total_pay = extra_pay + (40*rate)\r\nprint(total_pay)\r\n","repo_name":"nehagopalrao/Testscripts","sub_path":"assignment_3_1.py","file_name":"assignment_3_1.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"45268436179","text":"import datetime\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.core.paginator import Paginator\nfrom django.template.loader import render_to_string\n\nimport json\n\nfrom shop.forms import OrderForm, ReviewForm\nfrom shop.models import Products, ProductImages, ProductReviews, Categories, Customers, Orders, OrderItems\n\nAPP_NAME = 'Giger'\nSHOP_URL = 'http://127.0.0.1:8000'\n\ndef error404Page(request, exception):\n categories = Categories.objects.filter(is_active=True)\n\n context = {\n 'page_title': f'{APP_NAME} | 404',\n 'page_categories': categories,\n }\n return render(request, 'shop/error.html', context=context)\n\ndef homePage(request):\n categories = Categories.objects.filter(is_active=True)\n\n context = {\n 'page_title': f'{APP_NAME} | Home',\n 'page_categories': categories,\n }\n return render(request, 'shop/home.html', context=context)\n\ndef productPage(request, product_slug):\n categories = Categories.objects.filter(is_active=True)\n product = get_object_or_404(\n Products, \n url_slug=product_slug, \n is_active=True, \n category_id__is_active=True\n )\n\n if request.method == 'POST':\n form = ReviewForm(request.POST)\n\n if form.is_valid():\n ProductReviews.objects.create(\n product_id = product,\n name = request.POST.get('name'),\n email = request.POST.get('email'),\n description = request.POST.get('review'),\n rate = request.POST.get('stars'),\n )\n\n images = ProductImages.objects.filter(product_id=product.pk)\n reviews = ProductReviews.objects.filter(product_id=product.pk)\n\n context = {\n 'page_title': f'{APP_NAME} | ' + product.name,\n 'page_categories': categories,\n 'product_object': product,\n 'product_images': images,\n 'product_reviews': reviews.order_by('-creation_date'),\n 'product_reviews_len': len(reviews),\n }\n\n return render(request, 'shop/product.html', context=context)\n\ndef categoryPage(request, category_slug):\n categories = Categories.objects.filter(is_active=True)\n category = get_object_or_404(\n Categories, \n url_slug=category_slug, \n is_active=True\n )\n products = Products.objects.filter(\n category_id=category.pk, \n is_active=True\n )\n\n # Перевірка поля sortBy \n try:\n sort_method = int(request.GET.get('sortBy'))\n except TypeError:\n sort_method = 1\n except ValueError:\n return redirect(category.get_absolute_url())\n \n if sort_method < 1 and sort_method > 5:\n return redirect(category.get_absolute_url())\n \n # Вибір методу сортування обєктів\n if sort_method == 2:\n products = products.order_by('-creation_date')\n elif sort_method == 3:\n products = products.order_by('creation_date')\n elif sort_method == 4:\n products = products.order_by('price')\n elif sort_method == 5:\n products = products.order_by('-price')\n\n\n # Перевірка поля show\n try:\n products_per_page = int(request.GET.get('show'))\n except TypeError:\n products_per_page = 20\n except ValueError:\n return redirect(category.get_absolute_url())\n \n if products_per_page < 0:\n return redirect(category.get_absolute_url())\n if products_per_page == 0:\n products_per_page = len(products)\n\n # Розбиття обєктів на сторінки\n paginate = Paginator(products, products_per_page)\n\n # Перевірка поля page \n try:\n page = int(request.GET.get('page'))\n except TypeError:\n page = 1\n except ValueError:\n return redirect(category.get_absolute_url())\n\n if page > paginate.num_pages or page < 1:\n return redirect(category.get_absolute_url())\n\n\n context = {\n 
'page_title': f'{APP_NAME} | {category.name}',\n 'page_categories': categories,\n 'category_object': category,\n 'category_products': paginate.page(page),\n 'category_products_count': len(products),\n 'paginator': paginate,\n 'paginator_page': page,\n 'paginator_start': products_per_page * (page - 1) + 1,\n 'paginator_end': products_per_page * page,\n }\n\n return render(request, 'shop/category.html', context=context)\n\ndef searchPage(request):\n categories = Categories.objects.filter(is_active=True)\n # Перевірка поля s \n try:\n search_text = str(request.GET.get('s'))\n except TypeError:\n return redirect('home')\n except ValueError:\n return redirect('home')\n \n if len(search_text) > 255 or search_text == 'None':\n return redirect('home')\n\n products = Products.objects.filter(\n name__contains=search_text,\n is_active=True,\n )\n\n print(products)\n\n # Розбиття обєктів на сторінки\n products_per_page = 20\n paginate = Paginator(products, products_per_page)\n\n # Перевірка поля page \n try:\n page = int(request.GET.get('page'))\n except TypeError:\n page = 1\n except ValueError:\n return redirect('home')\n\n if page > paginate.num_pages or page < 1:\n return redirect('home')\n\n context = {\n 'page_title': f'{APP_NAME} | Пошук',\n 'page_categories': categories,\n 'search_text': search_text,\n 'search_products': paginate.page(page),\n 'search_products_count': len(products),\n 'paginator': paginate,\n 'paginator_page': page,\n 'paginator_start': products_per_page * (page - 1) + 1,\n 'paginator_end': products_per_page * page,\n }\n\n return render(request, 'shop/search.html', context=context)\n\ndef getProductApi(request, product_id):\n product = get_object_or_404(\n Products,\n pk=product_id,\n is_active=True, \n category_id__is_active=True\n )\n\n response = {\n 'product': {\n 'id': product.pk,\n 'slug': product.url_slug,\n 'name': product.name,\n 'price': product.price,\n 'image': product.get_product_image.image.url,\n 'availability': product.availability,\n }\n }\n\n return JsonResponse(response)\n\ndef wishlistPage(request):\n categories = Categories.objects.filter(is_active=True)\n\n context = {\n 'page_title': f'{APP_NAME} | Список бажаного',\n 'page_categories': categories,\n }\n return render(request, 'shop/wishlist.html', context=context)\n\ndef cartPage(request):\n categories = Categories.objects.filter(is_active=True)\n\n context = {\n 'page_title': f'{APP_NAME} | Корзина',\n 'page_categories': categories,\n }\n\n return render(request, 'shop/cart.html', context=context)\n\ndef checkOutPage(request):\n categories = Categories.objects.filter(is_active=True)\n\n if request.method == 'POST':\n form = OrderForm(request.POST)\n\n if form.is_valid():\n try:\n order_items = json.loads(request.POST.get('products'))\n except ValueError:\n return HttpResponse(status=500)\n\n customer = Customers(\n name = request.POST.get('name'),\n surname = request.POST.get('surname'),\n address = f\"{request.POST.get('city')} {request.POST.get('street')} {request.POST.get('streetNumber')}\",\n phone = request.POST.get('phoneNubmer')\n )\n customer.save()\n\n order = Orders(\n customer_id = customer,\n notes = '',\n shipping_method = request.POST.get('shipping_method'),\n )\n order.save()\n\n for item in order_items:\n product = Products.objects.get(pk=item.get('id'))\n item_model = OrderItems(\n product_id = product,\n order_id = order,\n name = product.name,\n buying_price = product.price,\n count = item.get('count')\n )\n item_model.save()\n return redirect('checkout_success')\n\n context = {\n 
'page_title': f'{APP_NAME} | Оформити замовлення',\n 'page_categories': categories,\n 'shipping_methods': (\n ('Доставка Нова пошта', 'Замовлення відправляється через нову пошту оплата при отриманні посилки.', 'novaPoshta'),\n ('Доставка Укр пошта', 'Замовлення відправляється через у��р пошту оплата при отриманні посилки.', 'urkPoshta'),\n ('Самовивіз', 'Отримуєте замовлення в одному з нащих магазинів.', 'selfPickUp'),\n ),\n }\n\n return render(request, 'shop/checkout.html', context=context)\n\ndef checkOutSuccessPage(request):\n categories = Categories.objects.filter(is_active=True)\n\n context = {\n 'page_title': f'{APP_NAME} | Гарного дня',\n 'page_categories': categories,\n }\n return render(request, 'shop/checkout_success.html', context=context)\n\ndef getHotlineFeedApi(request):\n categories = Categories.objects.filter(is_active=True)\n products = Products.objects.filter(\n is_active=True, \n category_id__is_active=True\n )\n\n context = {\n 'page_title': f'{APP_NAME} | Hotline Feed',\n 'shop_name': APP_NAME,\n 'shop_url': SHOP_URL,\n 'now_time': datetime.datetime.now(),\n 'categories': categories,\n 'products': products,\n }\n\n return render(request, 'shop/hotline.xml', context=context, content_type='text/xml')","repo_name":"DJmoster/GigerProject","sub_path":"giger/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"74739846696","text":"from styx_msgs.msg import TrafficLight\nimport rospy\nimport numpy as np\nimport tensorflow as tf\n\n\nclass TLClassifier(object):\n def __init__(self, model_path=None):\n \n\n self.graph = self.load_graph(model_path)\n self.sess = tf.Session(graph=self.graph)\n\n def load_graph(self, path):\n graph = tf.Graph()\n with graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(path, 'rb') as fid:\n \n od_graph_def.ParseFromString(fid.read())\n tf.import_graph_def(od_graph_def, name='')\n \n return graph\n \n def get_classification(self, image):\n \"\"\"Determines the color of the traffic light in the image\n Args:\n image (cv::Mat): image containing the traffic light\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n \"\"\"\n \n img_numpy = np.asarray(image[:,:])\n img_expd = np.expand_dims(img_numpy, axis=0)\n with self.graph.as_default():\n \n img_tns = self.graph.get_tensor_by_name('image_tensor:0')\n det_bx = self.graph.get_tensor_by_name('detection_boxes:0')\n det_scr = self.graph.get_tensor_by_name('detection_scores:0')\n det_cls = self.graph.get_tensor_by_name('detection_classes:0')\n num_det = self.graph.get_tensor_by_name('num_detections:0')\n (boxes, scores, classes, num) = self.sess.run([det_bx, det_scr, det_cls, num_det],feed_dict={img_tns: img_expd})\n \n if classes[0][0]==1:\n return TrafficLight.GREEN\n if classes[0][0]==2:\n return TrafficLight.RED\n if classes[0][0]==3:\n return TrafficLight.YELLOW\n if classes[0][0]==4:\n return TrafficLight.UNKNOWN\n \n \n return TrafficLight.UNKNOWN\n\n\n ","repo_name":"chinitaberrio/Capstone_SDC_nanodegree","sub_path":"ros/src/tl_detector/light_classification/tl_classifier.py","file_name":"tl_classifier.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"41305030375","text":"import os\nimport pickle\n\n\nclass FileDB:\n def __init__(self, data_path):\n \"\"\"\n Initialises self.path with the save path of flask application\n \"\"\"\n self.path = os.path.join(os.getcwd(), data_path)\n \n def get_path(self):\n \"\"\"\n Returns save path of flask application relative to container/user\n \"\"\"\n return self.path\n \n def get_data(self, file_name):\n \"\"\"\n Returns data within provided file\n \"\"\"\n folder_name = file_name.split(\"_\", 1)[0].replace(\".pkl\", \"\")\n \n return pickle.load(open(os.path.join(self.path, folder_name, file_name), \"rb\"), encoding=\"bytes\")\n \n def check_file(self, file_name):\n \"\"\"\n Checks if file exists\n \"\"\"\n if \".\" in file_name:\n folder_name = file_name.split(\".\", 1)[0]\n \n else:\n folder_name = file_name.split(\"_\", 1)[0]\n \n return os.path.exists(os.path.join(self.path, folder_name, file_name))\n \n def check_img(self, file_path):\n \"\"\"\n Checks if image file exists\n \"\"\"\n return os.path.exists(os.path.join(self.path, file_path))\n \n def get_file_path(self, file_name):\n \"\"\"\n Returns exact file path of provided file\n \"\"\"\n folder_name = file_name.split(\"_\", 1)[0].replace(\".pkl\", \"\")\n \n if \"checkpoint\" in file_name:\n return os.path.join(self.path, folder_name, \"checkpoint\", file_name)\n \n else:\n return os.path.join(self.path, folder_name, file_name)\n \n def get_dataset(self, file_type):\n \"\"\"\n Returns all files matching file_type\n \"\"\"\n \n if file_type == \"dataset\":\n ext = \".pkl\"\n elif file_type == \"build\":\n ext = \"_build\"\n elif file_type == \"train\":\n ext = \"_train\"\n \n if file_type == \"ckpt\":\n return [\"{}_{}\".format(root.split(\"/\")[-1], name) for root, dirs, files in os.walk(self.path) for name in dirs if \"checkpoint\" in name]\n else:\n return [name for root, dirs, files in os.walk(self.path) for name in files if ext in name]\n \n def delete_data(self, file_name):\n \"\"\"\n Deletes file given file name\n \"\"\"\n folder_name = file_name.split(\"_\", 1)[0].replace(\".pkl\", \"\")\n \n if \"ckpt\" in file_name:\n file_path = os.path.join(self.path, folder_name, \"checkpoint\", file_name)\n \n else:\n file_path = os.path.join(self.path, folder_name, file_name)\n \n if not os.path.exists(file_path):\n return\n \n os.remove(os.path.join(self.path, file_path))\n return file_name\n \n def upload(self, recv_file, file_name):\n \"\"\"\n Uploads file contents given blob and file name\n \"\"\"\n folder_name = file_name.split(\"_\", 1)[0].replace(\".pkl\", \"\")\n \n file_path = os.path.join(self.path, folder_name)\n \n if not os.path.exists(file_path):\n os.makedirs(file_path)\n \n if \".ckpt\" in file_name or \"checkpoint\" in file_name:\n file_path = os.path.join(file_path, \"checkpoint\")\n \n if not os.path.exists(file_path):\n os.makedirs(file_path)\n \n recv_file.save(os.path.join(file_path, file_name))\n","repo_name":"bryanscw/web-app-play2vec","sub_path":"app/flask/database/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"37481983621","text":"import datetime\nimport json\nimport random\nfrom PIL import Image\nfrom io import BytesIO\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom django.http import JsonResponse\n\nfrom app import models\nfrom app.obj.User import User\n\n\ndef get_random_number_str(length):\n \"\"\"\n 生成随机数字字符串\n :param length: 字符串长度\n :return:\n \"\"\"\n num_str = ''.join(str(random.choice(range(10))) for _ in range(length))\n return num_str\n\n\ndef processImg(pic, id):\n im_pic = Image.open(pic)\n w, h = im_pic.size\n if w >= h:\n w_start = (w - h) * 0.618\n box = (w_start, 0, w_start + h, h)\n region = im_pic.crop(box)\n else:\n h_start = (h - w) * 0.618\n box = (0, h_start, w, h_start + w)\n region = im_pic.crop(box)\n # region就是PIL处理后的正方形了\n # 先保存到磁盘io\n pic_io = BytesIO()\n region.save(pic_io, im_pic.format)\n # 再转化为InMemoryUploadedFile数据\n pic_file = InMemoryUploadedFile(\n file=pic_io,\n field_name=None,\n name=id + get_random_number_str(10) + '.' + pic.name.split('.')[-1],\n content_type=pic.content_type,\n size=pic.size,\n charset=None\n )\n return pic_file\n\n\ndef JsonPackage(targetDict):\n return JsonResponse(json.dumps(targetDict), safe=False)\n\n\ndef getUserId(cookie):\n ans = User(cookie).getProfileInfo(whole=False)\n if ans['validation']:\n return {\"validation\": True, \"uid\": ans['id']}\n else:\n return {\"validation\": False, \"mes\": \"invalid user\"}\n\n\ndef payMoney(cookie, amount, password):\n ans = User(cookie).payMoney(amount=amount, password=password)\n return ans\n\n\ndef findInfo(viewId):\n query1 = models.OrderInfo.objects.filter(customerId=viewId)\n query2 = models.GoodsInfo.objects.filter(sellerId=viewId)\n orderList = []\n goodsList = []\n for i in query1:\n if i.removal:\n continue\n data = {\"id\": i.id, \"name\": i.goodsId.name, \"identity\": \"customer\", \"status\": \"\"}\n if i.finished and i.paid:\n data[\"status\"] = \"finished\"\n elif not i.finished and i.paid:\n data[\"status\"] = \"not receive\"\n elif not i.finished and not i.paid:\n data[\"status\"] = \"unpaid\"\n else:\n data[\"status\"] = \"Error\"\n orderList.append(data)\n for i in query2:\n query3 = models.OrderInfo.objects.filter(goodsId=i.id)\n if i.show:\n show = \"\"\n if i.show:\n show = \"show\"\n else:\n show = \"not show\"\n goodsList.append(\n {\"id\": i.id, \"name\": i.name, \"category\": i.categoryID.name, \"status\": show})\n for j in query3:\n if j.removal:\n continue\n data = {\"id\": j.id, \"name\": j.goodsId.name, \"identity\": \"seller\", \"status\": \"\"}\n if j.finished and j.paid:\n data[\"status\"] = \"finished\"\n elif not j.finished and j.paid:\n data[\"status\"] = \"not receive\"\n elif not j.finished and not j.paid:\n data[\"status\"] = \"unpaid\"\n else:\n data[\"status\"] = \"Error\"\n orderList.append(data)\n return goodsList, orderList\n","repo_name":"willyou23/puxiaoyu_back","sub_path":"app/staticFunc.py","file_name":"staticFunc.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"23649960662","text":"# -*- coding: utf-8 -*-\n#########################################################\n# SERVICE : mount.py #\n# mount and database operations for xnas #\n# #\n# I. Helwegen 2020 #\n#########################################################\n\n####################### IMPORTS #########################\nimport logging\nfrom copy import deepcopy\nfrom mounts.devices import devices\nfrom mounts.fstab import fstab\nfrom mounts.mountfs import mountfs\nfrom mounts.zfs import zfs\nfrom mounts.mountpoint import mountpoint\nfrom common.stdin import stdin\nfrom common.xnas_engine import groups\n#########################################################\n\n####################### GLOBALS #########################\nFSTYPES = [\"ext2\", \"ext3\", \"ext4\", \"ntfs\", \"ntfs-3g\", \"fat\", \"vfat\", \"exfat\", \"btrfs\", \"jfs\", \"xfs\", \"iso9660\", \"udf\"]\nFSTYPES_UMASK = [\"ntfs\", \"ntfs-3g\", \"fat\", \"vfat\", \"exfat\", \"iso9660\", \"udf\"]\n#########################################################\n\n###################### FUNCTIONS ########################\n\n#########################################################\n\n#########################################################\n# Class : mount #\n#########################################################\nclass mount(devices, fstab, mountfs, zfs, mountpoint):\n def __init__(self, engine, human = False):\n self.engine = engine\n self.logger = logging.getLogger('xnas.xmount')\n devices.__init__(self, self.logger, human)\n fstab.__init__(self, self.logger)\n mountfs.__init__(self, self.logger)\n zfs.__init__(self, self.logger, False)\n mountpoint.__init__(self, self.logger)\n\n def __del__(self):\n mountpoint.__del__(self)\n zfs.__del__(self)\n mountfs.__del__(self)\n fstab.__del__(self)\n devices.__del__(self)\n\n def inDB(self):\n listentries = []\n # First for generic mounts\n entries = fstab.getEntries(self, FSTYPES)\n for entry in entries:\n listentry = {}\n #Shrink I\n if entry[\"uuid\"]:\n listentry[\"device\"] = entry[\"uuid\"]\n elif entry[\"fsname\"]:\n listentry[\"device\"] = entry[\"fsname\"]\n else:\n listentry[\"device\"] = entry[\"label\"]\n listentry[\"uuid\"] = self.getUuid(entry).upper()\n #copy\n for key, value in entry.items():\n if key == \"options\":\n listentry[key] = \",\".join(value)\n elif key != \"fsname\" and key != \"uuid\" and key != \"label\":\n listentry[key] = value\n dbkey, dbval = self.engine.findInGroup(groups.MOUNTS, 'uuid', listentry[\"uuid\"])\n if dbkey:\n listentry['xmount'] = dbkey\n else:\n listentry['xmount'] = \"-\"\n listentries.append(listentry)\n # Then for ZFS mounts\n entries = zfs.getEntries(self)\n for entry in entries:\n listentry = {}\n listentry[\"device\"] = entry[\"label\"]\n listentry[\"uuid\"] = self.getUuid(entry).upper()\n #copy\n for key, value in entry.items():\n if key == \"options\":\n listentry[key] = \",\".join(value)\n elif key != \"fsname\" and key != \"uuid\" and key != \"label\":\n listentry[key] = value\n dbkey, dbval = self.engine.findInGroup(groups.MOUNTS, 'uuid', entry[\"label\"])\n if dbkey:\n listentry['xmount'] = dbkey\n else:\n listentry['xmount'] = \"-\"\n listentries.append(listentry)\n return listentries\n\n def getMounts(self):\n mymounts = []\n mounts = self.engine.checkGroup(groups.MOUNTS)\n if mounts:\n for key, mount in mounts.items():\n mymount = {}\n mymount['xmount'] = key\n # Check but same for zfs and regular fs-es\n device = self.getDevices(mount['uuid'])\n if device:\n fsnames = []\n for mydevice in device:\n 
fsnames.append(mydevice['fsname'])\n mymount['device'] = fsnames\n mymount['mountpoint'] = device[0]['mountpoint']\n if not mymount['mountpoint']:\n mymount['mountpoint'] = mount['mountpoint']\n mymount['type'] = device[0]['type']\n mymount['size'] = device[0]['size']\n mymount['used'] = device[0]['used']\n mymount['mounted'] = device[0]['mounted']\n if mount['zfs']:\n #mymount['enabled'] = zfs.isEna(self, mount['uuid'])\n mymount['health'] = zfs.getHealth(self, mount['uuid'], device[0]['mounted'])\n else:\n #mymount['enabled'] = fstab.isEna(self, mount['uuid'], device[0]['fsname'], device[0]['label'])\n mymount['health'] = fstab.getHealth(self, mount['uuid'], device[0]['fsname'], device[0]['label'], device[0]['mounted'])\n else:\n mymount['device'] = None\n mymount['mountpoint'] = mount['mountpoint']\n if mount['zfs']:\n entry = zfs.getEntry(self, mount['uuid'])\n else:\n entry = fstab.getEntry(self, mount['uuid'])\n if entry:\n mymount['type'] = entry['type']\n else:\n mymount['type'] = None\n mymount['size'] = None\n mymount['used'] = None\n mymount['mounted'] = False\n mymount['health'] = \"UNAVAIL\"\n #mymount['enabled'] = mount['enabled']\n mymount['referenced'] = self.isReferenced(key, True)\n mymount['method'] = mount['method']\n if mymount:\n mymounts.append(mymount)\n\n return mymounts\n\n def getAvailable(self):\n blkdevices = self.getBlockList(FSTYPES)\n newdevices = []\n for blkdevice in blkdevices:\n if not fstab.findEntry(self, blkdevice):\n newdevices.append(blkdevice)\n # Exclude zfs devices from this list. if they exist, they are in the zpool list\n return newdevices\n\n def getBlock(self):\n typefilter = FSTYPES\n typefilter.append(\"zfs\")\n blkdevices = self.getBlockList(typefilter)\n for blkdevice in blkdevices:\n if blkdevice[\"type\"] == \"zfs\":\n dbkey, dbval = self.engine.findInGroup(groups.MOUNTS, 'uuid', blkdevice[\"label\"])\n else:\n dbkey, dbval = self.engine.findInGroup(groups.MOUNTS, 'uuid', blkdevice['uuid'])\n if dbkey:\n blkdevice['xmount'] = dbkey\n else:\n blkdevice['xmount'] = \"-\"\n return blkdevices\n\n def pop(self, interactive, popArgs):\n addedMounts = []\n # First for generic mounts\n entries = fstab.getEntries(self, FSTYPES)\n for entry in entries:\n uuid = self.getUuid(entry)\n if uuid: # Don't add if uuid cannot be found\n dbkey, dbval = self.engine.findInGroup(groups.MOUNTS, 'uuid', uuid)\n popArg = {}\n if not dbkey:\n if popArgs:\n for arg in popArgs:\n if 'device' in arg:\n if arg['device'] == self.getFsname(uuid):\n popArg = arg\n break\n newMount = self.addToDB(entry, uuid, interactive, popArg)\n if newMount:\n addedMounts.append(newMount)\n # Then for ZFS mounts\n entries = zfs.getEntries(self)\n for entry in entries:\n pool = entry['label']\n if pool: # Don't add if pool cannot be found\n dbkey, dbval = self.engine.findInGroup(groups.MOUNTS, 'uuid', pool)\n popArg = {}\n if not dbkey:\n if popArgs:\n for arg in popArgs:\n if arg['device'] == pool:\n popArg = arg\n break\n newMount = self.addToDB(entry, \"\", interactive, popArg)\n if newMount:\n addedMounts.append(newMount)\n return addedMounts\n\n def getReferenced(self, name):\n # Mounts can only be referenced to shares, so check shares for references\n dbkeys = []\n refs = self.engine.findAllInGroup(groups.SHARES, 'xmount', name)\n for dbkey, dbval in refs.items():\n if 'remotemount' in dbval and 'enabled' in dbval:\n if not dbval['remotemount'] and dbval['enabled']:\n dbkeys.append(dbkey)\n return dbkeys\n\n def isReferenced(self, name, silent = False):\n # Mounts can only be 
referenced to shares, so check shares for references\n ref = self.getReferenced(name)\n if ref and not silent:\n self.logger.warning(\"{} is referenced by {}\".format(name, ref))\n return ref != []\n\n def mnt(self, name, dbItem = True, Zfs = False, mpoint = \"\"):\n retval = False\n isMounted = False\n uuid = name\n isZfs = Zfs\n if dbItem:\n db = self.engine.checkKey(groups.MOUNTS, name)\n if db:\n isZfs = db['zfs']\n uuid = db['uuid']\n retval = True\n else:\n retval = True\n\n if retval:\n retval = False\n if isZfs:\n entry = zfs.getEntry(self, uuid)\n else:\n entry = fstab.getEntry(self, uuid)\n device = self.getDevices(uuid)\n if entry and device:\n isMounted = device[0]['mounted']\n if not isMounted:\n if isZfs:\n if zfs.available(self):\n retval = zfs.mount(self, uuid, self.engine.checkKey(groups.SETTINGS,\"zfsmountrecursive\"))\n else:\n self.logger.error(\"{} is of type zfs, but zfs is not installed\".format(name))\n else:\n if mpoint:\n mp = mpoint\n else:\n mp = entry['mountpoint']\n retval = mountfs.mount(self, mp)\n elif dbItem:\n self.logger.warning(\"{} already mounted\".format(name))\n\n if retval:\n device[0]['mounted'] = True\n self.logger.info(\"{} mounted\".format(name))\n elif not isMounted:\n self.logger.warning(\"{} not mounted\".format(name))\n else: # Already mounted\n retval = True\n return retval\n\n def umnt(self, name, dbItem = True, Zfs = False, mpoint = \"\"):\n retval = False\n isMounted = False\n uuid = name\n isZfs = Zfs\n if dbItem:\n db = self.engine.checkKey(groups.MOUNTS, name)\n if db:\n isZfs = db['zfs']\n uuid = db['uuid']\n retval = True\n else:\n retval = True\n\n if retval:\n retval = False\n if isZfs:\n entry = zfs.getEntry(self, uuid)\n if not entry:\n entry = {}\n entry['mountpoint'] = mountpoint.getMountPoint(self, uuid, True)\n else:\n entry = fstab.getEntry(self, uuid)\n if not entry:\n entry = {}\n entry['mountpoint'] = mountpoint.getMountPoint(self, uuid)\n device = self.getDevices(uuid)\n if 'mountpoint' in entry and device:\n isMounted = device[0]['mounted']\n if isMounted:\n if isZfs:\n if zfs.available(self):\n retval = zfs.unmount(self, uuid)\n else:\n self.logger.error(\"{} is of type zfs, but zfs is not installed\".format(name))\n else:\n if mpoint:\n mp = mpoint\n else:\n mp = entry['mountpoint']\n retval = mountfs.unmount(self, mp)\n elif dbItem:\n self.logger.warning(\"{} is not mounted\".format(name))\n\n if retval:\n device[0]['mounted'] = False\n self.logger.info(\"{} unmounted\".format(name))\n elif isMounted:\n self.logger.warning(\"{} not unmounted\".format(name))\n else:\n retval = True\n return retval\n\n def getMountpoint(self, name):\n retval = \"\"\n db = self.engine.checkKey(groups.MOUNTS, name)\n if db:\n if db['zfs']:\n entry = zfs.getEntry(self, db['uuid'])\n else:\n entry = fstab.getEntry(self, db['uuid'])\n if entry:\n retval = entry['mountpoint']\n return retval\n\n def clr(self, name):\n retval = False\n db = self.engine.checkKey(groups.MOUNTS, name)\n if db:\n if not self.isReferenced(name):\n retval = self.engine.removeFromGroup(groups.MOUNTS, name)\n if retval:\n self.logger.info(\"{} removed from database\".format(name))\n else:\n self.logger.warning(\"{} not removed from database\".format(name))\n return retval\n\n \"\"\"\n def ena(self, name):\n retval = False\n db = self.engine.checkKey(groups.MOUNTS, name)\n if db:\n if db['method'] == \"startup\":\n if db['zfs']:\n retval = zfs.ena(self, db['uuid'])\n else:\n retval = fstab.ena(self, db['uuid'])\n else:\n retval = True\n if db['zfs']:\n if 
zfs.isEna(self, db['uuid']):\n retval = zfs.dis(self, db['uuid'])\n else:\n if fstab.isEna(self, db['uuid']):\n retval = fstab.dis(self, db['uuid'])\n if retval:\n db['enabled'] = True\n self.logger.info(\"{} enabled\".format(name))\n else:\n self.logger.warning(\"{} not enabled\".format(name))\n return retval\n\n def dis(self, name):\n retval = False\n db = self.engine.checkKey(groups.MOUNTS, name)\n if db:\n if db['zfs']:\n if not self.isReferenced(name):\n if zfs.isEna(self, db['uuid']):\n retval = zfs.dis(self, db['uuid'])\n else:\n retval = True\n else:\n if not self.isReferenced(name):\n if fstab.isEna(self, db['uuid']):\n retval = fstab.dis(self, db['uuid'])\n else:\n retval = True\n if retval:\n db['enabled'] = False\n self.logger.info(\"{} disabled\".format(name))\n else:\n self.logger.warning(\"{} not disabled\".format(name))\n return retval\n \"\"\"\n\n def shw(self, name):\n mountData = {}\n db = self.engine.checkKey(groups.MOUNTS, name)\n if db:\n device = self.getDevices(db['uuid'])\n if db['zfs']:\n entry = zfs.getEntry(self, db['uuid'])\n else:\n entry = fstab.getEntry(self, db['uuid'])\n if device:\n fsnames = []\n for mydevice in device:\n fsnames.append(mydevice['fsname'])\n mountData['fsname'] = fsnames\n uuids = []\n for mydevice in device:\n uuids.append(mydevice['uuid'])\n mountData['uuid'] = uuids\n mountData['label'] = device[0]['label']\n mountData['mountpoint'] = device[0]['mountpoint']\n if not mountData['mountpoint']:\n mountData['mountpoint'] = db['mountpoint']\n mountData['type'] = device[0]['type']\n else:\n mountData['fsname'] = None\n mountData['uuid'] = None\n mountData['label'] = None\n mountData['mountpoint'] = db['mountpoint']\n if entry:\n mountData['type'] = entry['type']\n else:\n mountData['type'] = None\n if entry:\n mountData['options'] = fstab.getExtraOptions(self, entry['options'])\n #mountData['auto'] = not 'noauto' in entry['options']\n mountData['rw'] = not 'ro' in entry['options']\n mountData['ssd'] = 'noatime' in entry['options']\n mountData['freq'] = entry['dump']\n mountData['pass'] = entry['pass']\n else:\n mountData['options'] = []\n #mountData['auto'] = False\n mountData['rw'] = False\n mountData['ssd'] = False\n mountData['freq'] = 0\n mountData['pass'] = 0\n mode = self.getMode(mountData['mountpoint'])\n mountData['uacc'] = self.getUacc(mode)\n mountData['sacc'] = self.getSacc(mode)\n mountData['method'] = db['method']\n mountData['idletimeout'] = 0\n mountData['timeout'] = 0\n if not db['zfs']:\n hasito, itoval = fstab.getopt(self, entry['options'], \"x-systemd.idle-timeout\")\n hasto, toval = fstab.getopt(self, entry['options'], \"x-systemd.mount-timeout\")\n if hasto:\n mountData['timeout'] = self.engine.tryInt(toval)\n if db['method'] == \"auto\" and hasito:\n mountData['idletimeout'] = self.engine.tryInt(itoval)\n return mountData\n\n def addFs(self, name):\n retval = True\n db = self.engine.checkKey(groups.MOUNTS, name)\n newEntry = False\n entry = {}\n uuid = \"\"\n isZfs = False\n entryNew = {}\n changed = False\n currentMountpoint = \"\"\n currentLabel = \"\"\n deleteCurrentMountpoint = False\n umask = None\n sacc = \"rw\"\n uacc = \"rw\"\n mode = 0o777\n curmode = 0o777\n method = \"disabled\" #should never occur as settings[\"method\"] is always set\n\n if 'type' in self.engine.settings:\n retval = self.checkType(self.engine.settings['type'])\n if not retval:\n self.logger.error(\"Invalid type entered: {}\".format(self.engine.settings['type']))\n if retval and 'method' in self.engine.settings:\n retval = 
self.engine.checkMethod(self.engine.settings['method'])\n if not retval:\n self.logger.error(\"Invalid method entered: {}\".format(self.engine.settings['method']))\n\n if retval:\n retval, newEntry, entry, uuid, isZfs = self.checkDbEntryExistence(db, name)\n\n # Make Mountpoint\n if retval:\n MPvalid = True\n MPnew = \"\"\n if not newEntry:\n currentMountpoint = entry['mountpoint']\n\n if 'mountpoint' in self.engine.settings:\n MPnew = self.engine.settings['mountpoint']\n MPvalid = not mountpoint.exists(self, MPnew)\n if not MPvalid:\n if mountpoint.mounted(self, MPnew):\n # Check mountpoint is linked to current uuid\n mp = \"\"\n if isZfs:\n if 'label' in self.engine.settings:\n mp = mountpoint.getMountPoint(self, self.engine.settings['label'], True)\n else:\n tempUuid = self.deviceUuid(self.engine.settings)\n if tempUuid:\n mp = mountpoint.getMountPoint(self, tempUuid, False)\n MPvalid = mp == MPnew\n else:\n MPvalid = True\n elif newEntry: # New entry and no mountpoint\n MPvalid = False\n\n if currentMountpoint and MPnew:\n if currentMountpoint != MPnew:\n if MPvalid:\n deleteCurrentMountpoint = True\n else:\n self.logger.info(\"New mountpoint invalid, keep current: {}\".format(currentMountpoint))\n MPnew = currentMountpoint\n MPvalid = True\n else:\n MPvalid = True # current mountpoint is unmounted later\n\n if not MPnew:\n MPnew = currentMountpoint\n if not MPnew: # no mountpoint at all\n MPvalid = False\n\n if not MPvalid:\n label = \"\"\n if 'label' in entry:\n label = entry['label']\n MPnew = mountpoint.make(self, name, backupmountpoint = label)\n\n retval = MPnew is not None\n self.engine.settings['mountpoint'] = MPnew\n\n # Create, check and update entry\n if retval:\n entryNew = deepcopy(entry)\n if isZfs:\n currentLabel = entry['label']\n changed = zfs.makeEntry(self, entryNew, self.engine.settings, name)\n retval = zfs.checkEntry(self, entryNew, newEntry, changed)\n else:\n changed = fstab.makeEntry(self, entryNew, self.engine.settings)\n retval = fstab.checkEntry(self, entryNew, newEntry, changed)\n\n #check mode and other settings\n if retval:\n if not mountpoint.exists(self, entryNew['mountpoint']):\n retval = mountpoint.create(self, entryNew['mountpoint'])\n if retval:\n self.logger.info(\"Created new mountpoint: {}\".format(entryNew['mountpoint']))\n if retval:\n curmode = self.getMode(entryNew['mountpoint'])\n if 'uacc' in self.engine.settings:\n uacc = self.engine.settings['uacc']\n elif newEntry:\n uacc = \"rw\"\n else:\n uacc = self.getUacc(curmode)\n if 'sacc' in self.engine.settings:\n sacc = self.engine.settings['sacc']\n elif newEntry:\n sacc = \"rw\"\n else:\n sacc = self.getSacc(curmode)\n mode = self.setMode(uacc, sacc)\n if entryNew['type'].lower() in FSTYPES_UMASK:\n changed = changed or fstab.setUmaskOption(self, entryNew['options'], self.strMode(self.umask(mode)))\n\n # If changed, unmount\n if changed and retval:\n if isZfs:\n if currentLabel:\n retval = self.umnt(currentLabel, dbItem = False, Zfs = isZfs, mpoint = \"\")\n else:\n if currentMountpoint:\n retval = self.umnt(self.getUuid(entryNew), dbItem = False, Zfs = isZfs, mpoint = currentMountpoint)\n if not retval:\n self.logger.warning(\"Unable to unmount {}\".format(name))\n\n #delete old mountpoint if required\n if retval:\n if deleteCurrentMountpoint:\n retval = mountpoint.delete(self, currentMountpoint)\n if retval:\n self.logger.info(\"Removed old mountpoint: {}\".format(currentMountpoint))\n\n #change mode\n if retval:\n if mode != curmode:\n self.chMode(entryNew['mountpoint'], mode)\n 
self.logger.info(\"Changed mountpoint mode: user {}, superuser {}\".format(uacc, sacc))\n\n #update entry\n if retval:\n if isZfs:\n retval = zfs.updateEntry(self, entryNew, newEntry)\n else:\n retval = fstab.updateEntry(self, entryNew, newEntry)\n\n # Mount if startup method or dynmount\n if retval:\n if 'method' in self.engine.settings:\n method = self.engine.settings['method']\n if (method == \"startup\") or (method == \"dynmount\"):\n if isZfs:\n uuid = entryNew['label']\n else:\n uuid = self.getUuid(entryNew)\n retval = self.mnt(uuid, dbItem = False, Zfs = isZfs, mpoint = entryNew['mountpoint'])\n\n if retval and not isZfs and changed:\n retval = fstab.systemdReload(self, remote = False)\n\n # Add to DB or edit DB\n if retval:\n if db:\n # remove old item from db\n if not self.isReferenced(name):\n retval = self.engine.removeFromGroup(groups.MOUNTS, name)\n #add new item to db\n dbMount = {}\n dbMountItems = {}\n if isZfs:\n dbMountItems['uuid'] = entryNew['label']\n dbMountItems['zfs'] = True\n else:\n dbMountItems['uuid'] = self.getUuid(entryNew)\n dbMountItems['zfs'] = False\n dbMountItems['mountpoint'] = entryNew['mountpoint']\n dbMountItems['method'] = method\n dbMount[name] = dbMountItems\n self.engine.addToGroup(groups.MOUNTS, dbMount)\n self.logger.info(\"{} added/ edited\".format(name))\n else:\n self.logger.warning(\"{} not added/ edited\".format(name))\n\n return retval\n\n def delFs(self, name):\n retval = False\n db = self.engine.checkKey(groups.MOUNTS, name)\n if db:\n if db['zfs']:\n if not self.isReferenced(name):\n retval = self.umnt(name)\n if retval:\n retval = zfs.deletePool(self, db['uuid'])\n self.logger.info(\"As {} is a ZFS pool, it will not be deleted, only removed from database\".format(name))\n else:\n if not self.isReferenced(name):\n retval = self.umnt(name)\n if retval:\n retval = fstab.deleteEntry(self, db['uuid'])\n if retval:\n mountpoint.delete(self, db['mountpoint'])\n self.logger.info(\"Removed mountpoint: {}\".format(db['mountpoint']))\n if retval:\n self.logger.info(\"{} deleted\".format(name))\n self.clr(name) # Remove from DB\n else:\n self.logger.warning(\"{} not deleted\".format(name))\n return retval\n\n def getMntEntry(self, name):\n entry = {}\n db = self.engine.checkKey(groups.MOUNTS, name)\n if db:\n if db['zfs']:\n entry = zfs.getEntry(self, db['uuid'])\n else:\n entry = fstab.getEntry(self, db['uuid'])\n return entry\n\n def getDevicePath(self, name):\n return self.getDevPath(name)\n\n ################## INTERNAL FUNCTIONS ###################\n\n def addToDB(self, entry, uuid, interactive = False, popArg = {}):\n dbMount = {}\n newMount = {}\n addThis = True\n name = \"\"\n\n if popArg:\n if self.engine.checkKey(groups.MOUNTS, popArg['xmount']):\n self.logger.error(\"Name already exists, not added: {}\".format(popArg['xmount']))\n addThis = False\n else:\n name = popArg['xmount']\n elif uuid:\n name = self.engine.generateUniqueName(groups.MOUNTS, entry['mountpoint'], entry['label'], entry['fsname'])\n else:\n name = self.engine.generateUniqueName(groups.MOUNTS, entry['mountpoint'], entry['label'])\n\n if interactive and not popArg:\n addThis = False\n cont = True\n stdinput = stdin(\"\", exitevent = None, mutex = None, displaylater = False, background = False)\n\n print(\"New mount found:\")\n if uuid:\n print(\" device : \", self.getFsname(uuid))\n else:\n print(\" device : \", entry['label'])\n print(\" mountpoint : \", entry['mountpoint'])\n print(\" type : \", entry['type'])\n print(\" Generated name: \", name)\n\n while cont:\n res 
= \"\"\n while not res:\n res = stdinput.inputchar(\"Add this mount (y/n/c)? \")\n if res:\n res = res.lower()[0]\n if res == \"y\":\n addThis = True\n cont = False\n print(\"New mount added: {}\".format(name))\n elif res == \"n\":\n addThis = False\n cont = False\n print(\"New mount skipped: {}\".format(name))\n # text\n elif res == \"c\":\n newname = stdinput.input(\"Enter new name for this mount: \")\n if ord(newname[0]) == 3: # ^C\n self.engine.exitSignal()\n else:\n if not self.engine.valid(newname):\n print(\"Name contains special characters, try again\")\n elif self.engine.checkKey(groups.MOUNTS, newname):\n print(\"Name already exists, try again\")\n else:\n name = newname\n print(\"New mount added: {}\".format(name))\n addThis = True\n cont = False\n elif ord(res) == 3: # ^C\n self.engine.exitSignal()\n else:\n print(\"Invalid response, y = yes, n = no, c = change name\")\n res = \"\"\n del stdinput\n\n if addThis:\n dbMountItems = {}\n if uuid:\n dbMountItems['uuid'] = uuid\n dbMountItems['zfs'] = False\n else:\n dbMountItems['uuid'] = entry['label']\n dbMountItems['zfs'] = True\n if not zfs.available(self):\n self.logger.error(\"{} is of type zfs, but zfs is not installed\".format(name))\n self.logger.info(\"Please install zfs no your distro (if available) and try again\")\n addThis = False\n if addThis:\n dbMountItems['mountpoint'] = entry['mountpoint']\n if \"noauto\" in entry[\"options\"]:\n dbMountItems['method'] = \"disabled\"\n else:\n dbMountItems['method'] = \"startup\"\n dbMount[name] = dbMountItems\n self.engine.addToGroup(groups.MOUNTS, dbMount)\n newMount['xmount'] = name\n if uuid:\n newMount[\"device\"] = self.getFsname(uuid)\n else:\n newMount[\"device\"] = entry['label']\n newMount['mountpoint'] = entry['mountpoint']\n newMount['type'] = entry['type']\n newMount['method'] = dbMountItems['method']\n self.logger.info(\"New mount entry: {}\".format(name))\n\n return newMount\n\n def deviceUuid(self, settings):\n devUuid = \"\"\n if 'uuid' in settings:\n devUuid = settings['uuid']\n else:\n dev = {}\n if 'fsname' in settings:\n dev = self.getDevices(settings['fsname'])\n elif 'label' in settings:\n dev = self.getDevices(settings['label'])\n if dev:\n devUuid = dev[0]['uuid']\n return devUuid\n\n def checkType(self, type):\n return type in FSTYPES\n\n def checkDbEntryExistence(self, db, name):\n retval = True\n newEntry = False\n entry = {}\n uuid = \"\"\n isZfs = False\n\n # Check existence of entry\n if db: # in db\n self.logger.info(\"{} found in database, editing content\".format(name))\n if db['zfs']:\n isZfs = True\n entry = zfs.getEntry(self, db['uuid'])\n uuid = db['uuid']\n else:\n entry = fstab.getEntry(self, db['uuid'])\n if not 'method' in self.engine.settings:\n self.engine.settingsStr(self.engine.settings, 'method', True, db['method'])\n #ignore label, uuid, fsname or type in settings\n if 'label' in self.engine.settings:\n self.logger.info(\"{} in database, ignore label option: {}\".format(name,self.engine.settings['label']))\n del self.engine.settings['label']\n if 'uuid' in self.engine.settings:\n self.logger.info(\"{} in database, ignore uuid option: {}\".format(name,self.engine.settings['uuid']))\n del self.engine.settings['uuid']\n if 'fsname' in self.engine.settings:\n self.logger.info(\"{} in database, ignore fsname option: {}\".format(name,self.engine.settings['fsname']))\n del self.engine.settings['fsname']\n if 'type' in self.engine.settings:\n self.logger.info(\"{} in database, ignore type option: 
{}\".format(name,self.engine.settings['type']))\n del self.engine.settings['type']\n else: # not in db, check uuid, name or label in pool\n # allow usage of name as label if no label set\n if 'label' in self.engine.settings:\n label = self.engine.settings['label']\n else:\n label = name\n if 'uuid' in self.engine.settings: # check in fstab\n entry = fstab.getEntry(self, uuid = self.engine.settings['uuid'])\n if entry:\n uuid = self.getUuid(entry)\n self.logger.info(\"{} not in database, but uuid found, editing content\".format(name))\n elif 'fsname' in self.engine.settings: # check in fstab\n entry = fstab.getEntry(self, fsname = self.engine.settings['fsname'])\n if entry:\n uuid = self.getUuid(entry)\n self.logger.info(\"{} not in database, but fsname found, editing content\".format(name))\n elif label: # check in fstab\n entry = fstab.getEntry(self, label = label)\n if entry:\n uuid = self.getUuid(entry)\n self.logger.info(\"{} not in database, but label found, editing content\".format(name))\n else: # Check in zfs pool\n entry = zfs.getEntry(self, label)\n if entry:\n self.logger.info(\"{} not in database, but found in ZFS pool, editing content\".format(name))\n isZfs = True\n\n # check entry is somewhere else is DB or create new entry\n if entry:\n # Check DB\n if not uuid:\n if isZfs:\n uuid = entry['label']\n else:\n uuid = self.getUuid(entry)\n dbkey, dbval = self.engine.findInGroup(groups.MOUNTS, 'uuid', uuid)\n if dbkey:\n self.logger.warning(\"{} found in database under different mount: {}\".format(name, dbkey))\n retval = False\n if retval:\n newEntry = False\n if not 'method' in self.engine.settings:\n if not \"noauto\" in entry[\"options\"]:\n self.engine.settingsStr(self.engine.settings, 'method', True, 'startup')\n elif \"x-systemd.automount\" in entry[\"options\"]:\n self.engine.settingsStr(self.engine.settings, 'method', True, 'auto')\n else:\n self.engine.settingsStr(self.engine.settings, 'method', True, 'disabled')\n elif self.engine.settings['method'] == \"auto\" and isZfs:\n self.engine.settings['method'] = \"startup\" # no auto for zfs\n else:\n if retval:\n if \"type\" in self.engine.settings:\n if self.engine.settings[\"type\"].lower().find(\"zfs\") >= 0:\n isZfs = True\n if isZfs:\n self.logger.info(\"{} not found, creating new ZFS pool item not allowed\".format(name))\n self.logger.info(\"This must be done via ZFS: e.g. 
'zpool create {}'\".format(name))\n retval = False\n else:\n self.logger.info(\"{} not found, creating new item\".format(name))\n if not 'method' in self.engine.settings:\n self.engine.settingsStr(self.engine.settings, 'method', True, 'startup')\n elif self.engine.settings['method'] == \"auto\" and isZfs:\n self.engine.settings['method'] = \"startup\" # no auto for zfs\n newEntry = True\n\n if isZfs and not zfs.available(self):\n self.logger.error(\"{} is of type zfs, but zfs is not installed\".format(name))\n self.logger.info(\"Please install zfs no your distro (if available) and try again\")\n self.logger.info(\"Common package to install on most distros: '{}'\".format(zfs.installName()))\n retval = False\n\n if retval:\n if 'idletimeout' in self.engine.settings:\n self.engine.settings['idletimeout'] = self.engine.tryInt(self.engine.settings['idletimeout'])\n if 'timeout' in self.engine.settings:\n self.engine.settings['timeout'] = self.engine.tryInt(self.engine.settings['timeout'])\n\n return retval, newEntry, entry, uuid, isZfs\n\n######################### MAIN ##########################\nif __name__ == \"__main__\":\n pass\n","repo_name":"Helly1206/xnas","sub_path":"opt/xnas/mounts/mount.py","file_name":"mount.py","file_ext":"py","file_size_in_byte":38581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"10897278442","text":"# Cell 1 for Monte Carlo on Stocks\n# OPTIONAL CELL\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\n\n# Cell 2 for Monte Carlo on Stocks\n# function to change the returns into new set of returns\ndef return_returns(rows, returns, add):\n temp_returns = []\n if (len(returns)==(rows-1)):\n temp_returns.append(returns[0])\n for i in range(1, len(returns)):\n temp_returns.append(returns[i])\n temp_returns.append(add)\n return temp_returns\n\n\n# function to calculate mean and standard deviation\ndef mean_and_sigma (rows, returns):\n limit = len(returns)\n mu = 0\n for i in range(limit):\n mu += returns[i]\n mu/=rows\n sigma = 0\n for i in range(limit):\n sigma+=((returns[i]-mu)**2)\n sigma/=rows\n sigma = sigma**0.5\n return mu,sigma\n\n# function for next stock price and final return\ndef values(returns, rows, so,timestep):\n mu, sigma = mean_and_sigma(rows,returns)\n e = random.uniform(-2,2)\n return_ = mu/timestep + sigma*e*(timestep**-0.5)\n returns_ = return_returns(rows,returns,return_)\n s = so*(return_ + 1)\n return returns_,s\n \n \n # Cell 3 for Monte Carlo on Stocks\n# plots\ndef plotting(opens,closes,paths,rows,iters, dates, prev_dates):\n \n # plotting historical data\n f = plt.figure()\n f.set_figwidth(20)\n f.set_figheight(10)\n plt.plot(prev_dates, closes, label = 'Closes')\n plt.plot(prev_dates, opens, label = 'Opens')\n plt.title('Historical data', fontweight = 'bold')\n plt.xlabel('Timesteps')\n plt.ylabel('Stock closing price')\n plt.legend()\n plt.show()\n\n # plotting the paths generated and making a list of the price at the end of the time period\n # also making the list for an average plot\n finals = []\n avg_path = []\n f = plt.figure()\n f.set_figwidth(20)\n f.set_figheight(10)\n for i in range(rows+1):\n avg_path.append(0)\n for y in paths:\n plt.plot(dates,y)\n finals.append(y[rows])\n for i in range(rows+1):\n avg_path[i] += y[i]\n for i in range(rows+1):\n avg_path[i] /= iters\n plt.title('Generated paths', fontweight = 'bold')\n plt.xlabel('Timesteps')\n plt.ylabel('Stock closing price')\n plt.show()\n\n #plotting the cumulative average of the final stock prices\n cumm = 0\n xx = []\n yy = []\n for i in range(len(finals)):\n cumm = (cumm*i + finals[i])/(i+1)\n xx.append(i)\n yy.append(cumm)\n f = plt.figure()\n f.set_figwidth(10)\n f.set_figheight(5)\n plt.plot(xx,yy)\n plt.title('Cumulative average of final stock price', fontweight = 'bold')\n plt.xlabel('Trails')\n plt.ylabel('Cumulative average of final stock price')\n plt.show()\n\n # plotting the average path\n f = plt.figure()\n f.set_figwidth(20)\n f.set_figheight(10)\n plt.plot(dates,avg_path)\n plt.title('Estimated path', fontweight = 'bold')\n plt.xlabel('Timesteps')\n plt.ylabel('Stock closing price')\n plt.show()\n\n # finding the maximum and minimum stock price\n maximum = max(finals)\n minimum = min(finals)\n\n # histogram plot for final stock prices\n a = numpy.arange(round(minimum) - 1, round(maximum))\n fig,ax = plt.subplots()\n ax.hist(finals,bins = a)\n plt.title('Histogram of the final stock prices', fontweight = 'bold')\n plt.show()\n \n # returns\n return avg_path[rows], minimum, maximum, avg_path\n\n\n# Cell 4 for Monte Carlo on Stocks\n# number of trading days in a month is being taken as 22\nmonth = 22\n\n# monte carlo body\ndef montecarlo (sheetnum, iters, timestep):\n\n # opening the sheet needed to open\n sheet = wb[sheets[sheetnum]]\n # the number of rows is the number of entries for a month + the heading\n # the columns have contents 
in the order Dates, Series, Open, High, Low, Prev. Close, LTP, Close, VWAP, 52W H, 52W L, Volume, Value, No. of Trades\n rows = month*timestep + 1\n\n # getting the next trading dates to happen for a month\n dates = []\n counter = 0\n date_temp = sheet.cell(rows,1).value\n daychange = datetime.timedelta(days=1/timestep)\n dates.append(date_temp)\n while True:\n date_temp = date_temp + daychange\n if date_temp.isoweekday() < 6:\n dates.append(date_temp)\n counter+=1\n if counter == month*timestep:\n break\n\n # getting open prices, close prices and dates\n opens = []\n closes = []\n prev_dates = []\n for i in range(2, rows + 1):\n opens.append(sheet.cell(i,3).value)\n closes.append(sheet.cell(i,8).value)\n prev_dates.append(sheet.cell(i,1).value)\n\n # monte carlo simulation\n paths = []\n returns_ = []\n for i in range(rows-2):\n returns_.append((closes[i+1]/closes[i])-1)\n for i in range(iters):\n path = []\n random.seed()\n returns = returns_\n path.append(closes[rows-2])\n for j in range(rows-1):\n returns, s = values(returns,rows-1, path[j], timestep)\n path.append(s)\n paths.append(path)\n\n # calling the plot function\n estimate, min, max, avg_path = plotting(opens,closes,paths, rows-1, iters, dates, prev_dates)\n\n # final print statements\n print(f'The estimated price of stock at the end of month is', end = \" \")\n print(\"%.2f\"%estimate + \" INR.\")\n print(f'The maximum estimated price of the stock at the end of month is', end = \" \")\n print(\"%.2f\"%max + \" INR.\")\n print(f'The minimum estimated price of the stock at the end of month is', end = \" \")\n print(\"%.2f\"%min + \" INR.\")\n\n\n # return statement\n paths.append(avg_path)\n paths.append(dates)\n return paths\n\n\n# Cell 5 for Monte Carlo on Stocks\n# required imports\nimport openpyxl as op\nimport datetime\nimport random\nimport matplotlib.pyplot as plt\nimport numpy\n\n# opening workbook\nwb = op.load_workbook(input(\"Enter path of the file: \"))\nsheets = wb.sheetnames\nwb_report = op.Workbook()\n\n# conditions\niters = int(input(\"Enter number of trials to be done: \"))\ntimestep = int(input(\"Enter number of times the note of stock price is taken in a day: \"))\n\n# Monte Carlo simulation\npaths = montecarlo(0, iters, timestep)\n\n# saving in a new spreadsheet\nname = \" Report\"\nsheet_name = sheets[0] + name\nwb_report['Sheet'].title=sheet_name\nsheet = wb_report.active\nheadings = []\nc = \"Trial \"\nfor i in range(iters,0,-1):\n x = c + str(i)\n headings.append(x)\nheadings.append(\"Average path\")\nheadings.append(\"Dates\")\nentries = 22*timestep + 1\ncounter = 0\nfor i in range(iters+1, -1, -1):\n counter += 1\n sheet.cell(row=1, column = counter).value=headings[i]\n for j in range(entries):\n sheet.cell(row = j+2, column = counter).value=paths[i][j]\nname = input(\"Enter the path to store the data: \")\nwb_report.save(name)\n","repo_name":"phoenix-feathers/whitepaper","sub_path":"monte_carlo_on_stock.py","file_name":"monte_carlo_on_stock.py","file_ext":"py","file_size_in_byte":6355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
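The step rule in values() above is return = mu/timestep + sigma·e/√timestep with e drawn from uniform(-2, 2), and prices advance as s = s_prev·(1 + return). A vectorized numpy sketch of the same update; note it substitutes a standard normal draw for the uniform shock, which changes the distribution of the noise:

```python
# Vectorized form of the per-step update used above; illustrative only.
# Assumption: e ~ standard normal here, whereas the record draws uniform(-2, 2).
import numpy as np

rng = np.random.default_rng(0)

def simulate_paths(s0, mu, sigma, steps, n_paths, timestep=1):
    e = rng.standard_normal((n_paths, steps))
    step_returns = mu / timestep + sigma * e / np.sqrt(timestep)
    # s_k = s_{k-1} * (1 + r_k), expressed as a cumulative product along time
    return s0 * np.cumprod(1.0 + step_returns, axis=1)

paths = simulate_paths(s0=100.0, mu=0.001, sigma=0.02, steps=22, n_paths=1000)
print(round(paths[:, -1].mean(), 2))  # average simulated end-of-month price
```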
+{"seq_id":"40690973384","text":"import numpy as np\n#import calwxt_ramer\nimport calpreciptype\n\nprint(calpreciptype.calwxt_ramer.__doc__)\n#python -m numpy.f2py -c calwxt_ramer.f90 -m calwxt_ramer\n#python -m numpy.f2py -c calpreciptype.f90 -m calpreciptype\n\n# constants\na = 17.271\nb = 237.7 # degC\n \ndef dewpoint_approximation(T,RH):\n Td = (b * gamma(T,RH)) / (a - gamma(T,RH))\n return Td\n\ndef gamma(T,RH):\n g = (a * T / (b + T)) + np.log(RH/100.0)\n return g\n\nf = open('CRP_sounding.csv', 'r')\ndata = np.genfromtxt(f, delimiter=',')\n\npmid = data[:,0] * 100.\nt = data[:,2] + 273.15\nq = data[:,2] + 273.15\nrh = data[:,4] / 100.\ntd = dewpoint_approximation(t-273.15,rh*100.)+273.15\npint = np.hstack((pmid, 0.))\n\n\nptype = calpreciptype.calwxt_ramer(t,q,pmid,rh,td,pint)\n\nprint(ptype)","repo_name":"zarzycki/ncl-zarzycki","sub_path":"projects/freezing/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"30950569550","text":"class Solution(object):\n def pivotArray(self, nums, pivot):\n \"\"\"\n :type nums: List[int]\n :type pivot: int\n :rtype: List[int]\n \"\"\"\n\n l1 = []\n l2 = []\n c=0\n\n for i in nums:\n if i < pivot:\n l1.append(i)\n if i > pivot:\n l2.append(i)\n if i == pivot:\n c+=1\n\n for i in range(c):\n l1.append(pivot)\n\n for i in l2:\n l1.append(i)\n\n\n print(l1)\n\ns = Solution()\ns.pivotArray([9,12,5,10,14,3,10],10)","repo_name":"AaroneGeorge/LEETCODE","sub_path":"two pointers/2161/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18114255389","text":"#coding:utf-8\n#1_4_D\ndef is_carriable(w_lim, trucks, loads):\n \"\"\" Check whether w_lim(maximum carrying capacity) is possible or not \"\"\"\n i = 0\n while trucks > 0:\n w = 0\n while i < len(loads):\n if w + loads[i] > w_lim:\n trucks -= 1\n break\n w += loads[i]\n i += 1\n if i == len(loads):\n return True\n return False\n\nn, trucks = map(int, input().split())\nloads = [int(input()) for i in range(n)]\n\nleft = max(loads)\nright = sum(loads) + 1\n\nwhile right - left > 1:\n mid = (left + right) // 2\n if is_carriable(mid, trucks, loads):\n ans = mid\n right = mid\n else:\n left = mid\n\nif is_carriable(max(loads), trucks, loads):\n ans = max(loads)\n\nprint(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02270/s993576533.py","file_name":"s993576533.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18358399789","text":"from collections import deque\n\ndef dfs(edge, s):\n used = {s}\n que = deque([s])\n while que:\n v = que.pop()\n for u in edge[v]:\n if u in used:\n continue\n used.add(u)\n que.append(u)\n return used\n\ndef bellmanFord(edges):\n coins = [float('inf')] * N\n coins[0] = 0\n for _ in range(len(U)):\n f = True\n for u, v, c in edges:\n if coins[u] + c < coins[v]:\n coins[v] = coins[u] + c\n f = False\n if f:\n return max(0, -coins[-1])\n return -1\n\nN, M, P = map(int, input().split())\nA = [[] for _ in range(N)]\nA_rev = [[] for _ in range(N)]\nE = [tuple()] * M\nfor i in range(M):\n a, b, c = map(int, input().split())\n a -= 1\n b -= 1\n c = P - c\n A[a].append(b)\n A_rev[b].append(a)\n E[i] = (a, b, c)\nU = dfs(A, 0) & dfs(A_rev, N-1)\nF = [(a, b, c) for (a, b, c) in E if a in U and b in U]\nprint(bellmanFord(F))","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02949/s450348795.py","file_name":"s450348795.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"9512420717","text":"INPUT = \"\"\"Game 1: 3 blue, 4 red; 1 red, 2 green, 6 blue; 2 green\nGame 2: 1 blue, 2 green; 3 green, 4 blue, 1 red; 1 green, 1 blue\nGame 3: 8 green, 6 blue, 20 red; 5 blue, 4 red, 13 green; 5 green, 1 red\nGame 4: 1 green, 3 red, 6 blue; 3 green, 6 red; 3 green, 15 blue, 14 red\nGame 5: 6 red, 1 blue, 3 green; 2 blue, 1 red, 2 green\"\"\"\n\n\ndef parse_input():\n results = []\n\n for line in INPUT.split(\"\\n\"):\n seen_balls = {}\n line = line.split(\": \")[1]\n draws = line.split(\"; \")\n for draw in draws:\n ball_colors = draw.split(', ')\n for ball_color in ball_colors:\n count, color = ball_color.split(' ')\n if seen_balls.get(color) is None:\n seen_balls[color] = int(count)\n elif seen_balls[color] < int(count):\n seen_balls[color] = int(count)\n\n results.append(seen_balls)\n return results\n\n\ndef possible_game_sums(game_results, content):\n sum = 0\n\n for i, game_result in enumerate(game_results):\n print(game_result)\n possible = True\n for color in game_result.keys():\n if content.get(color) is None or content[color] < game_result[color]:\n possible = False\n break\n if possible:\n sum += i+1\n\n return sum\n\n\ndef power_of_minimum(game_results):\n powers = []\n\n for game_result in game_results:\n power = 1\n for color in game_result.keys():\n power *= game_result[color]\n powers.append(power)\n\n return powers\n\n\nif __name__ == '__main__':\n game_results = parse_input()\n content = {'red': 12, 'green': 13, 'blue': 14}\n\n # print(possible_game_sums(game_results, content))\n print(power_of_minimum(game_results))\n print(sum(power_of_minimum(game_results)))\n","repo_name":"iptch/2023-advent-of-code","sub_path":"DHE/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
+{"seq_id":"27362106854","text":"from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom app.keyboards.callback_datas import new_follow_callback, choise_mark_callback, choise_price_callback, unfollow_callback\n\nfrom bot import db\n\n# Клавиатура новой подписки и отписки,я просто хз как это сделать лучше\ndef new_follow(flag = False,id_follow = 0):\n new_follow_key = InlineKeyboardMarkup(row_width=1)\n if flag:\n unfollow_button = InlineKeyboardButton(text=\"Отписаться\",\n callback_data=unfollow_callback.new(id_follow=id_follow))\n new_follow_key.insert(unfollow_button)\n new_follow_key.insert(InlineKeyboardButton(text=\"Новая подписка\", callback_data=new_follow_callback.new(clown=12)))\n new_follow_key.insert(InlineKeyboardButton(text=\"Назад\", callback_data=\"cancel\"))\n return new_follow_key\n\n# Клавиатура выбора нужной марки\ndef choise_mark_but():\n choise_mark = InlineKeyboardMarkup(row_width=3)\n marks = db.show_marks()\n for mark in marks:\n mark_button = InlineKeyboardButton(text=str(mark[1]), callback_data=choise_mark_callback.new(mark_id=mark[0]))\n choise_mark.insert(mark_button)\n return choise_mark\n\n# Клавиатура выбора нужной цены\ndef choise_price_but(mark_id):\n choise_price = InlineKeyboardMarkup(row_width=3)\n prices = db.show_prices()\n prices_radius = [500000, 1000000, \"больш��\"]\n print(prices)\n for i in range(3):\n price_button = InlineKeyboardButton(text=str(f\"{prices[i][1]}-{prices_radius[i]}\"), callback_data=choise_price_callback.new(mark_id=mark_id, price_id=prices[i][0]))\n choise_price.insert(price_button)\n return choise_price\n","repo_name":"Murolando/AutoAvitoBot","sub_path":"app/keyboards/new_follow_buttons.py","file_name":"new_follow_buttons.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"90"}
+{"seq_id":"73844570536","text":"test_file = '15_test.txt'\nreal_file = '15.txt'\n\nimport parse\nfrom tqdm import tqdm\n\n\nclass Range():\n\tdef __init__(self, start=None, stop=None):\n\t\tif start is None:\n\t\t\tif stop is None:\n\t\t\t\tself.slices = []\n\t\t\telse:\n\t\t\t\tself.slices = [(start, start)]\n\t\telse:\n\t\t\tself.slices = [(start, stop)]\n\n\tdef __len__(self):\n\t\treturn sum(j-i+1 for i,j in self.slices)\n\n\tdef __contains__(self, item):\n\t\treturn any(self.slices_interect(s, (item, item)) for s in self.slices)\n\n\tdef add_slice(self, start, stop):\n\t\tmerged_slices = [(start, stop)]\n\t\tunmerged_slices = []\n\t\tfor s in self.slices:\n\t\t\tif self.slices_interect(s, (start, stop)):\n\t\t\t\tmerged_slices.append(s)\n\t\t\telse:\n\t\t\t\tunmerged_slices.append(s)\n\t\tself.slices = unmerged_slices + [self.merge_slices(merged_slices)]\n\n\tdef merge_slices(self, slices):\n\t\ti = min(s[0] for s in slices)\n\t\tj = max(s[1] for s in slices)\n\t\treturn (i,j)\n\n\tdef slices_interect(self, s1, s2):\n\t\ti1, j1 = s1\n\t\ti2, j2 = s2\n\t\treturn i1-1<=i2<=j1+1 or i1-1<=j2<=j1+1 or i2-1<=i1<=j2+1 or i2-1<=j1<=j2+1 \n\n\n\ndef distance(p1, p2):\n\tx1, y1 = p1\n\tx2, y2 = p2\n\treturn abs(x1-x2) + abs(y1-y2)\n\n\ndef parse_line(line):\n\tfmt = \"Sensor at x={:d}, y={:d}: closest beacon is at x={:d}, y={:d}\"\n\tsx, sy, bx, by = parse.parse(fmt, line)\n\treturn (sx, sy), (bx, by)\n\n\ndef solve(real=True):\n\tif real:\n\t\ty = 2000000\n\t\tfile = real_file\n\telse:\n\t\ty = 10\n\t\tfile = test_file\n\n\tbeacons = set()\n\tno_beacon = set()\n\tx_range = Range()\n\twith open(file, 'r') as f:\n\t\tfor line in f.readlines():\n\t\t\ts, b = parse_line(line.strip())\n\t\t\tbeacons.add(b)\n\n\t\t\tsx, sy = s\n\n\t\t\trow_range = distance(s, b) - abs(sy - y)\n\t\t\tif row_range > 0:\n\t\t\t\tx_range.add_slice(sx-row_range, sx+row_range)\n\n\tcount = len(x_range)\n\n\tfor bx, by in beacons:\n\t\tif by == y and bx in x_range:\n\t\t\tcount -= 1\n\n\treturn count\n\n\ndef solve2(real=True):\n\tif real:\n\t\tys = range(4000000+1)\n\t\tfile = real_file\n\telse:\n\t\tys = range(20+1)\n\t\tfile = test_file\n\n\tsensors = []\n\tdistances = []\n\twith open(file, 'r') as f:\n\t\tfor line in f.readlines():\n\t\t\ts, b = parse_line(line.strip())\n\t\t\td = distance(s, b)\n\t\t\tsensors.append(s)\n\t\t\tdistances.append(d)\n\n\n\tfor y in tqdm(ys):\n\t\tx_range = Range()\n\n\t\tfor s, d in zip(sensors, distances):\n\t\t\tsx, sy = s\n\t\t\trow_range = d - abs(sy - y)\n\t\t\tif row_range > 0:\n\t\t\t\tx_range.add_slice(sx-row_range, sx+row_range)\n\t\t\t\t\n\t\tif len(x_range.slices) == 2:\n\t\t\tx = x_range.slices[0][1]+1\n\t\t\treturn 4000000*x + y\n\treturn\n\n\nif __name__ == \"__main__\":\n\tprint(solve())\n\tprint(solve2())","repo_name":"iyevenko/AdventOfCode","sub_path":"2022/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"36729639118","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom sklearn.metrics import f1_score, accuracy_score, classification_report\nfrom GAT import GAT\nimport gat_parameters as mp\n# from graphs.stock_graph_v1 import StockNetworkNodeLabels\nfrom stock_graph_v2mst import StockNetworkNodeLabels\nfrom dgl.dataloading import GraphDataLoader\ntorch.manual_seed(0)\n\n\ndef test_network(feats, model, subgraph, labels, multilabel_loss):\n with torch.no_grad():\n model.eval()\n model.g = subgraph\n for layer in model.gat_layers:\n layer.g = subgraph\n output = model(feats.float())\n loss_data = multilabel_loss(output, labels.float())\n predict = np.argmax(output.data.cpu().numpy(), axis=1)\n true_labels = np.argmax(labels.data.cpu().numpy(), axis=1)\n f1_val = f1_score(true_labels, predict, average='macro')\n accuracy = accuracy_score(true_labels,predict)\n return f1_val, loss_data.item(), accuracy, predict, true_labels\n\n\ndef test_model(g, test_dataloader, num_feats, num_labels, device, attention_heads, model_load_path):\n multilabel_loss = torch.nn.BCEWithLogitsLoss()\n test_f1_list, test_acc_list = [], []\n y_preds_list, y_true_list = [], []\n for batch, subgraph in enumerate(test_dataloader):\n subgraph = subgraph.to(device)\n model = GAT(g, mp.NUM_LAYERS, num_feats, mp.HIDDEN_UNITS, num_labels, attention_heads, F.elu, mp.IN_DROP,\n mp.ATTENION_DROP, mp.LEAKY_ALPHA, mp.RESIDUAL).to(device)\n model.load_state_dict(torch.load(model_load_path))\n f1_val, test_loss, test_acc, y_preds, y_true = test_network(subgraph.ndata['feat'], model, subgraph, subgraph.ndata['label'], multilabel_loss)\n test_f1_list.append(f1_val)\n test_acc_list.append(test_acc)\n y_preds_list.append(y_preds.tolist())\n y_true_list.append(y_true.tolist())\n\n print(f\"Test Set F-1 Score {np.array(test_f1_list).mean()} Accuracy {np.array(test_acc_list).mean()}\")\n y_preds_ary = np.hstack(y_preds_list)\n y_true_ary = np.hstack(y_true_list)\n class_names = ['Sell', 'Hold', 'Buy']\n print(classification_report(y_true_ary.flatten(), y_preds_ary.flatten(), target_names=class_names))\n\n\nif __name__ == '__main__':\n\n save_path = \"./data/dgl_graphs/multifeature_mst/\"\n\n device = torch.device(\"cpu\")\n\n model_save_path = f\"./saved_models/stock_gat_v1_ep_{mp.EPOCHS}_lr_{mp.L_R}_batch_{mp.BATCH_SIZE}.pt\"\n\n train_dataset = StockNetworkNodeLabels(mode='train', save_path=save_path)\n valid_dataset = StockNetworkNodeLabels(mode='valid', save_path=save_path)\n test_dataset = StockNetworkNodeLabels(mode='test', save_path=save_path)\n train_dataloader = GraphDataLoader(train_dataset, batch_size=mp.BATCH_SIZE)\n valid_dataloader = GraphDataLoader(valid_dataset, batch_size=mp.BATCH_SIZE)\n test_dataloader = GraphDataLoader(test_dataset, batch_size=mp.BATCH_SIZE)\n g = train_dataset[0]\n num_labels = train_dataset.num_labels\n num_feats = g.ndata['feat'].shape[1]\n\n\n attention_heads = ([mp.ATTENTION_HEADS] * mp.NUM_LAYERS) + [mp.NUM_OUT_HEADS]\n\n test_model(g, test_dataloader, num_feats, num_labels, device, attention_heads, model_save_path)\n","repo_name":"sujit-khanna/StockGAT","sub_path":"test_stock_gat.py","file_name":"test_stock_gat.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"40150598044","text":"\"\"\"\nAUTEUR : Théo Hurlimann\n LIEU : CFPT Informatique Genève\n DATE : avril 2022\n PROJET: ARCHIVER\n VERSION : 1.0\n FICHIER : Archive.py: \n - Possède les méthodes pour créer une archive et la gérer.\n\"\"\"\nfrom typing import Optional\nimport src.Constants as Constants\nfrom src.FolderImport import FolderImport\nfrom src.FolderName import FolderName\nfrom src.Utilities import Utilities\nfrom src.Resource import Resource\nfrom src.Metadata import Metadata\nfrom src.Type import Type\nfrom src.Bdd import Bdd\nfrom src import myConfig\nimport src.Archive as Archive\nimport pickle \nimport stat\nfrom os import makedirs, chmod, path, remove\nimport shutil, os, py7zr, datetime, orjson\nfrom progress.bar import Bar\n\nclass Archive :\n def __init__(self, name : str) -> None:\n \"\"\"Constructeur de la classe Archive\n Parameters:\n name (String): le nom d'archive\n \"\"\"\n \n \n self.compressFiles = []\n self.name = name\n self.pathArchive = Utilities.getPathOfArchive(name)\n if path.exists(self.pathArchive): \n self.myDb = Bdd(self.pathArchive)\n else:\n self.createDirectoryArchive(self.pathArchive)\n self.myDb = Bdd(self.pathArchive)\n \n #if not p.endswith(\"/\"):\n # self.pathArchive = p+\"/\"\n pass\n \n def __add__(self, archive : Archive) -> Archive:\n return self.mergeTwoArchive(archive)\n\n def createDirectoryArchive(self, path :str) -> bool:\n \"\"\"Permet de Créer le dossier de l'archive\n\n Parameters:\n path (String): chemin de l'archive\n\n Returns:\n bool: True si le dossier a été créé\n bool: False si le n'a dossier pas été créé\n \"\"\"\n try :\n makedirs(path)\n return True\n except Exception as e:\n print(e)\n return False\n\n def archiveFolder(self, FolderImport : FolderImport) -> bool:\n \"\"\"Permet d'archiver un dossier\n\n Parameters:\n FolderImport (FolderImport): dossier à archiver\n\n Returns:\n bool: True si le dossier a été créé\n bool: False si le n'a dossier pas été créé\n \"\"\"\n try:\n bar = Bar('Processing', max=FolderImport.nbResource, suffix='%(index).0f/%(max).0f fichiers archivés') \n for Resource in FolderImport.resources:\n self.compressFiles.append(self.archiveResource(Resource))\n bar.next()\n pass\n bar.finish()\n return True\n except Exception as e:\n print(e)\n return False\n\n def writeMetadataInJson(self, r : Resource, newArchive : Archive= None) -> None:\n \"\"\"Permet d'écrire les métadonnées dans un fichier json\n\n Parameters:\n r (Resource): Resource contenant la métadonnées à écrire\n newArchive (Archive): archive où écrire les métadonnées\n\n Returns:\n \n \"\"\"\n \n m = r.universalMetadata\n if newArchive is None:\n pathOfJson = self.pathArchive + \"/\"+ m.sha1[0:2] +\"/\"+ m.sha1 + \".json\"\n else:\n pathOfJson = newArchive.pathArchive + \"/\"+ m.sha1[0:2] +\"/\"+ m.sha1 + \".json\"\n \n if not path.exists(pathOfJson):\n f= open(pathOfJson,\"w+\")\n f.write(\"[\")\n f.write(m.toJson(r.type))\n f.write(\"]\")\n f.close()\n else:\n arrayM = []\n with open(pathOfJson, \"r\") as f:\n data = orjson.loads(f.read())\n for d in data:\n arrayMT = []\n arrayMT.append(Metadata.jsonDecoder(d))\n arrayMT.append(Type(-1, d[\"typeName\"]))\n arrayM.append(arrayMT)\n arrayM.append([m, r.type])\n f = open(pathOfJson,\"w+\")\n f.write(\"[\")\n for m, t in arrayM:\n f.write(m.toJson(t))\n \n # if it is not the last metadata\n if m != arrayM[-1][0]:\n f.write(\",\")\n \n f.write(\"]\")\n f.close()\n \n def rebuildDbWithJson(self) -> None:\n \"\"\"Permet de reconstruire la base de données à partir des fichiers 
json\n\n Parameters:\n \n\n Returns:\n None\n \"\"\"\n filesJson = []\n #If exist, delete the old database\n if path.exists(self.pathArchive+\"/archiver.db\"):\n remove(self.pathArchive+\"/archiver.db\")\n self.myDb = Bdd(self.pathArchive)\n \n \n for (root, dirs, files) in os.walk(self.pathArchive):\n for file in files:\n if file.endswith(\".json\"): \n filesJson.append(root+\"/\"+file)\n\n bar = Bar('Reconstruction de la base de données :', max=len(filesJson), suffix='%(percent)d%%') \n for file in filesJson:\n with open(file, \"r\") as f:\n data = orjson.loads(f.read())\n for d in data:\n r = Resource(d[\"sha1\"], True, Metadata.jsonDecoder(d), Type(-1, d[\"typeName\"]))\n self.archiveResource(r,True)\n bar.next()\n pass\n bar.finish()\n \n\n def archiveResource(self, resource : Resource, rebuild : Optional[bool] = False) -> bool:\n \"\"\"Permet d'archiver une ressource\n\n Parameters:\n resource (Resource): ressource à archiver\n\n Returns:\n bool: True si la ressource a été créé\n \n \"\"\"\n #print(\"On commence l'archive de \"+resource.path)\n #self.verifTemp()\n content = None\n e = self.myDb.addExtension(resource.extension)\n fn = self.myDb.addFileName(resource.fileName)\n t = self.myDb.addType(resource.type)\n pn = self.myDb.addPathName(resource.pathName)\n\n resource.universalMetadata.extension = e\n resource.universalMetadata.fileName = fn\n resource.universalMetadata.pathName = pn\n resource.type = t\n\n if not self.myDb.resourceExist(resource): \n if not rebuild:\n resource.universalMetadata.sizeCompressed = self.compressResource(resource)\n \n self.myDb.addResource(resource,True)\n \n folders = self.myDb.addFoldersWithPathName(resource.universalMetadata.pathName)\n\n if not self.myDb.metadataExist(resource.universalMetadata):\n resource.universalMetadata = self.myDb.addMetadata(resource.universalMetadata)\n self.myDb.addFullTextMetadatas(resource,folders[-1])\n if not rebuild:\n self.writeMetadataInJson(resource)\n else:\n resource.universalMetadata = self.myDb.getMetadataByAllFields(resource.universalMetadata)\n \n \n self.myDb.addFolderMetadata(folders[-1],resource.universalMetadata)\n for folder in folders:\n self.myDb.addFolderPathName(folder,resource.universalMetadata.pathName)\n\n \n return True\n\n def createDirectoryResource(self, resource : Resource) -> str:\n \"\"\"Créer le dossier pour la ressource dans l'archive\n\n Parameters:\n resource (Resource): ressource où on a besoin de créer son dossier\n\n Returns:\n String: chemin du dossier de la ressource\n \"\"\"\n pathResource = Archive.createPathRessourceInArchive(self.pathArchive, resource) #self.pathArchive+\"/\"+Resource.sha1[0:2]\n if not path.exists(pathResource):\n makedirs(pathResource)\n return pathResource\n\n def compressResource(self, Resource : Resource) -> bool:\n \"\"\"Permet de compresser une ressource directement dans l'archive\n\n Parameters:\n Resource (Resource): ressource à compresser\n\n Returns:\n bool: True si la ressource a été créé\n \n \"\"\"\n self.pathTmp = self.pathArchive+\"/temp/\"\n\n # Pour ne pas perdre le fichier\n #shutil.copy(Resource.path,self.pathTmp+Resource.sha1+Resource.extension.value)\n \n dirWork = myConfig.getOption(Constants.NAME_OF_SECTION_ARCHIVE, Constants.NAME_OF_DEFAULT_ARCHIVE_DIR)\n dirResource = self.createDirectoryResource(Resource)\n newPath = dirResource+\"/\"+Resource.sha1+myConfig.getOption(Constants.NAME_OF_SECTION_ARCHIVE, Constants.NAME_OF_DEFAULT_EXTENSION_COMPRESSION)\n os.chdir(os.path.dirname(Resource.path))\n \n with py7zr.SevenZipFile(newPath, 
'w') as archive:\n archive.write(Resource.fileName.value+Resource.extension.value)\n pass\n \n # Get the size of the compressed file\n size = os.path.getsize(newPath)\n\n os.chdir(dirWork)\n return size\n\n def extractResources(self, metadatas : list[Metadata], path : str) -> bool:\n \"\"\"Permet d'extraire une liste de ressources\n \n Parameters:\n ressources (list): liste de métadonné à extraire\n path (str): chemin où extraire les ressources\n \n Returns:\n bool: True si la liste a été extraite\n bool: False si la liste n'a pas été extraite\n \"\"\"\n fileNames = []\n bar = Bar('Processing', max=len(metadatas), suffix='%(index).0f/%(max).0f fichiers extraits')\n for m in metadatas:\n if m.fileName.value in fileNames:\n m.fileName.value = m.fileName.value+\"_\"+str(fileNames.count(m.fileName.value) + 1)\n if not self.extractResource(m, path):\n return False\n fileNames.append(m.fileName.value)\n bar.next()\n bar.finish()\n return True\n\n def extractResource(self, m : Metadata, path : str) -> bool:\n \"\"\"Permet d'extraire une ressource\n \n Parameters:\n r (Resource): ressource à extraire\n path (str): chemin où extraire la ressource\n \n Returns:\n bool: True si la ressource a été extraite\n bool: False si la ressource n'a pas été extraite\n \"\"\"\n try:\n pathOfResource = self.pathArchive+\"/\"+m.sha1[0:2]+\"/\"+m.sha1+myConfig.getOption(Constants.NAME_OF_SECTION_ARCHIVE, Constants.NAME_OF_DEFAULT_EXTENSION_COMPRESSION)\n pathExtraction = path\n with py7zr.SevenZipFile(pathOfResource, 'r') as archive:\n archive.extractall(pathExtraction)\n\n #change the name of the file\n #os.rename(path+\"/\"+m.sha1+m.extension.value, path+\"/\"+m.fileName.value+m.extension.value)\n\n Metadata.addMetadataInRessource(path+\"/\"+m.sha1+m.extension.value, m)\n return True\n except Exception as e:\n print(e)\n return False\n \n def duplicateResourceToOtherArchive(self, Resource : Resource, Archive : Archive) -> bool:\n \"\"\"Duplique le fichier depuis l'archive source vers l'archive destination\n \n Parameters:\n Resource (Resource): ressource à dupliquer\n Archive (Archive): archive destination\n \n Returns:\n bool: True si la ressource a été dupliquée\n bool: False si la ressource n'a pas été dupliquée\n \"\"\"\n try:\n pathOldFile = self.pathArchive+\"/\"+Resource.sha1[0:2]+\"/\"+Resource.sha1+myConfig.getOption(Constants.NAME_OF_SECTION_ARCHIVE, Constants.NAME_OF_DEFAULT_EXTENSION_COMPRESSION)\n pathNewDirectory = Archive.pathArchive+\"/\"+Resource.sha1[0:2]\n pathNewFile = pathNewDirectory+\"/\"+Resource.sha1+myConfig.getOption(Constants.NAME_OF_SECTION_ARCHIVE, Constants.NAME_OF_DEFAULT_EXTENSION_COMPRESSION)\n if not path.exists(pathNewDirectory):\n makedirs(pathNewDirectory)\n #chmod(pathNewDirectory, 0o664)\n \n shutil.copy(pathOldFile, pathNewFile)\n return True\n except Exception as e:\n print(e)\n return False\n \n def mergeTwoArchive(self, archive : Archive) -> Archive:\n \"\"\"Permet de fusionner deux archives\n \n Parameters:\n archive (Archive): archive à fusionner\n \n Returns:\n Archive: archive fusionnée\n \"\"\"\n\n LIMIT = 10\n\n resources = []\n metadatas = []\n offset = 0\n\n bddA : Bdd = self.myDb\n bddB : Bdd = archive.myDb\n\n bddA.askToInsertExtensionsWithOtherDb(bddB.pathFile)\n bddA.askToInsertFileNameWithOtherDb(bddB.pathFile)\n bddA.askToInsertFolderNameWithOtherDb(bddB.pathFile)\n bddA.askToInsertPathNameWithOtherDb(bddB.pathFile)\n bddA.askToInsertTypeWithOtherDb(bddB.pathFile)\n\n # Ajout des ressources \n resources = bddB.getAllResources(LIMIT, offset)\n while 
len(resources) > 0:\n \n for r in resources:\n # Récupération du nouveau type_id\n #r.type = self.myDb.getTypeByName(r.type)\n #r.path = archive.pathArchive + \"/\" + r.sha1[0:2] + \"/\" + r.sha1\n r, added = bddA.addResource(r)\n if added:\n archive.duplicateResourceToOtherArchive(r, self)\n\n\n offset += LIMIT\n resources = bddB.getAllResources(LIMIT, offset)\n\n # Ajout des métadonnées\n offset = 0\n metadatas = bddB.getAllMetadatas(LIMIT, offset)\n while len(metadatas) > 0:\n for m in metadatas:\n m = self.getNewIdsMetadata(m)\n m, added = bddA.addMetadata(m,True)\n if added:\n r = archive.myDb.getResourceBySha1(m.sha1)\n r.universalMetadata = m\n bddA.addFullTextMetadatas(r, FolderName(-1,m.pathName.value.rsplit(\"/\",1)[1]))\n archive.writeMetadataInJson(r, self)\n \n offset += LIMIT\n metadatas = bddB.getAllMetadatas(LIMIT, offset)\n \n # Ajout des liaisons folderName metadata\n offset = 0\n folderNameMetadata = bddB.getAllFolderNameMetadata(LIMIT, offset)\n while len(folderNameMetadata) > 0:\n for fn, m in folderNameMetadata:\n fn = bddA.getFolderNameByName(fn)\n m = self.getNewIdsMetadata(m)\n m = bddA.getMetadataByAllFields(m)\n bddA.addFolderNameIdMetadataId(fn.id, m.id)\n offset += LIMIT\n folderNameMetadata = bddB.getAllFolderNameMetadata(LIMIT, offset)\n \n\n # Ajout des liaisons folderName PathName\n offset = 0\n folderNamePathName = bddB.getAllFolderNamePathName(LIMIT, offset)\n while len(folderNamePathName) > 0:\n for fn, pn in folderNamePathName:\n fn = bddA.getFolderNameByName(fn)\n pn = bddA.getPathNameByPath(pn)\n bddA.addFolderNameIdPathNameId(fn.id, pn.id)\n offset += LIMIT\n folderNamePathName = bddB.getAllFolderNamePathName(LIMIT, offset)\n \n def getNewIdsMetadata(self, m : Metadata) -> Metadata:\n \"\"\"Permet de récupérer les nouvelles ids contenu dans la classe Metadata\n Parameters:\n m (Metadata): La metadata à traiter\n Returns:\n Metadata: La metadata avec les nouvelles ids\n \"\"\"\n m.extension = self.myDb.getExtensionByName(m.extension)\n m.pathName = self.myDb.getPathNameByPath(m.pathName)\n m.fileName = self.myDb.getFileNameByName(m.fileName)\n return m \n\n def search(self, options : dict) -> list:\n \"\"\"Permet de rechercher des ressources dans l'archive\n \n Parameters:\n options (dict): options de recherche\n \n Returns:\n list: liste des ressources trouvées\n \"\"\" \n return self.myDb.search(options)\n\n def searchFullText(self, options : str) -> list:\n \"\"\"Permet de rechercher des ressources dans l'archive de façon fulltext\n \n Parameters:\n options (dict): options de recherche\n \n Returns:\n list: liste des ressources trouvées\n \"\"\" \n return self.myDb.searchFullText(options)\n\n def saveToExtract(self, ressources : list) -> bool:\n \"\"\"Permet de sauvegarder des ressources pour l'extraction\n \n Parameters:\n ressources (list): liste des ressources à sauvegarder pour l'extraction\n \n Returns:\n Bool : True si la sauvegarde a réussi\n Bool : False si la sauvegarde a échoué\n \"\"\"\n try:\n \n arrayToStore = []\n for r in ressources:\n arrayToStore.append(r.universalMetadata)\n \n file_to_store = open(self.pathArchive+\"/ressourcesToExtract.txt\", \"wb\")\n pickle.dump(arrayToStore, file_to_store)\n file_to_store.close()\n \n return True\n except Exception as e:\n print(e)\n return False\n \n def loadFromFileToExtract(self) -> list:\n \"\"\"Permet de charger les ressources à extraire depuis le fichier ressourcesToExtract.pickle\n \n Returns:\n list: liste des ressources à extraire\n list: liste vide si impossible de charger le 
fichier\n \"\"\"\n try:\n file_to_load = open(self.pathArchive+\"/ressourcesToExtract.txt\", \"rb\")\n metadatas = pickle.load(file_to_load)\n file_to_load.close()\n # delete the file\n remove(self.pathArchive+\"/ressourcesToExtract.txt\")\n return metadatas\n except Exception as e:\n print(e)\n return []\n\n def getResourceBySha1WithMetadataById(self, sha1 : str, id : int) -> Resource:\n \"\"\"Permet de récupérer une ressource avec ses métadonnées à partir de son sha1 et de son id\n \n Parameters:\n sha1 (str): sha1 de la ressource\n id (int): id de la metadata\n \n Returns:\n Resource: ressource trouvée\n \"\"\"\n \n return self.myDb.getResourceBySha1WithMetadataById(sha1, id)\n\n @staticmethod\n def createPathRessourceInArchive(pathOfArchive : str, Resource : Resource) -> str:\n \"\"\"Crée le chemin d'accès à la ressource dans l'archive\n \n Parameters:\n pathOfArchive (str): chemin d'accès à l'archive\n Resource (Resource): ressource à créer\n \n Returns:\n str: chemin d'accès à la ressource\n \"\"\"\n return pathOfArchive+\"/\"+Resource.sha1[0:2]\n\n\n","repo_name":"theohrlmnn/archiver","sub_path":"src/Archive.py","file_name":"Archive.py","file_ext":"py","file_size_in_byte":18734,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"1470568872","text":"import json\nimport logging\nimport os\n\n# from requirements.txt\nimport pytest\nfrom fabric import Connection\nfrom paramiko.ssh_exception import NoValidConnectionsError\nfrom scp import SCPClient\n\n# local libraries\nfrom arm_template_deploy import ArmTemplateDeploy\nfrom lib.helpers import (get_unused_local_port, run_averecmd, run_ssh_command,\n run_ssh_commands, split_ip_range, wait_for_op)\n\n\n# COMMAND-LINE OPTIONS ########################################################\ndef pytest_addoption(parser):\n parser.addoption(\n \"--build_root\", action=\"store\", default=None,\n help=\"Local path to the root of the Azure/Avere repo clone \"\n + \"(e.g., /home/user1/git/Azure/Avere). This is used to find the \"\n + \"various templates that are deployed during these tests. (default: \"\n + \"$BUILD_SOURCESDIRECTORY if set, else current directory)\",\n )\n parser.addoption(\n \"--location\", action=\"store\", default=None,\n help=\"Azure region short name to use for deployments (default: westus2)\",\n )\n parser.addoption(\n \"--ssh_priv_key\", action=\"store\", default=None,\n help=\"SSH private key to use in deployments and tests (default: ~/.ssh/id_rsa)\",\n )\n parser.addoption(\n \"--ssh_pub_key\", action=\"store\", default=None,\n help=\"SSH public key to use in deployments and tests (default: ~/.ssh/id_rsa.pub)\",\n )\n parser.addoption(\n \"--test_vars_file\", action=\"store\", default=None,\n help=\"Test variables file used for passing values between runs. This \"\n + \"file is in JSON format. It is loaded during test setup and written \"\n + \"out during test teardown. Command-line options override variables \"\n + \"in this file. (default: $VFXT_TEST_VARS_FILE if set, else None)\"\n )\n\n\n# FIXTURES ####################################################################\n@pytest.fixture()\ndef cluster_ips(test_vars):\n \"\"\"\n Return a list of known cluster IPs. The cluster mgmt IP, at a minimum,\n must be known. 
Cluster node and vserver IPs are also added to the list of\n cluster IPs that is returned.\n \"\"\"\n c_ips = [test_vars[\"cluster_mgmt_ip\"]]\n if \"cluster_node_ips\" in test_vars:\n c_ips += test_vars[\"cluster_node_ips\"]\n if \"cluster_vs_ips\" in test_vars:\n c_ips += test_vars[\"cluster_vs_ips\"]\n return c_ips\n\n\n@pytest.fixture()\ndef node_ips(cluster_ips, ssh_con, test_vars):\n \"\"\"Queries the cluster to get a list of IPs for each node.\"\"\"\n log = logging.getLogger(\"node_ips\")\n last_ex = None\n for _ip in cluster_ips:\n # For resiliency, attempt to issue averecmd calls to known cluster IPs.\n try:\n result = run_averecmd(ssh_client=ssh_con,\n password=os.environ[\"AVERE_ADMIN_PW\"],\n node_ip=_ip,\n method=\"cluster.get\")\n test_vars[\"averecmd_ip\"] = _ip # this IP worked for averecmd\n c_ips = result[\"clusterIPs\"][0]\n node_ip_range = \"{0}-{1}\".format(c_ips[\"firstIP\"], c_ips[\"lastIP\"])\n test_vars[\"cluster_node_ips\"] = split_ip_range(node_ip_range)\n return test_vars[\"cluster_node_ips\"]\n except Exception as e:\n log.error(\"cluster.get failed for IP {}\".format(_ip))\n log.error(e)\n last_ex = e\n assert not last_ex\n\n\n@pytest.fixture()\ndef node_names(cluster_ips, ssh_con, test_vars):\n \"\"\"Queries the cluster to get a list of node names.\"\"\"\n log = logging.getLogger(\"node_names\")\n last_ex = None\n for _ip in cluster_ips:\n # For resiliency, attempt to issue averecmd calls to known cluster IPs.\n try:\n nodes = run_averecmd(ssh_client=ssh_con,\n password=os.environ[\"AVERE_ADMIN_PW\"],\n node_ip=_ip,\n method=\"node.list\")\n test_vars[\"averecmd_ip\"] = _ip # this IP worked for averecmd\n test_vars[\"nodes\"] = nodes\n return test_vars[\"nodes\"]\n except Exception as e:\n log.error(\"node.list failed for IP {}\".format(_ip))\n log.error(e)\n last_ex = e\n assert not last_ex\n\n\n@pytest.fixture()\ndef averecmd_params(ssh_con, node_ips, node_names, test_vars):\n \"\"\"Convenience fixture: common averecmd parameters.\"\"\"\n node_ip = test_vars[\"cluster_mgmt_ip\"]\n if \"averecmd_ip\" in test_vars:\n node_ip = test_vars[\"averecmd_ip\"]\n\n return {\n \"ssh_client\": ssh_con,\n \"password\": os.environ[\"AVERE_ADMIN_PW\"],\n \"node_ip\": node_ip\n }\n\n\n@pytest.fixture()\ndef mnt_nodes(ssh_con, test_vars):\n if (\"storage_account\" not in test_vars) or (not test_vars[\"storage_account\"]):\n return\n\n log = logging.getLogger(\"mnt_nodes\")\n check = run_ssh_command(ssh_con, \"ls ~/STATUS.NODES_MOUNTED\",\n ignore_nonzero_rc=True, timeout=30)\n if check['rc']: # nodes were not already mounted\n # Update needed packages.\n commands = [\"sudo apt-get update\", \"sudo apt-get install nfs-common\"]\n run_ssh_commands(ssh_con, commands, timeout=600)\n\n # Set up mount points and /etc/fstab.\n commands = []\n for i, vs_ip in enumerate(test_vars[\"cluster_vs_ips\"]):\n commands.append(\"sudo mkdir -p /nfs/node{}\".format(i))\n commands.append(\"sudo chown nobody:nogroup /nfs/node{}\".format(i))\n fstab_line = \"{}:/msazure /nfs/node{} nfs \".format(vs_ip, i) + \\\n \"hard,nointr,proto=tcp,mountproto=tcp,retry=5 0 0\"\n commands.append(\"sudo sh -c 'echo \\\"{}\\\" >> /etc/fstab'\".format(\n fstab_line))\n run_ssh_commands(ssh_con, commands, timeout=30)\n\n # Mount the nodes.\n def _log_diag(in_str):\n log.info(json.dumps(in_str, indent=4).replace(\"\\\\n\", \"\\n\"))\n try:\n commands = \"\"\"\n sudo mount -av\n touch ~/STATUS.NODES_MOUNTED\n \"\"\".split(\"\\n\")\n _log_diag(run_ssh_commands(ssh_con, commands, timeout=300))\n except Exception as 
e:\n # Show some diag info.\n log.info(\"Exception caught when attempting to mount. Diag info:\")\n diag_commands = \"\"\"\n cat /etc/mtab\n nfsstat\n sudo ufw status\n service portmap status\n sudo iptables -n -L -v\n netstat -rn\n \"\"\".split(\"\\n\")\n _log_diag(run_ssh_commands(ssh_con, diag_commands, ignore_nonzero_rc=True))\n for vs_ip in test_vars[\"cluster_vs_ips\"]:\n _log_diag(run_ssh_command(ssh_con, \"rpcinfo -p \" + vs_ip, ignore_nonzero_rc=True))\n\n run_ssh_command(ssh_con, \"sudo apt -y install traceroute nmap\", ignore_nonzero_rc=True)\n _log_diag(run_ssh_command(ssh_con, \"traceroute \" + vs_ip, ignore_nonzero_rc=True))\n _log_diag(run_ssh_command(ssh_con, \"sudo nmap -sS \" + vs_ip, ignore_nonzero_rc=True))\n raise\n\n\n@pytest.fixture(scope=\"module\")\ndef resource_group(test_vars):\n log = logging.getLogger(\"resource_group\")\n rg = test_vars[\"atd_obj\"].create_resource_group()\n log.info(\"Resource Group: {}\".format(rg))\n return rg\n\n\n@pytest.fixture(scope=\"module\")\ndef storage_account(test_vars):\n log = logging.getLogger(\"storage_account\")\n atd = test_vars[\"atd_obj\"]\n sa = atd.st_client.storage_accounts.get_properties(\n atd.resource_group,\n atd.storage_account\n )\n log.info(\"Storage Account: {}\".format(sa))\n return sa\n\n\n@pytest.fixture()\ndef scp_con(ssh_con_fabric):\n \"\"\"Create an SCP client based on an SSH connection to the controller.\"\"\"\n log = logging.getLogger(\"scp_con\")\n # client = SCPClient(ssh_con.get_transport()) # PARAMIKO\n client = SCPClient(ssh_con_fabric.transport)\n yield client\n log.debug(\"Closing SCP client.\")\n client.close()\n\n\n@pytest.fixture()\ndef ssh_con(ssh_con_fabric):\n return ssh_con_fabric.client\n\n\n@pytest.fixture()\ndef ssh_con_fabric(test_vars):\n \"\"\"Create an SSH connection to the controller.\"\"\"\n log = logging.getLogger(\"ssh_con_fabric\")\n\n # SSH connection/client to the public IP.\n pub_client = Connection(test_vars[\"public_ip\"],\n user=test_vars[\"controller_user\"],\n connect_kwargs={\n \"key_filename\": test_vars[\"ssh_priv_key\"],\n })\n\n # If the controller's IP is not the same as the public IP, then we are\n # using a jumpbox to get into the VNET containing the controller. 
In that\n # case, create an SSH tunnel before connecting to the controller.\n msg_con = \"SSH connection to controller ({})\".format(test_vars[\"controller_ip\"])\n if test_vars[\"public_ip\"] != test_vars[\"controller_ip\"]:\n for port_attempt in range(1, 11):\n tunnel_local_port = get_unused_local_port()\n tunnel_remote_port = 22\n\n msg_con += \" via jumpbox ({0}), local port {1}\".format(\n test_vars[\"public_ip\"], tunnel_local_port)\n\n log.debug(\"Opening {}\".format(msg_con))\n with pub_client.forward_local(local_port=tunnel_local_port,\n remote_port=tunnel_remote_port,\n remote_host=test_vars[\"controller_ip\"]):\n client = Connection(\"127.0.0.1\",\n user=test_vars[\"controller_user\"],\n port=tunnel_local_port,\n connect_kwargs={\n \"key_filename\": test_vars[\"ssh_priv_key\"],\n })\n try:\n client.open()\n except NoValidConnectionsError as ex:\n exp_err = \"Unable to connect to port {} on 127.0.0.1\".format(tunnel_local_port)\n if exp_err not in str(ex):\n raise\n else:\n log.warning(\"{0} (attempt #{1}, retrying)\".format(\n exp_err, str(port_attempt)))\n continue\n\n yield client\n log.debug(\"{} closed\".format(msg_con))\n break # no need to iterate again\n else:\n log.debug(\"Opening {}\".format(msg_con))\n pub_client.open()\n yield pub_client\n log.debug(\"Closing {}\".format(msg_con))\n\n pub_client.close()\n\n\n@pytest.fixture(scope=\"module\")\ndef test_vars(request):\n \"\"\"\n Loads saved test variables, instantiates an ArmTemplateDeploy object, and\n dumps test variables during teardown.\n \"\"\"\n log = logging.getLogger(\"test_vars\")\n\n def envar_check(envar):\n if envar in os.environ:\n return os.environ[envar]\n return None\n\n # Load command-line arguments into a dictionary.\n cl_opts = {\n \"build_root\": request.config.getoption(\"--build_root\"),\n \"location\": request.config.getoption(\"--location\"),\n \"ssh_priv_key\": request.config.getoption(\"--ssh_priv_key\"),\n \"ssh_pub_key\": request.config.getoption(\"--ssh_pub_key\"),\n \"test_vars_file\": request.config.getoption(\"--test_vars_file\")\n }\n cja = {\"sort_keys\": True, \"indent\": 4} # common JSON arguments\n log.debug(\"JSON from command-line args: {}\".format(\n json.dumps(cl_opts, **cja)))\n\n # Set build_root value (command-line arg, envar, cwd).\n build_root = request.config.getoption(\"--build_root\")\n if not build_root:\n build_root = envar_check(\"BUILD_SOURCESDIRECTORY\")\n if not build_root:\n build_root = os.getcwd()\n log.debug(\"build_root = {}\".format(build_root))\n\n # Set test_vars_file value (command-line arg, envar).\n test_vars_file = request.config.getoption(\"--test_vars_file\")\n if not test_vars_file:\n test_vars_file = envar_check(\"VFXT_TEST_VARS_FILE\")\n log.debug(\"test_vars_file = {}\".format(test_vars_file))\n\n default_cl_opts = { # defaults for command-line options\n \"build_root\": build_root,\n \"location\": \"westus2\",\n \"ssh_priv_key\": os.path.expanduser(r\"~/.ssh/id_rsa\"),\n \"ssh_pub_key\": os.path.expanduser(r\"~/.ssh/id_rsa.pub\"),\n \"test_vars_file\": test_vars_file\n }\n log.debug(\"Defaults for command-line args: {}\".format(\n json.dumps(default_cl_opts, **cja)))\n\n vars = {}\n\n # Load JSON from test_vars_file, if specified.\n if test_vars_file and os.path.isfile(test_vars_file):\n log.debug(\"Loading into vars from {} (test_vars_file)\".format(\n test_vars_file))\n with open(test_vars_file, \"r\") as vtvf:\n vars = {**vars, **json.load(vtvf)}\n log.debug(\"After loading from test_vars_file, vars is: {}\".format(\n json.dumps(vars, 
**cja)))\n\n # Override test_vars_file values with command-line arguments.\n for k, v in cl_opts.items():\n if v: # specified on the command-line, so override\n vars[k] = v\n elif k not in vars: # not specified on command-line nor test vars file\n vars[k] = default_cl_opts[k] # use the default\n log.debug(\"After overriding with command-line args, vars is: {}\".format(\n json.dumps(vars, **cja)))\n\n atd_obj = ArmTemplateDeploy(_fields={**vars})\n # \"Promote\" serializable members to the top level.\n vars = {**vars, **json.loads(atd_obj.serialize())}\n\n if test_vars_file: # write out vars to test_vars_file\n log.debug(\"vars: {}\".format(json.dumps(vars, **cja)))\n log.debug(\"Saving vars to {} (test_vars_file)\".format(test_vars_file))\n with open(test_vars_file, \"w\") as vtvf:\n json.dump(vars, vtvf, **cja)\n\n vars[\"atd_obj\"] = atd_obj # store the object in a common place\n\n yield vars\n\n if test_vars_file: # write out vars to test_vars_file\n vars = {**vars, **json.loads(vars[\"atd_obj\"].serialize())}\n vars.pop(\"atd_obj\")\n log.debug(\"vars: {}\".format(json.dumps(vars, **cja)))\n log.debug(\"Saving vars to {} (test_vars_file)\".format(test_vars_file))\n with open(test_vars_file, \"w\") as vtvf:\n json.dump(vars, vtvf, **cja)\n\n\n@pytest.fixture()\ndef ext_vnet(test_vars):\n \"\"\"\n Creates a resource group containing a new VNET, subnet, public IP, and\n jumpbox for use in other tests.\n \"\"\"\n log = logging.getLogger(\"ext_vnet\")\n vnet_atd = ArmTemplateDeploy(\n location=test_vars[\"location\"],\n resource_group=test_vars[\"atd_obj\"].deploy_id + \"-rg-vnet\"\n )\n rg = vnet_atd.create_resource_group()\n log.info(\"Resource Group: {}\".format(rg))\n\n vnet_atd.deploy_name = \"ext_vnet\"\n with open(\"{}/src/vfxt/azuredeploy.vnet.json\".format(\n test_vars[\"build_root\"])) as tfile:\n vnet_atd.template = json.load(tfile)\n\n with open(test_vars[\"ssh_pub_key\"], \"r\") as ssh_pub_f:\n ssh_pub_key = ssh_pub_f.read()\n\n vnet_atd.deploy_params = {\n \"uniqueName\": test_vars[\"atd_obj\"].deploy_id,\n \"jumpboxAdminUsername\": \"azureuser\",\n \"jumpboxSSHKeyData\": ssh_pub_key\n }\n test_vars[\"ext_vnet\"] = wait_for_op(vnet_atd.deploy()).properties.outputs\n log.debug(test_vars[\"ext_vnet\"])\n return test_vars[\"ext_vnet\"]\n","repo_name":"Azure/Avere","sub_path":"test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":15398,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"90"}
+{"seq_id":"86345525467","text":"import matplotlib.pyplot as plt\nimport math\nimport torch.optim as optim\nimport torch\n\n\ndef change_lr1(epoch, T=5, factor=0.3, min=1e-3):\n mul = 1.\n if epoch < T:\n mul = mul\n elif epoch < T * 3:\n mul = mul * factor\n else:\n return min\n return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)\n\n\ndef change_lr2(epoch, T=7, factor=0.3, min=1e-3):\n mul = 1.\n if epoch < T:\n mul = mul\n elif epoch < T * 3:\n mul = mul * factor\n elif epoch < T * 5:\n mul = mul * factor * factor\n else:\n return min\n return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)\n\n\ndef change_lr_d1(epoch, T=6, factor=0.3, min=1e-3):\n mul = 1.\n n = epoch / T\n while n > 1:\n mul *= factor\n n -= 1\n return max((1 + math.cos(math.pi * (epoch % T) / T)) * mul / 2, min)\n\n\ndef change_lr3(epoch, T=5, factor=1, min=1e-3):\n mul = 1.\n if epoch < T:\n mul = mul\n elif epoch < T * 3:\n mul = mul * factor\n else:\n return min\n return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)\n\n\ndef change_lr_d2(epoch, T=5, factor=0.3, min=1e-3):\n mul = 1.\n n = epoch / T\n while n > 1:\n mul *= factor\n n -= 1\n return max((1 + math.cos(math.pi * (epoch % T) / T)) * mul / 2, min)\n\n\ndef getXY(func, epoches, start_lr):\n x = []\n y = []\n for i in range(epoches):\n x.append(i)\n y.append(start_lr * func(i))\n return x, y\n\n\ndef getXY_scheduler(scheduler, epoches):\n x = []\n y = []\n for i in range(epoches):\n x.append(i)\n y.append(scheduler.get_last_lr())\n scheduler.step()\n return x, y\n\n\ndef change_0(epoch):\n return 1.\n\n\ndef change_1(epoch, T=9, factor=0.1, min=1e-3):\n mul = 1.\n n = epoch / T\n while n > 1:\n mul *= factor\n n -= 1\n return max(mul, min)\n\n\ndef change_2(epoch, T=9, min=1e-3):\n return max((1 + math.cos(math.pi * epoch / T)) / 2, min)\n\n\ndef change_3(epoch, T=9, factor=0.3, min=1e-3):\n mul = 1.\n n = (epoch - T) / (2 * T)\n while n > 0:\n mul *= factor\n n -= 1\n return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)\n\n\noptimizer = optim.SGD([torch.Tensor([1, 1])], lr=1e-3, momentum=0, dampening=0, weight_decay=0)\noptimizer = optim.RMSprop([torch.Tensor([1, 1])], lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0)\noptimizer = optim.Adam([torch.Tensor([1, 1])], lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0)\nscheduler1 = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[5, 10, 15], gamma=0.3)\nscheduler2 = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=0, last_epoch=-1)\nscheduler2 = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr3)\nscheduler3 = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr1)\n\n# x1, y1 = getXY(change_lr1, epoches=16, start_lr=1e-3)\n# x2, y2 = getXY(change_lr2, epoches=36, start_lr=1e-3)\n# x3, y3 = getXY(change_lr_d1, epoches=18, start_lr=1e-3)\n\n# x1, y1 = getXY_scheduler(scheduler1, epoches=16)\n# x2, y2 = getXY_scheduler(scheduler2, epoches=16)\n# x3, y3 = getXY_scheduler(scheduler3, epoches=16)\n\nscheduler1 = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_1)\nscheduler2 = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_2)\nscheduler3 = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_3)\nx1, y1 = getXY_scheduler(scheduler1, epoches=27)\nx2, y2 = getXY_scheduler(scheduler2, epoches=27)\nx3, y3 = getXY_scheduler(scheduler3, epoches=27)\n# fig = plt.figure(figsize=(16, 9))\n# plt.plot(x2, y2)\n# plt.show()\nfig = plt.figure(figsize=(16, 9))\nsub1 = fig.add_subplot(1, 3, 1)\n# 
optimizer = optim.SGD([torch.Tensor([1, 1])], lr=1e-3, momentum=0, dampening=0, weight_decay=0)\noptimizer = optim.RMSprop([torch.Tensor([1, 1])], lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0)\noptimizer = optim.Adam([torch.Tensor([1, 1])], lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0)\nscheduler1 = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[5, 10, 15], gamma=0.3)\nscheduler2 = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=0, last_epoch=-1)\nscheduler2 = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr3)\nscheduler3 = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr1)\n\n# x1, y1 = getXY(change_lr1, epochs=16, start_lr=1e-3)\n# x2, y2 = getXY(change_lr2, epochs=36, start_lr=1e-3)\n# x3, y3 = getXY(change_lr_d1, epochs=18, start_lr=1e-3)\n\n# x1, y1 = getXY_scheduler(scheduler1, epochs=16)\n# x2, y2 = getXY_scheduler(scheduler2, epochs=16)\n# x3, y3 = getXY_scheduler(scheduler3, epochs=16)\n\nscheduler1 = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_1)\nscheduler2 = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_2)\nscheduler3 = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_3)\nx1, y1 = getXY_scheduler(scheduler1, epochs=27)\nx2, y2 = getXY_scheduler(scheduler2, epochs=27)\nx3, y3 = getXY_scheduler(scheduler3, epochs=27)\n# fig = plt.figure(figsize=(16, 9))\n# plt.plot(x2, y2)\n# plt.show()\nfig = plt.figure(figsize=(16, 9))\nsub1 = fig.add_subplot(1, 3, 1)\n# sub1.set_title(\"changelr1\")\nsub1.set_title(\"(a)\", y=-0.1, fontsize=18)\nsub1.plot(x1, y1)\nsub2 = fig.add_subplot(1, 3, 2)\n# sub2.set_title(\"changelr2\")\nsub2.set_title(\"(b)\", y=-0.1, fontsize=18)\nsub2.plot(x2, y2)\nsub3 = fig.add_subplot(1, 3, 3)\n# sub3.set_title(\"changelr3\")\nsub3.set_title(\"(c)\", y=-0.1, fontsize=18)\nsub3.plot(x3, y3)\nplt.show()\n","repo_name":"QFaceblue/Driving-Behavior-Recognition","sub_path":"show_lr.py","file_name":"show_lr.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"}
+{"seq_id":"18313963679","text":"import sys\nimport numpy as np\nfrom collections import deque\ninput = lambda: sys.stdin.readline().rstrip()\n\n\ndef solve():\n N = int(input())\n edge = [[] for _ in range(N)]\n ab = [tuple() for _ in range(N - 1)]\n for i in range(N - 1):\n a, b = map(int, input().split())\n a, b = a - 1, b - 1\n edge[a].append(b)\n edge[b].append(a)\n ab[i] = (a, b)\n\n # print(ab)\n # print(edge)\n\n max_v = 0\n max_v_dim = 0\n for i, e in enumerate(edge):\n if max_v_dim < len(e):\n max_v_dim = len(e)\n max_v = i\n\n # print(max_v)\n # print(max_v_dim)\n\n # bfs\n q = deque()\n q.append(max_v)\n ec = np.full(N, -1, dtype='i8')\n ec[max_v] = max_v_dim + 10\n vc = dict()\n # vc = np.full((N, N), -1, dtype='i8')\n\n while q:\n nv = q.popleft()\n nc = ec[nv]\n tc = 1\n for v in edge[nv]:\n v1, v2 = min(nv, v), max(nv, v)\n if not ((v1, v2) in vc):\n if nc == tc:\n tc += 1\n ec[v] = tc\n vc[(v1, v2)] = tc\n tc += 1\n q.append(v)\n\n print(max_v_dim)\n for v1, v2 in ab:\n print(vc[(v1, v2)])\n\n\nif __name__ == '__main__':\n solve()\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02850/s408330006.py","file_name":"s408330006.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"34998185564","text":"import turtle\n\ndef leaf(n, pencolor, brushcolor):\n def cir():\n for i in range(9):\n turtle.forward(n)\n turtle.right(10)\n\n a = turtle.heading()\n turtle.color(brushcolor)\n turtle.begin_fill()\n cir()\n turtle.right(90)\n cir()\n turtle.end_fill()\n\n turtle.setheading(a)\n turtle.color(pencolor)\n cir()\n turtle.right(90)\n cir()\n\ndef flower(x, y, size, color_set):\n turtle.up()\n turtle.goto(x, y)\n turtle.down()\n turtle.setheading(90)\n turtle.color(color_set[0])\n turtle.forward(size * 2)\n turtle.right(30)\n leaf(size, color_set[0], color_set[0])\n turtle.setheading(90)\n turtle.forward(size * 14)\n\n for i in range(9):\n leaf(size, color_set[1], color_set[2])\n turtle.right(10)\n \nturtle.tracer(0)\nflower(-100, -120, 7, (\"lightgreen\", \"mistyrose\", \"lightpink\"))\nflower(0, -120, 4, (\"greenyellow\", \"lemonchiffon\", \"gold\"))\nflower(100, -120, 8, (\"palegreen\", \"paleturquoise\", \"lightblue\"))\nturtle.update()\n","repo_name":"96no3/PythonStudy","sub_path":"Python/201911/191121/2019112109.py","file_name":"2019112109.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"7188416733","text":"from django.urls import path\n\nfrom . import views\n#app_name = 'bank'\nurlpatterns = [\n path('', views.index, name='index'),\n path('transaction', views.transaction, name='transaction'),\n path('register',views.register, name='register'),\n path('customers',views.customers, name='customers'),\n path('transfer',views.transfer, name='transfer')\n]","repo_name":"poojarode/Banking-System","sub_path":"GRIP_BANK/bank/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"42122879213","text":"# -*- coding: utf-8 -*-\n\n'''\nMIMO communication default parameters\n'''\n\ndL = 50 \t # 信道长度\ndK = 6 \t\t# 稀疏度/多径数,满足:K<L\ndM = 8 # 每帧的OFDM符号数\ndNt = 2 # 发送天线数\ndNr = 1 # 接收天线数 \ndSNR = 20 # AWGN信道信噪比\ndmodulate = 4 \t\t# 星座调制: 1 -> BPSK, 2 -> QPSK, 4 -> 16QAM\ndetype = \"CS\"\t\t# 信道估计类型。'CS' 或 'LS'","repo_name":"gymmer/py_ofdm_cs","sub_path":"src/MIMO/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"}
+{"seq_id":"27309544181","text":"'''\nCreated on May 18, 2015\n\n@author: boris\n'''\n\n \ndef normalizeMatrixData(plotData):\n transposed = [list(x) for x in zip(*plotData)]\n transposedNormalized = []\n for row in transposed:\n maxRow = max(row)\n if maxRow == 0:\n maxRow = 1\n transposedNormalized.append([1.0*x/maxRow for x in row])\n return [list(x) for x in zip(*transposedNormalized)]\n","repo_name":"uio-bmi/track_rand","sub_path":"lib/hb/quick/visualization/VisualizationUtil.py","file_name":"VisualizationUtil.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"40328918215","text":"#!/usr/bin/python3\n# coding: utf-8\n\n# pandas读取大于内存的文件\n\n# 方法1, 设置chunksize, 分块读取\nchunksize = 10 ** 6 # # 每块为chunksize条数据(index)\nchunks = []\nfor chunk in pd.read_csv(filename, chunksize=chunksize):\n print(chunk)\n chunks.append(chunk)\ndf = pd.concat(chunks, ignore_index=True)\n\n# 方法2, 使用iterator, 但是也需要设置chunksize\nchunkSize = 10 ** 6\n# iterator参数,默认为False,将其改为True,返回一个可迭代对象TextFileReader,使用它的get_chunk(num)方法可获得前num行的数据\nreader = pd.read_csv(filename, iterator=True)\nwhile True:\n try:\n chunk = reader.get_chunk(chunkSize)\n print(chunk)\n except StopIteration:\n break\n\n# 数据写入文件时候,指定换行符及分列符号:\ndf.to_csv(\"result.txt\", index=False, sep='\\001', line_terminator='\\n\\001\\001\\001\\n') \n# line_terminator:自定义换行符\n# sep: 自定义分列符号,分隔符;\n\n# 超大文件,仅仅读取指定行,读取前几行:\ndf = pandas.read_csv(filename, header=0, nrows=1000) # nrows: 读取前几行,这里读取前1000行;\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n","repo_name":"gswyhq/hello-world","sub_path":"pandas/pandas读取超大文件.py","file_name":"pandas读取超大文件.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"zh","doc_type":"code","stars":9,"dataset":"github-code","pt":"90"}
+{"seq_id":"11697564204","text":"\r\nfrom random_gen import RandomGen\r\n\r\n# Material names taken from https://minecraft-archive.fandom.com/wiki/Items\r\nRANDOM_MATERIAL_NAMES = [\r\n \"Arrow\",\r\n \"Axe\",\r\n \"Bow\",\r\n \"Bucket\",\r\n \"Carrot on a Stick\",\r\n \"Clock\",\r\n \"Compass\",\r\n \"Crossbow\",\r\n \"Exploration Map\",\r\n \"Fire Charge\",\r\n \"Fishing Rod\",\r\n \"Flint and Steel\",\r\n \"Glass Bottle\",\r\n \"Dragon's Breath\",\r\n \"Hoe\",\r\n \"Lead\",\r\n \"Map\",\r\n \"Pickaxe\",\r\n \"Shears\",\r\n \"Shield\",\r\n \"Shovel\",\r\n \"Sword\",\r\n \"Saddle\",\r\n \"Spyglass\",\r\n \"Totem of Undying\",\r\n \"Blaze Powder\",\r\n \"Blaze Rod\",\r\n \"Bone\",\r\n \"Bone meal\",\r\n \"Book\",\r\n \"Book and Quill\",\r\n \"Enchanted Book\",\r\n \"Bowl\",\r\n \"Brick\",\r\n \"Clay\",\r\n \"Coal\",\r\n \"Charcoal\",\r\n \"Cocoa Beans\",\r\n \"Copper Ingot\",\r\n \"Diamond\",\r\n \"Dyes\",\r\n \"Ender Pearl\",\r\n \"Eye of Ender\",\r\n \"Feather\",\r\n \"Spider Eye\",\r\n \"Fermented Spider Eye\",\r\n \"Flint\",\r\n \"Ghast Tear\",\r\n \"Glistering Melon\",\r\n \"Glowstone Dust\",\r\n \"Gold Ingot\",\r\n \"Gold Nugget\",\r\n \"Gunpowder\",\r\n \"Ink Sac\",\r\n \"Iron Ingot\",\r\n \"Iron Nugget\",\r\n \"Lapis Lazuli\",\r\n \"Leather\",\r\n \"Magma Cream\",\r\n \"Music Disc\",\r\n \"Name Tag\",\r\n \"Nether Bricks\",\r\n \"Paper\",\r\n \"Popped Chorus Fruit\",\r\n \"Prismarine Crystal\",\r\n \"Prismarine Shard\",\r\n \"Rabbit's Foot\",\r\n \"Rabbit Hide\",\r\n \"Redstone\",\r\n \"Seeds\",\r\n \"Beetroot Seeds\",\r\n \"Nether Wart Seeds\",\r\n \"Pumpkin Seeds\",\r\n \"Wheat Seeds\",\r\n \"Slimeball\",\r\n \"Snowball\",\r\n \"Spawn Egg\",\r\n \"Stick\",\r\n \"String\",\r\n \"Wheat\",\r\n \"Netherite Ingot\",\r\n]\r\n\r\nclass Material:\r\n \"\"\"\r\n Creates a material with a name and mining rate \r\n Name is used to identify the material\r\n Mining rate is amount of hunger needed to mine \r\n\r\n Name and mining rate are either input or chosen at random\r\n\r\n \"\"\"\r\n \r\n def __init__(self, name: str, mining_rate: float) -> None:\r\n \"\"\"\r\n Initialises the constructor for Material\r\n\r\n Parameters:\r\n name(string): name of the material\r\n mining_rate (float): rate of mining within the cave\r\n Returns:\r\n None\r\n\r\n Worst case complexity: O(1)\r\n Best Case complexity: O(1)\r\n \"\"\"\r\n\r\n self.name = name\r\n self.mining_rate = mining_rate\r\n \r\n def __str__(self) -> str:\r\n \"\"\"\r\n Returns the material details as a formatted string\r\n\r\n Returns:\r\n output (str): material name and mining rate\r\n\r\n Worst case complexity: O(1)\r\n Best Case complexity: O(1)\r\n \"\"\"\r\n return f'({self.name},{self.mining_rate})'\r\n \r\n # Just chose 30 to be the max hunger cost cuase i dont know what else\r\n # feels like we need something to determine the hunger cost based on the material cause harder materials like iron should take more to mine than wheat\r\n @classmethod\r\n def random_material(cls):\r\n \"\"\"\r\n Returns the material randomly created\r\n\r\n Returns:\r\n Material\r\n\r\n Worst case complexity: O(1)\r\n Best Case complexity: O(1)\r\n \"\"\"\r\n return Material(RANDOM_MATERIAL_NAMES[RandomGen.randint(0,len(RANDOM_MATERIAL_NAMES)-1)],RandomGen.randint(1,30))\r\n\r\nif __name__ == \"__main__\":\r\n print(Material(\"Coal\", 4.5))\r\n print(Material.random_material())\r\n\r\n","repo_name":"raunakoirala/Mining-and-Trading-Game-1008","sub_path":"assignment 
3/material.py","file_name":"material.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18445060829","text":"N,M = list(map(int,input().split(\" \")))\nAs = list(map(int,input().split(\" \")))\n\n# N → 10^4 数字0-9 → 10^5\n\nmatch= [-1,2,5,5,4,5,6,3,7,6]\n\ndp = [-1 for _ in range(N + 100)] #N 本使ったときの最大値\ndp[0] = 0\nfor num in As:\n dp[match[num] ] = 1\n\nfor i in range(N + 1):\n for num in As:\n match_num = match[num]\n if i - match_num >= 0:\n #数字numを使うときのマッチの本数\n dp[i] = max( dp[i - match_num] + 1 , dp[i])\n# print(dp)\n\nans = []\ncot = N\nAs.sort(reverse=True)\nwhile True:\n if cot == 0:\n break\n for num in As:\n if dp[cot - match[num]] == dp[cot] - 1:\n ans.append(str(num))\n cot -= match[num]\n break\nprint(\"\".join(ans))","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03128/s888822568.py","file_name":"s888822568.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"29894623419","text":"# docs : https://www.w3schools.com/python/python_mysql_select.asp\nfrom utils import db\n\nsqlcmd = db.conn()\n\ndef log():\n\n sql = \"INSERT INTO logger (name, address) VALUES (%s, %s)\"\n val = (\"John\", \"Highway 21\")\n sqlcmd.execute(sql, val)\n\n db.conn.commit()\n\n return {\"rowcount\" : sqlcmd.rowcount}\n\ndef logs():\n\n sqlcmd.execute(\"SELECT * FROM logger\")\n\n return sqlcmd.fetchall()","repo_name":"4nkitd/cvf-python","sub_path":"model/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"25683998693","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nA Python script that uses numpy and pyper with R and the \"lme4\" library\nto compute relations with linear mixed effects models.\nInstall the \"lme4\" library with:\n R -e \"install.packages('lme4', repos='http://cran.r-project.org')\"\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nimport difflib\n\nimport numpy as np\nimport pyper\n\nfrom .util import cran\n\nDEFAULT_BS_ITER = 1000\n\n\ndef classify_treatment_repetition(analysis, id_ctl=\"co\", id_trt=\"\",\n id_ctl_res=\"\", id_trt_res=\"\"):\n \"\"\"Convenience method for assigning treatment and repetition\n\n This method pairs treatments and repetitions in an analysis\n using the measurement titles and identifiers given as\n keyword arguments.\n\n Parameters\n ----------\n analysis: shapeout.analysis.Analysis\n The analysis instance to use. The titles of the individual\n measurements will be searched for the `id_*` terms.\n id_ctl: str\n Identifies a control measurement.\n id_ctl_res: str\n Identifies a control measurement in the reservoir. Set to\n an empty string to disable.\n id_trt: str\n Identifies the treatment measurement. Set to an empty\n string to use all non-control measurements as treatments.\n id_trt_res: str\n Identifies the treatment measurement in the reservoir.\n Must be set if `id_ctl_res` is used.\n \"\"\"\n # sanity checks\n if id_ctl == \"\" and id_trt == \"\":\n raise ValueError(\"At least `id_ctl` or `id_trt` must be set!\")\n\n idlist = []\n\n for mm in analysis:\n if mm.config[\"setup\"][\"chip region\"] == \"reservoir\":\n if id_ctl_res and id_ctl_res in mm.title:\n idlist.append([\"res ctl\", mm])\n elif id_trt_res and id_trt_res in mm.title:\n idlist.append([\"res trt\", mm])\n elif id_ctl_res == \"\":\n idlist.append([\"res ctl\", mm])\n elif id_trt_res == \"\":\n idlist.append([\"res trt\", mm])\n else:\n idlist.append([\"none\", mm])\n else:\n if id_ctl and id_ctl in mm.title:\n idlist.append([\"ctl\", mm])\n elif id_trt and id_trt in mm.title:\n idlist.append([\"trt\", mm])\n elif id_ctl == \"\":\n idlist.append([\"ctl\", mm])\n elif id_trt == \"\":\n idlist.append([\"trt\", mm])\n else:\n idlist.append([\"none\", mm])\n\n # extract and rename treatment\n treatment = [tt for (tt, mm) in idlist]\n treatment = [tt.replace(\"res\", \"Reservoir\") for tt in treatment]\n treatment = [tt.replace(\"ctl\", \"Control\") for tt in treatment]\n treatment = [tt.replace(\"trt\", \"Treatment\") for tt in treatment]\n treatment = [tt.replace(\"none\", \"None\") for tt in treatment]\n\n assert len(treatment) == len(analysis)\n # identify timeunit via similarity analysis\n ctl_str = [mm.title if tt == \"ctl\" else \"\" for (tt, mm) in idlist]\n ctl_r_str = [mm.title if tt == \"res ctl\" else \"\" for (tt, mm) in idlist]\n trt_str = [mm.title if tt == \"trt\" else \"\" for (tt, mm) in idlist ]\n trt_r_str = [mm.title if tt == \"res trt\" else \"\" for (tt, mm) in idlist]\n matchids = match_similar_strings(ctl_str, trt_str, ctl_r_str, trt_r_str)\n timeunit = np.zeros(len(analysis))\n for ii, match in enumerate(matchids):\n timeunit[match[0]] = ii+1\n timeunit[match[1]] = ii+1\n if id_ctl_res or id_trt_res:\n timeunit[match[2]] = ii+1\n timeunit[match[3]] = ii+1\n\n # Set all non-paired treatments to \"None\"\n for ii, tu in enumerate(timeunit):\n if tu == 0:\n treatment[ii] = \"None\"\n return treatment, timeunit\n\n\ndef match_similar_strings(a, b, c, d):\n \"\"\"Similarity analysis to identify 
string-matches in four lists\n\n    Given four lists of strings a, b, c, and d. Find the\n    strings that match best using similarity analysis and return\n    the matching list IDs with highest similarity first. Empty\n    strings are ignored.\n\n    For instance, the lists\n\n        a = [\"peter\", \"hans\", \"\", \"golf\"]\n        b = [\"gogo\", \"ham\", \"freddy\", \"\"]\n        c = [\"red\", \"gans\", \"\", \"hugo\"]\n        d = [\"old\", \"futur\", \"erst\", \"ha\"]\n\n    will return the following match IDs:\n\n        [1, 1, 1, 3]\n        [3, 0, 3, 0]\n        [0, 2, 0, 2]\n\n    which means that these words are similar:\n\n        [\"hans\", \"ham\", \"gans\", \"ha\"]\n        [\"golf\", \"gogo\", \"hugo\", \"old\"]\n        [\"peter\", \"freddy\", \"red\", \"erst\"]\n    \"\"\"\n    ratio = lambda x, y: difflib.SequenceMatcher(a=x, b=y).ratio()\n    n = len(a)\n    assert len(a) == len(b) == len(c) == len(d)\n    # build up similarity matrix\n    smat = np.zeros((n, n, n, n))\n    for ii in range(n):\n        for jj in range(n):\n            if a[ii] and b[jj]:\n                ratij = ratio(a[ii], b[jj])\n            else:\n                ratij = 0\n            for kk in range(n):\n                if a[ii] and c[kk]:\n                    ratik = ratio(a[ii], c[kk])\n                else:\n                    ratik = 0\n                for ll in range(n):\n                    if a[ii] and d[ll]:\n                        ratil = ratio(a[ii], d[ll])\n                    else:\n                        ratil = 0\n                    smat[ii, jj, kk, ll] = ratij + ratik + ratil\n    # match with maxima\n    matchids = []\n    for _ in range(n):\n        if np.max(smat) == 0:\n            break\n        ai, aj, ak, al = np.argwhere(smat==smat.max())[0]\n        matchids.append([ai, aj, ak, al])\n        smat[ai, :, :, :] = 0\n        smat[:, aj, :, :] = 0\n        smat[:, :, ak, :] = 0\n        smat[:, :, :, al] = 0\n    return matchids\n\n\ndef diffdef(y, yR, bs_iter=DEFAULT_BS_ITER, rs=117):\n    \"\"\"\n    Computes bootstrapped median distributions of same size\n    for two distributions of different size.\n\n    Parameters\n    ----------\n    y: 1d ndarray of length N\n        Channel data\n    yR: 1d ndarray of length M\n        Reservoir data\n    bs_iter: int\n        Number of bootstrapping iterations to perform\n    rs: int\n        Random state seed for random number generator\n\n    Returns\n    -------\n    median: nd array of shape (bs_iter, 1)\n        Bootstrap distribution of medians of y\n    median_r: nd array of shape (bs_iter, 1)\n        Bootstrap distribution of medians of yR\n    \"\"\"\n    # Convert to arrays\n    y = np.array(y)\n    yR = np.array(yR)\n    # Seed random numbers that are reproducible on different machines\n    prng_object = np.random.RandomState(rs)\n    # Initialize median arrays\n    Median = np.zeros([bs_iter, 1])\n    MedianR = np.zeros([bs_iter, 1])\n    # If this loop is still too slow, we could get rid of it and\n    # do everything with arrays. Depends on whether we will\n    # eventually run into memory problems with array sizes\n    # of y*bs_iter and yR*bs_iter.\n
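    # A vectorized sketch of the same resampling (an assumption that a\n    # (bs_iter, len(y)) index array fits in memory; untested here):\n    #     idx = prng_object.randint(0, len(y), (bs_iter, len(y)))\n    #     Median = np.median(y[idx], axis=1, keepdims=True)\n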
    for q in range(bs_iter):\n        # Channel data:\n        # Compute random indices and draw from y\n        draw_y_idx = prng_object.randint(0, len(y), len(y))\n        y_resample = y[draw_y_idx]\n        Median[q, 0] = np.median(y_resample)\n        # Reservoir data\n        # Compute random indices and draw from yR\n        draw_yR_idx = prng_object.randint(0, len(yR), len(yR))\n        yR_resample = yR[draw_yR_idx]\n        MedianR[q, 0] = np.median(yR_resample)\n    return [Median, MedianR]\n\n\ndef linmixmod(xs, treatment, timeunit, model='lmm', RCMD=cran.rcmd):\n    '''\n    Linear Mixed-Effects Model computation for one fixed effect and one\n    random effect.\n    This function uses the R packages \"lme4\" and \"stats\".\n\n    The response variable is modeled using two linear mixed effect models\n    (Model and Nullmodel) of the form:\n    - xs~treatment+(1+treatment|timeunit)\n      (Random intercept + random slope model)\n    - xs~(1+treatment|timeunit)\n      (Nullmodel without the fixed effect \"treatment\")\n\n    Both models are compared in R using \"anova\" (from the R-package \"stats\")\n    which performs a likelihood ratio test to obtain the p-Value for the\n    significance of the fixed effect (treatment).\n\n    Optionally, differential deformations are computed, which are then used\n    in the Linear Mixed Model.\n\n    Parameters\n    ----------\n    xs: list of multiple 1D ndarrays\n        Each index of `xs` contains an array of response variables.\n        (e.g. list containing \"area_um\" data of several measurements)\n    treatment: list\n        Each item is a description/identifier for a treatment. The\n        enumeration matches the index of `xs`.\n        treatment[i] can be 'Control', 'Treatment', 'Reservoir Control' or\n        'Reservoir Treatment'. If 'Reservoir ...' is chosen, the algorithm\n        will apply a bootstrapping procedure that removes the median from each\n        Channel measurement. That means that for each 'Control' or 'Treatment'\n        a corresponding 'Reservoir ...' measurement has to exist. The resulting\n        differential deformations are then used in the Linear Mixed Model.\n        Values of 'None' are excluded from the analysis.\n    timeunit: list\n        Each item is a description/identifier for a time. The\n        enumeration matches the index of `xs`.\n        (e.g. list containing integers \"1\" and \"2\" according to the day\n        at which the content in `xs` was measured)\n        Values of '0' are excluded from the analysis.\n    model: string\n        'lmm': A linear mixed model will be applied\n        'glmm': A generalized linear mixed model will be applied\n\n    Returns\n    -------\n    (Generalized) Linear Mixed Effects Model Result: dictionary\n    The dictionary contains:\n    -Estimate: the average value of cells that had Treatment 1\n    -Fixed Effect: Change of the estimate value due to the Treatment 2\n    -Std Error for the Estimate\n    -Std Error for the Fixed Effect\n    -p-Value\n\n    References\n    ----------\n    .. [1] R package \"lme4\":\n        Bates D, Maechler M, Bolker B and Walker S (2015). lme4: Linear mixed-\n        effects models using Eigen and S4. R package version 1.1-9,\n        https://CRAN.R-project.org/package=lme4.\n\n    .. [2] R function \"anova\" from package \"stats\":\n        Chambers, J. M. and Hastie, T. J. 
(1992) Statistical Models in S, \n Wadsworth & Brooks/Cole\n\n Examples\n -------\n import numpy as np\n import pyper\n from nptdms import TdmsFile\n import os\n\n xs = [\n [100,99,80,120,140,150,100,100,110,111,140,145], #Larger values (Channel1)\n [20,10,5,16,14,22,27,26,5,10,11,8,15,17,20,9], #Smaller values (Reservoir1)\n [115,110,90,110,145,155,110,120,115,120,120,150,100,90,100], #Larger values (Channel2)\n [30,30,15,26,24,32,37,36,15,20,21,18,25,27,30,19], #Smaller values (Reservoir2)\n [150,150,130,170,190,250,150,150,160,161,180,195,130,120,125,130,125],\n [2,1,5,6,4,2,7,6,5,10,1,8,5,7,2,9,11,8,13],\n [155,155,135,175,195,255,155,155,165,165,185, 200,135,125,130,135,140,150,135,140],\n [25,15,19,26,44,42,35,20,15,10,11,28,35,10,25,13]] \n treatment1 = ['Control', 'Reservoir Control', 'Control', 'Reservoir Control',\\\n 'Treatment', 'Reservoir Treatment','Treatment', 'Reservoir Treatment']\n timeunit1 = [1, 1, 2, 2, 1, 1, 2, 2]\n\n #Example 1: linear mixed models on differential deformations\n Result_1 = linmixmod(xs=xs,treatment=treatment1,timeunit=timeunit1,model='lmm')\n\n #Result_1:Estimate=93.69375 (i.e. the average Control value is 93.69)\n # FixedEffect=43.93 (i.e. The treatment leads to an increase) \n # p-Value(Likelihood Ratio Test)=0.0006026 (i.e. the increase is significant)\n\n #Example 2: Ordinary Linear mixed models\n #'Reservoir' measurements are now Controls\n #'Channel' measurements are Treatments\n #This does not use differential deformation in linmixmod()\n treatment2 = ['Treatment', 'Control', 'Treatment', 'Control',\\\n 'Treatment', 'Control','Treatment', 'Control']\n timeunit2 = [1, 1, 2, 2, 3, 3, 4, 4]\n Result_2 = linmixmod(xs=xs,treatment=treatment2,timeunit=timeunit2,model='lmm')\n\n #Result_2:Estimate=17.17 (i.e. the average Control value is 17.17 )\n # FixedEffect=120.257 (i.e. The treatment leads to an increase) \n # p-Value(Likelihood Ratio Test)=0.00033 (i.e. the deformation\n # increases significantly)\n\n #Example 3: Generalized Linear mixed models\n treatment3 = ['Treatment', 'Control', 'Treatment', 'Control',\\\n 'Treatment', 'Control','Treatment', 'Control']\n timeunit3 = [1, 1, 2, 2, 3, 3, 4, 4] \n Result_3 = linmixmod(xs=xs,treatment=treatment3,timeunit=timeunit3,model='glmm')\n\n #Result_3:Estimate=2.71 (i.e. the average Control value is exp(2.71)=15.08)\n # FixedEffect=2.19 (i.e. The treatment leads to an increase) \n # p-Value(Likelihood Ratio Test)=0.00366 (i.e. the deformation\n # increases significantly) \n '''\n\n modelfunc = \"xs~treatment+(1+treatment|timeunit)\"\n nullmodelfunc = \"xs~(1+treatment|timeunit)\"\n\n # Check if all input lists have the same length\n if len(xs) != len(treatment) or len(xs) != len(timeunit):\n msg = \"`treatment` and `timeunit` not defined for all variables!\"\n raise ValueError(msg)\n \n if len(xs) < 3:\n msg = \"Linear Mixed Models require repeated measurements. 
\" +\\\n \"Please select more treatment repetitions.\"\n raise ValueError(msg)\n\n # Check that names are valid\n for trt in treatment:\n if trt not in [\"None\",\n \"Control\",\n \"Reservoir Control\",\n \"Treatment\",\n \"Reservoir Treatment\"]:\n raise ValueError(\"Unknown treatment: '{}'\".format(trt))\n\n # Remove \"None\"s and \"0\"s\n treatment = np.array(treatment)\n timeunit = np.array(timeunit)\n xs = np.array(xs)\n invalid = np.logical_or(treatment == \"None\", timeunit == 0)\n treatment = list(treatment[~invalid])\n timeunit = list(timeunit[~invalid])\n xs = [xi for ii, xi in enumerate(xs) if ~invalid[ii]]\n\n # convert to ndarray\n xs = [np.array(xi, dtype=float) for xi in xs]\n\n # remove nan/inf values\n xs = [xi[~np.logical_or(np.isnan(xi), np.isinf(xi))] for xi in xs]\n\n ######################Differential Deformation#############################\n # If the user selected 'Control-Reservoir' and/or 'Treatment-Reservoir'\n Median_DiffDef = []\n TimeUnit, Treatment = [], []\n if 'Reservoir Control' in treatment or 'Reservoir Treatment' in treatment:\n if model == 'glmm':\n Head_string = \"GENERALIZED LINEAR MIXED MODEL ON BOOTSTAP-DISTRIBUTIONS: \\n\" +\\\n \"---Results are in log space (loglink was used)--- \\n\"\n if model == 'lmm':\n Head_string = \"LINEAR MIXED MODEL ON BOOTSTAP-DISTRIBUTIONS: \\n\"\n # Find the timeunits for Control\n where_contr_ch = np.where(np.array(treatment) == 'Control')\n timeunit_contr_ch = np.array(timeunit)[where_contr_ch]\n # Find the timeunits for Treatment\n where_treat_ch = np.where(np.array(treatment) == 'Treatment')\n timeunit_treat_ch = np.array(timeunit)[where_treat_ch]\n\n for n in np.unique(timeunit_contr_ch):\n where_time = np.where(np.array(timeunit) == n)\n xs_n = np.array(xs)[where_time]\n treatment_n = np.array(treatment)[where_time]\n where_contr_ch = np.where(np.array(treatment_n) == 'Control')\n xs_n_contr_ch = xs_n[where_contr_ch]\n where_contr_res = np.where(\n np.array(treatment_n) == 'Reservoir Control')\n xs_n_contr_res = xs_n[where_contr_res]\n\n # check that corresponding Controls are selected\n if (len(where_contr_ch[0]) != 1 or\n len(where_contr_res[0]) != 1):\n msg = \"Controls for channel and reservoir must be given\" \\\n +\" exactly once (repetition {})!\".format(n)\n raise ValueError(msg)\n\n # Apply the Bootstraping algorithm to Controls\n y = np.array(xs_n_contr_ch)[0]\n yR = np.array(xs_n_contr_res)[0]\n [Median, MedianR] = diffdef(y, yR)\n Median_DiffDef.append(Median - MedianR)\n # TimeUnit is a number for the day or the number of the repeat\n TimeUnit.extend(np.array(n).repeat(len(Median)))\n Treatment.extend(np.array(['Control']).repeat(len(Median)))\n\n for n in np.unique(timeunit_treat_ch):\n where_time = np.where(np.array(timeunit) == n)\n xs_n = np.array(xs)[where_time]\n treatment_n = np.array(treatment)[where_time]\n xs_n_contr_res = xs_n[where_contr_res]\n where_treat_ch = np.where(np.array(treatment_n) == 'Treatment')\n xs_n_treat_ch = xs_n[where_treat_ch]\n where_treat_res = np.where(\n np.array(treatment_n) == 'Reservoir Treatment')\n xs_n_treat_res = xs_n[where_treat_res]\n\n # check that corresponding Treatments are selected\n if (len(where_treat_ch[0]) != 1 or\n len(where_treat_res[0]) != 1):\n msg = \"Treatments for channel and reservoir must be given\" \\\n +\" exactly once (repetition {})!\".format(n)\n raise ValueError(msg)\n\n # Apply the Bootstraping algorithm to Treatments\n y = np.array(xs_n_treat_ch)[0]\n yR = np.array(xs_n_treat_res)[0]\n [Median, MedianR] = diffdef(y, yR)\n 
            Median_DiffDef.append(Median - MedianR)\n            # TimeUnit is a number for the day or the number of the repeat\n            TimeUnit.extend(np.array(n).repeat(len(Median)))\n            Treatment.extend(np.array(['Treatment']).repeat(len(Median)))\n\n        # Concat all elements in the lists\n        xs = np.concatenate(Median_DiffDef)\n        xs = np.array(xs).ravel()\n        treatment = np.array(Treatment)\n        timeunit = np.array(TimeUnit)\n\n    else:  # If there is no 'Reservoir Channel' selected, don't apply bootstrapping\n        if model == 'glmm':\n            Head_string = \"GENERALIZED LINEAR MIXED MODEL: \\n\" +\\\n                \"---Results are in log space (loglink was used)--- \\n\"\n        if model == 'lmm':\n            Head_string = \"LINEAR MIXED MODEL: \\n\"\n\n        for i in range(len(xs)):\n            # Expand every unit in treatment and timeunit to the same length as\n            # the xs[i] they are supposed to describe\n            # Using the \"repeat\" function, characters can be handled as well\n            treatment[i] = np.array([treatment[i]]).repeat(len(xs[i]), axis=0)\n            timeunit[i] = np.array([timeunit[i]]).repeat(len(xs[i]), axis=0)\n\n        # Concat all elements in the lists\n        xs = np.concatenate(xs)\n        treatment = np.concatenate(treatment)\n        timeunit = np.concatenate(timeunit)\n\n    # Open a pyper instance\n    r1 = pyper.R(RCMD=RCMD)\n    # try to fix unicode decode errors by forcing english\n    r1('Sys.setenv(LANG = \"en\")')\n    r1.assign(\"xs\", xs)\n    # Transfer the vectors to R\n    r1.assign(\"treatment\", treatment)\n    r1.assign(\"timeunit\", timeunit)\n    # Create a dataframe which contains all the data\n    r1(\"RTDC=data.frame(xs,treatment,timeunit)\")\n    # Load the necessary library for Linear Mixed Models\n    lme4resp = r1(\"library(lme4)\").decode(\"utf-8\")\n    if lme4resp.count(\"Error\"):\n        # Tell the user that something went wrong\n        raise OSError(\"R installation at {}: {}\\n\".format(RCMD, lme4resp) +\n                      \"\"\"Please install 'lme4' via:\n            {} -e \"install.packages('lme4', repos='http://cran.r-project.org')\n            \"\"\".format(RCMD)\n                      )\n\n    # Random intercept and random slope model\n    if model == 'glmm':\n        r1(\"Model = glmer(\" + modelfunc + \",RTDC,family=Gamma(link='log'))\")\n        r1(\"NullModel = glmer(\" + nullmodelfunc + \",RTDC,family=Gamma(link='log'))\")\n    if model == 'lmm':\n        r1(\"Model = lmer(\" + modelfunc + \",RTDC)\")\n        r1(\"NullModel = lmer(\" + nullmodelfunc + \",RTDC)\")\n\n    r1(\"Anova = anova(Model,NullModel)\")\n    Model_string = r1(\"summary(Model)\").decode(\"utf-8\").split(\"\\n\", 1)[1]\n    Anova_string = r1(\"Anova\").decode(\"utf-8\").split(\"\\n\", 1)[1]\n    Coef_string = r1(\"coef(Model)\").decode(\"utf-8\").split(\"\\n\", 2)[2]\n    # Cleanup output\n    Coef_string = Coef_string.replace('attr(,\"class\")\\n', '')\n    Coef_string = Coef_string.replace('[1] \"coef.mer\"\\n', '')\n    #\"anova\" from R does a likelihood ratio test which gives a p-Value\n    p = np.array(r1.get(\"Anova$Pr[2]\"))\n\n    # Obtain p-Value using a normal approximation\n    # Extract coefficients\n    r1(\"coefs <- data.frame(coef(summary(Model)))\")\n    r1(\"coefs$p.normal=2*(1-pnorm(abs(coefs$t.value)))\")\n\n
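    # Normal approximation used above: p = 2 * (1 - Phi(|t|)), with Phi the\n    # standard normal CDF; treating t as normal is a large-sample assumption.\n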
    # Convert to array, depending on platform or R version, this is a DataFrame\n    # or a numpy array, so we convert it to an array. Because on Windows the\n    # result is an array with subarrays of type np.void, we must access the\n    # elements with Coeffs[0][0] instead of Coeffs[0,0].\n    Coeffs = np.array(r1.get(\"coefs\"))\n    # The Average value of treatment 1\n    Estimate = Coeffs[0][0]\n    # The Std Error of the average value of treatment 1\n    StdErrorEstimate = Coeffs[0][1]\n    # treatment 2 leads to a change of the Estimate by the value \"FixedEffect\"\n    FixedEffect = Coeffs[1][0]\n    StdErrorFixEffect = Coeffs[1][1]\n\n    # Before getting effect and error for y, transform back (a log\n    # transformation was applied in glmer via the log link)\n    estim_y = np.exp(Estimate)\n    #estim_y_error = abs(np.exp(Estimate+StdErrorEstimate)-np.exp(Estimate-StdErrorEstimate))\n    fixef_y = np.exp(Estimate + FixedEffect) - np.exp(Estimate)\n    #fixef_y_error = abs(np.exp(Estimate+StdErrorFixEffect)-np.exp(Estimate-StdErrorFixEffect))\n\n    full_summary = Head_string + Model_string +\\\n        \"\\nCOEFFICIENT TABLE:\\n\" + Coef_string +\\\n        \"\\nLIKELIHOOD RATIO TEST (MODEL VS. NULLMODEL): \\n\" +\\\n        Anova_string\n\n    if model == \"glmm\":\n        full_summary += \"\\nESTIMATE AND EFFECT TRANSFORMED BACK FROM LOGSPACE\" +\\\n            \"\\nEstimate = \\t\" + str(estim_y) +\\\n            \"\\nFixed effect = \\t\" + str(fixef_y)\n\n    results = {\"Full Summary\": full_summary,\n               \"p-Value (Likelihood Ratio Test)\": p,\n               \"Estimate\": Estimate,\n               \"Std. Error (Estimate)\": StdErrorEstimate,\n               \"Fixed Effect\": FixedEffect,\n               \"Std. Error (Fixed Effect)\": StdErrorFixEffect}\n    return results\n","repo_name":"ZELLMECHANIK-DRESDEN/ShapeOut","sub_path":"shapeout/lin_mix_mod.py","file_name":"lin_mix_mod.py","file_ext":"py","file_size_in_byte":22461,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"90"}
+{"seq_id":"14004295694","text":"import requests\nimport os\n\n\ndef get_jwt_token(settings):\n granttype = 'client_credentials'\n token_service_url= settings['token_service_url'][:-1]\n client_id = settings['clientid'][:-1]\n client_secret = settings['clientsecret']\n token_response = requests.get(url=token_service_url, params={'grant_type': granttype},\n auth=(client_id, client_secret))\n if token_response.status_code != 200:\n print(token_response.status_code, token_response.text)\n exit(-1)\n jwt = token_response.json()['access_token']\n return jwt\n\n\ndef test_http(host, port, path, token, proxyHostname):\n r = requests.get(f\"http://{host}:{port}{path}\",\n headers={'SAP-CP-Connectivity-Service-Token': token},\n proxies={'http': proxyHostname})\n return r\n\n\nif __name__ == '__main__':\n jwt_settings = {\n \"clientid\": os.getenv(\"CP_CLIENT_ID\", \"Not set\"),\n \"clientsecret\": os.getenv(\"CP_CLIENT_SECRET\", \"Not set\"),\n \"token_service_url\": os.getenv(\"CP_TOKEN_SERVICE_URL\", \"Not set\")\n }\n host = os.getenv(\"SERVICE_HOST\", \"virtual-service-exposed-by-cloud-connector\") #to be replaced by cloud connector hostname\n port = os.getenv(\"SERVICE_PORT\", 5050) #to be replaced by cloud connector port\n proxyHostname = os.getenv(\"CONNECTIVITY_PROXY_HOST\", \"http://connectivity-proxy.connectivity-proxy.svc.cluster.local:20003\") #to be replaced by connectivity proxy hostname\n\n try:\n print(f\"Attempting to establish connection for client {jwt_settings['clientid']}\")\n print(\"### Testing Connection ###\")\n req = test_http(host, port, \"/aicore\", get_jwt_token(jwt_settings), proxyHostname)\n print(\"Connection Established\")\n except Exception as e:\n print(e)\n assert False\n","repo_name":"SAP-samples/ai-core-samples","sub_path":"06_Cloud_Connector_Demo/images/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"90"}
+{"seq_id":"1923134628","text":"import os\nimport json\n\n\ndef load_user():\n p = os.path.join(\"resources\", \"player_save.json\")\n try:\n with open(p, \"r\") as f:\n d = json.load(f)\n except IOError:\n d = {\"mileage earned\": 0,\n \"mileage\": 0,\n \"miles travelled\": 0,\n \"total_time\": 0,\n \"best trip\": 0,\n \"crashes\": 0,\n \"unlocked\": [],\n \"achievements\": []}\n return User(d)\n\n\nclass User(object):\n def __init__(self, user_dict):\n self.mileage_earned = user_dict[\"mileage earned\"]\n self.mileage = user_dict[\"mileage\"]\n self.best_trip = user_dict[\"best trip\"]\n self.crashes = user_dict[\"crashes\"]\n self.unlocked = user_dict[\"unlocked\"]\n self.achievements = user_dict[\"achievements\"]\n\n def to_dict(self):\n d = {\"mileage earned\": self.mileage_earned,\n \"mileage\": self.mileage,\n \"best trip\": self.best_trip,\n \"crashes\": self.crashes,\n \"unlocked\": self.unlocked,\n \"achievements\": self.achievements}\n return d\n\n def save(self):\n with open(os.path.join(\"resources\", \"player_save.json\"), \"w\") as f:\n json.dump(self.to_dict(), f)\n","repo_name":"iminurnamez/outta-my-lane","sub_path":"data/components/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"2773685652","text":"import sys\nsys.dont_write_bytecode = True #Prevents creation of .pyc files\nimport discord\nfrom discord.ext import commands\nimport asyncio\nimport aiosqlite\nimport math\n\nclass EloComputations():\n def __init__(self, bot):\n pass\n\n async def winProb(self, eloA, eloB):\n return 1 / (1 + math.pow(10, (eloB - eloA) / 400))\n\n async def computeElo(self, eloA, expectedA, outcome, k = 32):\n return eloA + k * (outcome - expectedA)\n\nclass Elo(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n async def getElo(self, user):\n async with aiosqlite.connect('elo.db') as db:\n cursor = await db.execute(\"SELECT elo FROM elo WHERE user_id = ?\", (user.id,))\n elo = await cursor.fetchone()\n\n if elo is None:\n await db.execute(\"INSERT INTO elo VALUES (?, ?, ?, ?)\", (user.id, 1000, 0, 0))\n await db.commit()\n return 1000\n\n else:\n return elo[0]\n\n async def updateStats(self, personA, personB, outcome):\n eloA = await self.getElo(personA)\n eloB = await self.getElo(personB)\n\n expectedA = await EloComputations(self).winProb(eloA, eloB)\n expectedB = await EloComputations(self).winProb(eloB, eloA)\n\n eloA = await EloComputations(self).computeElo(eloA, expectedA, outcome)\n eloB = await EloComputations(self).computeElo(eloB, expectedB, 1 - outcome)\n \n async with aiosqlite.connect('elo.db') as db:\n await db.execute(\"UPDATE elo SET elo = ? WHERE user_id = ?\", (eloA, personA.id))\n await db.execute(\"UPDATE elo SET elo = ? WHERE user_id = ?\", (eloB, personB.id))\n \n if outcome == 1:\n await db.execute(\"UPDATE elo SET wins = wins + 1 WHERE user_id = ?\", (personA.id,))\n await db.execute(\"UPDATE elo SET losses = losses + 1 WHERE user_id = ?\", (personB.id,))\n\n else:\n await db.execute(\"UPDATE elo SET wins = wins + 1 WHERE user_id = ?\", (personB.id,))\n await db.execute(\"UPDATE elo SET losses = losses + 1 WHERE user_id = ?\", (personA.id,))\n\n await db.commit()\n\n async def getStats(self, user):\n async with aiosqlite.connect('elo.db') as db:\n cursor = await db.execute(\"SELECT wins, losses FROM elo WHERE user_id = ?\", (user.id,))\n stats = await cursor.fetchone()\n\n if stats is None:\n await db.execute(\"INSERT INTO elo VALUES (?, ?, ?, ?)\", (user.id, 1000, 0, 0))\n await db.commit()\n return 0, 0\n\n else:\n return stats[0], stats[1] \n\n @commands.command()\n async def elo(self, ctx, member: discord.Member = None):\n if member is None:\n member = ctx.author\n\n elo = await self.getElo(member)\n \n eloEmbed = discord.Embed(color = discord.Color.blue())\n eloEmbed.set_author(name = f\"{member.name}'s Elo\", icon_url = member.avatar_url)\n eloEmbed.add_field(name = \"Elo\", value = f\"{elo}\")\n await ctx.send(embed = eloEmbed)\n\n @commands.command()\n async def compare(self, ctx, memberA: discord.Member, memberB: discord.Member = None):\n if memberB is None:\n memberB = ctx.author\n\n eloA = await self.getElo(memberA)\n eloB = await self.getElo(memberB)\n winProb = await EloComputations(self).winProb(eloA, eloB)\n\n compareEmbed = discord.Embed(title = f\"{memberB.name} vs {memberA.name}\", color = discord.Color.blue())\n compareEmbed.add_field(name = \"Elo\", value = f\"{eloB} vs {eloA}\")\n compareEmbed.add_field(name = \"Win Probability\", value = f\"{round(winProb, 2) * 100}%\")\n await ctx.send(embed = compareEmbed)\n\n @commands.command()\n async def stats(self, ctx, member: discord.Member = None):\n if member is None:\n member = ctx.author\n\n wins, losses = await self.getStats(member)\n\n statsEmbed = discord.Embed(color 
= discord.Color.blue())\n statsEmbed.set_author(name = f\"{member.name}'s stats\", icon_url = member.avatar_url)\n statsEmbed.add_field(name = \"Wins\", value = wins)\n statsEmbed.add_field(name = \"Losses\", value = losses)\n\n try:\n statsEmbed.add_field(name = \"Winrate\", value = f\"{round(wins / (wins + losses), 2) * 100}%\")\n\n except ZeroDivisionError:\n statsEmbed.add_field(name = \"Winrate\", value = \"0%\")\n\n await ctx.send(embed = statsEmbed)\n\ndef setup(bot):\n bot.add_cog(Elo(bot))","repo_name":"joshua-noel/personalDiscordBot","sub_path":"cogs/elo.py","file_name":"elo.py","file_ext":"py","file_size_in_byte":4554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"38332609968","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 2 16:48:53 2018\n\n@author: jdbul\n\"\"\"\n\n\"\"\"\n\n\n\nRUN CALL_DATA_ANALYSIS.PY FIRST! THE OBJECTS IT CREATES ARE NEEDED HERE!\n\n\n\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport datetime\n\n#%%\n\"\"\"\nGet percentages for topics of departments\n\"\"\"\n\ntopics.info()\n# rename\ntopics.rename(columns={'KB_Article' : 'Dept'}, inplace=True)\n\n# get total counts by dept using existing count col\ndepartment_dist_df = topics.groupby(['Dept'], as_index=False)['Count'].count()\n\n# Get percentage of total for each dept\n\ndepartment_dist_df['Percentage'] = department_dist_df.Count.apply(lambda x: x/np.sum(department_dist_df['Count']))\n\n# Sort and append average duration by dept\n\ntemp_dur = topics.groupby(['Dept'], as_index=False)['Seconds'].mean()\ndepartment_dist_df = pd.merge(department_dist_df, temp_dur, how='inner', on='Dept')\n\ntemp_std = topics.groupby(['Dept'], as_index=True)['Seconds'].std()\n\ndepartment_dist_df = pd.merge(department_dist_df, pd.DataFrame(temp_std), left_on='Dept', right_index=True, how='inner')\ndepartment_dist_df.set_index('Dept', inplace=True)\ndepartment_dist_df.drop(columns = ['Count'], inplace=True)\ndepartment_dist_df.rename(columns = {'Seconds_x' : 'MeanSeconds', 'Seconds_y':'StdDev'}, inplace=True)\n\n#department_dist_df.to_csv('Data/Duration_distributions.csv')\n#%%\n\"\"\"\nget std deviation and append\n\nThis can be done more simply above, this was to confirm wonky results\n\"\"\"\nsd_dict = {}\n# 'Dept' is the index after set_index above, so read the keys from the index\nkey_list = department_dist_df.index.unique()\n\nfor i in key_list:\n sd_dict[i] = []\n\nfor i in range(len(topics)):\n sd_dict[topics.Dept[i]].append(topics.Seconds[i])\n \ndep_sd = {}\n\nfor i in key_list:\n dep_sd[i] = np.std(sd_dict[i])\n\n\n# this dataframe now has the data on what percent of topics have what durations; we need to condense\n\n#%%\n\"\"\"\nGet call arrival distribution based on data\nUse daily data to extrapolate arrival rate, abandonment time avg (for customer patience) by day of the week\nThis can then be combined with topic duration data\n\"\"\"\n\narrival_df = daily_data[['CallsPresented', 'Day_of_Week']]\n\nday_dict = dict(zip([2,3,4,5,6],['Monday','Tuesday','Wednesday','Thursday','Friday']))\n\n# drop an anomaly of low volume day, this was the first day of operation\narrival_df = arrival_df.drop(arrival_df.index[0])\n\n#add column with categorical days\nday_list = []\nfor i in range(len(arrival_df)):\n t = day_dict[arrival_df.Day_of_Week[i]]\n day_list.append(t)\n\narrival_df['Weekday'] = day_list\narrival_df.sort_values(['Day_of_Week'], ascending=True, inplace=True)\narrival_df.rename(columns = {'CallsPresented':'Daily Call Arrivals'}, inplace=True)\n#%%\n\"\"\"\nbox plot\n\"\"\"\nplt.figure()\nsns.set_style('whitegrid')\n_ = sns.boxplot(x='Weekday', y='Daily Call Arrivals', data=arrival_df)\n_.set_title('Call Volume Distribution by Day of Week, 2013-2015')\nsns.despine(left=True)\n\n\n#%%\n\"\"\"\nAbandonment rate\n\"\"\"\n\nabandon_df = daily_data[['CallsPresented', 'Max_Abandon_Per_Day', 'Day_of_Week']]\n\n#drop that pesky row here\nabandon_df = abandon_df.drop(abandon_df.index[0])\n\n#add column with categorical days using daydict from above\nday_list = []\nfor i in range(len(abandon_df)):\n t = day_dict[abandon_df.Day_of_Week[i]]\n day_list.append(t)\n\nabandon_df['Weekday'] = day_list\nabandon_df.sort_values(['Day_of_Week'], ascending=True, 
inplace=True)\nabandon_df.rename(columns = {'CallsPresented':'Daily Call Arrivals'}, inplace=True)\nabandon_df['Percent of Calls Abandoned'] = (abandon_df['Max_Abandon_Per_Day']/abandon_df['Daily Call Arrivals'])*100\n\n#%%\n\"\"\"\nboxplot\n\"\"\"\nplt.figure()\nsns.set_style('whitegrid')\np = sns.boxplot(y='Percent of Calls Abandoned', x='Weekday', data=abandon_df)\np.set_title('Percent of Daily Calls Abandonment by Day of Week, 2013-2015')\nsns.despine(left=True)\nvals = p.get_yticks()\np.set_yticklabels(['{:.0%}'.format(x/100) for x in vals])\n\n#%%\n\"\"\"\nGet means by day for arrivals and abandons and compare with newer data to see \nif levels are similar.\n\"\"\"\n\nsummary_df = abandon_df\n\nsummary_df['Calls Answered'] = summary_df['Daily Call Arrivals'] - summary_df['Max_Abandon_Per_Day']\n\nmeans_by_day = summary_df.groupby('Weekday', as_index=False).mean()\nmeans_by_day.sort_values(['Day_of_Week'], ascending=True, inplace=True)\n\n#%%\n\"\"\"\nBring in new data (article data, parse by date to get day of week)\n\"\"\"\n\n\nnew_daily = article_data\nnew_daily['Date'] = article_data['Created On']\nnew_daily['Date'] = pd.to_datetime(new_daily['Date'], format='%m/%d/%Y %H:%M')\nnew_daily['Date2'] = new_daily['Date'].apply(lambda x: datetime.datetime.date(x))\nnew_daily['Day_of_Week'] = new_daily['Date'].apply(lambda x: datetime.datetime.weekday(x)+2)\nnew_daily['CountNew'] = 1\n\n#%%\n\"\"\"\nGroup new data by mean calls handled by day of week\n\"\"\"\n\nnew_daily_count = new_daily.groupby(['Date2'], as_index=False)['CountNew'].count()\nnew_daily_day = new_daily.groupby(['Date2'], as_index=False)['Day_of_Week'].mean()\n\nnew_daily = new_daily_day.merge(new_daily_count, how='inner', on='Date2')\n\nday_list = []\nfor i in range(len(new_daily)):\n t = day_dict[new_daily.Day_of_Week[i]]\n day_list.append(t)\n\nnew_daily['Weekday'] = day_list\n\nnew_daily_summary = new_daily.groupby(['Weekday'], as_index=False).mean()\n\nnew_daily_summary.sort_values(['Day_of_Week'], ascending=True, inplace=True)\nnew_daily_summary = new_daily_summary.drop(columns = ['Day_of_Week'])\n\n#%%\n\"\"\"\nCompare these calls handled to old calls handled, to see if scaling up is necessary\nAssuming abandoned percentages hold true\n\"\"\"\n\nmeans_by_day.info()\nnew_daily_summary.info()\n\ncall_amount_combined = means_by_day.merge(new_daily_summary, how='inner', on='Weekday')\n\ncall_amount_combined['Ratio_OldoverNew'] = call_amount_combined['Calls Answered']/call_amount_combined['CountNew']\n\ncall_amount_combined['Arrivals'] = call_amount_combined['CountNew']/(1-(call_amount_combined['Percent of Calls Abandoned']/100))\n\n\n#this is daily totals for simulation\ncalls_by_day_for_simulation = call_amount_combined[['Weekday', 'Arrivals', 'Percent of Calls Abandoned']]\n#calls_by_day_for_simulation.to_csv(\"Data/calls_by_day_for_simulation.csv\")\n#%%\n\"\"\"\nTry to get an hourly breakdown of calls by weekday. 
This will be looked at as\na percentage so that it can scale correctly\n\"\"\"\n\n\ndaily_hour = article_data\ndaily_hour['Date'] = article_data['Created On']\ndaily_hour['Date'] = pd.to_datetime(daily_hour['Date'], format='%m/%d/%Y %H:%M')\ndaily_hour['Count'] = 1\ndaily_hour['Hour of the Day'] = daily_hour['Date'].dt.hour\ndaily_hour['Date2'] = daily_hour['Date'].apply(lambda x: datetime.datetime.date(x))\ndaily_hour = daily_hour.groupby(['Date2', 'Hour of the Day'], as_index=False)['Count'].count()\ndaily_hour['Day_of_Week'] = daily_hour['Date2'].apply(lambda x: datetime.datetime.weekday(x)+2)\n\n\nday_list = []\nfor i in range(len(daily_hour)):\n t = day_dict[daily_hour.Day_of_Week[i]]\n day_list.append(t)\n\ndaily_hour['Weekday'] = day_list\n\ndaily_hourly_summary_calls = daily_hour.groupby(['Weekday', \"Hour of the Day\"], as_index=False)[['Day_of_Week','Count']].mean()\ndaily_hourly_summary_calls.sort_values(['Day_of_Week', 'Hour of the Day'], ascending=True, inplace=True)\n\n#%%\n\"\"\"\nCombine hourly calls handled to total est. calls to get percentage for total hourly arrivals\n\"\"\"\n\ntotals_by_day_hourly = daily_hourly_summary_calls.groupby('Weekday', as_index=True)['Count'].sum()\n\ndaily_hourly_summary_calls['Proportion'] = 0\nfor i in range(len(daily_hourly_summary_calls)):\n daily_hourly_summary_calls['Proportion'].iloc[i] = daily_hourly_summary_calls['Count'].iloc[i] / totals_by_day_hourly.loc[daily_hourly_summary_calls.Weekday.iloc[i]]\n\nfinal_hourly_dist = daily_hourly_summary_calls\n#%%\n \ntotal_dict = {}\n\nfor i in range(len(calls_by_day_for_simulation)):\n total_dict[calls_by_day_for_simulation['Weekday'][i]] = calls_by_day_for_simulation['Arrivals'][i]\n\nfinal_hourly_dist['Arrivals'] = np.nan\nfor i in range(len(final_hourly_dist)):\n final_hourly_dist['Arrivals'].iloc[i] = final_hourly_dist['Proportion'].iloc[i] * total_dict[final_hourly_dist['Weekday'].iloc[i]]\n\nhourly_arrival_dist_for_simulation = final_hourly_dist.drop(columns=['Day_of_Week', 'Count', 'Proportion'])\n\nhourly_arrival_dist_for_simulation.set_index(['Weekday', 'Hour of the Day'], inplace=True)\n\n#hourly_arrival_dist_for_simulation.to_csv('Data/hourly_arrival_dist_for_simulation.csv')\n\n\n\n\n\n#%%\n","repo_name":"jdbul33/bug-free-octo-umbrella","sub_path":"Distribution_Calculations.py","file_name":"Distribution_Calculations.py","file_ext":"py","file_size_in_byte":8360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"21555408143","text":"import dataclasses\nfrom pathlib import Path\nfrom unittest import TestCase\n\nfrom django_csv.model_csv.csv.dclass import DataClassCsv\nfrom django_csv.model_csv.readers import CsvReader\n\n\n@dataclasses.dataclass(slots=True)\nclass Publisher:\n name: str\n headquarter: str\n\n\n@dataclasses.dataclass(slots=True)\nclass Book:\n title: str\n price: int\n publisher: Publisher\n is_on_sale: bool\n description: str\n\n def __str__(self) -> str:\n return self.title\n\n @property\n def name(self) -> str:\n return f\"{self.title} ({self.publisher.name})\"\n\n\nclass PublisherCsv(DataClassCsv):\n class Meta:\n dclass = Publisher\n fields = \"__all__\"\n\n def column_country(self, instance: Publisher, **kwargs) -> str:\n return instance.headquarter.split(\",\")[1].strip()\n\n def column_city(self, instance: Publisher, **kwargs) -> str:\n return instance.headquarter.split(\",\")[0].strip()\n\n def field_headquarter(self, values: dict, **kwargs) -> str:\n city = values[\"city\"].strip()\n country = values[\"country\"].strip()\n return city + \", \" + country\n\n\nclass BookWithPublisherCsv(DataClassCsv):\n pbl = PublisherCsv.as_part(related_name=\"publisher\")\n\n pbl_name = pbl.AttributeColumn(header=\"Publisher\", attr_name=\"name\")\n pbl_country = pbl.MethodColumn(\n header=\"Country\", method_suffix=\"country\", value_name=\"country\"\n )\n pbl_city = pbl.MethodColumn(header=\"City\", method_suffix=\"city\", value_name=\"city\")\n\n class Meta:\n dclass = Book\n fields = \"__all__\"\n auto_assign = True\n\n headers = {\n \"is_on_sale\": \"is on sale\",\n }\n\n\nclass PartTest(TestCase):\n def test_headers(self):\n self.assertListEqual(\n BookWithPublisherCsv._meta.get_headers(for_read=True),\n [\n \"title\",\n \"price\",\n \"is on sale\",\n \"description\",\n \"Publisher\",\n \"Country\",\n \"City\",\n ],\n )\n\n def test_dataclass_for_read(self):\n with (Path(__file__).parent / \"test_data\" / \"book.csv\").open() as f:\n reader = CsvReader(file=f)\n mcsv = BookWithPublisherCsv.for_read(\n table=reader.get_table(table_starts_from=1)\n )\n if not mcsv.is_valid():\n self.fail(f\"Validation Error: {mcsv.errors}\")\n instances = list(mcsv.get_instances())\n self.assertEqual(len(instances), 50)\n\n for i, instance in enumerate(instances, 1):\n with self.subTest(f\"row = {i}\"):\n self.assertIsInstance(instance, Book)\n self.assertIsInstance(instance.publisher, Publisher)\n\n def test_dataclass_for_write(self):\n publishers = [\n Publisher(\n name=f\"Publisher {i}\",\n headquarter=f\"City {i}, Country {i}\",\n )\n for i in range(10)\n ]\n\n books = [\n Book(\n title=f\"Book {i}\",\n price=i * 100,\n publisher=publishers[i % 10],\n is_on_sale=i % 2 == 0,\n description=f\"Description {i}\",\n )\n for i in range(50)\n ]\n\n mcsv = BookWithPublisherCsv.for_write(instances=books)\n body = mcsv.get_table(header=False)\n\n for i, row in enumerate(body):\n with self.subTest(f\"row = {i}\"):\n self.assertListEqual(\n row,\n [\n f\"Book {i}\",\n str(i * 100),\n \"yes\" if i % 2 == 0 else \"no\",\n f\"Description {i}\",\n f\"Publisher {i % 10}\",\n f\"Country {i % 10}\",\n f\"City {i % 10}\",\n ],\n )\n","repo_name":"rikunosuke/django-csv","sub_path":"tests/test_dclass/test_as_part.py","file_name":"test_as_part.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"70683290216","text":"import discord, config, os, aiohttp\nfrom discord.ext import commands\nfrom discord_slash import SlashCommand, SlashContext\n\nos.environ[\"JISHAKU_NO_UNDERSCORE\"] = \"True\"\nos.environ[\"JISHAKU_NO_DM_TRACEBACK\"] = \"True\"\nos.environ[\"JISHAKU_HIDE\"] = \"True\"\n\nintents = discord.Intents.default()\nintents.members = True\nintents.presences = True\nbot = commands.Bot(command_prefix=config.prefix, intents=intents)\nslash = SlashCommand(bot, sync_commands=True, override_type=True)\nbot.load_extension(\"jishaku\")\n\n@bot.check\nasync def bot_check(ctx):\n if not ctx.guild:\n if ctx.command.name not in [\"help\", \"invite\"]:\n emb = discord.Embed(description=\"This command doesn't work in DM.\", colour=discord.Colour.red())\n try:\n await ctx.reply(embed=emb, mention_author=False)\n except:\n await ctx.send(embed=emb)\n return False\n return True\n\n@bot.event\nasync def on_ready():\n bot.session = aiohttp.ClientSession()\n print(\"ready as\", bot.user)\n\nfor filename in os.listdir(\"./cogs\"):\n if filename.endswith(\".py\"):\n bot.load_extension(f\"cogs.{filename[:-3]}\")\n\nbot.run(config.token)\n","repo_name":"ssebastianoo/SpotifyLyrics","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
+{"seq_id":"70202245097","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions.categorical import Categorical\nfrom torch.distributions.multivariate_normal import MultivariateNormal\nfrom UTIL.tensor_ops import my_view\n\nclass MultiHeadAttention(nn.Module):\n # taken from https://github.com/wouterkool/attention-tsp/blob/master/graph_encoder.py\n def __init__(\n self,\n n_heads,\n input_dim,\n embed_dim=None,\n val_dim=None,\n key_dim=None\n ):\n super(MultiHeadAttention, self).__init__()\n\n if val_dim is None:\n assert embed_dim is not None, \"Provide either embed_dim or val_dim\"\n val_dim = embed_dim // n_heads\n if key_dim is None:\n key_dim = val_dim\n\n self.n_heads = n_heads\n self.input_dim = input_dim\n self.embed_dim = embed_dim\n self.val_dim = val_dim\n self.key_dim = key_dim\n\n self.norm_factor = 1 / math.sqrt(key_dim) # See Attention is all you need\n\n self.W_query = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))\n self.W_key = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))\n self.W_val = nn.Parameter(torch.Tensor(n_heads, input_dim, val_dim))\n\n if embed_dim is not None:\n self.W_out = nn.Parameter(torch.Tensor(n_heads, key_dim, embed_dim))\n\n self.init_parameters()\n\n def init_parameters(self):\n for param in self.parameters():\n stdv = 1. / math.sqrt(param.size(-1))\n param.data.uniform_(-stdv, stdv)\n\n def forward(self, q, k=None, v=None, mask=None, return_attn=False, return_attn_weight=False):\n if q.dim()<=3: \n out = self.forward_(q, k, v, mask, return_attn, return_attn_weight)\n if return_attn:\n out, attn = out\n assert attn.shape[0]==1\n attn = attn.squeeze(0)\n return out, attn\n return out\n\n\n hyper_dim = q.shape[:-2]\n q = my_view(q, [-1, *q.shape[-2:]])\n if k is not None: \n k = my_view(k, [-1, *k.shape[-2:]])\n if v is not None: \n v = my_view(v, [-1, *v.shape[-2:]])\n if mask is not None: mask = my_view(mask, [-1, *mask.shape[-2:]])\n out = self.forward_(q, k, v, mask, return_attn, return_attn_weight)\n if return_attn:\n out, attn = out\n if hyper_dim is not None:\n out = out.view(*hyper_dim, *out.shape[-2:])\n attn = attn.view(*hyper_dim, *attn.shape[-2:]) #??\n return out, attn\n else:\n if hyper_dim is not None:\n out = out.view(*hyper_dim, *q.shape[-2:])\n return out\n\n def forward_(self, q, k=None, v=None, mask=None, return_attn=False, return_attn_weight=False):\n \"\"\"\n :param q: queries (batch_size, n_query, input_dim)\n :param k: data (batch_size, n_key/graph_size, input_dim)\n :param mask: mask (batch_size, n_query, graph_size) or viewable as that (i.e. can be 2 dim if n_query == 1)\n Mask should contain 1 if attention is not possible (i.e. 
mask is negative adjacency)\n :return:\n \"\"\"\n if k is None:\n k = q # compute self-attention\n if v is None:\n v = k\n # k should be (batch_size, graph_size, input_dim)\n batch_size, graph_size, input_dim = k.size()\n n_query = q.size(1)\n assert q.size(0) == batch_size\n assert q.size(2) == input_dim\n assert input_dim == self.input_dim, \"Wrong embedding dimension of input\"\n\n kflat = k.contiguous().view(-1, input_dim)\n qflat = q.contiguous().view(-1, input_dim)\n vflat = v.contiguous().view(-1, input_dim)\n\n # last dimension can be different for keys and values\n shp = (self.n_heads, batch_size, graph_size, -1)\n shp_q = (self.n_heads, batch_size, n_query, -1)\n\n # Calculate queries, (n_heads, n_query, graph_size, key/val_size)\n Q = torch.matmul(qflat, self.W_query).view(shp_q)\n # Calculate keys and values (n_heads, batch_size, graph_size, key/val_size)\n K = torch.matmul(kflat, self.W_key).view(shp)\n V = torch.matmul(vflat, self.W_val).view(shp)\n\n # Calculate compatibility (n_heads, batch_size, n_query, graph_size)\n compatibility = self.norm_factor * torch.matmul(Q, K.transpose(2, 3))\n if return_attn_weight:\n assert self.n_heads == 1\n if mask is not None:\n mask = mask.view(1, batch_size, n_query, graph_size).expand_as(compatibility)\n compatibility[mask.bool()] = -math.inf\n return compatibility.squeeze(0)\n\n # Optionally apply mask to prevent attention\n if mask is not None: # expand to n_heads\n mask = mask.view(1, batch_size, n_query, graph_size).expand_as(compatibility)\n compatibility[mask.bool()] = -math.inf\n\n attn = F.softmax(compatibility, dim=-1)\n\n # If there are nodes with no neighbours then softmax returns nan so we fix them to 0\n if mask is not None:\n attnc = attn.clone()\n attnc[mask.bool()] = 0\n attn = attnc\n\n # To avoid the 0 * nan = nan problem here, the input must convert NaNs in V to 0\n heads = torch.matmul(attn, V)\n\n out = torch.mm(\n heads.permute(1, 2, 0, 3).contiguous().view(-1, self.n_heads * self.val_dim),\n self.W_out.view(-1, self.embed_dim)\n ).view(batch_size, n_query, self.embed_dim)\n\n if return_attn:\n return out, attn\n return out\n\n\n\n\nclass SimpleAttention(nn.Module):\n def __init__(self, h_dim):\n super().__init__()\n self.W_query = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.W_key = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.W_val = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.init_parameters()\n\n def init_parameters(self):\n for param in self.parameters():\n stdv = 1. / math.sqrt(param.size(-1))\n param.data.uniform_(-stdv, stdv)\n\n def forward(self, k, q, v, mask=None):\n Q = torch.matmul(q, self.W_query) \n K = torch.matmul(k, self.W_key) \n V = torch.matmul(v, self.W_val)\n\n norm_factor = 1 / math.sqrt(Q.shape[-1])\n compat = norm_factor * torch.matmul(Q, K.transpose(-1, -2)) \n if mask is not None: compat[mask.bool()] = -math.inf\n # To avoid the 0 * nan = nan problem here, the input must convert NaNs in V to 0\n score = torch.nan_to_num(F.softmax(compat, dim=-1), 0)\n return torch.matmul(score, V) \n","repo_name":"binary-husky/unreal-map","sub_path":"PythonExample/hmp_minimal_modules/ALGORITHM/common/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":6687,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"90"}
+{"seq_id":"21599058931","text":"\"\"\"\nsupport for reading/writing files in STS format\n\"\"\"\n\nfrom codecs import open\nfrom os.path import join, normpath\nimport numpy as np\nfrom numpy.lib.recfunctions import merge_arrays \n\n\n# find root of Git repos\nrepos_dir = normpath(join(__file__, \"../../../..\"))\n\n# directory containing original STS files\ndata_dir = join(repos_dir, \"data\")\n\n\n\n# default for maximum size of sentences in chars \n_max_sent_size = 1024\n\n_from_utf8 = lambda s: s.decode(\"utf-8\")\n_converters = {0:_from_utf8, 1:_from_utf8}\n\n\ndef read_system_input(filename, max_sent_size=_max_sent_size):\n # unfortunately merge_array doesn't work with \"object\" as data type,\n # so use fixed size unicode string\n s_type = \"U{}\".format(max_sent_size) \n return np.loadtxt(filename,\n dtype=[(\"s1\", s_type),(\"s2\", s_type)],\n # Default comment char \"#\" occurs in text.\n # seems impossible to switch off comments,\n # so use some wacky control char (bell)\n comments=\"\\a\",\n delimiter=\"\\t\",\n converters=_converters) \n \n \ndef read_system_output(filename, with_confidence=False):\n if with_confidence:\n return np.loadtxt(filename, dtype=[(\"output\",\"f\"), (\"confidence\",\"f\")])\n else:\n return np.loadtxt(filename, usecols=(0,), dtype=[(\"output\",\"f\")])\n\n\ndef read_gold_standard(filename):\n return np.loadtxt(filename, dtype=[(\"gold\",\"f\")])\n\n\ndef read(input_fname, gold_fname, output_fname=None, with_confidence=False,\n max_sent_size=_max_sent_size):\n inp = read_system_input(input_fname, max_sent_size=max_sent_size)\n gold = read_gold_standard(gold_fname)\n if output_fname:\n out = read_system_output(output_fname, with_confidence)\n return merge_arrays((inp, gold, out), flatten=True)\n else:\n return merge_arrays((inp, gold), flatten=True)\n \n \ndef write_scores(filename, scores, confidence=None):\n outf = open(filename, \"w\")\n \n if not confidence:\n confidence = np.ones(scores.shape[0])\n \n for s, c in zip(scores, confidence):\n outf.write(\"{:f}\\t{:f}\\n\".format(s, c)) \n \n outf.close()\n \n \ndef id2filenames(dir, type, ids):\n \"\"\"\n Create mapping from STS identifiers to corresponding filenames.\n \n Parameters\n ----------\n dir: str\n directory containing files\n type: str\n \"input\" or \"gs\"\n ids: list of str\n identifiers\n \n Returns\n -------\n dict\n dictionary that maps STS ids to corresponding filenames\n \n Note\n ----\n Does not check if the files really exist\n \n Example\n -------\n >>> id2filenames(\"data/STS2012-test\", \"input\", [\"MSRpar\", \"MSRvid\"])\n {'MSRpar': 'data/STS2012-test/STS.input.MSRpar.txt', \n 'MSRvid': 'data/STS2012-test/STS.input.MSRvid.txt'}\n \n \"\"\"\n return {id: join(dir, \"STS.{}.{}.txt\".format(type, id)) \n for id in ids} \n \n\n ","repo_name":"STS-NTNU/STS13","sub_path":"lib/python/sts/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"90"}
+{"seq_id":"70304625256","text":"from torch.utils import data\nfrom torchvision import transforms as T\nfrom torchvision.datasets import ImageFolder\nfrom PIL import Image\nimport torch\nimport os\nimport random\nimport glob\nimport shutil\nimport mat73\nimport numpy as np\n\nestimation = False\n\nclass CACD(data.Dataset):\n \"\"\"Dataset class for the CACD dataset\"\"\"\n \n def __init__(self, image_dir, attr_path, age_group, age_group_mode, additional_dataset, transform, mode ):\n \"\"\"Initialize and preprocess the CACD dataset / num of data: 163446 / age max: 62, min: 14 \"\"\"\n \n self.image_dir = image_dir\n self.attr_path = attr_path\n self.age_group_mode = age_group_mode\n if self.age_group_mode == 0: \n self.age_group = age_group\n elif self.age_group_mode == 1:\n self.age_group = 5\n elif self.age_group_mode == 2:\n self.age_group = 4\n else:\n self.age_group = age_group\n self.transform = transform\n self.mode = mode\n self.additional_dataset = additional_dataset\n self.train_dataset = []\n self.test_dataset = []\n self.preprocess()\n\n if mode == 'train':\n self.num_images = len(self.train_dataset)\n else:\n self.num_images = len(self.test_dataset)\n \n def preprocess(self):\n \"\"\"Preprocess the CACD dataset\"\"\"\n\n attr = mat73.loadmat(self.attr_path)\n print(\"CACD Attribute Loaded\")\n image_list = os.listdir(self.image_dir)\n idx = np.arange(len(image_list))\n \n max_age = attr.celebrityImageData.age.max()\n min_age = attr.celebrityImageData.age.min()\n dist_age = ((max_age - min_age)/self.age_group)\n \n random.seed(1234)\n random.shuffle(idx)\n count = 0\n for i in idx:\n count += 1\n #print(i)\n filename = attr.celebrityImageData.name[i][0] #'23_Katie_Findlay_0013.jpg'\n age = attr.celebrityImageData.age[i] # 23.0\n # labels = []\n label = []\n label_age = min_age\n img_age_group = 0\n if self.age_group_mode == 0:\n for j in range(self.age_group):\n if (age >= min_age + j* (dist_age) and age < min_age + (j+1)*(dist_age)):\n label.append(1)\n img_age_group = j\n\n else:\n if max_age == (min_age + (j+1)*(dist_age)):\n if age == max_age:\n label.append(1)\n img_age_group = j\n else:\n label.append(0)\n else:\n label.append(0)\n elif self.age_group_mode == 1: \n for j in range(self.age_group):\n if age <= 14:\n label = [1, 0, 0, 0, 0]\n elif (age > 14) and (age <= 25):\n label = [0, 1, 0, 0, 0]\n elif (age > 25) and (age <= 40):\n label = [0, 0, 1, 0, 0]\n elif (age > 40) and (age <= 60):\n label = [0, 0, 0, 1, 0]\n elif (age > 60):\n label = [0, 0, 0, 0, 1]\n \n elif self.age_group_mode == 2: \n for j in range(self.age_group):\n age_label = [age - min_age]\n if (age >= 14) and (age < 26):\n img_age_group = 0\n label = [0]\n elif (age >= 26) and (age < 38):\n img_age_group = 1\n label = [1]\n elif (age >= 38) and (age < 50):\n img_age_group = 2\n label = [2]\n elif (age >= 50) and (age <= 62):\n img_age_group = 3\n label = [3]\n \n else:\n label = [0] * self.age_group\n age_idx = int(age - min_age)\n label[int(age_idx)] = 1\n \n if self.age_group_mode != 2: \n if len(label) != self.age_group:\n print(filename)\n print(age)\n print(\"----------------------error ----------------\")\n # labels.append(label)\n \n if not os.path.exists('data/CACD'):\n os.makedirs('data/CACD')\n if not os.path.exists('data/CACD/test'):\n os.makedirs('data/CACD/test')\n\n src_dir = self.image_dir\n dst_dir = 'data/CACD/test'\n\n for k in range(self.age_group):\n dir_name = 'age_group{}'.format(k)\n if not os.path.exists(os.path.join(dst_dir, dir_name)):\n os.makedirs(os.path.join(dst_dir, 
dir_name))\n\n if count < 1601:\n if estimation == True:\n self.test_dataset.append([filename, age_label, label])\n else:\n jpgfile = os.path.join(src_dir, filename) \n self.test_dataset.append([jpgfile, label]) \n \n dst_dir = os.path.join(dst_dir, 'age_group{}'.format(img_age_group))\n if not os.path.exists(os.path.join(dst_dir, filename)):\n shutil.copy(jpgfile, dst_dir) \n\n else:\n jpgfile = os.path.join(src_dir, filename)\n if estimation == True:\n self.train_dataset.append([jpgfile, age_label, label])\n else: \n self.train_dataset.append([jpgfile, label])\n\n if self.additional_dataset:\n utk_dir = '../UTKFace'\n fgnet_dir = '../FGNET/images'\n\n utk_list = os.listdir(utk_dir)\n fgnet_dir = os.listdir(fgnet_dir)\n\n utk_len = len(utk_list)\n fgnet_len = len(fgnet_dir)\n\n utk_idx = np.arange(utk_len)\n fgnet_idx = np.arange(fgnet_len)\n\n random.seed(1234)\n random.shuffle(utk_idx)\n random.shuffle(fgnet_idx)\n\n for i in utk_idx:\n filename = utk_list[i]\n jpgfile = os.path.join(utk_dir, filename)\n age = int(filename.split('_')[0])\n age_label = [age-min_age]\n # skip images outside the supported age range instead of reusing a stale label\n if (age < 14) or (age > 62):\n continue\n\n elif (age >= 14) and (age < 26):\n label = [0]\n elif (age >= 26) and (age < 38):\n label = [1]\n elif (age >= 38 ) and (age < 50):\n label = [2]\n elif (age >= 50) and (age <= 62):\n label = [3]\n \n \n if estimation == True:\n self.train_dataset.append([jpgfile, age_label, label])\n else: \n self.train_dataset.append([jpgfile, label])\n \n print(\"UTKFace dataset loaded\")\n\n\n print(\"test dataset length: \", len(self.test_dataset))\n print(\"train dataset length: \", len(self.train_dataset))\n\n def __getitem__(self, index):\n \"\"\"Return one image and its corresponding attribute label\"\"\"\n dataset = self.train_dataset if self.mode == 'train' else self.test_dataset\n if estimation ==True:\n jpgfile, age_label, label = dataset[index]\n else:\n jpgfile, label = dataset[index]\n\n image = Image.open(os.path.join(jpgfile))\n if self.additional_dataset != True:\n return jpgfile, self.transform(image), torch.FloatTensor(label)\n else:\n if image.size == (250, 250):\n if estimation == True: \n return jpgfile, self.transform(image), torch.FloatTensor(age_label), torch.FloatTensor(label)\n else: \n return jpgfile, self.transform(image), torch.FloatTensor(label)\n else:\n transform0 = []\n transform0.append(T.RandomHorizontalFlip())\n transform0.append(T.Resize(128))\n transform0.append(T.ToTensor())\n transform0.append(T.Normalize(mean = [0.5, 0.5, 0.5], std = [0.5, 0.5, 0.5])) # mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)\n transform0 = T.Compose(transform0)\n if estimation == True:\n return jpgfile, transform0(image), torch.FloatTensor(age_label), torch.FloatTensor(label)\n else:\n return jpgfile, transform0(image), torch.FloatTensor(label)\n\n def __len__(self):\n \"\"\"Return the number of images\"\"\"\n return self.num_images\n\n\n\ndef get_loader2(image_dir, attr_path, age_group, age_group_mode, crop_size = 160, image_size = 128, batch_size = 16, dataset = 'CACD', additional_dataset =True, mode = 'train', num_workers=1):\n \"\"\"Build and return a data loader\"\"\"\n\n transform = []\n if mode == 'train':\n transform.append(T.RandomHorizontalFlip())\n transform.append(T.CenterCrop(crop_size))\n transform.append(T.Resize(image_size))\n transform.append(T.ToTensor())\n transform.append(T.Normalize(mean = [0.5, 0.5, 0.5], std = [0.5, 0.5, 0.5])) # mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)\n transform = T.Compose(transform)\n\n if dataset == 'CACD':\n dataset = CACD(image_dir, 
attr_path, age_group,age_group_mode, additional_dataset, transform, mode)\n \n if mode == 'train':\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=(mode=='train'),\n num_workers=num_workers)\n else:\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=(mode=='train'),\n num_workers=num_workers)\n\n return data_loader","repo_name":"HAN-oQo/Aging_StarGAN","sub_path":"CACD_loader.py","file_name":"CACD_loader.py","file_ext":"py","file_size_in_byte":10185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18441625699","text":"from functools import lru_cache\nfrom bisect import bisect\nfrom sys import stdin\ninput = stdin.readline\nA, B, Q = map(int, input().split())\nS = [int(input()) for _ in range(A)]\nT = [int(input()) for _ in range(B)]\n\n@lru_cache(maxsize=None)\ndef func(f, x):\n L = S if f else T\n i = bisect(L, x)\n if i == 0:\n return abs(x - L[0])\n elif i == len(L):\n return abs(x - L[-1])\n else:\n return min(abs(x - L[i]), abs(x - L[i - 1]))\n\nfor x in [int(input()) for _ in range(Q)]:\n ans = float(\"inf\")\n i = bisect(S, x)\n if i != 0:\n ans = min(ans, func(0, S[i - 1]) + x - S[i - 1])\n if i != A:\n ans = min(ans, func(0, S[i]) + S[i] - x)\n i = bisect(T, x)\n if i != 0:\n ans = min(ans, func(1, T[i - 1]) + x - T[i - 1])\n if i != B:\n ans = min(ans, func(1, T[i]) + T[i] - x)\n print(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03112/s633761493.py","file_name":"s633761493.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"4128732801","text":"from datetime import datetime\nfrom datetime import timedelta\n\n\n# This class is used to create the distance graph and an address list\n# All information pertaining to distances between addresses is handled in this class\nclass Distance:\n def __init__(self, distance_data, address_list):\n self.distance_data = distance_data\n self.address_list = address_list\n\n # Two addresses are given and the indexes of the addresses in the address list are found\n # and then those indexes are used in the distance data graph to find the corresponding distance.\n # The distance is found no matter the order of the addresses given\n # Time Complexity: O(N) because the find index function takes O(N) at worst case\n # Space Complexity: O(1) because only variables are declared\n # Return: Returns the float value when found in the distance data graph\n def distance_between(self, address1, address2):\n if address1 == '3575 W Valley Central Station bus Loop':\n address1 = '3575 W Valley Central Sta bus Loop'\n if address2 == '3575 W Valley Central Station bus Loop':\n address2 = '3575 W Valley Central Sta bus Loop'\n\n try:\n if isinstance(address1, int):\n address1_index = address1\n else:\n address1_index = self.find_index(address1)\n if isinstance(address2, int):\n address2_index = address2\n else:\n address2_index = self.find_index(address2)\n\n except ValueError as e:\n print(\"Address not found\")\n return\n\n dist_between1 = self.distance_data[address1_index][address2_index]\n dist_between2 = self.distance_data[address2_index][address1_index]\n\n if dist_between1:\n return float(dist_between1)\n elif dist_between2:\n return float(dist_between2)\n else:\n return 0\n\n # Time Complexity: O(N) because of the .index() function\n # Space Complexity: O(1) because no variables or structures are created\n # Return: Returns the index of the given address in the address list\n def find_index(self, address):\n if address == '3575 W Valley Central Station bus Loop':\n address = '3575 W Valley Central Sta bus Loop'\n return self.address_list.index(address)\n\n # This function is where the Nearest Neighbor algorithm is implemented\n # The function takes in the list of package IDs that must be visited\n # A set of the distinct addresses that must be visited is created\n # A list containing an address and the package IDs that are going to that address\n # is created\n\n # A list of the unvisited addresses is created and looped through to find the shortest\n # distance from the current address which starts at the WGU hub. That address is then\n # removed from the unvisited list and the algorithm loops through until the list is empty\n\n # The mileage from the current address to the next address is calculated and the package's\n # status is updated\n # Time Complexity: O(N^3) at worst because of the while and for loops using the distance between\n # function which also takes O(N) time\n # Space Complexity: O(N^2) because of the 2D list but at worst could be O(N^3)\n # if all packages are going to the same address. This would be a rare case and would basically be O(N^2)\n # if this happened. 
Time complexity would also basically be O(1).\n # Returns a list containing the truck name, time the truck returns to the hub, and the mileage it took\n def min_distance(self, from_address, packages, package_list, start_time, truck_name):\n unvisited = []\n total_dist = 0\n address_set = set()\n timer = start_time\n\n package_address_list = []\n\n u = from_address\n\n # Time Complexity: O(N^2) on average because of the double for loop. At worst case could be\n # O(N^3) because of the 'in' function for the set if hashed inefficiently\n # Space Complexity: O(1)\n for i in package_list:\n address = packages.get_address(i)\n unvisited.append(address)\n if address in address_set:\n for x in package_address_list:\n if address == x[0]:\n x[1].append(i)\n else:\n address_set.add(address)\n package_address_list.append([packages.get_address(i), [i]])\n\n # Time Complexity: At worst would be O(N^3) because of looping through the unvisited list twice\n # and the distance between function takes O(N) time.\n # Space Complexity: O(N^2) at worst because of the time list in the while loop\n while unvisited:\n min_dist = unvisited[0]\n for j in unvisited:\n if j == min_dist:\n continue\n if (self.distance_between(u, j)) <= self.distance_between(u, min_dist):\n min_dist = j\n\n next_address = min_dist\n dist = self.distance_between(u, next_address)\n\n time_list = []\n for i in package_address_list:\n if next_address in i:\n time_list = i[1]\n break\n\n # Time Complexity: O(N) on average because of the for loop but O(N^2)\n # at worst because of the update time function if the hash function\n # is inefficient\n # Space Complexity: O(N) because of the variable in the for loop\n for x in range(len(time_list)):\n if not x:\n\n ot = datetime.strptime(timer, '%I:%M:%S %p')\n ot += timedelta(minutes=(dist / 18 * 60))\n\n old_time = ot.time().strftime('%I:%M:%S %p')\n timer = old_time\n packages.update_time(time_list[x], f'Delivered at {old_time}')\n else:\n ot = datetime.strptime(timer, '%I:%M:%S %p')\n old_time = ot.time().strftime('%I:%M:%S %p')\n timer = old_time\n packages.update_time(time_list[x], f'Delivered at {old_time}')\n total_dist += dist\n u = next_address\n unvisited.remove(min_dist)\n\n total_dist += self.distance_between(from_address, u)\n return [truck_name, timer, total_dist]\n","repo_name":"butalidn/Delivery-Package-Project","sub_path":"distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":6360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"17260901224","text":"import cv2\r\nimport numpy as np\r\n\r\nimage=cv2.imread('cats.jpg')\r\ncv2.imshow('Cats',image)\r\n\r\n# blank=np.zeros(image.shape[:2],dtype='uint8') # image.shape[:2] : same dimensions as the original cat image\r\n# cv2.imshow('Blank',blank)\r\nblank=np.zeros(image.shape,dtype='uint8') # image.shape[:2] : same dimensions as the original cat image\r\n#cv2.imshow('Blank',blank)\r\n\r\ngray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\ncv2.imshow('Gray Cats',gray)\r\n\r\nblur=cv2.GaussianBlur(gray,(5,5),cv2.BORDER_DEFAULT)\r\ncv2.imshow('Blur Cats',blur)\r\n\r\n# ctrl+ö toggles comments quickly\r\nblankCanny=np.zeros(image.shape,dtype='uint8') # image.shape[:2] : same dimensions as the original cat image\r\ncanny=cv2.Canny(image,125,175)\r\ncv2.imshow('Canny Edges Cats',canny)\r\ncontoursCanny,hierarchiesCanny=cv2.findContours(canny,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\r\nprint(f'{len(contoursCanny)} contour(s) found Canny !')\r\ncv2.drawContours(blankCanny,contoursCanny,-1,(0,0,255),1)\r\ncv2.imshow('ContoursCanny Drawn with Red',blankCanny)\r\n\r\nblankThresh=np.zeros(image.shape,dtype='uint8') # image.shape[:2] : same dimensions as the original cat image\r\nret,thresh=cv2.threshold(gray,125,255,cv2.THRESH_BINARY)\r\ncv2.imshow('Threshold',thresh)\r\ncontoursThresh,hierarchies2=cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\r\nprint(f'{len(contoursThresh)} contour(s) found Thresh !')\r\ncv2.drawContours(blankThresh,contoursThresh,-1,(0,0,255),1)\r\ncv2.imshow('ContoursThresh Drawn with Red',blankThresh)\r\n\r\nblankBlur=np.zeros(image.shape,dtype='uint8') # image.shape[:2] : same dimensions as the original cat image\r\ncannyBlur=cv2.Canny(blur,125,175)\r\ncv2.imshow('Canny Blur Edges Cats',cannyBlur)\r\ncontoursBlur,hierarchiesBlur=cv2.findContours(cannyBlur,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\r\nprint(f'{len(contoursBlur)} contour(s) found Canny Blur!')\r\ncv2.drawContours(blankBlur,contoursBlur,-1,(255,0,0),1)\r\ncv2.imshow('ContoursBlur Drawn with Blue',blankBlur)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ncv2.waitKey(0)\r\n","repo_name":"SadettinKayali/OpenCV_EN","sub_path":"EN_opencv1/Part1_Basics_Contours.py","file_name":"Part1_Basics_Contours.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"44790810534","text":"from django.shortcuts import render, redirect, reverse\nfrom django.core.mail import send_mail\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom .forms import ContactForm\n# Create your views here.\n\n\ndef faq(request):\n \"\"\" A view to return the frequently asked questions page \"\"\"\n\n return render(request, 'company/faq.html')\n\n\ndef privatepolicy(request):\n \"\"\" A view to return the privacy policy \"\"\"\n\n return render(request, 'company/private-policy.html')\n\n\ndef about_us(request):\n \"\"\"A view to return info page\"\"\"\n return render(request, 'company/about-us.html')\n\n\ndef contactus(request, *args, **kwargs):\n \"\"\"\n Displays Contact Us page form.\n \"\"\"\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n # no trailing commas here, otherwise each value becomes a one-element tuple\n name = form.cleaned_data['name']\n email = form.cleaned_data['email']\n subject = form.cleaned_data['subject']\n message = form.cleaned_data['message']\n form.save()\n\n # send mail combining field forms\n send_mail(subject, f'{name}, {email}, {message}',\n settings.EMAIL_HOST_USER, [settings.EMAIL_HOST_USER],\n fail_silently=False)\n messages.success(\n request, 'Thank you for contacting us \\\n - we will reply as soon as possible!')\n\n # redirect to home page\n return redirect(reverse('home'))\n else:\n messages.error(\n request, 'Something went wrong with your submission.\\\n Please try again.'\n )\n else:\n form = ContactForm()\n\n return render(request, 'company/contact-us.html', {'form': form})\n","repo_name":"Kierandoolan/The-FitFuel-Depot","sub_path":"company/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"40128015645","text":"# Modify your code to look for lines that contain either of the following artists: Pablo Picasso or Claude Monet and\n# print only those two lines out.\n# Return the two rows in a separate file called output.txt\n# Bonus: Remove the two above lines from Artists.csv and save the new file as Artists2.csv separately.\n\nfrom csv import DictReader\n\nartists = []\nwith open('Artists.csv', 'r') as file:\n csv_dict_reader = DictReader(file)\n for row in csv_dict_reader:\n if row['DisplayName'] == 'Claude Monet' or row['DisplayName'] == 'Pablo Picasso':\n artists.append(row)\nprint(artists)\n# the with block closes the file automatically, so no explicit close() is needed\nwith open('output.txt', 'w') as file1:\n for line in artists:\n file1.write(str(line) + '\\n')\n","repo_name":"DavinderSohal/Python","sub_path":"Activities/Activity_10/artits.py","file_name":"artits.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"74730407977","text":"import matplotlib\r\nmatplotlib.use('PS')\r\nmatplotlib.rcParams['text.usetex']=True\r\nmatplotlib.rcParams['text.latex.unicode']=True\r\nimport pylab as plt\r\nplt.switch_backend('PS')\r\n\r\nstring = r'z=${value}^{upper}_{lower}$'.format(\r\n value='{' + str(0.27) + '}',\r\n upper='{+' + str(0.01) + '}',\r\n lower='{-' + str(0.01) + '}')\r\nprint(string)\r\n\r\nfig = plt.figure(figsize=(3,1))\r\nfig.text(0.1,0.5,string,size=24,va='center')\r\nfrom matplotlib.backends.backend_pdf import PdfPages\r\npp = PdfPages('issue5076.pdf')\r\npp.savefig(fig)\r\npp.close()","repo_name":"alex-gavric/gaussianPlumeModelLibraryTesting","sub_path":"latex_test.py","file_name":"latex_test.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"71347951658","text":"import logging\nimport os\nFILE_PATH = os.path.dirname(os.path.abspath(__file__))\nos.chdir(FILE_PATH)\n\n\nLOGGING_FILE = os.path.join(FILE_PATH,'Run.log')\n\nlogging.basicConfig(filename=LOGGING_FILE, filemode='w', format='%(message)s') # Without Name\nlogger = logging.getLogger('Maverick')\nlogger.setLevel(logging.INFO)\n\n# Log to console\nconsole_handler = logging.StreamHandler()\nformatter = logging.Formatter('%(message)s')\nconsole_handler.setFormatter(formatter)\nlogger.addHandler(console_handler)\nimport time\nimport pandas as pd\n\n\n\nclass InfoHub:\n def __init__(self):\n pass\nINFO_HUB = InfoHub()\n\n\ndef demo_run():\n # Do something\n\n df = pd.DataFrame({'N': INFO_HUB.N }, index=[0])\n\n\n print('Run once.')\n INFO_HUB.N += 1\n time.sleep(1)\n\n\n return df\n\n\n\ndef looper(target_list):\n\n res_list = []\n while INFO_HUB.N < (len(target_list)-1):\n try:\n\n # Automation Start Here\n\n # Run some code\n df = demo_run()\n \n\n # Automation End Here\n\n res_list.append(df)\n except Exception as e:\n logger.error(\"\\n\" + \"#\"*40 + \" Error: looper \" + \"#\" * 40, exc_info=True)\n logger.error(\"#\" * 100 + '\\n')\n\n df = pd.concat(res_list)\n return df\n\n\ndef main():\n\n # Fake code for demo\n target_list = ['example1', 'example2', 'example3']\n\n N = 0\n INFO_HUB.N = N\n\n # Auto restart if error\n df = looper(target_list)\n\n print(df)\n\n\n\n\nif __name__ == '__main__':\n main()\n print('Done!')","repo_name":"GooseHuang/Utils_Auto_Restart","sub_path":"Utils_Auto_Restart.py","file_name":"Utils_Auto_Restart.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"70445945578","text":"import mysql.connector\n\ntry:\n db = mysql.connector.connect(\n host=\"localhost\", user=\"root\", password=\"\", database=\"phpdasar\"\n )\n mycursor = db.cursor()\nexcept Exception as e:\n print(e)\n\ntable = \"bimo\"\n\n\ndef query(query):\n mycursor.execute(query)\n return mycursor.fetchall()\n\n\n# use parameterized queries so user input cannot inject SQL\ndef tambah(nama, kelas, email):\n mycursor.execute(\n \"INSERT INTO `bimo` (`id`, `nama`, `kelas`, `email`) VALUES (NULL, %s, %s, %s)\",\n (nama, kelas, email),\n )\n db.commit()\n\ndef ubah(id, nama, kelas, email):\n mycursor.execute(\n f\"UPDATE {table} SET `id` = %s, `nama` = %s, `kelas` = %s, `email` = %s WHERE `bimo`.`id` = %s\",\n (id, nama, kelas, email, id),\n )\n db.commit()\n\ndef hapus(id):\n mycursor.execute(f\"DELETE FROM {table} WHERE id = %s\", (id,))\n db.commit()\n","repo_name":"yusrilarzaqi/my-project","sub_path":"CRUD_MAHASISWA/functions/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"9812705415","text":"from __future__ import unicode_literals\nimport frappe\n\nfrom frappe.utils import getdate, nowdate\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom frappe.utils import cstr\nfrom datetime import datetime, date, timedelta\n\nclass Attendance(Document):\n\tdef validate_duplicate_record(self):\n\t\tres = frappe.db.sql(\"\"\"select name from `tabAttendance` where employee = %s and attendance_date = %s\n\t\t\tand name != %s and docstatus != 2\"\"\",\n\t\t\t(self.employee, getdate(self.attendance_date), self.name))\n\t\tif res:\n\t\t\tfrappe.throw(_(\"Attendance for employee {0} is already marked\").format(self.employee))\n\n\tdef check_leave_record(self):\n\t\tleave_record = frappe.db.sql(\"\"\"select leave_type, half_day, half_day_date from `tabLeave Application`\n\t\t\twhere employee = %s and %s between from_date and to_date and status = 'Approved'\n\t\t\tand docstatus = 1\"\"\", (self.employee, self.attendance_date), as_dict=True)\n\t\tif leave_record:\n\t\t\tfor d in leave_record:\n\t\t\t\tif d.half_day_date == getdate(self.attendance_date):\n\t\t\t\t\tself.status = 'Half Day'\n\t\t\t\t\tfrappe.msgprint(_(\"Employee {0} on Half day on {1}\").format(self.employee, self.attendance_date))\n\t\t\t\telse:\n\t\t\t\t\tself.status = 'On Leave'\n\t\t\t\t\tself.leave_type = d.leave_type\n\t\t\t\t\tfrappe.msgprint(_(\"Employee {0} is on Leave on {1}\").format(self.employee, self.attendance_date))\n\n\t\tif self.status == \"On Leave\" and not leave_record:\n\t\t\tfrappe.throw(_(\"No leave record found for employee {0} for {1}\").format(self.employee, self.attendance_date))\n\n\tdef validate_attendance_date(self):\n\t\tdate_of_joining = frappe.db.get_value(\"Employee\", self.employee, \"date_of_joining\")\n\n\t\t# leaves can be marked for future dates\n\t\tif self.status != 'On Leave' and not self.leave_application and getdate(self.attendance_date) > getdate(nowdate()):\n\t\t\tfrappe.throw(_(\"Attendance can not be marked for future dates\"))\n\t\telif date_of_joining and getdate(self.attendance_date) < getdate(date_of_joining):\n\t\t\tfrappe.throw(_(\"Attendance date can not be less than employee's joining date\"))\n\n\tdef validate_employee(self):\n\t\temp = frappe.db.sql(\"select name from `tabEmployee` where name = %s and status = 'Active'\",\n\t\t \tself.employee)\n\t\tif not emp:\n\t\t\tfrappe.throw(_(\"Employee {0} is not active or does not exist\").format(self.employee))\n\n\tdef validate(self):\n\t\tfrom erpnext.controllers.status_updater import validate_status\n\t\tvalidate_status(self.status, [\"Present\", \"Absent\", \"On Leave\", \"Half Day\", \"Holiday\"])\n\t\tself.validate_attendance_date()\n\t\tself.validate_duplicate_record()\n\t\tself.check_leave_record()\n\t\tself.calculate_time()\n\n\tdef before_update_after_submit(self):\n\t\tself.calculate_time()\n\n\tdef calculate_time(self):\n\t\tself.late_entry = 0\n\t\tself.early_exit = 0\n\t\tself.over_time = 0\n\t\tself.early_exit_time = None\n\t\tself.late_entry_time = None\n\t\tself.overtime_time = None\n\t\tself.early_exit_mins = 0\n\t\tself.late_entry_mins = 0\n\t\tself.overtime_mins = 0\n\t\tself.total_time = \"00:00:00\"\n\t\tif self.status == \"Present\": self.status = \"Absent\"\n\t\tshift = None\n\t\tif not self.override_shift:\n\t\t\tget_shift = frappe.db.sql(\"select shift from `tabEmployee Shift` where parent = %s and %s between start_date and end_date order by idx\",(self.employee, self.attendance_date))\n\t\t\tif get_shift:\n\t\t\t\tshift = 
get_shift[0][0]\n\t\t\telse:\n\t\t\t\tshift = frappe.db.get_value(\"Employee\", self.employee, \"default_shift\")\n\t\t\tself.shift = shift\n\t\tif not self.shift: frappe.throw(\"Please select shift in employee master of employee {}\".format(str(self.employee)))\n\t\ts = frappe.get_doc(\"Shift Type\", self.shift)\n\t\tholiday_list = frappe.db.get_value(\"Employee\", self.employee, \"holiday_list\")\n\t\tif not holiday_list:\n\t\t\tholiday_list = s.holiday_list\n\t\tself.holiday_list = holiday_list\n\t\tif self.get_holiday() and not self.override_holiday:\n\t\t\tself.status = \"Holiday\"\n\t\tstart_time = s.start_time\n\t\tend_time = s.end_time\n\t\tactual_start_time = s.actual_start_time\n\t\tover_time_allowed = s.pre_approved_overtime\n\t\tif self.time_in and self.time_out and self.time_out != \"00:00:00\" and self.time_in != \"00:00:00\" and self.time_in != \"0:00:00\" and self.time_out != \"0:00:00\":\n\t\t\tif self.status == \"Absent\": self.status = \"Present\"\n\t\t\tdiff = 0\n\t\t\tdurr = 0\n\t\t\ttotal_seconds = 0\n\t\t\ttotal_mins = 0\n\t\t\ttotal_mins_for_half_day = 0\n\t\t\t# time_in = frappe.utils.data.get_datetime(self.time_in)\n\t\t\t# time_out = frappe.utils.data.get_datetime(self.time_out)\n\t\t\tif \".\" in str(self.time_in): self.time_in = str(self.time_in).split(\".\")[0]\n\t\t\tif \".\" in str(self.time_out): self.time_out = str(self.time_out).split(\".\")[0]\n\t\t\tdiff = datetime.strptime(str(self.time_out), \"%H:%M:%S\") - datetime.strptime(str(self.time_in), \"%H:%M:%S\")\n\t\t\tif diff.days < 0:\n\t\t\t\tdiff = timedelta(days=0, seconds=diff.seconds, microseconds=diff.microseconds)\n\t\t\tdurr += diff.total_seconds()\n\t\t\ttotals = durr / 60\n\t\t\ttotal_mins = int(totals)\n\t\t\ttotal_mins_for_half_day = total_mins\n\t\t\tmins_secs = total_mins * 60\n\t\t\ttotal_secs = int(durr - mins_secs)\n\t\t\ttotal_hours = 0\n\t\t\twhile total_mins >= 60:\n\t\t\t\ttotal_hours += 1\n\t\t\t\ttotal_mins -= 60\n\t\t\tself.total_time = str(total_hours).zfill(2)+\":\"+str(total_mins).zfill(2)+\":\"+str(total_secs).zfill(2)\n\t\t\tif self.status != \"Holiday\":\n\t\t\t\tif datetime.strptime(str(self.time_out), \"%H:%M:%S\") < datetime.strptime(str(end_time), \"%H:%M:%S\"):\n\t\t\t\t\tself.early_exit = 1\n\t\t\t\t\tp = datetime.strptime(str(end_time), \"%H:%M:%S\").time()\n\t\t\t\t\tm = datetime.strptime(str(self.time_out), \"%H:%M:%S\").time()\n\t\t\t\t\tself.early_exit_time = datetime.combine(date.today(), p) - datetime.combine(date.today(), m)\n\t\t\t\t\tself.early_exit_mins = get_mins(self.early_exit_time)\n\t\t\t\t\tif self.status == \"Absent\": self.status = \"Present\"\n\t\t\t\telif datetime.strptime(str(self.time_out), \"%H:%M:%S\") > datetime.strptime(str(end_time), \"%H:%M:%S\"):\n\t\t\t\t\ttimeout = self.time_out\n\t\t\t\t\tif datetime.strptime(str(self.time_out), \"%H:%M:%S\") > datetime.strptime(str(over_time_allowed), \"%H:%M:%S\"):\n\t\t\t\t\t\ttimeout = over_time_allowed\n\t\t\t\t\tp = datetime.strptime(str(end_time), \"%H:%M:%S\").time()\n\t\t\t\t\tm = datetime.strptime(str(timeout), \"%H:%M:%S\").time()\n\t\t\t\t\tself.overtime_time = datetime.combine(date.today(), m) - datetime.combine(date.today(), p)\n\t\t\t\t\tconverted_ot = datetime.strptime(str(self.overtime_time), \"%H:%M:%S\")\n\t\t\t\t\tif converted_ot >= datetime.strptime(\"04:00:00\", \"%H:%M:%S\"):\n\t\t\t\t\t\tself.overtime_time = \"04:00:00\"\n\t\t\t\t\telif converted_ot >= datetime.strptime(\"03:00:00\", \"%H:%M:%S\") and converted_ot <= datetime.strptime(\"03:59:59\", 
\"%H:%M:%S\"):\n\t\t\t\t\t\tself.overtime_time = \"03:00:00\"\n\t\t\t\t\telif converted_ot >= datetime.strptime(\"02:00:00\", \"%H:%M:%S\") and converted_ot <= datetime.strptime(\"02:59:59\", \"%H:%M:%S\"):\n\t\t\t\t\t\tself.overtime_time = \"02:00:00\"\n\t\t\t\t\telif converted_ot >= datetime.strptime(\"01:00:00\", \"%H:%M:%S\") and converted_ot <= datetime.strptime(\"01:59:59\", \"%H:%M:%S\"):\n\t\t\t\t\t\tself.overtime_time = \"01:00:00\"\n\t\t\t\t\telif converted_ot >= datetime.strptime(\"00:00:00\", \"%H:%M:%S\") and converted_ot <= datetime.strptime(\"00:59:59\", \"%H:%M:%S\"):\n\t\t\t\t\t\tself.overtime_time = None\n\t\t\t\t\tif self.overtime_time:\n\t\t\t\t\t\tself.over_time = 1\n\t\t\t\t\t\tself.overtime_mins = get_mins(self.overtime_time)\n\t\t\t\t\t\tif self.status == \"Absent\": self.status = \"Present\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.over_time = 0\n\t\t\t\t\t\tself.overtime_mins = 0\n\t\t\t\tif datetime.strptime(str(self.time_in), \"%H:%M:%S\") > datetime.strptime(str(actual_start_time), \"%H:%M:%S\"):\n\t\t\t\t\tself.late_entry = 1\n\t\t\t\t\tp = datetime.strptime(str(self.time_in), \"%H:%M:%S\").time()\n\t\t\t\t\tm = datetime.strptime(str(start_time), \"%H:%M:%S\").time()\n\t\t\t\t\tself.late_entry_time = datetime.combine(date.today(), p) - datetime.combine(date.today(), m)\n\t\t\t\t\tself.late_entry_mins = get_mins(self.late_entry_time)\n\t\t\t\t\t# frappe.throw(str(self.late_entry_mins))\n\t\t\t\t\tif self.status == \"Absent\": self.status = \"Present\"\n\t\telse:\n\t\t\tself.total_time = \"00:00:00\"\n\n\tdef get_holiday(self):\n\t\tif self.holiday_list:\n\t\t\tholiday_list = frappe.get_doc(\"Holiday List\", self.holiday_list)\n\t\t\tholiday_dates = dict((str(h.holiday_date), h) for h in holiday_list.get(\"holidays\"))\n\t\t\tif str(self.attendance_date) in holiday_dates.keys():\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\n@frappe.whitelist()\ndef check():\n\tdiff = datetime.strptime(str(\"10:05:00\"), \"%H:%M:%S\") - datetime.strptime(str(\"05:05:00\"), \"%H:%M:%S\")\n\tdurr = diff.total_seconds()\n\t# p = datetime.strptime(str(\"23:01:00\"), \"%H:%M:%S\").time()\n\t# m = datetime.strptime(str(\"00:01:00\"), \"%H:%M:%S\").time()\n\t# res = datetime.combine(date.today(), p) - datetime.combine(date.today(), m)\n\t# print(res)\n\timport math\n\t# durr is already in seconds, so no millisecond conversion is needed\n\ttotal_hours = math.floor(durr / 60 / 60)\n\tdurr -= total_hours*60*60\n\ttotal_mins = math.floor(durr / 60)\n\tdurr -= total_mins*60\n\ttotal_secs = math.floor(durr)\n\tprint(str(total_hours)+\":\"+str(total_mins)+\":\"+str(total_secs))\n\n@frappe.whitelist()\ndef get_events(start, end, filters=None):\n\tevents = []\n\n\temployee = frappe.db.get_value(\"Employee\", {\"user_id\": frappe.session.user})\n\n\tif not employee:\n\t\treturn events\n\n\tfrom frappe.desk.reportview import get_filters_cond\n\tconditions = get_filters_cond(\"Attendance\", filters, [])\n\tadd_attendance(events, start, end, conditions=conditions)\n\treturn events\n\ndef add_attendance(events, start, end, conditions=None):\n\tquery = \"\"\"select name, attendance_date, status, docstatus\n\t\tfrom `tabAttendance` where\n\t\tattendance_date between %(from_date)s and %(to_date)s\n\t\tand docstatus < 2\"\"\"\n\tif conditions:\n\t\tquery += conditions\n\n\tfor d in frappe.db.sql(query, {\"from_date\":start, \"to_date\":end}, as_dict=True):\n\t\te = {\n\t\t\t\"name\": d.name,\n\t\t\t\"doctype\": \"Attendance\",\n\t\t\t\"date\": d.attendance_date,\n\t\t\t\"title\": cstr(d.status),\n\t\t\t\"docstatus\": d.docstatus\n\t\t}\n\t\tif e not in 
events:\n\t\t\tevents.append(e)\n\ndef mark_absent(employee, attendance_date, shift=None):\n\temployee_doc = frappe.get_doc('Employee', employee)\n\tif not frappe.db.exists('Attendance', {'employee':employee, 'attendance_date':attendance_date, 'docstatus':('!=', '2')}):\n\t\tdoc_dict = {\n\t\t\t'doctype': 'Attendance',\n\t\t\t'employee': employee,\n\t\t\t'attendance_date': attendance_date,\n\t\t\t'status': 'Absent',\n\t\t\t'company': employee_doc.company,\n\t\t\t'shift': shift\n\t\t}\n\t\tattendance = frappe.get_doc(doc_dict).insert()\n\t\tattendance.submit()\n\t\treturn attendance.name\n\ndef get_mins(time):\n\ttime = str(time).split(\":\")\n\tdelta = timedelta(hours=int(time[0]), minutes=int(time[1]), seconds=int(time[2]))\n\ttotal_seconds = delta.total_seconds()\n\t# minutes = int(total_seconds // 60)\n\t# seconds = int(total_seconds % 60)\n\treturn total_seconds\n\ndef update_it():\n\tdata = frappe.db.sql(\"select name from `tabAttendance`\")\n\tcount=0\n\tfor d in data:\n\t\tdoc = frappe.get_doc(\"Attendance\", d[0])\n\t\tdoc.save()\n\t\tcount+=1\n\t\tprint(count)","repo_name":"MuhammadZubair12/erpihsan","sub_path":"erpnext/hr/doctype/attendance/attendance.py","file_name":"attendance.py","file_ext":"py","file_size_in_byte":10558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18905642471","text":"import time\n\n\nclass TreeNode:\n def __init__(self, val, left=None, right=None):\n self.val, self.left, self.right = val, left, right\n\n\nclass BinaryTree:\n # for now, we can directly set the root to an already created tree\n # insertion and deletion can be done on BSTs and not here on BTs\n def __init__(self, root):\n self.root = root\n\n def inorder_recursive(self):\n print('Inorder is: ', end='')\n self.inorder_recursive_helper(self.root)\n print()\n\n def inorder_recursive_helper(self, node):\n if node is not None:\n self.inorder_recursive_helper(node.left)\n print(node.val, end=', ')\n self.inorder_recursive_helper(node.right)\n\n def inorder_iterative(self):\n stack = [[self.root, False]]\n\n while stack:\n if not stack[-1][1]:\n if stack[-1][0].left is not None:\n stack.append([stack[-1][0].left, False])\n else:\n stack[-1][1] = True\n else:\n node = stack[-1][0]\n print(node.val, end=', ')\n stack.pop()\n if stack:\n stack[-1][1] = True\n if node.right is not None:\n stack.append([node.right, False])\n\n print()\n\n # Without extra stack space\n def inorder_iterative_better(self):\n curr = self.root\n stack = []\n\n while curr is not None or stack:\n while curr is not None:\n stack.append(curr)\n curr = curr.left\n\n curr = stack.pop()\n print(curr.val, end=', ')\n curr = curr.right\n\n\nif __name__ == '__main__':\n root_node = TreeNode(10, TreeNode(7, TreeNode(6), TreeNode(8)), TreeNode(12, TreeNode(11), TreeNode(13)))\n\n BinaryTree(root_node).inorder_iterative_better()\n","repo_name":"kaushalpranav/python-ds-training","sub_path":"standard_ds_stash/1007_Tree Traversals.py","file_name":"1007_Tree Traversals.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18455963399","text":"import sys\ninput = sys.stdin.readline\n\nN, K = map(int,input().split())\ntd = [list(map(int, input().split())) for _ in range(N)]\ntd.sort(key=lambda x: (x[0],x[1]),reverse=True)\n\nx= []\ny= []\ny.append(td[0][1])\nfor i in range(1,N):\n if td[i][0] != td[i-1][0]:\n y.append(td[i][1])\n else:\n x.append(td[i][1])\nx.sort(reverse=True)\ny.sort(reverse=True)\nans = 0\nsum_y = 0\nsum_x = []\ntmp = 0\nsum_x.append(0)\nfor i in range(len(x)):\n tmp += x[i]\n sum_x.append(tmp)\n\nfor i in range(1,min(K,len(y))+1):\n sum_y += y[i-1]\n if len(sum_x) < K-i+1: continue\n ans = max(ans, sum_y + sum_x[K-i] + i*i)\n \nprint(ans)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03148/s419509170.py","file_name":"s419509170.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"20044047941","text":"import itertools\n\nnumbers_collection = [\n {'a', 'b', 'c', 'd', 'e', 'f'},\n {'b', 'c'},\n {'a', 'b', 'g', 'e', 'd'},\n {'a', 'b', 'g', 'c', 'd'},\n {'b', 'g', 'c', 'f'},\n {'a', 'f', 'g', 'c', 'd'},\n {'a', 'c', 'd', 'e', 'f', 'g'},\n {'a', 'b', 'c'},\n {'a', 'b', 'c', 'd', 'e', 'f', 'g'},\n {'a', 'b', 'c', 'd', 'f', 'g'},\n]\n\n\ndef prepare_sets(seg):\n first_seg, second_seg = set(), set()\n for i in seg:\n if i.isupper():\n first_seg.update(i.lower())\n else:\n second_seg.update(i.lower())\n return first_seg, second_seg\n\n\ndef find_combinations(lit_seg, broken_seg):\n count = 0\n for L in range(0, len(broken_seg) + 1):\n for subset in itertools.combinations(broken_seg, L):\n union_lit_and_broken = lit_seg.union(set(subset))\n for item in numbers_collection:\n if item == union_lit_and_broken:\n count += 1\n return count\n\n\ndef seven_segment(lit_seg, broken_seg):\n first_lit_seg, second_lit_seg = prepare_sets(lit_seg)\n first_broken_seg, second_broken_seg = prepare_sets(broken_seg)\n first_number = find_combinations(first_lit_seg, first_broken_seg)\n second_number = find_combinations(second_lit_seg, second_broken_seg)\n return first_number * second_number\n\n\nif __name__ == '__main__':\n assert seven_segment({'B', 'C', 'b', 'c'}, {'A'}) == 2, '11, 71'\n assert seven_segment({'B', 'C', 'a', 'f', 'g', 'c', 'd'}, {'A', 'G', 'D', 'e'}) == 6, '15, 16, 35, 36, 75, 76'\n assert seven_segment({'B', 'C', 'a', 'f', 'g', 'c', 'd'}, {'A', 'G', 'D', 'F', 'b', 'e'}) == 20, '15...98'\n print('\"Run\" is good. How is \"Check\"?')\n","repo_name":"dkarpelevich/checkio","sub_path":"scientific_expedition/seven-segment.py","file_name":"seven-segment.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"15457654272","text":"\"\"\"\r\nThis module includes the core functions of SANXI robot\r\nClass: Sanxi, whose base class is RS232 in communication.py module\r\nFunctions: search_origin()\r\n\r\nAuthor: Mr SoSimple\r\n\"\"\"\r\n\r\nimport threading\r\nimport time\r\nimport re\r\n\r\nfrom communication import Message_control\r\n\r\n\r\n\r\nclass Sanxi(Message_control):\r\n __VE_MAX = 250000 # maximum velocity\r\n __AC_MAX = 250000 # maximum acceleration\r\n __DE_MAX = 250000 # maximum deceleration\r\n\r\n def __init__(self):\r\n super(Sanxi, self).__init__()\r\n self.__mode = '\\x10'\r\n self.return_code = '' # data returned over the Sanxi serial port\r\n self.jn_value = [] # quasi-real-time joint-space coordinates\r\n self.xyz_value = [] # quasi-real-time Cartesian-space coordinates\r\n self.start_update_sanxi_output_timer = None\r\n # precompile the regular expressions\r\n self.G_detect_pattern = re.compile(r'.*G.*') # detects the character 'G'; used with the patterns below to extract returned coordinates\r\n self.jn_pattern = re.compile(r'.*J1=(.*) J2=(.*) J3=(.*) J4=(.*) J5=(.*) J6=(.*)[\\r\\s]')\r\n self.xyz_pattern = re.compile(r'.*X=(.*) Y=(.*) Z=(.*) A=(.*) B=(.*) C=(.*) D=(.*)[\\r\\s]')\r\n\r\n\r\n\r\n def connect_sanxi(self, port_name):\r\n \"\"\"\r\n Connect to the Sanxi robot\r\n :param port_name: string, serial port name, example: port_name='COM3'\r\n :return: Bool\r\n \"\"\"\r\n self.set_port(port_name)\r\n self.set_baud_rate(115200)\r\n self.set_timeout(0.1)\r\n if self.connect() is True:\r\n return True\r\n else:\r\n return False\r\n\r\n def disconnect_sanxi(self):\r\n \"\"\"\r\n Close the serial connection to the Sanxi robot\r\n :return: Bool\r\n \"\"\"\r\n if self.disconnect():\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n def start_update_sanxi_output(self):\r\n \"\"\"\r\n Start updating the messages returned by the robot, including coordinate info\r\n :return: None\r\n \"\"\"\r\n self.start_refresh()\r\n self.start_update_sanxi_output_timer = threading.Timer(0.01, self.__extract_output_info)\r\n self.start_update_sanxi_output_timer.start()\r\n\r\n def __extract_output_info(self):\r\n \"\"\"\r\n Receive returned messages and extract the coordinate info they contain\r\n :return: None\r\n \"\"\"\r\n # print('enter extract_output_info')\r\n if self.return_code != self.message:\r\n # update return_code\r\n self.return_code = self.message\r\n # print('return code =', self.return_code)\r\n # update state info\r\n jn_match = self.jn_pattern.match(str(self.return_code)) # regular-expression match\r\n xyz_match = self.xyz_pattern.match(str(self.return_code))\r\n G_match = self.G_detect_pattern.match(str(self.return_code))\r\n # print(jn_match, xyz_match, G_match)\r\n if G_match is None:\r\n if jn_match:\r\n self.jn_value.clear()\r\n for i in range(1, 7):\r\n self.jn_value.append(jn_match.group(i))\r\n if xyz_match:\r\n self.xyz_value.clear()\r\n for i in range(1, 8):\r\n self.xyz_value.append(xyz_match.group(i))\r\n self.start_update_sanxi_output_timer = threading.Timer(0.01, self.__extract_output_info)\r\n self.start_update_sanxi_output_timer.start()\r\n # print('leave extract_output_info')\r\n\r\n def stop_update_sanxi_output(self):\r\n \"\"\"\r\n Stop updating the messages returned by the robot, including coordinate info\r\n :return: None\r\n \"\"\"\r\n self.start_update_sanxi_output_timer.cancel()\r\n self.stop_refresh()\r\n\r\n def search_origin(self):\r\n \"\"\"\r\n Start the built-in origin-search routine; based on photoelectric limit switches\r\n :return: None\r\n \"\"\"\r\n self.send('\\x30')\r\n time.sleep(0.1)\r\n self.send('\\x10')\r\n time.sleep(0.1)\r\n self.send('\\x12')\r\n time.sleep(0.1)\r\n\r\n def back2origin(self, wait=False):\r\n \"\"\"\r\n Reset: return to the origin\r\n :return: None\r\n \"\"\"\r\n self.send('\\x30')\r\n time.sleep(0.1)\r\n self.send('\\x10')\r\n time.sleep(0.1)\r\n self.send('\\x15')\r\n time.sleep(0.1)\r\n if wait:\r\n while self.return_code != '\\x10':\r\n self.send('\\x05')\r\n time.sleep(0.3)\r\n\r\n def stop(self):\r\n \"\"\"\r\n quick stop\r\n :return: None\r\n \"\"\"\r\n self.send('\\x30')\r\n time.sleep(0.2)\r\n self.send('\\x10')\r\n time.sleep(0.1)\r\n\r\n def set_motion_para(self, vep, acp, dep):\r\n \"\"\"\r\n Set the motion parameters\r\n :param vep: velocity percentage\r\n :param acp: acceleration percentage\r\n :param dep: deceleration percentage\r\n :return: None\r\n \"\"\"\r\n self.changeto_mode14()\r\n ve = vep * self.__VE_MAX / 100\r\n ac = acp * self.__AC_MAX / 100\r\n de = dep * self.__DE_MAX / 100\r\n self.send('G07 VE={}\\n'.format(str(ve)))\r\n time.sleep(0.05)\r\n self.send('G07 AC={}\\n'.format(str(ac)))\r\n time.sleep(0.05)\r\n self.send('G07 DE={}\\n'.format(str(de)))\r\n time.sleep(0.05)\r\n\r\n def set_return_data_mode(self, mode='cartesian space'):\r\n \"\"\"\r\n Set the returned-data mode: Cartesian coordinates or joint coordinates, defaulting to the former\r\n :param mode: string\r\n :return: Bool\r\n \"\"\"\r\n if mode == 'cartesian space':\r\n self.send('G07 GCM=1\\n')\r\n return True\r\n elif mode == 'joint space':\r\n self.send('G07 GCM=0\\n')\r\n return True\r\n else:\r\n return False\r\n\r\n def changeto_mode14(self):\r\n self.send('\\x30')\r\n time.sleep(0.05)\r\n self.send('\\x10')\r\n time.sleep(0.05)\r\n self.send('\\x14')\r\n time.sleep(0.02)\r\n\r\n def rect_move(self, mode, **rect_dict):\r\n \"\"\"\r\n Cartesian-coordinate motion, point-to-point or straight-line\r\n :param mode: mode='p2p' OR mode='line'\r\n :param rect_dict: dict of Cartesian target values, {'X': *, ...}\r\n :return:\r\n \"\"\"\r\n send_data = ''\r\n if mode == 'p2p':\r\n send_data += 'G20 '\r\n if mode == 'line':\r\n send_data += 'G21 '\r\n all_keys = ['X', 'Y', 'Z', 'A', 'B', 'C', 'D']\r\n for key in all_keys:\r\n if rect_dict[key] and (rect_dict[key] != ' '):\r\n send_data += '{0}={1} '.format(key, str(rect_dict[key]))\r\n send_data += '\\n'\r\n self.changeto_mode14()\r\n self.send(send_data)\r\n\r\n def multi_joints_motion(self, **j_dict):\r\n \"\"\"\r\n Joint motion, point-to-point\r\n :param j_dict: dict of six-axis target values, {'J*': **, ...}\r\n :return:\r\n \"\"\"\r\n send_data = 'G00 '\r\n all_keys = []\r\n for i in range(1, 7):\r\n all_keys.append('J{}'.format(str(i)))\r\n for key in all_keys:\r\n if j_dict[key] and (j_dict[key] != ' '):\r\n send_data += '{0}={1} '.format(key, str(j_dict[key]))\r\n send_data += '\\n'\r\n self.changeto_mode14()\r\n self.send(send_data)\r\n\r\n def single_joint_motion_start(self, n, is_positive):\r\n \"\"\"\r\n Single-axis motion of axis n\r\n :param n: axis to move alone, e.g. 1 means axis 1\r\n :param is_positive: True - clockwise or up, False - counter-clockwise or down\r\n :return:\r\n \"\"\"\r\n if n in [2, 3, 5]:\r\n if is_positive:\r\n send_data = 'J{}+\\n'.format(str(n))\r\n else:\r\n send_data = 'J{}-\\n'.format(str(n))\r\n else:\r\n if is_positive:\r\n send_data = 'J{}-\\n'.format(str(n))\r\n else:\r\n send_data = 'J{}+\\n'.format(str(n))\r\n self.changeto_mode14()\r\n self.send(send_data)\r\n\r\n def single_joint_motion_stop(self, n):\r\n \"\"\"\r\n Stop single-axis motion of axis n\r\n :param n: axis to stop, e.g. 1 means axis 1\r\n :return:\r\n \"\"\"\r\n send_data = 'J{}0\\n'.format(str(n))\r\n self.send(send_data)","repo_name":"ShepherdSosimple/Sanxi_Control_UI","sub_path":"sanxi_core.py","file_name":"sanxi_core.py","file_ext":"py","file_size_in_byte":8091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"29653981401","text":"# Standard library imports\nimport os\nimport sys\n\n# Local application imports\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nparent_dir = os.path.dirname(base_dir)\nsys.path.append(parent_dir)\nfrom graph_adj_list import VertexDFS, GraphDFS\n\n\ndef build_pancake_graph():\n \"\"\" Builds a Graph for a Pancake recipe \"\"\"\n pancake_graph = GraphDFS()\n ingredients = [\"3/4 cup milk\", \"1 egg\", \"1 Tbl Oil\", \"1 cup mix\"]\n instructions = [\n \"heat syrup\",\n \"heat griddle\",\n \"pour 1/4 cup\",\n \"turn when bubbly\",\n \"eat\",\n ]\n\n pancake_graph.add_edge(ingredients[0], ingredients[3])\n pancake_graph.add_edge(ingredients[1], ingredients[3])\n pancake_graph.add_edge(ingredients[2], ingredients[3])\n\n pancake_graph.add_edge(ingredients[3], instructions[0])\n pancake_graph.add_edge(ingredients[3], instructions[2])\n pancake_graph.add_edge(instructions[1], instructions[2])\n pancake_graph.add_edge(instructions[2], instructions[3])\n pancake_graph.add_edge(instructions[3], instructions[4])\n pancake_graph.add_edge(instructions[0], instructions[4])\n\n return pancake_graph\n\n\ndef topological_sorting(dfs_graph):\n \"\"\" Sorts the vertices in descending order by finish time \"\"\"\n finish_times = [vertex.finish for vertex in dfs_graph]\n finish_times.sort()\n finish_times.reverse()\n\n sorted_vertices = []\n\n for finish_time in finish_times:\n for vertex in dfs_graph:\n if vertex.finish == finish_time:\n sorted_vertices.append(vertex)\n\n return sorted_vertices\n\n\ndef main():\n pancake_graph = build_pancake_graph()\n pancake_graph.dfs()\n\n for vertex in pancake_graph:\n print(vertex.id, vertex.discovery, vertex.finish)\n\n sorted_graph = topological_sorting(pancake_graph)\n\n for vertex in sorted_graph:\n print(vertex.id, vertex.finish)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"miguel-osuna/PS-Algos-and-DS-using-Python","sub_path":"Section8_Graphs/topological_sorting/pancakes.py","file_name":"pancakes.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"16511792640","text":"import requests\n\nurl = \"https://docker.registry.htb/v2\"\nheader = {'Authorization': 'Basic YWRtaW46YWRtaW4='}\n\nfilepath = '/home/jkr/HackSpace/misc/wordlist.txt'\nwith open(filepath) as fp:\n line = fp.readline()\n cnt = 1\n while line:\n # print(\"Line {}: {}\".format(cnt, line.strip()))\n print(\"Trying: \", line.strip())\n pp = url+'/'+line.strip()\n r = requests.get(pp,headers=header)\n print(r.status_code)\n line = fp.readline()\n cnt += 1\n","repo_name":"ev1lm0rty/Writeups","sub_path":"HackTheBox/Boxes/Registry/recon/recon.py","file_name":"recon.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"32544136226","text":"# 1.\nclass Technic:\n def __init__(self, name: str, price: float, availability: bool):\n self.name = name\n self.price = price\n self.availability = availability\n\n def __repr__(self):\n return self.name\n\n\nproducts = list()\n\nproducts.append(Technic(\"Samsung c100\", 3500.00, True))\nproducts.append(Technic(\"Dexp URSUS K48\", 6499.00, True))\nproducts.append(Technic(\"Dexp URSUS C18 Kid's\", 4299.00, True))\n\ncost_criterion = 5000 # threshold above which a product counts as expensive\n\nexpensive = list()\ncheap = list()\n\nfor product in products:\n if product.price > cost_criterion:\n expensive.append(product)\n else:\n cheap.append(product)\n\n# print(expensive) # [Dexp URSUS K48]\n# print(cheap) # [Samsung c100, Dexp URSUS C18 Kid's]\n\n# 2.\nclass Technic:\n def __init__(self, name: str, price: float, availability: bool):\n self.name = name\n self.price = price\n self.availability = availability\n\n def __repr__(self):\n return self.name\n\n def __lt__(self, other):\n if isinstance(other, Technic):\n return len(self.name) < len(other.name)\n else:\n raise TypeError\n\n def __gt__(self, other):\n if isinstance(other, Technic):\n return len(self.name) > len(other.name)\n else:\n raise TypeError\n\n def __eq__(self, other):\n if isinstance(other, Technic):\n return len(self.name) == len(other.name)\n else:\n raise TypeError\n\n\n# foo = Technic(\"foo\", 1000.00, True)\n# bar = Technic(\"bar\", 1000.00, True)\n# foobar = Technic(\"foobar\", 1000.00, True)\n\n# print(f\"foo > bar: {foo > bar}\") # False\n# print(f\"foo < bar: {foo < bar}\") # False\n# print(f\"foo == bar: {foo == bar}\") # True\n\n# print(f\"foo > foobar: {foo > foobar}\") # False\n# print(f\"foo < foobar: {foo < foobar}\") # True\n# print(f\"foo == foobar: {foo == foobar}\") # False\n\n# print(foo > 10) # TypeError\n","repo_name":"volodya-wtf/dev_test","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"23119570667","text":"from django.urls import path\nfrom . import views\nfrom .views import classViewset,subjectViewset\nfrom rest_framework.routers import DefaultRouter\n\nrouter = DefaultRouter()\nrouter.register(r'class', classViewset, basename='class')\nrouter.register(r'subject', subjectViewset, basename='subject')\n# router.register(r'result',resultViewset, basename='result')\n\n# the path converters below were stripped during extraction; <int:pk> is a reconstruction\nurlpatterns = [\n path('',views.home,name='home'),\n path('student/register',views.registerStudent,name='student_register'),\n path('student/',views.studentListView,name='students_list'),\n path('student/<int:pk>/',views.studentDetailView,name='student_detail'),\n path('student/dashboard/<int:pk>/',views.dashboardStudent,name='student_dashboard'),\n path('staff/register',views.registerStaff,name='staff_register'),\n path('staff/',views.staffListView,name='students_list'),\n path('staff/<int:pk>/',views.staffDetailView,name='staff_detail'),\n path('staff/dashboard/<int:pk>/',views.dashboardStaff,name='staff_dashboard'),\n path('staff/addresult',views.createResultView,name=\"add_result\"),\n path('staff/addresult/<int:pk>/',views.updateResultView,name=\"update_result\"),\n\n]\n\nurlpatterns += router.urls","repo_name":"arunravi74/SchoolManagementSystem","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18410117729","text":"MOD = 10 ** 9 + 7\nINF = 10 ** 11\nimport sys\nsys.setrecursionlimit(100000000)\n\ndef main():\n N = int(input())\n S = [input() for _ in range(N)]\n\n cnt = 0\n ba = 0\n a = 0\n b = 0\n for s in S:\n for i in range(len(s) - 1):\n if s[i] == 'A' and s[i + 1] == 'B':\n cnt += 1\n if s[0] == 'B' and s[-1] == 'A':\n ba += 1\n elif s[0] == 'B':\n b += 1\n elif s[-1] == 'A':\n a += 1\n \n if a > 0 and b > 0:\n cnt += ba + 1 + min(a - 1,b - 1)\n elif a > 0:\n cnt += ba\n elif b > 0:\n cnt += ba\n else:\n if ba > 0:\n cnt += ba - 1\n print(cnt)\nif __name__ == '__main__':\n main() ","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03049/s526636844.py","file_name":"s526636844.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18295873432","text":"import Connection\nimport threading\nimport Neuron as neuron\nfrom NeuralNetwork import Network\n\ndef TrainNet(inputs1, net1, outputs1, id, result_list):\n\twhile True:\n\t\t#print (\"Training,\", id)\n\t\terr= 0\n\t\tfor i in range(len(inputs1)):\n\t\t\tnet1[0].setInput(inputs1[i])\n\t\t\tnet1[0].feedForward()\n\t\t\tnet1[0].backPropagate(outputs1[i])\n\t\t\terr= err+ net1[0].getError(outputs1[i])\n\t\tprint (\"Error: \", err)\n\t\tif err < 0.6:\n\t\t\tprint (\"I have finished\", id)\n\t\t\tresult_list[id]= net1[0]\n\t\t\tbreak\ndef main():\n\tresult_list= [[None], [None]]\n\ttopology= []\n\ttopology.append(2)\n\ttopology.append(3)\n\ttopology.append(3)\n\ttopology.append(1)\n\tnet1= Network(topology)\n\tnet2= Network(topology)\n\tneuron.eta= 0.09\n\tneuron.alpha= 0.015\n\tinputs1= [[0, 0],[0, 1]]\n\toutputs1=[[0],[1]]\n\tinputs2= [[1, 0],[1, 1]]\n\toutputs2=[[1],[0]]\n\tthreads= []\n\tnets= [net1, net2]\n\toutputs=[outputs1, outputs2]\n\tinputs= [inputs1, inputs2]\n\tfor i in range(2):\n\t\tt= threading.Thread(target=TrainNet, args=(inputs[i], [nets[i]], outputs[i], i, result_list))\n\t\tthreads.append(t)\n\t\tt.start()\n\tfor i in range(2):\n\t\tthreads[i].join()\n\tnet1= result_list[0]\n\tnet2= result_list[1]\n\twhile True:\n\t\ta= int(input(\"First input: \"))\n\t\tb= int(input(\"Second input: \"))\n\t\tif (a>0):\n\t\t\tnet2.setInput([a, b])\n\t\t\tnet2.feedForward()\n\t\t\tprint (net2.getThResults())\n\t\telse:\n\t\t\tnet1.setInput([a, b])\n\t\t\tnet1.feedForward()\n\t\t\tprint (net1.getThResults())\nif __name__ == '__main__':\n\tmain()\n","repo_name":"jramonrod/NeuralNetworkImplementation","sub_path":"src/paralelmain.py","file_name":"paralelmain.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18405450579","text":"import numpy\nN, K = map(int, input().split())\n\np = 0\nfor i in range(1, N + 1):\n m = numpy.log2(K / i)\n \n if m < 0:\n m = 0\n \n p += 0.5 ** numpy.ceil(m)\n \nprint(p / N)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03043/s865358658.py","file_name":"s865358658.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"41989482121","text":"import logging\nimport sys\nfrom logging import handlers\nfrom pathlib import Path\n\nimport coloredlogs\n\nfrom constants import config\n\nlog_level = logging.DEBUG if config.debug.state else logging.INFO\n\n# Log format\nlog_format = '%(asctime)s [%(levelname)s] - [%(filename)s > %(funcName)s() > %(lineno)s]: %(message)s'\nlog_date_format = '%m/%d/%Y|%I:%M:%S %p'\n\n# Info color/format logger\nfieldstyle = {\n \"asctime\": {\"color\": \"green\"},\n \"levelname\": {\n \"bold\": True,\n \"color\": \"black\"\n },\n \"filename\": {\"color\": \"cyan\"},\n \"funcName\": {\"color\": \"blue\"}\n}\n\n# Message color/format logger\nlevelstyles = {\n \"critical\": {\n \"bold\": True,\n \"color\": \"red\"\n },\n \"debug\": {\"color\": \"green\"},\n \"error\": {\"color\": \"red\"},\n \"info\": {\"color\": \"magenta\"},\n \"warning\": {\"color\": \"yellow\"}\n}\n\n# Creating logger\nlog = logging.getLogger(__name__)\nlog.setLevel(log_level)\n\nstreamhdlr = logging.StreamHandler(sys.stdout)\nlog.addHandler(streamhdlr)\n\nif config.debug.log_file:\n # File handler\n log_file = Path(\"logs\", \"bot.log\")\n log_file.parent.mkdir(exist_ok=True)\n file_handler = handlers.RotatingFileHandler(log_file, maxBytes=5242880, backupCount=7, encoding=\"utf8\")\n file_handler.setFormatter(logging.Formatter(log_format))\n file_handler.setLevel(log_level)\n log.addHandler(file_handler)\n\ncoloredlogs.install(\n level=log_level,\n logger=log,\n fmt=log_format,\n datefmt=log_date_format,\n field_styles=fieldstyle,\n level_styles=levelstyles\n)\n","repo_name":"Pandabweer/dpy-dispy","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18172507339","text":"K = int(input())\nif K%2==0 or K%5==0:\n print(-1)\nelse:\n ans = 1\n rest = 7%K\n while rest != 0:\n rest *= 10\n rest += 7\n rest %= K\n ans += 1\n print(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02596/s170879556.py","file_name":"s170879556.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"74765096937","text":"\nimport math\nimport ipaddress\n\nwhile True:\n \n #validates the IP address that the user inputs\n def validateIP():\n global listIP\n global ip\n while True:\n ip = input(\"\\nWhat's the IP? \")\n listIP = ip.split(\".\")\n if (ip.startswith(\"169.254.\")):\n print(\"Invalid ip, please try again\") #apipa IP is an invalid IP\n elif (len(listIP) == 4) and (ip != \"127.0.0.1\") and all(octet.isdigit() and 0 <= int(octet) <= 255 for octet in listIP): #compare octets numerically; string comparison would wrongly reject values like \"99\"\n print(\"Valid IP\\n\")\n break\n else:\n print(\"Invalid IP, please try again\\n\")\n validateIP()\n\n #validates the subnet mask the user inputs\n def validateMask():\n global list_subnets\n global subnet\n while True:\n subnet = input(\"What's the subnet mask? REMINDER: Must be in IP format: \")\n subnet_octets = ['0', '128', '192', '224', '240', '248', '252', '254', '255']\n list_subnets = subnet.split(\".\")\n if (len(list_subnets) == 4) and (list_subnets[0] == \"255\") and (list_subnets[1] in subnet_octets) and\\\n (list_subnets[2] in subnet_octets) and (list_subnets[3] in subnet_octets) and (list_subnets[0] >= list_subnets[1] >= list_subnets[2] >= list_subnets[3]):\n print(\"Valid Subnet Mask\\n\")\n break\n else:\n print(\"Invalid Subnet Mask, please try again\")\n validateMask()\n\n def calculate():\n \n network_add = ipaddress.IPv4Network(ip + '/' + subnet, False)\n print(\"The network address is: \" + str(network_add))\n \n list_hosts = list(network_add.hosts())\n print(\"Available Hosts: \", len(list_hosts))\n\n first_host = list(network_add.hosts())[0]\n last_host_position = len(list(network_add.hosts())) - 1\n last_host = list(network_add.hosts())[last_host_position]\n print(\"The host address range is: \", first_host, \"-\" ,last_host)\n\n calculate()\n\n","repo_name":"HarveyHuangMSFT/IP-Subnet-Calculator.py","sub_path":"ipcalculator.py","file_name":"ipcalculator.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18153618159","text":"#!/usr/bin/env python\nn = int(input())\nans = 0 \n\nis_primes = [True for _ in range(n+1)]\nis_primes[0] = is_primes[1] = False\nminFactor = [-1 for _ in range(n+1)]\n\nfor i in range(2, n+1):\n if not is_primes[i]: continue\n minFactor[i] = i \n for j in range(i*i, n+1, i): \n is_primes[j] = False\n minFactor[j] = i \n\n\ndef fast_factorization(n):\n data = {}\n while n > 1:\n if minFactor[n] not in data:\n data[minFactor[n]] = 1 \n else:\n data[minFactor[n]] += 1\n n //= minFactor[n]\n return data\n\n\ndef count_divisors(n):\n ret = 1 \n for v in fast_factorization(n).values():\n ret *= (v+1)\n return ret \n\nfor i in range(1, n): \n ans += count_divisors(i)\n\nprint(ans)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02548/s782069472.py","file_name":"s782069472.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"72157873578","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Project: DevelopBasic \n# Software: PyCharm\n# DateTime: 2018-10-23 10:17\n# File: 5-三级菜单崔工版本.py\n# __author__: 天晴天朗\n# Email: tqtl@tqtl.org\n\nmenu = {\n '北京': {\n '海淀': {\n '五道口': {\n 'soho': {},\n '网易': {},\n 'google': {}\n },\n '中关村': {\n '爱奇艺': {},\n '汽车之家': {},\n 'youku': {},\n },\n '上地': {\n '百度': {},\n },\n },\n '昌平': {\n '沙河': {\n '老男孩': {},\n '北航': {},\n },\n '天通苑': {},\n '回龙观': {},\n },\n '朝阳': {},\n '东城': {},\n },\n '上海': {\n '闵行': {\n \"人民广场\": {\n '炸鸡店': {}\n }\n },\n '闸北': {\n '火车战': {\n '携程': {}\n }\n },\n '浦东': {},\n },\n '山东': {},\n}\n\n\"\"\"\n1. Loop over the dict object;\n2. Use a list to keep the intermediate state; append and pop store and drop the relative position;\n3. Check the value the user entered and print a prompt;\n4. The exit() call quits the program:\n\"\"\"\ninfo = \"\"\"\nNote: enter q or Q to quit the program, b or B to return to the previous or top-level menu;\n\"\"\"\ncurrent_layer, layers = (menu, [])\nback_opt = (\"B\", \"b\")\nexit_opt = (\"Q\", \"q\")\n\nwhile 1:\n print(info)\n for key in current_layer: print(key)\n choice = input(\"Please enter your choice: \").strip()\n if not choice: continue\n if choice in current_layer:\n layers.append(current_layer)\n current_layer = current_layer[choice]\n elif choice in back_opt:\n if len(layers) != 0:\n current_layer = layers.pop()\n else:\n print(\"Already at the top level, please enter a sub-level option:\")\n elif choice in exit_opt:\n exit(\"Exiting program\")\n","repo_name":"cuixiaozhao/DevelopBasic","sub_path":"Chapter-03/5-三级菜单崔工版本.py","file_name":"5-三级菜单崔工版本.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"39683046453","text":"from functools import cache\nfrom itertools import product\nfrom math import gcd\n\n\ndef reduce(numerator: int, denominator: int) -> tuple[int, int]:\n \"\"\"reduces fraction to its lowest terms\"\"\"\n # divide numerator, denominator by their GCD\n frac_gcd = gcd(numerator, denominator)\n numerator //= frac_gcd\n denominator //= frac_gcd\n\n return numerator, denominator\n\n\n@cache\ndef series_capacitor(c_1: tuple[int, int], c_2: tuple[int, int]) -> \\\ntuple[int, int]:\n \"\"\"takes two fractional capacitances n1/d1 and n2/d2 as an integer tuple \n and returns the capacitance of the two in series\"\"\"\n # take reciprocals of c_1, c_2\n (d_1, n_1) = c_1\n (d_2, n_2) = c_2\n\n return reduce(d_1 * d_2, n_1 * d_2 + d_1 * n_2) # add and take reciprocal\n\n\n@cache\ndef parallel_capacitor(c_1: tuple[int, int], c_2: tuple[int, int]) -> \\\ntuple[int, int]:\n \"\"\"takes two fractional capacitances n1/d1 and n2/d2 as an integer tuple \n and returns the capacitance of the two in parallel\"\"\"\n (n_1, d_1) = c_1\n (n_2, d_2) = c_2\n\n return reduce(n_1 * d_2 + d_1 * n_2, d_1 * d_2)\n\n\n@cache\ndef capacitor_possibilities(n: int) -> set[tuple[int]]:\n \"\"\"returns a set of all capacitances that can be formed with n \n constant-capacitance capacitors (specified with variable base)\"\"\"\n capacitance_set = set() # possible capacitances\n base = 1, 1 # base capacitance\n \n if n == 1: # only one capacitor, return C of capacitor\n return {base}\n\n for i in range(1, n):\n for c_1, c_2 in product(capacitor_possibilities(n - i), \n capacitor_possibilities(i)):\n capacitance_set.add(series_capacitor(c_1, c_2))\n capacitance_set.add(parallel_capacitor(c_1, c_2))\n\n return capacitance_set\n\n\ndef main():\n # declare variables\n limit = 18\n c_set = set()\n\n # for D(n): loop through all numbers of capacitors from 1 to n\n for i in range(1, limit + 1): \n c_set.update(capacitor_possibilities(i))\n print(i, len(capacitor_possibilities(i)))\n\n # print result\n print(len(c_set))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mattblferrer/euler","sub_path":"151-200/155.py","file_name":"155.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18230156726","text":"#! /usr/bin/env python3\n\nfrom ws.client import API\nfrom ws.utils import dmerge, RateLimited\nfrom ws.interactive import require_login, ask_yesno\n\n@RateLimited(1, 1)\ndef delete_page(api, title, pageid):\n print(\"Deleting page '{}' (pageid={})\".format(title, pageid))\n api.call_with_csrftoken(action=\"delete\", pageid=pageid, reason=\"Unused category\", tags=\"wiki-scripts\")\n\ndef main(args, api):\n unused_categories = [p[\"title\"] for p in api.list(list=\"querypage\", qppage=\"Unusedcategories\", qplimit=\"max\")]\n\n result = {}\n query = api.call_api_autoiter_ids(action=\"query\", prop=\"revisions\", rvprop=\"content|timestamp\", rvslots=\"main\", titles=unused_categories)\n\n for chunk in query:\n dmerge(chunk, result)\n\n pages = result[\"pages\"]\n for page in sorted(pages.values(), key=lambda p: p[\"title\"]):\n title = page[\"title\"]\n pageid = page[\"pageid\"]\n content = page[\"revisions\"][0][\"slots\"][\"main\"][\"*\"]\n\n print()\n print(f\"Unused category title: {title}\")\n print(f\"Content:\\n{content}\\n\")\n delete = ask_yesno(\"Delete the page?\")\n if delete is True:\n delete_page(api, title, pageid)\n\nif __name__ == \"__main__\":\n import ws.config\n\n argparser = ws.config.getArgParser(description=\"Delete unused categories (interactive)\")\n API.set_argparser(argparser)\n\n args = ws.config.parse_args(argparser)\n\n api = API.from_argparser(args)\n require_login(api)\n\n main(args, api)\n","repo_name":"lahwaacz/wiki-scripts","sub_path":"delete-unused-categories.py","file_name":"delete-unused-categories.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"90"}
+{"seq_id":"37346214021","text":"#!/usr/bin/env python\n# GoodFET Client Library\n# \n# (C) 2009 Travis Goodspeed \n#\n# This code is being rewritten and refactored. You've been warned!\n\nfrom typing import Dict, Union, Optional, Any, List, Tuple\n\nimport sys \nimport time\nimport string\nimport io\nimport struct\nimport glob\nimport os; \nimport sqlite3; \n\nfmt: Tuple[str, str, str, str] = (\"B\", \"<H\", \"<H\", \"<L\") # struct formats for 1-, 2- and 4-byte debug values; the middle entries were lost to tag stripping and are reconstructed here (index 2 is never used)\n\ndef getClient(name: str=\"GoodFET\") -> Any:\n import GoodFET, GoodFETCC, GoodFETAVR, GoodFETSPI, GoodFETMSP430, GoodFETNRF, GoodFETCCSPI; # type: ignore\n if(name==\"GoodFET\" or name==\"monitor\"): return GoodFET.GoodFET();\n elif name==\"cc\" or name==\"cc51\": return GoodFETCC.GoodFETCC();\n elif name==\"cc2420\" or name==\"ccspi\": return GoodFETCCSPI.GoodFETCCSPI();\n elif name==\"avr\": return GoodFETAVR.GoodFETAVR();\n elif name==\"spi\": return GoodFETSPI.GoodFETSPI();\n elif name==\"msp430\": return GoodFETMSP430.GoodFETMSP430();\n elif name==\"nrf\": return GoodFETNRF.GoodFETNRF();\n \n print(\"Unsupported target: {}\".format(name))\n sys.exit(0);\n\nclass SymbolTable:\n \"\"\"GoodFET Symbol Table\"\"\"\n db: Any =sqlite3.connect(\":memory:\");\n \n def __init__(self, *args: Any, **kargs: Any) -> None:\n self.db.execute(\"create table if not exists symbols(adr,name,memory,size,comment);\");\n\n def get(self, name: str) -> Any:\n self.db.commit();\n\n c: Any =self.db.cursor();\n\n try:\n c.execute(\"select adr,memory from symbols where name=?\",(name,));\n for row in c:\n sys.stdout.flush();\n return row[0];\n except:# sqlite3.OperationalError:\n pass\n\n return eval(name);\n\n def define(self, adr: Any, name: str, comment: str=\"\", memory: str=\"vn\", size: int=16) -> None:\n self.db.execute(\"insert into symbols(adr,name,memory,size,comment)\"\n \"values(?,?,?,?,?);\", (\n adr,name,memory,size,comment));\n\nclass GoodFET:\n \"\"\"GoodFET Client Library\"\"\"\n\n besilent: int = 0;\n app: int = 0;\n verb: int = 0;\n count: int = 0;\n data: bytearray = bytearray([]);\n verbose: bool = False\n \n GLITCHAPP: int = 0x71;\n MONITORAPP: int = 0x00;\n symbols: SymbolTable = SymbolTable();\n \n def __init__(self, *args: Any, **kargs: Any) -> None:\n self.data: bytearray = bytearray([]); \n\n def getConsole(self) -> Any:\n from GoodFETConsole import GoodFETConsole; # type: ignore\n return GoodFETConsole(self);\n\n def name2adr(self, name: str) -> str:\n return self.symbols.get(name);\n\n def timeout(self) -> None:\n print(\"timeout\\n\");\n\n def serInit(self, port: Optional[str]=None, timeout: int=2, attemptlimit: Optional[int]=None) -> None:\n \"\"\"Open a serial port of some kind.\"\"\"\n import re; \n \n if port is None:\n port = os.environ.get(\"GOODFET\");\n\n self.pyserInit(port, timeout, attemptlimit);\n\n def pyserInit(self, port, timeout, attemptlimit):\n \"\"\"Open the serial port\"\"\"\n import serial; # type: ignore\n fixserial: bool = False;\n \n if os.name == 'nt' and sys.version.find('64 bit') != -1:\n print(\"WARNING: PySerial requires a 32-bit Python build in Windows.\");\n \n if port is None and os.environ.get(\"GOODFET\") != None:\n glob_list = glob.glob(os.environ.get(\"GOODFET\"));\n if len(glob_list) > 0:\n port = glob_list[0];\n else:\n port = os.environ.get(\"GOODFET\");\n if port is None:\n glob_list = glob.glob(\"/dev/tty.usbserial*\");\n if len(glob_list) > 0:\n port = glob_list[0];\n if port is None:\n glob_list = glob.glob(\"/dev/ttyUSB*\");\n if len(glob_list) > 0:\n port = glob_list[0];\n if port is None:\n glob_list = glob.glob(\"/dev/ttyU0\");\n if len(glob_list) > 0:\n port = glob_list[0];\n 
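# final fallback on Windows: scan the available COM ports for an FTDI adapter\n 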
if port is None and os.name=='nt':\n from scanwin32 import winScan; # type: ignore\n scan=winScan();\n for order,comport,desc,hwid in sorted(scan.comports()):\n try:\n if hwid.index('FTDI')==0:\n port=comport;\n except:\n pass\n \n baud: int = 115200;\n\n if(os.environ.get(\"platform\")=='arduino' or os.environ.get(\"board\")=='arduino'):\n baud=19200 #Slower, for now.\n\n self.serialport = serial.Serial(\n port,\n baud,\n parity = serial.PARITY_NONE,\n timeout=timeout\n )\n \n self.verb = 0;\n attempts: int = 0;\n connected = False;\n while connected is False:\n while self.verb != 0x7F or self.data != b\"http://goodfet.sf.net/\":\n\n if attemptlimit is not None and attempts >= attemptlimit:\n return\n\n elif attempts == 2 and os.environ.get(\"board\") != 'telosb':\n print(\"See the GoodFET FAQ about missing info flash.\");\n self.serialport.timeout = 0.2;\n\n elif attempts == 100:\n print(\"Tried 100 times to connect and failed.\")\n print(\"Continuing to try forever.\", end='')\n self.verbose = True # Something isn't going right, give the user more info\n\n elif attempts > 100 and attempts % 10 == 0:\n print('.', end='')\n\n #TelosB reset, prefer software to I2C SPST Switch.\n if (os.environ.get(\"board\") == 'telosb'):\n self.telosBReset();\n\n elif (os.environ.get(\"board\") == 'z1'):\n self.bslResetZ1(invokeBSL=0);\n\n elif (os.environ.get(\"board\")=='apimote1') or (os.environ.get(\"board\")=='apimote'):\n self.serialport.setRTS(1);\n self.serialport.setDTR(1);\n self.serialport.setRTS(0);\n else:\n self.serialport.setRTS(1);\n self.serialport.setDTR(1);\n self.serialport.setDTR(0);\n \n attempts = attempts + 1;\n self.readcmd(); #Read the first command.\n\n if self.verb!=0x7f:\n self.readcmd();\n\n connected=True;\n\n if attempts >= 100:\n print(\"\") # Add a newline\n olds: str = self.infostring();\n clocking: Any = self.monitorclocking();\n\n for foo in range(1,30):\n if not self.monitorecho():\n if self.verbose:\n print(\"Comm error on {0} try, resyncing out of {1}.\".format(foo ,clocking));\n connected=False;\n break;\n\n if self.verbose: \n print(\"Connected after {} attempts.\".format(attempts));\n\n self.mon_connected();\n self.serialport.timeout = 12;\n\n def serClose(self) -> None:\n self.serialport.close();\n\n def telosSetSCL(self, level: int) -> None:\n '''Helper function for support of the TelosB platform.'''\n self.serialport.setRTS(not level)\n\n def telosSetSDA(self, level: int) -> None:\n '''Helper function for support of the TelosB platform.'''\n self.serialport.setDTR(not level)\n\n def telosI2CStart(self) -> None:\n '''Helper function for support of the TelosB platform.'''\n self.telosSetSDA(1)\n self.telosSetSCL(1)\n self.telosSetSDA(0)\n\n def telosI2CStop(self) -> None:\n '''Helper function for support of the TelosB platform.'''\n self.telosSetSDA(0)\n self.telosSetSCL(1)\n self.telosSetSDA(1)\n\n def telosI2CWriteBit(self, bit: int) -> None:\n '''Helper function for support of the TelosB platform.'''\n self.telosSetSCL(0)\n self.telosSetSDA(bit)\n time.sleep(2e-6)\n self.telosSetSCL(1)\n time.sleep(1e-6)\n self.telosSetSCL(0)\n\n def telosI2CWriteByte(self, byte: int) -> None:\n '''Helper function for support of the TelosB platform.'''\n self.telosI2CWriteBit( byte & 0x80 );\n self.telosI2CWriteBit( byte & 0x40 );\n self.telosI2CWriteBit( byte & 0x20 );\n self.telosI2CWriteBit( byte & 0x10 );\n self.telosI2CWriteBit( byte & 0x08 );\n self.telosI2CWriteBit( byte & 0x04 );\n self.telosI2CWriteBit( byte & 0x02 );\n self.telosI2CWriteBit( byte & 0x01 );\n 
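# after the eight data bits, clock a ninth low bit into the I2C acknowledge slot\n 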
self.telosI2CWriteBit( 0 ); # \"acknowledge\"\n\n def telosI2CWriteCmd(self, addr: int, cmdbyte: int) -> None:\n '''Helper function for support of the TelosB platform.'''\n self.telosI2CStart()\n self.telosI2CWriteByte( 0x90 | (addr << 1) )\n self.telosI2CWriteByte( cmdbyte )\n self.telosI2CStop()\n\n def bslResetZ1(self, invokeBSL: int=0) -> None:\n '''\n Helper function for support of the Z1 mote platform.\n Applies BSL entry sequence on RST/NMI and TEST/VPP pins.\n By now only BSL mode is accessed.\n @type invokeBSL: Integer\n @param invokeBSL: 1 for a complete sequence, or 0 to only access RST/NMI pin\n Applies BSL entry sequence on RST/NMI and TEST/VPP pins\n Parameters:\n invokeBSL = 1: complete sequence\n invokeBSL = 0: only RST/NMI pin accessed\n \n By now only BSL mode is accessed\n '''\n\n if invokeBSL:\n time.sleep(0.1)\n self.writepicROM(0xFF, 0xFF)\n time.sleep(0.1)\n else:\n time.sleep(0.1)\n self.writepicROM(0xFF, 0xFE)\n time.sleep(0.1)\n\n def writepicROM(self, address: int, data: int) -> int:\n ''' Writes data to @address'''\n\n for i in range(7,-1,-1):\n self.picROMclock((address >> i) & 0x01)\n\n self.picROMclock(0)\n\n recbuf: int = 0\n\n for i in range(7,-1,-1):\n s: int = ((data >> i) & 0x01)\n if i < 1:\n r: bool = not self.picROMclock(s, True)\n else:\n r = not self.picROMclock(s)\n recbuf = (recbuf << 1) + r\n\n self.picROMclock(0, True)\n return recbuf\n\n def readpicROM(self, address: int) -> int:\n ''' reads a byte from @address'''\n for i in range(7,-1,-1):\n self.picROMclock((address >> i) & 0x01)\n\n self.picROMclock(1)\n\n recbuf: int = 0\n r: int = 0\n\n for i in range(7,-1,-1):\n r = self.picROMclock(0)\n recbuf = (recbuf << 1) + r\n\n self.picROMclock(r)\n return recbuf\n \n #This seems more reliable when slowed.\n def picROMclock(self, masterout: int, slow: bool=True) -> int:\n self.serialport.setRTS(masterout)\n self.serialport.setDTR(1)\n self.serialport.setDTR(0)\n\n if slow:\n time.sleep(0.02)\n\n return self.serialport.getCTS()\n\n def picROMfastclock(self, masterout: int) -> int:\n return self.picROMclock(masterout, False)\n\n def telosBReset(self, invokeBSL: int=0) -> None:\n # \"BSL entry sequence at dedicated JTAG pins\"\n # rst !s0: 0 0 0 0 1 1\n # tck !s1: 1 0 1 0 0 1\n # s0|s1: 1 3 1 3 2 0\n\n # \"BSL entry sequence at shared JTAG pins\"\n # rst !s0: 0 0 0 0 1 1\n # tck !s1: 0 1 0 1 1 0\n # s0|s1: 3 1 3 1 0 2\n\n if invokeBSL:\n self.telosI2CWriteCmd(0,1)\n self.telosI2CWriteCmd(0,3)\n self.telosI2CWriteCmd(0,1)\n self.telosI2CWriteCmd(0,3)\n self.telosI2CWriteCmd(0,2)\n self.telosI2CWriteCmd(0,0)\n else:\n self.telosI2CWriteCmd(0,3)\n self.telosI2CWriteCmd(0,2)\n\n # This line was not defined inside the else: block, not sure where it\n # should be however\n self.telosI2CWriteCmd(0,0)\n time.sleep(0.250) #give MSP430's oscillator time to stabilize\n self.serialport.flushInput() #clear buffers\n\n def writecmd(self, app: int, verb: int, count: int=0, data: Optional[bytes]=None)-> bytearray:\n \"\"\"Write a command and some data to the GoodFET.\"\"\"\n self.serialport.write(bytearray([app, verb]))\n\n self.serialport.write(bytearray([count & 0xFF, count >> 8]))\n\n if self.verbose:\n print(\"Tx: ( {0}, {1}, {2} )\".format( app, verb, count ))\n \n if count != 0:\n if data is not None:\n out: bytearray = bytearray(data[:count])\n\n self.serialport.write(out)\n\n if not self.besilent:\n return self.readcmd()\n else:\n return bytearray([0])\n\n def readcmd(self) -> bytearray:\n \"\"\"Read a reply from the GoodFET.\"\"\"\n while 1:\n try:\n 
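# reply frame: app byte, verb byte, 16-bit little-endian count, then the count-byte payload\n 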
self.app = ord(self.serialport.read(1));\n self.verb = ord(self.serialport.read(1));\n \n #Fixes an obscure bug in the TelosB.\n if self.app == 0x00:\n while self.verb == 0x00:\n self.verb = ord(self.serialport.read(1));\n \n self.count=(\n ord(self.serialport.read(1)) + (ord(self.serialport.read(1))<<8)\n );\n\n if self.verbose:\n print(\"Rx: ( {0}, {1}, {2} )\".format( self.app, self.verb, self.count ))\n \n #Debugging string; print, but wait.\n if self.app == 0xFF:\n if self.verb == 0xFF:\n print(\"# DEBUG %s\" % self.serialport.read(self.count))\n\n elif self.verb==0xFE:\n print(\"# DEBUG 0x{:x}\".format(struct.unpack(fmt[self.count-1], self.serialport.read(self.count))[0]))\n elif self.verb==0xFD:\n print(\"# NOP.\")\n \n sys.stdout.flush();\n\n else:\n self.data = bytearray(self.serialport.read(self.count))\n return self.data;\n\n except TypeError:\n if self.connected:\n print(\"Warning: waiting for serial read timed out (most likely).\")\n #sys.exit(-1)\n return self.data;\n\n #Glitching stuff.\n def glitchApp(self,app):\n \"\"\"Glitch into a device by its application.\"\"\"\n self.data=[app&0xff];\n self.writecmd(self.GLITCHAPP,0x80,1,self.data);\n #return ord(self.data[0]);\n def glitchVerb(self,app,verb,data):\n \"\"\"Glitch during a transaction.\"\"\"\n if data==None: data=[];\n self.data=[app&0xff, verb&0xFF]+data;\n self.writecmd(self.GLITCHAPP,0x81,len(self.data),self.data);\n #return ord(self.data[0]);\n def glitchstart(self):\n \"\"\"Glitch into the AVR application.\"\"\"\n self.glitchVerb(self.APP,0x20,None);\n def glitchstarttime(self):\n \"\"\"Measure the timer of the START verb.\"\"\"\n return self.glitchTime(self.APP,0x20,None);\n def glitchTime(self,app,verb,data):\n \"\"\"Time the execution of a verb.\"\"\"\n if data==None: data=[];\n self.data=[app&0xff, verb&0xFF]+data;\n print(\"Timing app %02x verb %02x.\" % (app,verb));\n self.writecmd(self.GLITCHAPP,0x82,len(self.data),self.data);\n time=ord(self.data[0])+(ord(self.data[1])<<8);\n print(\"Timed to be %i.\" % time);\n return time;\n def glitchVoltages(self,low=0x0880, high=0x0fff):\n \"\"\"Set glitching voltages. 
(0x0fff is max.)\"\"\"\n self.data=[low&0xff, (low>>8)&0xff,\n high&0xff, (high>>8)&0xff];\n self.writecmd(self.GLITCHAPP,0x90,4,self.data);\n #return ord(self.data[0]);\n def glitchRate(self,count=0x0800):\n \"\"\"Set glitching count period.\"\"\"\n self.data=[count&0xff, (count>>8)&0xff];\n self.writecmd(self.GLITCHAPP,0x91,2,\n self.data);\n #return ord(self.data[0]);\n \n #Monitor stuff\n def silent(self,s=0):\n \"\"\"Transmissions halted when 1.\"\"\"\n self.besilent=s;\n print(\"besilent is %i\" % self.besilent);\n self.writecmd(0,0xB0,1,[s]);\n connected=0;\n def mon_connected(self):\n \"\"\"Announce to the monitor that the connection is good.\"\"\"\n self.connected=1;\n self.writecmd(0,0xB1,0,[]);\n def out(self,byte):\n \"\"\"Write a byte to P5OUT.\"\"\"\n self.writecmd(0,0xA1,1,[byte]);\n def dir(self,byte):\n \"\"\"Write a byte to P5DIR.\"\"\"\n self.writecmd(0,0xA0,1,[byte]);\n def call(self,adr):\n \"\"\"Call to an address.\"\"\"\n self.writecmd(0,0x30,2,\n [adr&0xFF,(adr>>8)&0xFF]);\n def execute(self,code):\n \"\"\"Execute supplied code.\"\"\"\n self.writecmd(0,0x31,2,#len(code),\n code);\n def MONpeek8(self,address):\n \"\"\"Read a byte of memory from the monitor.\"\"\"\n self.data=[address&0xff,address>>8];\n self.writecmd(0,0x02,2,self.data);\n #self.readcmd();\n return ord(self.data[0]);\n def MONpeek16(self,address):\n \"\"\"Read a word of memory from the monitor.\"\"\"\n return self.MONpeek8(address)+(self.MONpeek8(address+1)<<8);\n def peek(self,address):\n \"\"\"Read a word of memory from the monitor.\"\"\"\n return self.MONpeek8(address)+(self.MONpeek8(address+1)<<8);\n def eeprompeek(self,address):\n \"\"\"Read a word of memory from the monitor.\"\"\"\n print(\"EEPROM peeking not supported for the monitor.\");\n #return self.MONpeek8(address)+(self.MONpeek8(address+1)<<8);\n def peekbysym(self,name):\n \"\"\"Read a value by its symbol name.\"\"\"\n #TODO include memory in symbol.\n reg=self.symbols.get(name);\n return self.peek8(reg,\"data\");\n def pokebysym(self,name,val):\n \"\"\"Write a value by its symbol name.\"\"\"\n #TODO include memory in symbol.\n reg=self.symbols.get(name);\n return self.pokebyte(reg,val);\n def pokebyte(self,address,value,memory=\"vn\"):\n \"\"\"Set a byte of memory by the monitor.\"\"\"\n self.data=[address&0xff,address>>8,value];\n self.writecmd(0,0x03,3,self.data);\n return ord(self.data[0]);\n def poke16(self,address,value):\n \"\"\"Set a word of memory by the monitor.\"\"\"\n self.MONpoke16(address,value);\n def MONpoke16(self,address,value):\n \"\"\"Set a word of memory by the monitor.\"\"\"\n self.pokebyte(address,value&0xFF);\n self.pokebyte(address,(value>>8)&0xFF);\n return value;\n def setsecret(self,value):\n \"\"\"Set a secret word for later retrieval. Used by glitcher.\"\"\"\n #self.eeprompoke(0,value);\n #self.eeprompoke(1,value);\n print(\"Secret setting is not yet supported for this target.\");\n print(\"Aborting.\");\n \n def getsecret(self):\n \"\"\"Get a secret word. 
Used by glitcher.\"\"\"\n #self.eeprompeek(0);\n print(\"Secret getting is not yet supported for this target.\");\n print(\"Aborting.\");\n sys.exit();\n \n def dumpmem(self,begin,end):\n i=begin;\n while i<end:\n #reconstructed: the original text from this loop body through the start of the next record (including the named group below) was lost to tag stripping\n print(\"%04x %04x\" % (i, self.MONpeek16(i)));\n i+=2;\n\nurlpatterns = [\n url(r'^(?P<slug>[-\w]+)', views.ViewPost),\n url(r'^get_post/', views.GetPost),\n url(r'^publish_post/', views.PublishPost),\n url(r'home_section/$',views.HomeSection,name='home'),\n url(r'^about_section/$',views.AboutSection,name='about'),\n url(r'^posts/$',views.PostPageView,name='posts'),\n\n]","repo_name":"chiranjev/Django-testing","sub_path":"DjangoTestApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"28997277161","text":"#1\nduration = input('Enter a number: ')\nif duration.isdigit():\n duration = int(duration)\n print(duration // 3600, duration // 60 % 60, duration % 60)\nelse:\n print('That is not a number!')\n\n#2\nmy_list = [i ** 3 for i in range(1, 1001, 2)]\nm_sum = 0\nfor num in my_list:\n my_sum = 0\n for digit in str(num):\n my_sum += int(digit)\n if my_sum % 7 == 0:\n m_sum += num\nprint(m_sum)\n\nmy_list = [i ** 3 for i in range(1, 1001, 2)]\nm_sum = 0\nfor num in my_list:\n num +=17\n my_sum = 0\n for digit in str(num):\n my_sum += int(digit)\n if my_sum % 7 == 0:\n m_sum += num\nprint(m_sum)\n\n#3\n\nPercent = input('Enter a number: ')\nif Percent.isdigit():\n Percent = int(Percent)\nelse:\n print('That is not a number!')\nif Percent == 1: word = 'процент'\nelif Percent <= 4: word = 'процента'\nelse: word = 'процентов'\nprint(Percent, word)\n\n\n","repo_name":"Grigoriy-Git/less","sub_path":"grigoriy.shuvalov_dz_#1.py","file_name":"grigoriy.shuvalov_dz_#1.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18526963259","text":"n,c=map(int,input().split())\nd=[list(map(int,input().split())) for _ in range(c)]\nz=[]\no=[]\nt=[]\nfor i in range(n):\n a=list(map(int,input().split()))\n for j in range(n):\n if (i+j+2)%3==0:\n z.append(a[j])\n elif (i+j+2)%3==1:\n o.append(a[j])\n else:\n t.append(a[j])\nzz=[0]*c\noo=[0]*c\ntt=[0]*c\nfor i in range(c):\n su=0\n for j in z:\n su+=d[j-1][i]\n zz[i]=su\n su=0\n for j in o:\n su+=d[j-1][i]\n oo[i]=su\n su=0\n for j in t:\n su+=d[j-1][i]\n tt[i]=su\nx=10**10\nfor i in range(c):\n for j in range(c):\n if i==j:\n continue\n for k in range(c):\n if i==k or j==k:\n continue\n x=min(x,zz[i]+oo[j]+tt[k])\nprint(x)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03330/s381164923.py","file_name":"s381164923.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"22294795064","text":"\"\"\"\nAuthor: Andrew Harris\nPython 3.8\n\"\"\"\nimport logging\nimport os\n\nimport pandas as pd\nfrom ete3 import Tree\nfrom tqdm import tqdm\n############################### Set up logger #################################\ndef set_logger_level(WORKING_DIR, LOG_LEVEL):\n logger = logging.getLogger(__name__)\n # Remove existing log file if present\n if os.path.exists(WORKING_DIR / 'logs/topobinner.log'):\n os.remove(WORKING_DIR / 'logs/topobinner.log')\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n file_handler = logging.FileHandler(WORKING_DIR / 'logs/topobinner.log')\n file_handler.setFormatter(formatter)\n stream_handler = logging.StreamHandler()\n logger.addHandler(file_handler)\n logger.addHandler(stream_handler)\n logger.setLevel(LOG_LEVEL)\n return logger\n\n############################## Helper Functions ###############################\ndef remove_heterotachy_info(l):\n \"\"\"Remove any information in brackets; ete3 \n does not support this format of newick\"\"\"\n if (\"[\" not in l) and (\"]\" not in l):\n return l\n open_brackets = [i for i, x in enumerate(l) if x == \"[\"]\n close_brackets = [i for i, x in enumerate(l) if x == \"]\"]\n final_string = f'{l[:open_brackets[0]]}'\n for ob, cb in zip(open_brackets[1:], close_brackets[:-1]):\n final_string += l[cb+1:ob]\n final_string += l[close_brackets[-1]+1:]\n return final_string\n\ndef tv_header_validation(df):\n \"\"\"Return False if first four required column headers are not valid\"\"\"\n required_cols = list(df.columns[:4])\n try:\n assert required_cols == [\"Chromosome\", \"Window\", \"NewickTree\", \"TopologyID\"]\n return True\n except AssertionError:\n return False\n\n############################### Main Function ################################\ndef topobinner(TREEVIEWER_FN, UPDATED_TV_FILENAME, TOPOBIN_ROOTED, WORKING_DIR, LOG_LEVEL):\n logger = set_logger_level(WORKING_DIR, LOG_LEVEL) # Setup log file level\n # Load in Tree Viewer excel file\n df = pd.read_excel(TREEVIEWER_FN)\n df = df.reset_index(drop=True)\n # Validate headers\n header_check = tv_header_validation(df)\n if not header_check:\n raise AssertionError(\"Input file headers are not valid, please ensure required headers are correct.\")\n trees = df['NewickTree']\n topologies = dict()\n logger.info(f\"{len(trees):,} trees to run\")\n # Set root boolean value (assignment; the original used == and had no effect)\n TOPOBIN_ROOTED = False if TOPOBIN_ROOTED == 'Y' else True\n # Bin Trees\n tqdm_text = \"{}\".format(\"topobinner\").zfill(3)\n with tqdm(total=len(trees), desc=tqdm_text) as pbar:\n for n, t in enumerate(trees):\n if t == \"NoTree\":\n pbar.update(1)\n continue\n elif len(topologies.keys()) == 0:\n topologies[n] = {'count': 1, 'idx': [n]}\n pbar.update(1)\n continue\n else:\n new_topology = True\n for idx in topologies.keys():\n if df.at[idx, 'NewickTree'] == \"NoTree\":\n continue\n t1 = Tree(remove_heterotachy_info(t))\n t2 = Tree(remove_heterotachy_info(df.at[idx, 'NewickTree']))\n comparison = t1.compare(t2, unrooted=TOPOBIN_ROOTED)\n rf = comparison['rf']\n if rf == 0:\n topologies[idx]['count'] += 1\n topologies[idx]['idx'].append(n)\n new_topology = False\n break\n else:\n continue\n if new_topology:\n topologies[n] = {'count': 1, 'idx': [n]}\n pbar.update(1)\n continue\n else:\n pbar.update(1)\n continue\n # Sort topologies dictionary by 'count'\n topologies = {k: v for k, v in sorted(topologies.items(), key=lambda item: item[1]['count'], reverse=True)}\n # Set zfill number\n if len(topologies.keys()) < 100:\n zfillnum = 3\n 
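# zero-padding keeps TopologyID labels (Tree001, Tree002, ...) sorting in rank order\n 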
elif len(topologies.keys()) < 1000:\n zfillnum = 4\n else:\n zfillnum = 5\n # Update DataFrame TopologyID column with results\n overview_df = pd.DataFrame({\n \"TopologyID\": [(\"Tree\" + \"{}\".format(str(i)).zfill(zfillnum)) for i in range(1, len(topologies.keys())+1)],\n \"Count\": [topologies[i][\"count\"] for i in topologies.keys()],\n \"Rank\": [i for i in range(1, len(topologies.keys())+1)],\n })\n topoCount = 1\n for topo in topologies.keys():\n idx = topologies[topo]['idx']\n for i in idx:\n df.at[i, 'TopologyID'] = \"Tree\" + \"{}\".format(topoCount).zfill(zfillnum)\n continue\n topoCount += 1\n # Output updated Tree Viewer file\n df.to_excel(UPDATED_TV_FILENAME, index=False)\n logger.info(f\"\\n{overview_df}\")\n return\n\n\n","repo_name":"harris-2374/THEx","sub_path":"src/thexb/STAGE_topobinner.py","file_name":"STAGE_topobinner.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"90"}
+{"seq_id":"42319335222","text":"\nfrom __future__ import unicode_literals\nfrom flask import Flask\nfrom OpenSSL import SSL\n\n\napp = Flask(__name__, static_folder='static')\napp.debug = True\n\n@app.route('/test_ssl')\ndef test_ssl():\n return 'SSL hello'\n\nif __name__ == '__main__':\n context = SSL.Context(SSL.SSLv23_METHOD)\n context.use_privatekey_file('ssl.key')\n context.use_certificate_file('ssl.crt')\n app.run(host='', port=24444, ssl_context=context)","repo_name":"gaorx/Roboq","sub_path":"RoboqTestServer/RoboqTestHttpsServer.py","file_name":"RoboqTestHttpsServer.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"36186275106","text":"import adventofcode\n\n\ndef part1(input):\n \"\"\"\n >>> part1(['forward 5', 'down 5', 'forward 8', 'up 3', 'down 8', 'forward 2'])\n 150\n \"\"\"\n coords = [0, 0]\n for line in input:\n command = line.split(' ')\n if command[0] == \"forward\":\n coords[0] += int(command[1])\n if command[0] == \"down\":\n coords[1] += int(command[1])\n if command[0] == \"up\":\n coords[1] -= int(command[1])\n return coords[0] * coords[1]\n\n\ndef part2(input):\n \"\"\"\n >>> part2(['forward 5', 'down 5', 'forward 8', 'up 3', 'down 8', 'forward 2'])\n 900\n \"\"\"\n coords = [0, 0, 0]\n for line in input:\n command = line.split(' ')\n if command[0] == \"forward\":\n coords[0] += int(command[1])\n coords[1] += int(command[1]) * coords[2]\n if command[0] == \"down\":\n coords[2] += int(command[1])\n if command[0] == \"up\":\n coords[2] -= int(command[1])\n return coords[0] * coords[1]\n\n\ndef main():\n puzzle_input = adventofcode.read_input(2)\n adventofcode.answer(1, 1660158, part1(puzzle_input))\n adventofcode.answer(2, 1604592846, part2(puzzle_input))\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n main()\n","repo_name":"clefever/aoc2021","sub_path":"day02.py","file_name":"day02.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"9510354872","text":"import math\nfrom hittable import HitRecord, Hittable\nfrom vec3 import Vec3, dot\n\nclass Sphere(Hittable):\n \"\"\"Documentation for Sphere. Class to create sphere.\n Input parameters:\n\n center of sphere -> cen of type Vec3\n radius -> r of type Vec3\n \"\"\"\n def __init__(self, center, radius, name):\n #super(Sphere, self).__init__()\n # super(Sphere, self).__init__()\n self.center = center\n self.radius = radius\n self.name = name\n\n #def sphere_hit(self, r, t_min, t_max, rec):\n def hit(self, r, t_min, t_max, rec):\n oc = r.origin - self.center\n a = r.direction.length_squared()\n half_b = dot(oc, r.direction)\n c = oc.length_squared() - self.radius*self.radius\n discriminant = half_b*half_b - a*c\n if discriminant < 0:\n return False\n sqrtd = math.sqrt(discriminant)\n\n # Find the nearest root that lies in acceptable range\n root = (-half_b-sqrtd)/a\n if root < t_min or t_max < root:\n root = (-half_b+sqrtd)/a\n if root < t_min or t_max < root:\n return False\n\n rec.t = root\n rec.p = r.at(rec.t)\n rec.normal = (rec.p-self.center)/self.radius\n\n # Add surface side determination to class\n outward_normal = (rec.p-self.center)/self.radius\n rec.set_face_normal(r, outward_normal)\n\n return True\n","repo_name":"mbrank/optika","sub_path":"python_to_cpp/sphere.py","file_name":"sphere.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"246387687","text":"import itertools\nimport os\nfrom collections import Counter\n\nimport cv2\nimport numpy as np\n\n\ndef select_sample(grayscale, step=50, threshold=100):\n mask = grayscale.copy()\n with_nans = grayscale.copy().astype('float32')\n with_nans[mask >= threshold] = np.nan\n height, _ = grayscale.shape\n selected_rows = []\n for row_number in range(0, height, step):\n selected_rows.append(with_nans[row_number])\n return selected_rows\n\n\ndef drop_nans(h):\n return h[np.isfinite(h)]\n\n\ndef compare_hist(h1, h2):\n # rotated_h1 = np.fliplr([h1])[0]\n regular = compare_hist_one_way(h1, h2)\n # rotated = compare_hist_one_way(rotated_h1, h2)\n # return max(regular, rotated)\n return regular\n\n\ndef compare_hist_one_way(h1, h2):\n h1 = h1.astype('float32')\n h2 = h2.astype('float32')\n l1 = np.size(h1)\n l2 = np.size(h2)\n if l1 == l2:\n # return np.correlate(h1, h2)\n return cv2.compareHist(h1, h2, cv2.HISTCMP_CORREL)\n elif l1 > l2:\n return find_max_similarity(h1, h2, l1, l2)\n else:\n return find_max_similarity(h2, h1, l2, l1)\n\n\ndef find_max_similarity(longer, shorter, l_longer, l_shorter):\n diff = l_longer - l_shorter\n max_comparison_result = -1\n for offset in range(diff):\n comparison_result = compare_hist_one_way(longer[offset:offset + l_shorter], shorter)\n max_comparison_result = max(max_comparison_result, comparison_result)\n return max_comparison_result\n\n\n#\n# file1 = 'rotated2.jpg'\n# file2 = 'rotated3.jpg'\n# gray1 = cv2.imread(file1, 0)\n# gray2 = cv2.imread(file2, 0)\n#\n# rows1 = select_sample(gray1)\n# rows2 = select_sample(gray2)\n#\n# median1 = extract_the_meat(np.nanmedian(rows1, axis=0))\n# median2 = extract_the_meat(np.nanmedian(rows2, axis=0))\n#\n# _, axarr = plt.subplots(nrows=2, ncols=1, sharex='col')\n# axarr[0].plot(range(0, np.size(median1)), median1)\n# axarr[1].plot(range(0, np.size(median2)), median2)\n#\n# plt.show()\n\ndef do_nothing(color_image):\n return color_image\n\n\ndef equalize_light(color_image):\n yuv = cv2.cvtColor(color_image, cv2.COLOR_BGR2YUV)\n y, u, v = cv2.split(yuv)\n # clahe = cv2.createCLAHE(clipLimit=2.0)\n clahe = cv2.createCLAHE(clipLimit=80.0, tileGridSize=(6, 6))\n y_equalized = clahe.apply(y)\n # y_equalized = cv2.equalizeHist(y)\n light_equalized = cv2.merge((y_equalized, u, v))\n return cv2.cvtColor(light_equalized, cv2.COLOR_YUV2BGR)\n\n\ndef bilateral_filtered(color_image):\n return cv2.bilateralFilter(color_image, 9, 75, 75)\n\n\ndef convert_to_grayscale(color_image):\n preprocessed = do_nothing(color_image)\n return cv2.cvtColor(preprocessed, cv2.COLOR_BGR2GRAY)\n\n\ndef compare(numbers=['13', '14', '15', '16', '17']):\n codes = []\n for number in numbers:\n for file in os.listdir(number):\n if file.endswith('.png'):\n filename = number + '/' + file\n color_image = cv2.imread(filename)\n hist = compute_code(color_image)\n # diff = derivative(hist)\n codes.append((number, filename, normalize(hist)))\n\n # code = longest_code(sample)\n # codes.append((number, code))\n\n # print('{}, {}'.format(diff, len(diff)))\n\n outcomes = []\n threshold = 0.5\n combinations = itertools.combinations(codes, 2)\n for (first, second) in combinations:\n splint1, file1, hist1 = first\n splint2, file2, hist2 = second\n result = compare_hist(hist1, hist2)\n print('{}-{}: {}'.format(file1, file2, result))\n if splint1 == splint2:\n if result > threshold:\n outcomes.append('PP')\n else:\n outcomes.append('FN')\n else:\n if result > threshold:\n outcomes.append('FP')\n else:\n outcomes.append('NN')\n\n 
print(Counter(outcomes))\n\n\ndef compute_code(color_image):\n grayscale = convert_to_grayscale(color_image)\n sample = select_sample(grayscale, step=20)\n # sample = select_sample(image)\n hist = histogram(sample)\n return hist\n\n\ndef normalize(h):\n norm = np.linalg.norm(h)\n if norm == 0:\n return h\n return h / norm\n\n\ndef histogram(sample):\n statistic = np.nanmean(sample, axis=0)\n return drop_nans(statistic)\n\n\ndef derivative(hist):\n return np.diff(hist)\n\n\n# Experimental\ndef longest_code(color_image, breadth=20):\n grayscale = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)\n mask = grayscale.copy()\n with_nans = grayscale.copy().astype('float32')\n with_nans[mask >= 200] = np.nan\n\n index = -1\n max_index = 0\n max_length = 0\n for row in with_nans:\n index += 1\n length = np.count_nonzero(~np.isnan(row))\n if length > max_length:\n max_length = length\n max_index = index\n\n rows = with_nans[max_index - breadth: max_index + breadth]\n # return normalize(histogram(rows))\n hist = histogram(rows)\n margin = int(len(hist) / 100)\n return hist[5 * margin:-5 * margin]\n\n\n# compare()\n\n\n# def close_to_zero(deriv, thresh):\n# indexes = []\n# for index in range(np.size(deriv)):\n# if -thresh <= deriv[index] <= thresh:\n# indexes.append(index)\n#\n# return indexes\n#\n#\n# def rapid_change(filename):\n# img = cv2.imread(filename)\n# filtered = cv2.bilateralFilter(img, 9, 75, 75)\n# gray = cv2.cvtColor(filtered, cv2.COLOR_BGR2GRAY)\n# sample = select_sample(gray)\n# hist = histogram(sample)\n# diff = derivative(hist)\n# indexes = close_to_zero(diff, 0.1)\n# # for index in indexes:\n# # hist[index] = 255\n# return np.outer(np.ones(300), hist)\n#\n#\n\ndef longest_diff(param):\n gray = cv2.imread(param, 0)\n sample = select_sample(gray, step=17)\n hist = histogram(sample)\n return normalize(hist)\n","repo_name":"maciejbiela/masters-thesis-code","sub_path":"color_histogram.py","file_name":"color_histogram.py","file_ext":"py","file_size_in_byte":5806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
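The compare_hist_one_way / find_max_similarity pair in the record above slides the shorter histogram along the longer one and keeps the best correlation; note that its `for offset in range(diff)` skips the final alignment (the last valid offset is `diff` itself, so there are diff + 1 windows). A minimal self-contained sketch of the same sliding comparison, with np.corrcoef standing in for cv2.compareHist with HISTCMP_CORREL (both compute Pearson correlation):

    import numpy as np

    def sliding_max_corr(longer, shorter):
        # Try every alignment of the shorter signal inside the longer one
        # and keep the best Pearson correlation (n - m + 1 offsets in total).
        n, m = len(longer), len(shorter)
        best = -1.0
        for offset in range(n - m + 1):
            window = longer[offset:offset + m]
            best = max(best, float(np.corrcoef(window, shorter)[0, 1]))
        return best

    # Toy signals: a half-speed sine compared against a full sweep.
    print(sliding_max_corr(np.sin(np.linspace(0, 6, 50)), np.sin(np.linspace(0, 3, 25))))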
+{"seq_id":"34927016162","text":"import sys\nimport numpy as np\nfrom matplotlib import pyplot\nimport seaborn as sns\nimport BinClass\nimport LineReg\nimport Decompo\nimport Clustering\n\n\ndef regression_dataset():\n x = np.arange(0, 10).reshape(10, 1)\n noize = np.random.normal(loc=0, scale=0.3, size=10).reshape(10, 1)\n y = np.sin(x) + noize\n return x, y\n\n\ndef classification_dataset():\n noize = np.random.normal(loc=0, scale=0.3, size=40).reshape(20, 2)\n zeros = np.zeros([10, 1])\n ones = np.ones([10, 1])\n negative = np.hstack([ones, zeros])\n positive = np.hstack([zeros, ones])\n x = np.vstack([negative, positive]) + noize\n y = np.vstack([np.zeros(10), np.ones(10)]).reshape(20, 1)\n x /= x.max(axis=0)\n\n return x, y\n\n\ndef fourier():\n '''\n 標準偏差0.3の正規分布によるノイズをかけたsin関数をデータセットとして使用し、フーリエ級数での回帰\n '''\n\n x, y = regression_dataset()\n pyplot.scatter(x, y, label='data')\n prd = LineReg.Fourier(m=3, lam=1.5)\n prd.fit(x, y)\n pyplot.plot(x, np.sin(x), label='true')\n pyplot.plot(x, prd.predict(x), label='pred')\n pyplot.legend()\n pyplot.show()\n\n\ndef perceptron():\n '''\n パーセプトロンによる分類\n '''\n x, y = classification_dataset()\n\n pyplot.scatter(x[:10, 0], x[:10, 1], marker='x')\n pyplot.scatter(x[10:, 0], x[10:, 1], marker='o')\n\n clf = BinClass.Perceptron(eta=1, itr=100)\n clf.fit(x, y)\n\n x = np.arange(-0.1, 1, 0.01)\n y = -(clf.w[0]+clf.w[1]*x)/clf.w[2]\n pyplot.plot(x, y, label='Boundary')\n\n pyplot.show()\n\n\ndef pca():\n '''\n PCAによる二次元から一次元への次元削減\n '''\n noize = np.random.normal(loc=0, scale=0.3, size=10)\n x = np.arange(0, 10)\n y = x*0.7+noize\n pyplot.scatter(x, y, color='red', label='original')\n X = np.hstack([x.reshape(10, 1), y.reshape(10, 1)])\n comp = Decompo.PCA(n_components=1)\n comp.fit(X)\n X = comp.transform(X)\n pyplot.scatter(X, np.zeros(10), color='blue', label='composed')\n pyplot.show()\n\ndef kmean():\n X, _ = classification_dataset()\n\n kmn = Clustering.Kmean(2, 100)\n label = kmn.fit(X)\n\n X_a = X[label == 0, :]\n X_b = X[label == 1, :]\n pyplot.scatter(X_a[:, 0], X_a[:, 1], color='blue', label='group 1')\n pyplot.scatter(X_b[:, 0], X_b[:, 1], color='red', label='group 2')\n pyplot.show()\n \n\n\n\nif __name__ == '__main__':\n sns.set()\n\n if '--Fourier' in sys.argv:\n fourier()\n elif '--Perceptron' in sys.argv:\n perceptron()\n elif '--PCA' in sys.argv:\n pca()\n elif '--Kmean' in sys.argv:\n kmean()\n elif len(sys.argv) > 1:\n print('No such options')\n else:\n print('--Fourier -> フーリエ級数を基底関数とした回帰')\n print('--Perceptron -> パーセプトロンによる分類')\n print('--PCA -> PCAによる次元削減')\n print('--Kmean -> Kmeansによるクラスタリング')\n","repo_name":"juravrik/ML_Study","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"31163749361","text":"# simple implementation of CAM in PyTorch for the networks such as ResNet, DenseNet, SqueezeNet, Inception\n\nimport io\nimport requests\nfrom PIL import Image\nfrom torchvision import models, transforms\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nimport numpy as np\nimport cv2\nimport pdb\nimport os\nimport torch\nfrom network import *\n\n# input image\nLABELS_URL = 'https://s3.amazonaws.com/outcome-blog/imagenet/labels.json'\nIMG_URL = 'http://media.mlive.com/news_impact/photo/9933031-large.jpg'\n\n# networks such as googlenet, resnet, densenet already use global average pooling at the end, so CAM could be used directly.\nmodel_id = 2\nif model_id == 1:\n net = models.squeezenet1_1(pretrained=True)\n finalconv_name = 'features' # this is the last conv layer of the network\nelif model_id == 2:\n #net = models.resnet50(pretrained=True)\n finalconv_name = 'layer4'\n model_resume_path ='./record/spatial/model_best.pth.tar'\n # 2. load pretrained model\n net = resnet50(pretrained=True, channel=3, num_classes=51).cuda()\n if os.path.isfile(model_resume_path):\n print(\"==> loading checkpoint '{}'\".format(model_resume_path))\n checkpoint = torch.load(model_resume_path)\n start_epoch = checkpoint['epoch']\n best_prec1 = checkpoint['best_prec1']\n net.load_state_dict(checkpoint['state_dict'])\n print(\"==> loaded checkpoint '{}' (epoch {}) (best_prec1 {})\"\n .format(model_resume_path, checkpoint['epoch'], best_prec1))\nelif model_id == 3:\n net = models.densenet161(pretrained=True)\n finalconv_name = 'features'\n\nnet.eval()\n\n# hook the feature extractor\nfeatures_blobs = []\ndef hook_feature(module, input, output):\n print(\"output.shape: \", output.shape)\n features_blobs.append(output.data.cpu().numpy())\n\nnet._modules.get(finalconv_name).register_forward_hook(hook_feature)\n\nprint(\"net._modules.get(finalconv_name): \", net._modules.get(finalconv_name))\n\n# get the softmax weight\nparams = list(net.parameters())\nweight_softmax = np.squeeze(params[-2].cpu().data.numpy())\n\nprint(\"params[-1].cpu().data.numpy().shape: \",params[-1].cpu().data.numpy().shape)\nprint(\"params[-2].cpu().data.numpy().shape: \",params[-2].cpu().data.numpy().shape)\nprint(\"weight_softmax.shape: \", weight_softmax.shape)\n\ndef returnCAM(feature_conv, weight_softmax, class_idx):\n # generate the class activation maps upsample to 256x256\n size_upsample = (256, 256)\n bz, nc, h, w = feature_conv.shape\n #print(\"feature_conv.shape: \", feature_conv.shape)\n output_cam = []\n print(len(class_idx), class_idx)\n for idx in class_idx:\n #print(\"weight_softmax[idx].shape: \", weight_softmax[idx].shape)\n print(\"feature_conv.shape: \", feature_conv.shape)\n print(\"feature_conv.reshape((nc, h*w)).shape: \", feature_conv.reshape((nc, h*w)).shape)\n cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h*w)))\n print(\"cam.shape: \", cam.shape)\n cam = cam.reshape(h, w)\n cam = cam - np.min(cam)\n cam_img = cam / np.max(cam)\n cam_img = np.uint8(255 * cam_img)\n #print(\"cam_img.shape: \", cam_img.shape)\n #print(\"cam_img: \", cam_img)\n output_cam.append(cv2.resize(cam_img, size_upsample))\n print(\"len(output_cam): \", len(output_cam))\n return output_cam\n\n\nnormalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n)\npreprocess = transforms.Compose([\n transforms.Scale((224,224)),\n transforms.ToTensor(),\n normalize\n])\n\n# response = requests.get(IMG_URL)\n# img_pil = 
Image.open(io.BytesIO(response.content))\n# img_pil.save('test.jpg')\n\n\ntrain_name=np.load(\"./saved_weights/train_name.npy\")\ntrain_name = train_name.reshape(280,22)\njump2290_train_name = train_name[1]\nimg_indx= 1\nfor img_indx in range(22):\n img_path = jump2290_train_name[img_indx]\n\n img_pil = Image.open(img_path)\n img_tensor = preprocess(img_pil)\n img_variable = Variable(img_tensor.unsqueeze(0)).cuda()\n logit = net(img_variable)\n\n # download the imagenet category list\n # classes = {int(key):value for (key, value)\n # in requests.get(LABELS_URL).json().items()}\n\n\n h_x = F.softmax(logit, dim=1).data.squeeze()\n probs, idx = h_x.sort(0, True)\n probs = probs.cpu().numpy()\n idx = idx.cpu().numpy()\n \n # output the prediction\n for i in range(0, 5):\n print('{:.3f} -> {}'.format(probs[i], idx[i]))\n\n # generate class activation mapping for the top1 prediction\n CAMs = returnCAM(features_blobs[0], weight_softmax, idx)\n\n # render the CAM and output\n print('output CAM.jpg for the top1 prediction: %s'%idx[0])\n #img = cv2.imread('test.jpg')\n img = cv2.imread(img_path)\n img = cv2.resize(img, (256, 256))\n height, width, _ = img.shape\n heatmap = cv2.applyColorMap(CAMs[2], cv2.COLORMAP_JET)\n #heatmap = cv2.applyColorMap(CAMs[0], cv2.COLORMAP_JET)\n result = heatmap * 0.3 + img* 0.5\n result_name = './CAM_result/CAM_'+img_path.split('/')[-3] + img_path.split('/')[-2] + img_path.split('/')[-1]\n print(\"result_name: \", result_name)\n cv2.imwrite(result_name, result)\n","repo_name":"LiliMeng/two-stream-action-recognition","sub_path":"CAM_localization.py","file_name":"CAM_localization.py","file_ext":"py","file_size_in_byte":5126,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"90"}
+{"seq_id":"31419969084","text":"from cryptography.hazmat.primitives.ciphers import Cipher, algorithms\nfrom cryptography.hazmat.backends import default_backend\n\n\nclass AlienTranslator:\n\n def __init__(self, human2alien, key, nonce):\n self.m2e_dic = human2alien\n self.e2m_dic = {}\n\n for m in self.m2e_dic.keys():\n self.e2m_dic[self.m2e_dic[m]] = m\n\n algorithm = algorithms.ChaCha20(key, nonce)\n self.cipher = Cipher(algorithm, mode=None, backend=default_backend())\n\n def encrypt_to_code(self, msg):\n encryptor = self.cipher.encryptor()\n ct = encryptor.update(msg.encode())\n\n raw_code_num = int.from_bytes(ct, byteorder='big')\n code_text = \"\"\n\n while raw_code_num > 0:\n code_text += self.m2e_dic[raw_code_num & 15]\n raw_code_num = raw_code_num >> 4\n\n return code_text\n\n def decrypt_to_msg(self, code):\n decryptor = self.cipher.decryptor()\n raw_code_num = 0\n end = len(code)\n blen = 0\n\n for i in range(len(code) - 1, -1, -1):\n if code[i:end] in self.e2m_dic:\n raw_code_num = raw_code_num << 4\n raw_code_num = raw_code_num | self.e2m_dic[code[i:end]]\n end = i\n blen += 1\n\n msg = decryptor.update(raw_code_num.to_bytes(blen // 2, byteorder='big'))\n\n return msg.decode()\n","repo_name":"minging234/code_for_fun","sub_path":"alien_dictionary.py","file_name":"alien_dictionary.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"36201427876","text":"import numpy as np\nfrom proccessFiles import processCSV\nimport random as r\nimport scipy\nimport scipy.optimize\nimport math\nBIAS_X = 1\ndef splitExamples(examples):\n x = []\n y = []\n for i in range(len( examples)):\n xterm = examples[i][:-1]\n xterm.append(BIAS_X)\n x.append(xterm)\n \n y.append(examples[i][-1:][0])\n x = np.array(x)\n y = np.array(y)\n return (x,y)\n\nclass svm(object):\n \n def __init__(self, examples, t, learningrate,learninga,C,learningtype=\"sgd\",kernalTrue = False):\n self.C = C\n self.r = learningrate\n self.a = learninga\n self.t = t\n self.learningRate = lambda t : self.r/(1+(self.r/self.a)*t)\n self.examples = examples\n if(type(examples) is str):\n self.examples = processCSV(examples)\n \n self.x, self.y = splitExamples(self.examples)\n \n\n self.w = np.zeros(len(self.examples[0]))\n #len(self.examples[0])-1\n #self.bias = self.w[-1:]\n if learningtype == \"sgd\":\n self.sgdSVM()\n if learningtype == \"dual\":\n self.alpha = np.zeros(len(self.examples))\n self.kernalTrue = kernalTrue\n self.dualSVM()\n if learningtype == \"kernal\":\n self.alpha = np.zeros(len(self.examples))\n self.kernalTrue = True\n self.dualSVM()\n\n def prediction(self,sample):\n xi = sample[:-1]\n xi.append(BIAS_X) #bias value\n return np.dot(self.w,xi) #+ self.bias\n\n def updateWeight(self,example,t):\n xi = example[:-1]\n xi.append(BIAS_X) #bias value\n yi = example[-1:]\n xy = np.multiply(xi,yi)\n rc = self.learningRate(t)*self.C\n w0 = self.w[:-1]\n w0 = np.concatenate((w0,[0]))\n #update = self.learningRate(t)*(xy + np.multiply(-2*(1/t),self.w))\n update = -self.learningRate(t)*w0 + self.learningRate(t)*self.C*len(self.examples)*np.multiply(xi,yi)\n self.w = self.w + update\n \n \n \n \n def sgdSVM(self):\n for t in range(1,self.t):\n r.shuffle(self.examples)\n wrongguess = False\n for i in range(len(self.examples)):\n pred = self.prediction(self.examples[i])\n error = self.examples[i][len(self.examples[i])-1] * pred\n if error <= 1:\n self.updateWeight(self.examples[i],t)\n wrongguess = True\n else: \n w0 = self.w[:-1]\n \n w0 = np.multiply(w0,(1-self.learningRate(t)))\n self.w = np.concatenate((w0,self.w[-1:]))\n if ( not wrongguess) :\n break\n \n \n #self.bias = self.bias + np.multiply(example[-1:],rc)\n #previous\n #for j in range(len(self.w)):\n # xy = np.multiply(example[j],example[-1:])\n # self.w[j] = (1 - self.learningRate(t))*self.w[j] + xy*rc\n #self.bias = (1 - self.learningRate(t))*self.bias + example[-1:][0]*rc\n \n return self.w\n\n\n\n\n def wstar(self):\n \"\"\"calculate wstar from alpha star and examples\n w* = sum(i) {aiyixi}\n \"\"\"\n wstar = np.zeros(len(self.x[0][:-1]))\n for i in range(len(self.x)):\n calc = np.multiply(self.x[i][:-1],self.y[i]*self.alpha[i])\n wstar += calc\n \n \"\"\"\"\n calculate bias\n b* = (1/#notzero alpha) sum(notzero alpha) (yi - wTxi)\n \"\"\"\n bias = 0\n count = 0\n for i in range(len(self.x)):\n if(self.alpha[i] > 0.0001):\n bias += (self.y[i] - np.dot(wstar,self.x[i][:-1]))\n count += 1\n bias = (1/count)*bias\n \n wstar = np.concatenate((wstar,[bias]))\n \"\"\" \n j = math.floor(r.uniform(0,len(self.x)))\n wstar = np.concatenate((wstar,[0]))\n bias = self.y[j] - np.dot(self.x[j],wstar)\n wstar[len(wstar)-1] = bias\"\"\"\n return wstar\n \n def objective(self,alpha,x,y,kernal):\n \"\"\"objective function for dual SVM \n (1/2) sum(i){sum(j){ yiyj aiaj xi^Txj}} - sum(i){ai}\n \n s.t. 
0= 0:\n count +=1\n total +=1\n print(\"correct spliting: \",str(count),\"/\",str(total))\n\n examples = [[1,2,1],[0,3,1],[2,3,-1],[2,4,-1]]\n \n \n Svm = svm(examples,10,0.1,2,c[0],\"sgd\")\n\n count = 0\n total = 0\n for example in examples:\n pred = Svm.prediction(example)\n if example[-1:][0] * pred >= 0:\n count +=1\n total +=1\n print(\"correct bais test: \",str(count),\"/\",str(total))\n\n ","repo_name":"MrIsaar/MachineLearning","sub_path":"SVM/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":7137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"27096520388","text":"from spack import *\n\n\nclass Ray(CMakePackage):\n \"\"\"Parallel genome assemblies for parallel DNA sequencing\"\"\"\n\n homepage = \"http://denovoassembler.sourceforge.net/\"\n url = \"https://downloads.sourceforge.net/project/denovoassembler/Ray-2.3.1.tar.bz2\"\n\n version('2.3.1', '82f693c4db60af4328263c9279701009')\n\n depends_on('mpi')\n\n @run_after('build')\n def make(self):\n mkdirp(prefix.bin)\n make('PREFIX=%s' % prefix.bin)\n\n def install(self, spec, prefix):\n make('install')\n","repo_name":"matzke1/spack","sub_path":"var/spack/repos/builtin/packages/ray/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
+{"seq_id":"4284892759","text":"import pyttsx3\r\nimport requests\r\nprint('----------此版本解决了人工智能第一代的网络卡顿和延迟,加入了最新的人工智能库(os库:提供通用的、基本的操作系统交互功能),邵宗贤作-2022')\r\nprint('你好,我是小思')\r\nprint('欢迎为您服务')\r\nengline = pyttsx3.init()\r\nengline.setProperty('voice','zh')\r\nline1 = '你好,主人'\r\nline2 = '欢迎回来'\r\nline3 = '我是小思'\r\nline4 = '您的AI第一代加强版人工智能语言帮手'\r\nline5 = '欢迎为您服务'\r\nengline.say(line1)\r\nengline.say(line2)\r\nengline.say(line3)\r\nengline.say(line4)\r\nengline.say(line5)\r\nengline.runAndWait()\r\nwhile True:\r\n a=input()\r\n url='https://api.ownthink.com/bot?appid=9ffcb5785ad9617bf4e64178ac64f7b1&spoken=%s'%a\r\n te=requests.get(url).json()\r\n data=te['data']['info']['text']\r\n print(data)","repo_name":"dirde12078904/xiao-si","sub_path":"人工智能/人工智能(加强版).py","file_name":"人工智能(加强版).py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
+{"seq_id":"21898017307","text":"import platform\nimport shutil\nimport os\nimport sqlite3\nimport requests\nfrom requests_html import HTMLSession\nfrom glob import glob\n\ndef fetch_os():\n '''Returns (OS, Version) of the currently running OS.'''\n system = platform.system()\n if system == \"Windows\":\n return (system, platform.release())\n elif system == \"Darwin\":\n return (\"Mac OS\", platform.mac_ver()[0])\n elif system == \"Linux\":\n return (\"Linux\", platform.release())\n else:\n return (\"N/A\", \"N/A\")\n\ndef copy_firefox_cookie_db(OS):\n '''Copies the cookies sqlite database from Firefox from a given\n OS. Currently only Mac OS is supported. Future work will merge\n copy functions together and take OS and Browser as arguemnts.'''\n try:\n os.remove(\"./ff_cookies.sqlite\")\n except:\n pass\n if OS[0] == \"Mac OS\":\n folder = os.path.expanduser(\"~\")+\"/Library/Application Support/Firefox/Profiles/\"\n else:\n folder = \"N/A\"\n cookie_location = glob(folder + \"*/\")[0] + \"cookies.sqlite\" \n try:\n shutil.copyfile(cookie_location, \"./ff_cookies.sqlite\")\n return True\n except:\n return False\n\ndef copy_chrome_cookie_db(OS):\n '''Copies the cookies sqlite database from Chrome from a given\n OS. Currently only Mac OS is supported. Future work will merge\n copy functions together and take OS and Browser as arguemnts.'''\n try:\n os.remove(\"./ch_cookies.sqlite\")\n except:\n pass\n if OS[0] == \"Mac OS\":\n folder = os.path.expanduser(\"~\")+\"/Library/Application Support/Google/Chrome/Default/\"\n else:\n folder = \"N/A\"\n cookie_location = folder + \"Cookies\"\n print(cookie_location)\n try:\n shutil.copyfile(cookie_location, \"./ch_cookies.sqlite\")\n return True\n except:\n return False\n\n\ndef fetch_twitter_token(dbfile, browser):\n '''Retreive auth_token cookie for specific browsers.\n Presently works for Firefox. Chrome support is in progress,\n but only retreives the encrypted cookie. Decryption is not\n yet available.'''\n conn = sqlite3.connect(dbfile)\n c = conn.cursor()\n if browser == \"FF\":\n query = \"SELECT value FROM moz_cookies WHERE baseDomain='twitter.com' AND name='auth_token'\"\n if browser == \"CH\":\n query = \"SELECT encrypted_value FROM cookies WHERE host_key='.twitter.com' AND name='auth_token'\"\n for row in c.execute(query):\n return row[0]\n pass\n\ndef login(cookie):\n url = \"https://twitter.com/home\"\n cookies = dict(auth_token=cookie)\n session = HTMLSession()\n #r = requests.get(url, cookies=cookies, allow_redirects=True)\n r = session.get(url, cookies=cookies)\n session.resolve_redirects(r, session.get(url, cookies=cookies))\n r.html.render()\n print(r.text)\n\ndef main():\n OS = fetch_os()\n copy_firefox_cookie_db(OS)\n #copy_chrome_cookie_db(OS)\n #print(fetch_twitter_token(\"./ff_cookies.sqlite\", \"FF\"))\n #print(fetch_twitter_token(\"./ch_cookies.sqlite\", \"CH\"))\n login(fetch_twitter_token(\"./ff_cookies.sqlite\", \"FF\"))\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"EvanGrill/CS445_Internet_Security","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"6005551261","text":"\"\"\"\nMatchingNet classifier head. \nAdapted from https://github.com/nupurkmr9/S2M2_fewshot/blob/master/methods/matchingnet.py\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom .utils import compute_prototypes\n\nclass MN_head(nn.Module):\n\n def __init__(\n self) -> None:\n super().__init__()\n\n\n def forward(self, query_images: Tensor, support_images: Tensor, support_labels) -> Tensor:\n \"\"\"Take one task of few-shot support examples and query examples as input,\n output the logits of each query examples.\n\n Args:\n query_images: query examples. size: [num_query, c, h, w]\n support_images: support examples. size: [num_support, c, h, w]\n support_labels: labels of support examples. size: [num_support, way]\n Output:\n classification_scores: The calculated logits of query examples.\n size: [num_query, way]\n \"\"\"\n if query_images.dim() == 4:\n query_images = F.adaptive_avg_pool2d(query_images, 1).squeeze_(-1).squeeze_(-1)\n support_images = F.adaptive_avg_pool2d(support_images, 1).squeeze_(-1).squeeze_(-1)\n\n assert support_images.dim() == query_images.dim() == 2\n\n support_images = F.normalize(support_images, p=2, dim=1, eps=1e-12)\n query_images = F.normalize(query_images, p=2, dim=1, eps=1e-12)\n\n one_hot_label = F.one_hot(support_labels,num_classes = torch.max(support_labels).item()+1).float()\n\n #[num_support, num_query]\n scores = support_images.mm(query_images.transpose(0,1))\n\n #[num_query, n_way]\n classification_scores = compute_prototypes(scores, one_hot_label).transpose(0,1)\n\n\n # The original paper use cosine simlarity, but here we scale it by 100 to strengthen\n # highest probability after softmax\n classification_scores = F.relu(classification_scores) * 100\n classification_scores = F.softmax(classification_scores, dim=1)\n\n return classification_scores\n\n\ndef create_model():\n return MN_head()\n","repo_name":"Frankluox/CloserLookAgainFewShot","sub_path":"architectures/classifier/MatchingNet.py","file_name":"MatchingNet.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"90"}
+{"seq_id":"19124296508","text":"# Pseudocode production run using models\n\nfrom views_runs import Series, Run, Model, RunResult\n\nseries = Series.get(series_name)\n\nrun = Run.get(series, level_of_analysis, outcome)\n# Gets current run, based on current date\n# This also retrieves previously trained model objects\n# from last month\n\nrun.partitioner\n# The data partitioner is defined by combining the run date with a formula for how\n# to divide the data (ex. percentage train / test / calibrate).\n\nfor model in run.models:\n if model.retrain:\n model = retrain_model(model)\n model.publish()\n\n result = Result.new(model)\n\n result.publish()\n","repo_name":"UppsalaConflictDataProgram/production_run_sketch","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18536571939","text":"class Unionfind:\n \n def __init__(self,n):\n self.uf = [-1]*n\n\n def find(self,x):\n if self.uf[x] < 0:\n return x\n else:\n self.uf[x] = self.find(self.uf[x])\n return self.uf[x]\n\n def same(self,x,y):\n return self.find(x) == self.find(y)\n\n def union(self,x,y):\n x = self.find(x)\n y = self.find(y)\n if x == y:\n return \n if self.uf[x] > self.uf[y]:\n x,y = y,x\n self.uf[x] += self.uf[y]\n self.uf[y] = x\n\n def size(self,x):\n x = self.find(x)\n return -self.uf[x]\n\nn,m = map(int,input().split())\nl = list(map(int,input().split()))\nu = Unionfind(n)\nfor i in range(m):\n x,y = map(int,input().split())\n u.union(x-1,y-1)\n\nl1 = [[] for i in range(n)]\nl2 = [[] for i in range(n)]\nfor i in range(n):\n x = u.find(i)\n l1[x].append(i)\n l2[x].append(l[i]-1)\nans = 0\nfor i in range(n):\n a = set(l1[i])\n b = set(l2[i])\n ans += len(a&b)\nprint(ans)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03354/s333662848.py","file_name":"s333662848.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"21180252826","text":"def leftRightDifference(nums):\n \n n = len(nums)\n res = [0]*n\n for i in range(n):\n leftsum = 0\n rightsum = 0\n for j in range(0,i):\n leftsum+=nums[j]\n for k in range(i+1,n):\n rightsum+=nums[k]\n res[i]=abs(leftsum-rightsum)\n\n return res","repo_name":"Tettey1/A2SV","sub_path":"leetcode-solutions/left-right-sum-difference.py","file_name":"left-right-sum-difference.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"13222285870","text":"import json\nimport os\n\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\n\nclass Trainer(object):\n def __init__(self, args, device='cpu'):\n self.args = args\n self.max_epochs = args.max_epochs\n self.report_steps = args.report_steps\n self.save_steps = args.save_steps\n self.checkpoint_dir = args.checkpoint_dir\n\n self.device = device\n\n def step(self, batch_data, cls_method, model_name, model, ):\n if cls_method == 'one-pass':\n eid, sent_item, labels = batch_data\n labels = labels.to(self.device)\n if 'roberta' in model_name:\n input_ids, attn_mask = \\\n sent_item['input_ids'].squeeze(1).to(self.device), \\\n sent_item['attention_mask'].squeeze(1).to(self.device)\n logits = model(input_ids, attn_mask) # [b, 1]\n else:\n input_ids, attn_mask, token_type_ids = \\\n sent_item['input_ids'].squeeze(1).to(self.device), \\\n sent_item['attention_mask'].squeeze(1).to(self.device), \\\n sent_item['token_type_ids'].squeeze(1).to(self.device)\n logits = model(input_ids, attn_mask, token_type_ids) # [b, 1]\n elif cls_method == 'two-pass':\n eid, sent1_item, sent2_item, labels = batch_data\n labels = labels.to(self.device)\n if 'roberta' in model_name:\n input_ids1, attn_mask1 = \\\n sent1_item['input_ids'].squeeze(1).to(self.device), \\\n sent1_item['attention_mask'].squeeze(1).to(self.device)\n input_ids2, attn_mask2 = \\\n sent2_item['input_ids'].squeeze(1).to(self.device), \\\n sent2_item['attention_mask'].squeeze(1).to(self.device)\n logits = model(input_ids1, input_ids2, attn_mask1, attn_mask2)\n else:\n input_ids1, attn_mask1, token_type_ids1 = \\\n sent1_item['input_ids'].squeeze(1).to(self.device), \\\n sent1_item['attention_mask'].squeeze(1).to(self.device), \\\n sent1_item['token_type_ids'].squeeze(1).to(self.device)\n input_ids2, attn_mask2, token_type_ids2 = \\\n sent2_item['input_ids'].squeeze(1).to(self.device), \\\n sent2_item['attention_mask'].squeeze(1).to(self.device), \\\n sent2_item['token_type_ids'].squeeze(1).to(self.device)\n logits = model(input_ids1, input_ids2, attn_mask1, attn_mask2, token_type_ids1, token_type_ids2)\n else:\n raise ValueError(f'cls_method {cls_method} is not supported.')\n return logits, labels, eid\n\n def train(self, train_dataloader, dev_dataloader, model, model_name, cls_method, optimizer, criterion):\n model.train()\n total_steps = 0\n best_acc, running_loss = 0.0, 0.0\n for epoch in range(self.max_epochs):\n for _, batch_data in enumerate(tqdm(train_dataloader)):\n total_steps += 1\n logits, labels, _ = self.step(batch_data, cls_method, model_name, model)\n\n optimizer.zero_grad()\n loss = criterion(logits.squeeze(-1), labels.float())\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n if total_steps % self.report_steps == 0:\n print(f'Epoch#{epoch}, total steps#{total_steps}, loss:{running_loss / self.report_steps}')\n running_loss = 0.0\n if total_steps % self.save_steps == 0:\n print('-' * 20, \"Evaluating\", '-' * 20)\n eval_loss, eval_acc, results = self.evaluate(dev_dataloader, model, model_name, cls_method,\n criterion)\n eval_loss, eval_acc = round(eval_loss, 4), round(eval_acc, 4)\n print(f'Evaluate result: '\n f'Epoch#{epoch}, total steps#{total_steps}, loss:{eval_loss}, acc: {eval_acc}')\n if eval_acc >= best_acc:\n print(f\"Get new Best Result:{eval_acc},Saving...\")\n best_acc = eval_acc\n save_path = os.path.join(self.checkpoint_dir,\n f'ckpt_ep{epoch}_step{total_steps}_loss{eval_loss}_acc{eval_acc}.pt')\n torch.save(model.module.state_dict(), save_path)\n 
print(\"Saved!\")\n with open(os.path.join(self.checkpoint_dir, 'logging/',\n f'log_ep{epoch}_step{total_steps}_loss{loss}_acc{eval_acc}.json'), 'w') as f:\n eval_dict = {\n 'eval_loss': eval_loss,\n 'eval_acc': eval_acc,\n 'results': results\n }\n json.dump(eval_dict, f)\n torch.cuda.empty_cache()\n\n def evaluate(self, dataloader, model, model_name, cls_method, criterion):\n running_loss = 0.\n\n eid_list, label_list, pred_list = [], [], []\n results = []\n model.eval()\n for _, batch_data in enumerate(tqdm(dataloader)):\n logits, labels, eid = self.step(batch_data, cls_method, model_name, model)\n\n loss = criterion(logits.squeeze(-1), labels.float())\n running_loss += loss.item()\n probs = logits.unsqueeze(-1).detach().cpu().numpy()\n preds = probs.copy()\n preds[preds > 0.5] = 1\n preds[preds <= 0.5] = 0\n preds = preds.squeeze()\n eid = eid.detach().cpu().numpy()\n labels = labels.detach().cpu().numpy()\n preds = preds.detach().cpu().numpy()\n eid_list.extend(eid)\n label_list.extend(labels)\n pred_list.extend(preds)\n running_loss /= len(dataloader)\n accuracy = float(np.mean(np.equal(pred_list, label_list)))\n for eid, label, pred in zip(eid_list, label_list, pred_list):\n results.append({\n 'eid': int(eid),\n 'label': int(label),\n 'pred': int(pred)\n })\n model.train()\n return running_loss, accuracy, results\n\n def predict(self, dataloader, model, model_name, cls_method\n ):\n eid_list, pred_list = [], []\n results = []\n model.eval()\n for _, batch_data in enumerate(tqdm(dataloader)):\n logits, _, eid = self.step(batch_data, cls_method, model_name, model)\n probs = logits.unsqueeze(-1).detach().cpu().numpy()\n preds = probs.copy()\n preds[preds > 0.5] = 1\n preds[preds <= 0.5] = 0\n preds = preds.squeeze()\n eid = eid.detach().cpu().numpy()\n preds = preds.detach().cpu().numpy()\n eid_list.extend(eid)\n pred_list.extend(preds)\n\n for eid, pred in zip(eid_list, pred_list):\n results.append({\n 'eid': int(eid),\n 'pred': int(pred)\n })\n\n return results\n","repo_name":"ChenXianyu2002/NLUProject2022","sub_path":"PairSim/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":7242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"15799079836","text":"#!/usr/bin/env python\n\nimport pymcu\nfrom time import sleep\n\nmb = pymcu.mcuModule()\n\nnum_of_pins = 8\npins = range(1, num_of_pins + 1)\ncount = 0\n\ntry:\n while True:\n for (pin, value) in zip(pins, [int(count & (1 << n) > 0) for n in range(num_of_pins)]):\n if value:\n mb.pinHigh(pin)\n else:\n mb.pinLow(pin)\n\n sleep(0.5)\n count = (count + 1) % 256\nexcept KeyboardInterrupt:\n mb.pinLow(pins)\n mb.close()\n","repo_name":"cfletcher1856/pymcu","sub_path":"Binary-Counting/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
+{"seq_id":"2057360165","text":"### author : alexis lebis\n\nimport argparse\nimport csv\nimport sys\n\ndef create_arg_parser():\n # Creates and returns the ArgumentParser object\n\n parser = argparse.ArgumentParser(description='This script helps with the creation of the criteria for a HAL collection (www.hal.science).\\n\\n It works with a csv file, formatted as [lastName:str, firstName:str,idHAL:str,ORCiD:str,scholarArrival:AAAA-MM-JJ,scholarDeparture:AAAA-MM-JJ,nonActive:AAAA-MM-JJ,reinstatement:AAAA-MM-JJ]. Please note that \"nonActive\" is used as the meaning of \"mise en disponibilité\" in the french civil service ; \"reinstatement\" means that someone who takes his/her \"mise en disponibilité\" has returned.\\n\\n This software comes without any guarantee : please always check the output before using it for your HAL Collection.\\n\\nAuthor: Alexis Lebis')\n parser.add_argument('csvPath',\n help='Path to the input csv file.')\n parser.add_argument('--output',\n help='Path and name of the file to where the criteria should be saved. Use default hal_criteria if missing')\n parser.add_argument('-v', '--verbose', help=\"Increase output verbosity by displaying HAL criteria produced.\", action=\"store_true\")\n return parser\n\nif __name__ == \"__main__\":\n arg_parser = create_arg_parser()\n parsed_args = arg_parser.parse_args(sys.argv[1:])\n\nPREFIX_IDHAL = \"authIdHal_s:\"\nPREFIX_ORCID = \"authORCIDIdExt_s:\"\nPREFIX_SUBMISSION = \"producedDate_tdate:\" #ISO 8601 -- should it be publicationDate_tdate ?\n\nwith open(parsed_args.csvPath, newline='') as csvfile:\n\n reader = csv.DictReader(csvfile)\n print(\"Generating HAL new criteria...\")\n critHAL = \"\"\n\n for row in reader:\n idHAL = \"\"\n ORCiD = \"\"\n autID = \"\"\n arrive= \"\"\n depart= \"\"\n ad = \"\"\n d1 = \"\"\n rd1 =\"\"\n drd1 = \"\"\n tmpCst = \"\"\n\n rowCrit = \"\"\n\n print(\"* Processing: \"+row['firstName'] + \" \"+ row['lastName'])\n \n if(not(row['idHAL'] == \"\")):\n idHAL = PREFIX_IDHAL+\"(\"+row['idHAL']+\")\"\n \n if(not(row['ORCiD'] == \"\")):\n ORCiD = PREFIX_ORCID+\"(\"+row['ORCiD']+\")\"\n ORCiD = ORCiD.replace(\"-\",\"\")\n\n #Building logical prop :\n ## (scholar_id OR isWorkingForInstitution)\n ### scholar_id = idHAL OR ORCiD\n ### isWorkingForInstitution <-> (arrive >= datePubli AND datePubli < depart) AND ( datePubli <= dispo1 OR datePubli >= retourDispo1)\n autID = idHAL\n if(idHAL and ORCiD):\n autID += \" OR \"\n autID += ORCiD\n \n #IF non idHAL or ORCiD, can't identify author ! Print error then abort this scholar\n if(autID == \"\"):\n print(\"There was an issue while parsing \"+row['firstName'] + row['lastName']+\"'s id. Aborting his/her HAL criteria. Please check the .csv file. 
If the scholar does not have an idHAL, please make him/her create one.\")\n continue\n\n #Adding temporal conditions now\n\n ##Building scholar arriving and departure\n if(not(row[\"scholarArrival\"] == \"\")):\n arrive = \"[\"+row[\"scholarArrival\"]+\"T00:00:00Z\"\n if(not(row[\"scholarDeparture\"] == \"\")):\n depart = row[\"scholarDeparture\"] +\"T00:00:00Z\"+ \"}\" #excluding last element\n \n if(arrive):\n if(depart):\n ad = PREFIX_SUBMISSION + arrive + \" TO \" + depart\n else:\n ad = PREFIX_SUBMISSION + arrive + \" TO * ]\"\n else:\n if(depart):\n ad = PREFIX_SUBMISSION + \"[* TO \"+ depart\n else:\n ad = \"\"\n\n ## scholar \"dispo\"\n if(not(row[\"nonActive\"] == \"\")):\n d1 = row[\"nonActive\"]\n if(not(row[\"reinstatement\"] == \"\")):\n rd1 = row[\"reinstatement\"]\n if(d1):\n if(rd1):\n drd1 = \"NOT \"+ PREFIX_SUBMISSION + \"{\" + d1 + \"T00:00:00Z TO \"+ rd1+\"T00:00:00Z]\"\n else: #not come back yet\n drd1 = \"NOT \"+ PREFIX_SUBMISSION + \"{\" + d1 + \"T00:00:00Z TO *]\"\n else:\n if(rd1):\n print(\"The .csv file contains strange information regarding the 'dispo' of \"+row['prenom'] + row['nom']+\". Please, check and fix. For now, skipping this constraint.\")\n d1 = \"\"\n rd1 = \"\"\n drd1 = \"\"\n\n tmpCst = ad\n if(tmpCst and drd1):\n tmpCst += \" AND \"\n tmpCst += drd1\n\n\n rowCrit = \"(\"+autID+\")\"\n if(tmpCst):\n rowCrit += \" AND (\"+tmpCst+\")\"\n\n if(critHAL):\n critHAL += \" OR \"\n critHAL += \"(\"+rowCrit+\")\"\n\n print(\"HAL criteria generated.\")\n if(parsed_args.verbose):\n print(critHAL)\n\n path = \"\"\n if(parsed_args.output):\n path = parsed_args.output\n else:\n path = \"hal_criteria.txt\"\n f = open(path, \"w\")\n f.write(critHAL)\n f.close\n print(\"HAL criteria saved to:\"+ path)\n","repo_name":"alexislebis/hal-CCB","sub_path":"hal_ccb.py","file_name":"hal_ccb.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
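A worked example of the criterion string the hal_ccb record above assembles, for a hypothetical scholar with idHAL jane-doe, arrival 2019-09-01, no departure, a leave ("mise en disponibilité") starting 2021-01-01 with reinstatement 2021-12-31. autID carries the identity, ad the arrival/departure window, drd1 the leave exclusion, and critHAL wraps the row in one more level of parentheses:

    ((authIdHal_s:(jane-doe)) AND (producedDate_tdate:[2019-09-01T00:00:00Z TO * ] AND NOT producedDate_tdate:{2021-01-01T00:00:00Z TO 2021-12-31T00:00:00Z]))

Separately, note that the error branch for inconsistent leave dates prints row['prenom'] and row['nom'], which do not exist in the documented CSV header (firstName/lastName); that line would raise KeyError if ever reached.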
+{"seq_id":"34505675323","text":"#%%\nimport os\nGPU = \"0,1,2\"\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=GPU\n\nimport random\nimport config as cfg\nfrom PIL import Image\nimport numpy as np\nfrom utilsF import *\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nclass Generator(object):\n \n \"\"\" this has been changed since now the list of cropped image in list_frames\n is built by the function \n \"\"\"\n def _single_input_generator(self, video):\n # print(\"video: \", video)\n selected_person = random.choice(range( len(self.annotation[video]['p_l']))) # select person from all persons\n label = self._label_generator(video, selected_person)\n\n org_list, frame_list = self._frame_mask_list_generator(video, selected_person)\n final_input = np.concatenate([frame_list, org_list], axis=-1)\n return final_input, label\n \n ###\n \n \"\"\" \n The following function replaces _frame_list_generator and _mask_list_generator\n \"\"\"\n def _frame_mask_list_generator(self, video, selected_person):\n frame_list = []\n org_list = []\n mask_list =[]\n # video = 0 #'qrkff49p4E4'\n frame_id_list =[0,4,9,14,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,44,49,54,59]#np.array(np.arange(60))\n mask_list = []\n # mask_id_list =frame_id_list.copy()#n\n for frame_id in frame_id_list:\n frame = self.annotation[video]['f_l'][frame_id]\n # print(\"frame_id:\", frame_id)\n bbox = self.annotation[video]['p_l'][selected_person][\"bb_l\"][frame_id]\n v_id = self.annotation[video]['v_id']\n pathF = os.path.join(cfg.VIDEOS_DATASET_PATH, v_id, frame+'.png' )\n mask_id = frame_id\n \n mask = self.annotation[video]['f_l'][mask_id]\n p_id = self.annotation[video]['p_l'][selected_person]['p_id']\n pathM = os.path.join(cfg.SEGMENTS_DATASET_PATH, v_id, mask+'_'+str(p_id)+'.png' )\n try:\n img = read_image(pathF )\n mask = read_image(pathM) \n except:\n img = Image.new('RGB', (cfg.WIDTH, cfg.HEIGHT), color = (0, 0, 0))\n mask = Image.new('L', (cfg.WIDTH, cfg.HEIGHT), color=0)\n # sng_p, width, height = img_tranfrom(img, bbox)\n sng_p, width, height = img_tranfrom_8_points(img, bbox)\n \n org_img = imm_resize(np.array(img))\n cropped_imm = imm_mask_crop_resize(sng_p, mask) \n maskx = mask_resize(mask)\n \n \"\"\" \n Now I add noise to the cropped image. You should try both \n versions: with noise and without noise\n The noise serve to avoid having all zeros in the mask. 
\n The noise improves the network ability to find regular patterns (I hope!!!)\n \"\"\"\n noisy_cropped_imm = add_noise(cropped_imm, maskx)\n \n \"\"\" \n Here note that rescaling the image between 0 and 1 is done ONLY HERE!!!\n To be sure that there are no double normalizations\n \"\"\"\n org_list.append(org_img/255.0)\n # frame_list.append(cropped_imm/255.0) ### without added noise \n frame_list.append(noisy_cropped_imm/255.0) ## with added noise to remove the zeroes\n # mask_list.append(np.array(mask)/1.0)\n return org_list, frame_list#, mask_list\n \n### \n \n \"\"\" \n This is the old function, now replaced by __frame_mask_list_generator \n \"\"\"\n # def _frame_list_generator(self, video, selected_person):\n # frame_list = []\n # org_list = []\n # frame_id_list =[0,4,9,14,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,44,49,54,59]#np.array(np.arange(60))\n # for frame_id in frame_id_list:\n # frame = self.annotation[video]['f_l'][frame_id]\n # bbox = self.annotation[video]['p_l'][selected_person][\"bb_l\"][frame_id]\n # v_id = self.annotation[video]['v_id']\n # path = os.path.join(cfg.VIDEOS_DATASET_PATH, v_id, frame+'.png' )\n # try:\n # img = read_image(path)\n # except:\n # img = Image.new('RGB', (cfg.WIDTH, cfg.HEIGHT), color = (0, 0, 0))\n # # sng_p, width, height = img_tranfrom(img, bbox)\n # sng_p, width, height = img_tranfrom_8_points(img, bbox)\n # imgx = imm_resize(sng_p)\n # org_img = imm_resize(np.array(img))\n \n # org_list.append(org_img)\n # frame_list.append(imgx)\n # return frame_list\n # frame_list = np.array(frame_list)\n # org_list = np.array(org_list)\n # return frame_list, org_list# np.reshape(frame_list,(3,224,224,60))\n # \"\"\" This is the old function, now replaced by __frame_mask_list_generator \"\"\"\n # def _mask_list_generator(self, video, selected_person):\n # mask_list = []\n # mask_id_list =[0,4,9,14,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,44,49,54,59]#np.array(np.arange(60))\n # for mask_id in mask_id_list:\n # mask = self.annotation[video]['f_l'][mask_id]\n # bbox = self.annotation[video]['p_l'][selected_person][\"bb_l\"][mask_id]\n # p_id = self.annotation[video]['p_l'][selected_person]['p_id']\n # v_id = self.annotation[video]['v_id']\n # path = os.path.join(cfg.SEGMENTS_DATASET_PATH, v_id, mask+'_'+str(p_id)+'.png' )\n # try:\n # img = read_image(pathF)\n # except:\n # img = Image.new('L', (cfg.WIDTH, cfg.HEIGHT), color=0)\n \n # # sng_p, width, height = img_tranfrom(img, bbox)\n # imgx = mask_resize(img)\n # mask_list.append(imgx)\n # return mask_list\n \n # def combine(frame_lsit, mask_list):\n # new_list = []\n # for imm, mask in zip(mask_list, frame_list):\n # combined = image_resize(imm, mask)\n\n # mask_list = np.array(mask_list)\n # return mask_list# np.reshape(mask_list,(3,224,224,60))\n \n \n\n def _label_generator(self, video, selected_person):\n action_list = self.annotation[video]['p_l'][selected_person]['a_l']\n action_list = list(set(action_list))\n label= np.zeros(80)\n for action in action_list:\n # if action==37 or action==4:\n label[action-1]=1\n return label\n\n\nclass Data_Loader(Generator):\n def __init__(self):\n self.annotation = file_reader(cfg.ANNOTATION_PATH)\n self.train_list, self.val_list = Data_Loader.split_dataset(len(self.annotation))\n self.train_ds = self.initilize_ds(self.train_list)\n self.val_ds = self.initilize_ds(self.val_list)\n\n\n\n @classmethod\n def split_dataset(cls, ds_size):\n total_list = np.arange(ds_size)\n np.random.seed(seed=1717)\n 
np.random.shuffle(total_list)\n # print(\"total_list: \", total_list[:3])\n # total_list = total_list[:cfg.DATASET_SIZE]\n # divider =round(cfg.DATASET_SIZE*cfg.SPLIT_RATIO)\n divider =round(ds_size*cfg.SPLIT_RATIO)\n return total_list[:divider-6], total_list[divider+1:]\n\n @classmethod\n def input_generator(cls, id_list):\n for idx in range(len(id_list)):\n yield id_list[idx]\n\n\n \n def read_transform(self, idx):\n [frame_list, label] = tf.py_function(self._single_input_generator, [idx], [tf.float32, tf.int32])\n return frame_list, label\n\n \n def initilize_ds(self, list_ids):\n ds = tf.data.Dataset.from_generator(Data_Loader.input_generator , args= [list_ids], output_types= (tf.int32))\n ds = ds.map(self.read_transform, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n ds = ds.prefetch(tf.data.experimental.AUTOTUNE)\n return ds\n\n\n# ds = Data_Loader()\n# train_ds = ds.train_ds\n# # val_ds =ds.val_ds\n\n# for [f, l] in train_ds.take(2):\n# print(f.shape)\n# print(l)\n# plt.imshow(f[15][:, :, :3].numpy())\n \n# plt.show()\n# plt.imshow(f[15][:, :, 3:].numpy())\n# plt.show()\n \n# np.save(f\"./results/one_input.npy\",f)\n# np.save(f\"./results/one_input_mask.npy\",m)\n# for [f, m, l] in val_ds.take(10):\n# print(f.shape)\n# print(m.shape)\n# print(l)\n# plt.imshow(f[0].numpy())\n# plt.show()\n# plt.imshow(m[0].numpy())\n# plt.show()\n\n\n# %%\n","repo_name":"ferrotem/RotConv3D","sub_path":"data_loaderF.py","file_name":"data_loaderF.py","file_ext":"py","file_size_in_byte":8462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"9405371544","text":"# %%\n# https://sudipghimire.com.np\n\"\"\"\nType Hinting in Python\n\n- Introduced first in python 3.6\n- Hinting is not strict so the statement can contain different datatypes\n- Hintings are useful specifically for the development purpose rather than execution\n- Hintings do not make python statically typed, but adds confidence to the programmers\n- it makes programmers productive by hinting the exact data type so that we do not need to browse the source\n\n\"\"\"\nfrom typing import Dict, List\n\n\n# %% Some Examples of hinting in statements\n\na: int = 5 # same as a = 5\n\nx: str = \"ssdsd\"\n\nb: float = 5.5 # same as b = 5.5\nc: str = 'Tyrion Lannister'\nd: list = [1, 2, 3, 4, 5, 'abc']\ne: tuple = (1, 2, 3)\n\n# List\n\n# %% Compound Types\nimport typing\nfrom typing import List\nl1: List['int'] = [4, 5]\n\n\nl2: List = ['abc', 45]\n\nd1: Dict[str, str] = {'k': 'v'}\n\n\n# these are the basic Type hintings.\n# Creating custom types will be discussed in the python L2 course since\n# this part requires the knowledge of classes and inheritance\n\nimport math\ndef add_int(x, y):\n \n return( math.floor(x) + y)\n","repo_name":"ghimiresdp/python-notes","sub_path":"c03_advanced_data_types/code/c0305_type_hinting.py","file_name":"c0305_type_hinting.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"65"}
+{"seq_id":"16148773767","text":"\"\"\"\n## NLP Application\n\nDESCRIPTION\n\nAuthor: [Chen Li](https://vbn.aau.dk/en/persons/142294)\nSource: [Github](https://github.com/TBC)\n\"\"\"\nimport streamlit as st\nimport time\n\nfrom PIL import Image\nfrom transformers import pipeline, set_seed\nfrom configuration import Config\nfrom main.TG.text_generation import TextGeneration\nfrom main.SA.sen_twitter import TwitterClient\nfrom main.QA.bert import QA\nfrom transformers import pipeline\nfrom main.SU.summarization import *\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel\n\n# config file defines the necessary parameters\ncfg = Config()\n\ndef dis_home_page():\n st.title(cfg.home_title)\n st.markdown(cfg.home_des)\n # img = Image.open(cfg.botx_face_path)\n # st.image(img, width=250)\n\n\ndef dis_QA_page():\n st.title(cfg.QA_title)\n st.markdown(cfg.QA_des)\n img = Image.open(cfg.QA_image_path)\n st.image(img, width=700)\n st.write('---')\n message_context = st.text_area(\"Give some context first\", \"Type Here\")\n message_question = st.text_area(\"You may ask me a question now\", \"Type Here\")\n click = st.button(\"Show me answer\")\n if click:\n with st.spinner(\"Wait...\"):\n qa = QA(cfg.QA_model_path)\n answer = qa.predict(message_context.title(), message_question.title())\n st.success(answer['answer'])\n # model details\n st.write('---')\n st.header(\"Model Details\")\n st.subheader(\"Information-retrieval based Qeustion Answer\")\n st.markdown(cfg.QA_process)\n st.subheader(\"Model Architecture\")\n st.markdown(cfg.QA_model_overview)\n img = Image.open(cfg.QA_bert_model_image_path)\n st.image(img, width=550)\n st.write('---')\n st.header(\"Reference\")\n st.info(cfg.QA_about)\n\n\ndef dis_TG_page():\n st.title(cfg.TG_title)\n st.markdown(cfg.TG_des)\n img = Image.open(cfg.TG_image_path)\n st.image(img, width=200)\n message = st.text_area(\"Enter your snippet\", \"Type Here\")\n click = st.button(\"Generate Response\")\n generator = pipeline('text-generation', model='gpt2')\n set_seed(42)\n if click:\n with st.spinner(\"Wait...\"):\n sentence = generator(str(message.title()), max_length=100, num_return_sequences=1)\n st.success(sentence[0]['generated_text'])\n # model details\n st.write('---')\n st.header(\"Model Details\")\n st.subheader(\"What is GPT2\")\n st.markdown(cfg.TG_model_overview)\n st.subheader(\"Model Architecture\")\n img = Image.open(cfg.TG_architecture_image_path)\n st.image(img, width=550)\n\n st.write('---')\n st.header(\"About\")\n st.info(cfg.TG_about)\n\n\ndef dis_SA_page():\n st.title(cfg.SA_title)\n st.markdown(cfg.SA_des)\n img = Image.open(cfg.SA_image_path)\n st.image(img, width=700)\n st.header(\"Analysis Twitter Topic.\")\n message = st.text_area(\"Let's play with tweet first\",\"Type Here\")\n click = st.button(\"Sentiment Detection for tweet\")\n if click:\n with st.spinner(\"Wait...\"):\n sa = TwitterClient()\n ptweetsPer, ntweetsPer, netweetsPer, ptweets, ntweets, netweets = sa.run(message.title())\n # percentage results\n per_results = ptweetsPer + '\\n\\r' + ntweetsPer + '\\n\\r' + netweetsPer\n # details results\n pt = [item['text'] for item in ptweets]\n nt = [item['text'] for item in ntweets]\n net = [item['text'] for item in netweets]\n\n st.success(\"Result Distribution: \\n\\r\" + per_results)\n st.text(\"Positive Tweets:\")\n st.info(pt)\n st.text(\"Negative Tweets: \")\n st.info(nt)\n st.text(\"Neutral Tweets:\")\n st.info(net)\n st.write('---')\n st.header(\"Analysis sentences.\")\n sentence = st.text_area(\"Let's give a sentence\", 
\"Type Here\")\n click_sen = st.button(\"Sentiment Detection for sentence\")\n if click_sen:\n with st.spinner(\"Wait...\"):\n classifier = pipeline(\"sentiment-analysis\")\n results = classifier([sentence.title()])\n\n st.success(\"The sentence is predicted as a \" + str(results[0]['label']) + \" and the score is \" + str(results[0]['score']))\n\n\ndef dis_SU_page():\n st.title(cfg.SU_title)\n st.markdown(cfg.SU_des)\n img = Image.open(cfg.SU_image_path)\n st.image(img, width=500)\n\n summarizer = load_summarizer()\n\n message = st.text_area(\"Please enter your text here\", \"Type Here\")\n click = st.button(\"Generate Summarization\")\n\n max = st.sidebar.slider('Select max', 50, 500, step=10, value=150)\n min = st.sidebar.slider('Select min', 10, 450, step=10, value=50)\n do_sample = st.sidebar.checkbox(\"Do sample\", value=False)\n if click and message:\n with st.spinner(\"Generating Summary..\"):\n chunks = generate_chunks(message)\n res = summarizer(chunks,\n max_length=max,\n min_length=min,\n do_sample=do_sample)\n sentence = ' '.join([summ['summary_text'] for summ in res])\n # st.write(text)\n st.success(sentence)\n st.write('---')\n st.header(\"Model Details\")\n st.subheader(\"What is BART\")\n st.markdown(cfg.SU_model_overview)\n st.subheader(\"Model Architecture\")\n img = Image.open(cfg.SU_architecture_image_path)\n st.image(img, width=550)\n\n st.write('---')\n st.header(\"About\")\n st.info(cfg.SU_about)\n\n\ndef set_sidebar():\n # set the navigation menu\n st.sidebar.header('Navigation')\n nav_choice = st.sidebar.radio('', cfg.nav_menu)\n # NLP_choice = st.sidebar.selectbox(\"Select Activity\", cfg.NLP_menu)\n # CV_choice = st.sidebar.selectbox(\"Select Activity\", cfg.CV_menu)\n # RL_choice = st.sidebar.selectbox(\"Select Activity\", cfg.RL_menu)\n # change the navigation\n if nav_choice == 'Home':\n dis_home_page()\n elif nav_choice == 'Natural Language Processing (NLP)':\n NLP_choice = st.sidebar.selectbox(\"Select Activity\", cfg.NLP_menu)\n if NLP_choice == 'Text Generation':\n dis_TG_page()\n\n elif NLP_choice == 'Sentiment Analysis':\n dis_SA_page()\n\n elif NLP_choice == 'Question & Answering':\n dis_QA_page()\n\n elif NLP_choice == 'Summarization':\n dis_SU_page()\n\n elif nav_choice == 'NLP + Computer Vision':\n CV_choice = st.sidebar.selectbox(\"Select Activity\", cfg.CV_menu)\n if CV_choice == 'Facial Recognition':\n dis_home_page()\n\n elif CV_choice == 'Object Detection':\n dis_home_page()\n\n elif CV_choice == 'Other':\n dis_home_page()\n elif nav_choice == 'NLP + Computer Vision':\n RL_choice = st.sidebar.selectbox(\"Select Activity\", cfg.RL_menu)\n if RL_choice == 'Dialogue Policy Planning':\n dis_home_page()\n\n elif RL_choice == 'Other':\n dis_home_page()\n st.sidebar.header('Contributes')\n st.sidebar.info(cfg.contr_info)\n st.sidebar.header('About')\n st.sidebar.info(cfg.abt_info)\n\n\ndef main():\n # import the css style\n with open(r\"style.css\") as f:\n st.markdown(''.format(f.read()), unsafe_allow_html=True)\n\n # set sidebar\n set_sidebar()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"lcroy/MaxStreamlit","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"7067645877","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .models import List\nfrom django.contrib import messages\nfrom .forms import List_Forms\n\ndef index(request):\n if request.method == 'POST':\n form = List_Forms(request.POST or None)\n form.save()\n all_items = List.objects.all\n messages.success(request, 'Item has been added successfully.')\n return render(request, 'index.html', {'all_items': all_items})\n else:\n all_items = List.objects.all\n return render(request, 'index.html', {'all_items': all_items})\n\ndef delete(request, list_id):\n item = List.objects.get(pk = list_id)\n item.delete()\n messages.success(request, 'Item has been successfully deleted.')\n return redirect('index')\n\ndef cross_off(request, list_id):\n item = List.objects.get(pk = list_id)\n item.completed = True\n item.save()\n return redirect('index')\n\ndef uncross(request, list_id):\n item = List.objects.get(pk = list_id)\n item.completed = False\n item.save()\n return redirect('index')","repo_name":"soumadittya/todo_app","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"40042174285","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patheffects as pe\nimport os\nfrom modelselection import kfold\nfrom fysstatistics import Regressor\nfrom typing import Callable, Optional, Tuple, Any\nfrom sampler import Sampler\nfrom sklearn.model_selection import train_test_split\nif \"JPY_PARENT_PID\" in os.environ:\n from tqdm import tqdm_notebook as tqdm\nelse:\n from tqdm import tqdm\n\n\nclass Ensemble:\n def __init__(self, sampler: Sampler,\n num_members: int = 10, sample_size: int = 100,\n order: int = 2) -> None:\n self.sampler = sampler\n self.num_members = num_members\n self.sample_size = sample_size\n self.order = order\n\n def run(self, Reg: Regressor) -> None:\n self.sampler.set_noise(0.0)\n X, Y = self.sampler(self.sample_size)\n Xtest, Ytest = self.sampler(self.sample_size)\n noise = np.random.normal(0, 0.1, (self.num_members, self.sample_size))\n train = np.tile(Y, (self.num_members, 1)) + noise\n dfs = self.order\n self.degrees_of_freedom = np.zeros(dfs)\n self.MSE_train = np.zeros((dfs, self.num_members))\n self.MSE_test = np.zeros_like(self.MSE_train)\n self.MSE_true = np.zeros_like(self.MSE_train)\n self.variance = np.zeros(dfs)\n self.bias = np.zeros_like(self.variance)\n progress = tqdm(total=dfs*self.num_members)\n for i, order in enumerate(range(1, self.order+1)):\n prediction = np.zeros_like(train)\n for n in range(self.num_members):\n regressor = Reg(X, train[n, :])\n regressor.fit([order, order], interactions=False)\n prediction[n, :] = regressor.predict(Xtest)\n self.MSE_train[i, n] = regressor.mse()\n self.MSE_true[i, n] = regressor.mse(X, Y)\n self.MSE_test[i, n] = regressor.mse(Xtest, Ytest)\n progress.update(1)\n\n mean = prediction.mean(axis=0)\n var = prediction.var(axis=0)\n squared_bias = (Ytest - mean)**2\n\n\n self.variance[i] = var.mean()\n self.bias[i] = squared_bias.mean()\n\n self.degrees_of_freedom[i] = order+order#regressor.df()\n\n\n def plot_train_test(self, ax: Optional[Any] = None) -> Tuple[Any, Any]:\n if ax is None:\n fig, ax = plt.subplots(1)\n else:\n fig = None\n\n dfs = np.tile(self.degrees_of_freedom, (self.num_members, 1)).T\n ax.plot(dfs, self.MSE_train, color=lighten_color('dodgerblue'),\n alpha=0.1, linewidth=0.5)\n ax.plot(dfs, self.MSE_test, '-', color=lighten_color('forestgreen'),\n alpha=0.1, linewidth=0.5)\n train_mean = np.mean(self.MSE_train, axis=1)\n test_mean = np.mean(self.MSE_test, axis=1)\n ax.plot(self.degrees_of_freedom, train_mean, '-',\n label='Train', color='dodgerblue', linewidth=0.4)\n ax.plot(self.degrees_of_freedom, test_mean,\n '-', label='Test', color='forestgreen', linewidth=0.4)\n\n ax.set_ylabel(\"MSE\")\n ax.set_xlabel(\"Complexity\")\n lgd = fig.legend(loc='lower left',# mode='expand', \n ncol=2,\n bbox_to_anchor=(0.3, 1.02, 1, 0.2))\n\n return fig, ax\n\n\n def plot_decomposition(self, ax: Optional[Any] = None) -> Tuple[Any, Any]:\n if ax is None:\n fig, ax = plt.subplots(1)\n else:\n fig = None\n\n dfs = np.tile(self.degrees_of_freedom, (self.num_members, 1)).T\n #ax.plot(dfs, self.MSE_true, '-', color=lighten_color('forestgreen'),\n # alpha=0.1, linewidth=0.5)\n train_mean = np.mean(self.MSE_train, axis=1)\n test_mean = np.mean(self.MSE_test, axis=1)\n true_mean = np.mean(self.MSE_true, axis=1)\n ax.plot(self.degrees_of_freedom, test_mean, '-',\n label='Test', color='forestgreen', linewidth=0.4)\n #ax.plot(self.degrees_of_freedom, test_mean,\n # '-', label='Test', color='forestgreen', linewidth=0.4,\n # alpha=0.5)\n 
#ax.plot(self.degrees_of_freedom, true_mean,\n # '-', label='True', color='forestgreen', linewidth=0.8)\n #ax.plot(self.degrees_of_freedom, true_mean + 0.1**2,\n # '--', label=r\"$bias^2 + Var + \\sigma_{\\varepsilon}^2$\",\n # linewidth=0.4)\n ax.plot(self.degrees_of_freedom, self.bias, label='$bias^2$')\n ax.plot(self.degrees_of_freedom, self.variance, label='Var')\n ax.plot(self.degrees_of_freedom, self.bias + self.variance, \n '--', label=\"$bias^2 + Var$\")\n\n ax.set_ylabel(\"MSE\")\n ax.set_xlabel(\"Complexity\")\n fig.legend(loc='lower left', # mode='expand', \n ncol=2,\n bbox_to_anchor=(-1.3, 1.02, 1, 0.2))\n\n return fig, ax\n\n\n\ndef lighten_color(color, amount=0.5):\n \"\"\"\n Lightens the given color by multiplying (1-luminosity) by the given amount.\n Input can be matplotlib color string, hex string, or RGB tuple.\n\n Examples:\n >> lighten_color('g', 0.3)\n >> lighten_color('#F034A3', 0.6)\n >> lighten_color((.3,.55,.1), 0.5)\n \"\"\"\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])\n","repo_name":"ErlendLima/FYSSTK4155","sub_path":"projects/project1/src/ensemble.py","file_name":"ensemble.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"10793131637","text":"#Programa 07\nimport os\nnombre_alumno,nota_1,nota_2,nota_3=\"\",0,0,0\n\n#INPUT VIA OS\nnombre_alumno=os.sys.argv[1]\nnota_1=int(os.sys.argv[2])\nnota_2=int(os.sys.argv[3])\nnota_3=int(os.sys.argv[4])\n\n#PROCESSING\n#Si el promedio supera 14, mostrar \"nota aprobatoria\"\nif(promedio > 14 ):\n print(promedio, \"nota aprobatoria\")\n#fin_if\n","repo_name":"AliciaVilchez/t06_Vilchez_Orlandini","sub_path":"chambergo/Simple7.py","file_name":"Simple7.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"40616096543","text":"\"\"\"'first_migration_table'\n\nRevision ID: 411085461db7\nRevises: 36a9621f1b96\nCreate Date: 2019-12-16 15:25:07.524112\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '411085461db7'\ndown_revision = '36a9621f1b96'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('messages', sa.Column('is_read', sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('messages', 'is_read')\n # ### end Alembic commands ###\n","repo_name":"HilelBenhamou/MessaginSystemeFlask","sub_path":"migrations/versions/411085461db7_first_migration_table.py","file_name":"411085461db7_first_migration_table.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"}
+{"seq_id":"26842411535","text":"from django.urls import path, include\r\nfrom . import views\r\n\r\n\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('', views.detail, name='detail'),\r\n path('about', views.about, name='about'),\r\n path('user/', views.user, name='user'),\r\n path('buy_page/', views.buy_page, name='buy_page'),\r\n path('signup', views.sign_up, name='signup'),\r\n path('login', views.login_user, name='login'),\r\n path('logout', views.django_logout, name='logout'),\r\n]\r\n","repo_name":"michtom/ski_shop","sub_path":"ski_rental_shop/strona/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"27130029021","text":"# -*- coding: utf-8 -*-\n\"\"\" Anisotropic (:mod:`fluidsim.base.forcing.anisotropic`)\n==========================================================\n\n.. autoclass:: TimeCorrelatedRandomPseudoSpectralAnisotropic\n :members:\n :private-members:\n\n\"\"\"\n\nfrom math import degrees\nfrom math import pi\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\nfrom fluiddyn.calcul.easypyfft import fftw_grid_size\n\nfrom fluidsim.base.forcing.specific import TimeCorrelatedRandomPseudoSpectral\nfrom fluidsim.util import ensure_radians\n\n\nclass TimeCorrelatedRandomPseudoSpectralAnisotropic(\n TimeCorrelatedRandomPseudoSpectral\n):\n \"\"\"Random normalized anisotropic forcing.\n\n .. inheritance-diagram:: TimeCorrelatedRandomPseudoSpectralAnisotropic\n\n \"\"\"\n\n tag = \"tcrandom_anisotropic\"\n\n @classmethod\n def _complete_params_with_default(cls, params):\n \"\"\"This static method is used to complete the *params* container.\"\"\"\n super(\n TimeCorrelatedRandomPseudoSpectral, cls\n )._complete_params_with_default(params)\n\n params.forcing._set_child(\n \"tcrandom_anisotropic\",\n {\"angle\": \"45°\", \"delta_angle\": None, \"kz_negative_enable\": False},\n )\n\n params.forcing.tcrandom_anisotropic._set_doc(\n \"\"\"\n\nSee :mod:`fluidsim.base.forcing.anisotropic`.\n\nangle: float or str\n\n Angle between the wavevector and the horizontal characterising the forcing\n region.\n\ndelta_angle: float or None\n\n Control the shape of the forcing region in k-space. If None, ``(khmin,\n khmax, kvmin, kvmax)`` are computed from the angle,\n ``params.forcing.nkmin_forcing`` and ``params.forcing.nkmax_forcing``.\n\nkz_negative_enable: bool\n\n If True, modes with negative kz are also forced.\n\n\"\"\"\n )\n\n def __init__(self, sim):\n super().__init__(sim)\n\n if self.params.forcing.normalized.type == \"particular_k\":\n raise NotImplementedError\n\n def _create_params_coarse(self, fft_size):\n params_coarse = super()._create_params_coarse(fft_size)\n\n self.angle = angle = ensure_radians(self.params.forcing[self.tag].angle)\n\n tmp = self.params.forcing.tcrandom_anisotropic\n try:\n delta_angle = tmp.delta_angle\n except AttributeError:\n # loading old simul with delta_angle\n delta_angle = None\n else:\n delta_angle = ensure_radians(delta_angle)\n\n if delta_angle is None:\n self.khmax_forcing = np.sin(angle) * self.kmax_forcing\n self.kvmax_forcing = np.cos(angle) * self.kmax_forcing\n else:\n self.khmin_forcing = (\n np.sin(angle - 0.5 * delta_angle) * self.kmin_forcing\n )\n self.kvmin_forcing = (\n np.cos(angle + 0.5 * delta_angle) * self.kmin_forcing\n )\n self.khmax_forcing = (\n np.sin(angle + 0.5 * delta_angle) * self.kmax_forcing\n )\n self.kvmax_forcing = (\n np.cos(angle - 0.5 * delta_angle) * self.kmax_forcing\n )\n\n if hasattr(params_coarse.oper, \"nz\"):\n # 3d\n kymax_forcing = self.khmax_forcing\n else:\n # 2d\n kymax_forcing = self.kvmax_forcing\n\n # The \"+ 1\" aims to give some gap between the kxmax and\n # the boundary of the oper_coarse.\n try:\n params_coarse.oper.nx = 2 * fftw_grid_size(\n int(self.khmax_forcing / self.oper.deltakx) + 1\n )\n except AttributeError:\n pass\n\n try:\n params_coarse.oper.ny\n except AttributeError:\n pass\n else:\n params_coarse.oper.ny = 2 * fftw_grid_size(\n int(kymax_forcing / self.oper.deltaky) + 1\n )\n\n try:\n params_coarse.oper.nz\n except AttributeError:\n pass\n else:\n params_coarse.oper.nz = 2 * fftw_grid_size(\n int(self.kvmax_forcing / self.oper.deltakz) 
+ 1\n )\n\n return params_coarse\n\n def _compute_cond_no_forcing(self):\n \"\"\"Computes condition no forcing of the anisotropic case.\"\"\"\n angle = self.angle\n\n tmp = self.params.forcing.tcrandom_anisotropic\n try:\n delta_angle = tmp.delta_angle\n except AttributeError:\n # loading old simul with delta_angle\n delta_angle = None\n else:\n delta_angle = ensure_radians(delta_angle)\n\n kf_min = self.kmin_forcing\n kf_max = self.kmax_forcing\n\n try:\n self.params.oper.nz\n except AttributeError:\n ndim = 2\n else:\n ndim = 3\n\n if delta_angle is None:\n self.khmin_forcing = np.sin(angle) * self.kmin_forcing\n self.kvmin_forcing = np.cos(angle) * self.kmin_forcing\n\n if ndim == 2:\n Kh = self.oper_coarse.KX\n Kv = self.oper_coarse.KY\n else:\n Kh = np.sqrt(self.oper_coarse.Kx**2 + self.oper_coarse.Ky**2)\n Kv = self.oper_coarse.Kz\n\n COND_NO_F_KH = np.logical_or(\n Kh > self.khmax_forcing, Kh < self.khmin_forcing\n )\n\n COND_NO_F_KV = np.logical_or(\n Kv > self.kvmax_forcing, Kv < self.kvmin_forcing\n )\n\n if self.params.forcing.tcrandom_anisotropic.kz_negative_enable:\n COND_NO_F_KV = np.logical_and(\n COND_NO_F_KV,\n np.logical_or(\n Kv < -self.kvmax_forcing, Kv > -self.kvmin_forcing\n ),\n )\n\n COND_NO_F = np.logical_or(COND_NO_F_KH, COND_NO_F_KV)\n COND_NO_F[self.oper_coarse.shapeK_loc[0] // 2] = True\n COND_NO_F[:, self.oper_coarse.shapeK_loc[1] - 1] = True\n\n else:\n if ndim == 2:\n K = np.sqrt(self.oper_coarse.KX**2 + self.oper_coarse.KY**2)\n Kv = self.oper_coarse.KY\n else:\n K = np.sqrt(\n self.oper_coarse.Kx**2\n + self.oper_coarse.Ky**2\n + self.oper_coarse.Kz**2\n )\n Kv = self.oper_coarse.Kz\n\n K_nozero = K.copy()\n K_nozero[K_nozero == 0] = 1e-14\n\n theta = np.arccos(Kv / K_nozero)\n del K_nozero\n\n COND_NO_F_K = np.logical_or(K > kf_max, K < kf_min)\n\n COND_NO_F_THETA = np.logical_or(\n theta > angle + 0.5 * delta_angle,\n theta < angle - 0.5 * delta_angle,\n )\n\n if self.params.forcing.tcrandom_anisotropic.kz_negative_enable:\n COND_NO_F_THETA = np.logical_and(\n COND_NO_F_THETA,\n np.logical_or(\n theta < pi - angle - 0.5 * delta_angle,\n theta > pi - angle + 0.5 * delta_angle,\n ),\n )\n\n COND_NO_F = np.logical_or(COND_NO_F_K, COND_NO_F_THETA)\n COND_NO_F[self.oper_coarse.shapeK_loc[0] // 2] = True\n COND_NO_F[:, self.oper_coarse.shapeK_loc[1] - 1] = True\n\n return COND_NO_F\n\n def plot_forcing_region(self):\n \"\"\"Plots the forcing region\"\"\"\n pforcing = self.params.forcing\n\n khmin_forcing = self.khmin_forcing\n khmax_forcing = self.khmax_forcing\n kvmin_forcing = self.kvmin_forcing\n kvmax_forcing = self.kvmax_forcing\n kf_min = self.kmin_forcing\n kf_max = self.kmax_forcing\n\n tmp = self.params.forcing.tcrandom_anisotropic\n try:\n delta_angle = tmp.delta_angle\n except AttributeError:\n # loading old simul with delta_angle\n delta_angle = None\n\n try:\n self.params.oper.nz\n except AttributeError:\n ndim = 2\n else:\n ndim = 3\n\n # Define forcing region\n coord_x = khmin_forcing\n coord_y = kvmin_forcing\n width = khmax_forcing - khmin_forcing\n height = kvmax_forcing - kvmin_forcing\n\n if ndim == 2:\n Kh = self.oper_coarse.KX\n Kv = self.oper_coarse.KY\n deltakh = self.oper.deltakx\n deltakv = self.oper.deltaky\n else:\n Kh = np.sqrt(self.oper_coarse.Kx**2 + self.oper_coarse.Ky**2)\n Kv = self.oper_coarse.Kz\n deltakh = self.oper.deltakx\n deltakv = self.oper.deltakz\n\n fig, ax = plt.subplots()\n ax.set_aspect(\"equal\")\n\n title = (\n pforcing.type\n + \"; \"\n + rf\"$nk_{{min}} = {pforcing.nkmin_forcing} \\delta k_v$; \"\n + 
rf\"$nk_{{max}} = {pforcing.nkmax_forcing} \\delta k_v$; \"\n + \"\\n\"\n + r\"$\\theta_f = {:.0f}^\\circ$; \".format(degrees(self.angle))\n + rf\"Forced modes = {self.nb_forced_modes}\"\n )\n\n ax.set_title(title)\n ax.set_xlabel(r\"$k_h$\")\n ax.set_ylabel(r\"$k_v$\")\n\n # Parameters figure\n\n # Set limits to 125% of the kf_max\n factor = 1.2\n ax.set_xlim([0.0, factor * kf_max])\n ax.set_ylim([0.0, factor * kf_max])\n\n xticks = np.arange(0.0, factor * kf_max, deltakv)\n yticks = np.arange(0.0, factor * kf_max, deltakv)\n ax.set_xticks(xticks)\n ax.set_yticks(yticks)\n\n if delta_angle is None:\n # Plot forcing region\n ax.add_patch(\n patches.Rectangle(\n xy=(coord_x, coord_y), width=width, height=height, fill=False\n )\n )\n\n # Plot lines forcing region\n ax.plot(\n [khmin_forcing, khmin_forcing],\n [0, kvmin_forcing],\n \"k--\",\n linewidth=0.8,\n )\n ax.plot(\n [khmax_forcing, khmax_forcing],\n [0, kvmin_forcing],\n \"k--\",\n linewidth=0.8,\n )\n ax.plot(\n [0, khmin_forcing],\n [kvmin_forcing, kvmin_forcing],\n \"k--\",\n linewidth=0.8,\n )\n ax.plot(\n [0, khmin_forcing],\n [kvmax_forcing, kvmax_forcing],\n \"k--\",\n linewidth=0.8,\n )\n\n # Location labels 0.8% the length of the axis\n factor = 0.008\n loc_label_y = abs(Kv).max() * factor\n loc_label_x = abs(Kh).max() * factor\n\n ax.text(loc_label_x + khmin_forcing, loc_label_y, r\"$k_{h,min}$\")\n ax.text(loc_label_x + khmax_forcing, loc_label_y, r\"$k_{h,max}$\")\n ax.text(loc_label_x, kvmin_forcing + loc_label_y, r\"$k_{v,min}$\")\n ax.text(loc_label_x, kvmax_forcing + loc_label_y, r\"$k_{v,max}$\")\n\n else:\n # Plot forcing region\n ax.add_patch(\n patches.Arc(\n xy=(0, 0),\n width=(kf_min + kf_max),\n height=(kf_min + kf_max),\n angle=0,\n theta1=90.0 - degrees(self.angle),\n theta2=90.0,\n linestyle=\"dotted\",\n )\n )\n\n ax.add_patch(\n patches.Arc(\n xy=(0, 0),\n width=2.1 * kf_max,\n height=2.1 * kf_max,\n angle=0,\n theta1=90.0\n - degrees(self.angle)\n - 0.5 * degrees(delta_angle),\n theta2=90.0\n - degrees(self.angle)\n + 0.5 * degrees(delta_angle),\n linestyle=\"--\",\n )\n )\n\n ax.add_patch(\n patches.Arc(\n xy=(0, 0),\n width=2 * kf_min,\n height=2 * kf_min,\n angle=0,\n theta1=90.0\n - degrees(self.angle)\n - 0.5 * degrees(delta_angle),\n theta2=90.0\n - degrees(self.angle)\n + 0.5 * degrees(delta_angle),\n linestyle=\"-\",\n )\n )\n ax.add_patch(\n patches.Arc(\n xy=(0, 0),\n width=2 * kf_max,\n height=2 * kf_max,\n angle=0,\n theta1=90.0\n - degrees(self.angle)\n - 0.5 * degrees(delta_angle),\n theta2=90.0\n - degrees(self.angle)\n + 0.5 * degrees(delta_angle),\n linestyle=\"-\",\n )\n )\n\n # Plot arc kmin and kmax\n ax.add_patch(\n patches.Arc(\n xy=(0, 0),\n width=2 * kf_min,\n height=2 * kf_min,\n angle=0,\n theta1=0.0,\n theta2=90.0,\n linestyle=\"-.\",\n )\n )\n ax.add_patch(\n patches.Arc(\n xy=(0, 0),\n width=2 * kf_max,\n height=2 * kf_max,\n angle=0,\n theta1=0.0,\n theta2=90.0,\n linestyle=\"-.\",\n )\n )\n\n # Plot lines angle & lines forcing region\n xmin = khmin_forcing\n xmax = self.kmax_forcing * np.sin(self.angle - 0.5 * delta_angle)\n ymin = self.kmin_forcing * np.cos(self.angle - 0.5 * delta_angle)\n ymax = kvmax_forcing\n ax.plot([xmin, xmax], [ymin, ymax], color=\"k\", linewidth=1)\n\n xmin = self.kmin_forcing * np.sin(self.angle + 0.5 * delta_angle)\n xmax = khmax_forcing\n ymin = kvmin_forcing\n ymax = self.kmax_forcing * np.cos(self.angle + 0.5 * delta_angle)\n ax.plot([xmin, xmax], [ymin, ymax], color=\"k\", linewidth=1)\n\n # Location labels kmin and kmax\n factor = 0.015\n 
loc_label_y = abs(Kv).max() * factor\n loc_label_x = abs(Kh).max() * factor\n\n ax.text(loc_label_y + self.kmin_forcing, loc_label_y, r\"$k_{f,min}$\")\n ax.text(loc_label_x + self.kmax_forcing, loc_label_y, r\"$k_{f,max}$\")\n\n # Location label angle \\theta\n factor = 1.1\n loc_label_y = (\n (kf_min + kf_max) * 0.5 * np.cos(self.angle * 0.5) * factor\n )\n loc_label_x = (\n (kf_min + kf_max) * 0.5 * np.sin(self.angle * 0.5) * factor\n )\n\n ax.text(loc_label_x, loc_label_y, r\"$\\theta_f$\")\n\n # Location label delta_angle \\delta \\theta\n factor = 1.1\n loc_label_y = kf_max * np.cos(self.angle) * factor\n loc_label_x = kf_max * np.sin(self.angle) * factor\n\n ax.text(loc_label_x, loc_label_y, r\"$\\delta \\theta_f$\")\n\n # Plot forced modes in red\n indices_forcing = np.argwhere(self.COND_NO_F == False)\n for i, index in enumerate(indices_forcing):\n if ndim == 2:\n ax.plot(\n Kh[0, index[1]],\n Kv[index[0], 0],\n \"ro\",\n label=\"Forced mode\" if i == 0 else \"\",\n )\n else:\n ax.plot(\n Kh[0, index[1], index[2]],\n Kv[index[0], 0, 0],\n \"ro\",\n label=\"Forced mode\" if i == 0 else \"\",\n )\n\n ax.grid(linestyle=\"--\", alpha=0.4)\n ax.legend()\n\n\nclass TimeCorrelatedRandomPseudoSpectralAnisotropic3D(\n TimeCorrelatedRandomPseudoSpectralAnisotropic\n):\n \"\"\"Random normalized anisotropic forcing.\n\n .. inheritance-diagram:: TimeCorrelatedRandomPseudoSpectralAnisotropic3D\n\n \"\"\"\n\n tag = \"tcrandom_anisotropic\"\n\n @classmethod\n def _complete_params_with_default(cls, params):\n super()._complete_params_with_default(params)\n params.forcing.tcrandom_anisotropic.delta_angle = \"10°\"\n","repo_name":"fluiddyn/fluidsim","sub_path":"fluidsim/base/forcing/anisotropic.py","file_name":"anisotropic.py","file_ext":"py","file_size_in_byte":16310,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"65"}
+{"seq_id":"38313970193","text":"t = int(input())\r\nwhile(t>0):\r\n n = int(input())\r\n s = str(n)\r\n\r\n maximum = int(max(s))\r\n lth = len(s)\r\n #print(lth)\r\n if lth == 2:\r\n another = 3\r\n elif lth == 3:\r\n another = 6\r\n elif lth == 4:\r\n another = 10\r\n elif lth == 1:\r\n another = 1\r\n\r\n ans = 10*(maximum-1) + another\r\n print(ans)\r\n t = t-1\r\n \r\n","repo_name":"Oshayer-Siddique/PYTHON_CP","sub_path":"codeforces 677 div 3 A.py","file_name":"codeforces 677 div 3 A.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"22686245864","text":"import time\nimport random\nimport asyncio\nimport multiprocessing\n\nfrom atlasbuggy import Orchestrator, Node, run\n\n\nclass MainThreadHogger(Node):\n def __init__(self, enabled=True):\n super(MainThreadHogger, self).__init__(enabled)\n\n async def loop(self):\n while True:\n self.logger.info(\"loop: broadcasting time\")\n producer_time = time.time()\n\n counter = 0\n\n # stalls the whole main thread\n t0 = time.time()\n while time.time() - t0 < 1:\n counter += 1\n await self.broadcast((producer_time, counter))\n\n await asyncio.sleep(0.5)\n self.logger.info(\"producer time: %s\" % producer_time)\n\n\nDEMO_EXCEPTION_IN_PROCESS = False\n\n\nclass OffloadWithProcess(Node):\n def __init__(self, enabled=True):\n self.exit_event = multiprocessing.Event()\n self.read_queue = multiprocessing.Queue()\n self.write_queue = multiprocessing.Queue()\n self.process = multiprocessing.Process(target=self.process_fn)\n\n super(OffloadWithProcess, self).__init__(enabled)\n\n def process_fn(self):\n time_to_wait = 1\n while not self.exit_event.is_set():\n process_time = time.time()\n\n counter = 0\n t0 = time.time()\n\n while time.time() - t0 < time_to_wait:\n counter += 1\n self.read_queue.put((process_time, counter))\n # time.sleep(0.0001)\n time.sleep(time_to_wait / 5)\n if self.exit_event.is_set():\n break\n\n while not self.write_queue.empty():\n time_to_wait = self.write_queue.get()\n\n if DEMO_EXCEPTION_IN_PROCESS:\n self.exit_event.set()\n raise Exception(\"Something bad happened!!\")\n\n self.logger.info(\"Process put %s items on the pipe\" % counter)\n\n async def setup(self):\n self.process.start()\n\n async def loop(self):\n message_num = 0\n while True:\n if self.exit_event.is_set():\n return\n\n counter = 0\n while not self.read_queue.empty():\n process_time, counter = self.read_queue.get()\n producer_time = time.time()\n self.broadcast_nowait((message_num, producer_time, process_time, counter))\n counter += 1\n message_num += 1\n\n time_to_wait = random.random()\n self.logger.info(\"set time to wait: %s\" % time_to_wait)\n self.write_queue.put(time_to_wait)\n\n if counter > 0:\n self.logger.info(\"broadcast %s items, last num was %s\" % (counter, message_num))\n await asyncio.sleep(0.5)\n\n async def teardown(self):\n self.logger.info(\"closing process\")\n self.exit_event.set()\n\n\nclass ConsumerNode(Node):\n def __init__(self, enabled=True):\n super(ConsumerNode, self).__init__(enabled)\n\n self.producer_tag = \"producer\"\n self.producer_sub = self.define_subscription(self.producer_tag)\n self.producer_queue = None\n self.producer = None\n\n def take(self):\n self.producer_queue = self.producer_sub.get_queue()\n self.producer = self.producer_sub.get_producer()\n\n self.logger.info(\"Got producer named '%s'\" % self.producer.name)\n\n async def loop(self):\n while True:\n if not self.producer_queue.empty():\n self.logger.info(\"Consuming %s items\" % self.producer_queue.qsize())\n while not self.producer_queue.empty():\n message_num, producer_time, process_time, counter = self.producer_queue.get_nowait()\n consumer_time = time.time()\n\n self.logger.info(\n \"n: %s, process time: %s, qsize: %s, counter: %s\" % (\n message_num, process_time, self.producer_queue.qsize(),\n counter))\n self.logger.info(\"time diff: %s\" % (consumer_time - producer_time))\n await asyncio.sleep(0.0)\n\n\nclass MyOrchestrator(Orchestrator):\n def __init__(self, event_loop):\n super(MyOrchestrator, self).__init__(event_loop)\n\n # producer = MainThreadHogger()\n producer = 
OffloadWithProcess()\n consumer = ConsumerNode()\n\n self.add_nodes(producer, consumer)\n self.subscribe(producer, consumer, consumer.producer_tag)\n\n\nrun(MyOrchestrator)\n","repo_name":"AtlasBuggy/atlasbuggy","sub_path":"atlasbuggy/examples/subscriptions/processor_heavy.py","file_name":"processor_heavy.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"}
+{"seq_id":"71756684366","text":"def main():\n prime_nos = []\n def simpleSieve(limit):\n nonlocal prime_nos\n n = [True for i in range(limit+1)]\n for i in range(2,limit+1):\n if (n[i] == True):\n for j in range(2*i, limit+1, i):\n n[j] = False\n \n for p in range(2, limit+1):\n if (n[p] == True):\n prime_nos.append(p)\n arr = [1, 2, 4, 3, 29, 11, 7, 8, 9]\n simpleSieve(max(arr))\n mx = 0 \n ct = 0\n for i in arr:\n if i in prime_nos:\n ct+=1\n else:\n if mx < ct:\n mx = ct\n ct = 0 \n print(mx)\nmain()","repo_name":"bplumber/LeetCode","sub_path":"Day 39/The Longest Prime Sub-array.py","file_name":"The Longest Prime Sub-array.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"34101796027","text":"from django.shortcuts import render\nfrom bitlyshortener import Shortener\nimport requests\nfrom .models import LinkCONTAINER\nfrom django.contrib import messages\n# Create your views here.\n\ndef crul_check(request):\n\tall_data = LinkCONTAINER.objects.all()\n\tif request.method ==\"POST\":\n\t\theaders = {\n\t\t 'Authorization': 'Bearer 0456afc0a7bc25171f29a735616cfe3d9c8e0904',\n\t\t 'Content-Type': 'application/json',\n\t\t}\n\n\t\tdata = { \n\t\t\t\t\"long_url\":request.POST.get(\"link_short_input\"),\n\t\t \t\t\"domain\": \"bit.ly\",\n\t\t \t\t\"group_guid\": \"Bl655fA0nGi\",\n\t\t \t\t\n\t\t \t\t}\n\t\tprint(data)\n\n\t\tresponse = requests.post('https://api-ssl.bitly.com/v4/shorten', headers=headers, json=data)\n\t\t# print('-----------------------------------',response.__dict__)\n\t\tvalue_single = response.json()\n\t\tprint('This is url you have got-----------------------------------',value_single[\"link\"])\n\t\tcheck_link_first = LinkCONTAINER.objects.filter(long_url=data['long_url']).first()\n\t\tif check_link_first:\n\t\t\tmessages.error(request,\"Url already present in Database\")\n\t\t\treturn render(request,'index.html',{'check_link_first':check_link_first})\n\t\telse:\n\t\t\turl_contain = LinkCONTAINER(long_url=data['long_url'],short_url=value_single[\"link\"])\n\t\t\turl_contain.save()\n\t\t\tmessages.success(request,\"Url shorten successfully\")\n\n\n\n\treturn render(request,'index.html',{'all_data':all_data})\n\n\n\n# def crul_check(request):\n# \tif request.method == \"POST\":\n# \t\ttokens_pool = ['0456afc0a7bc25171f29a735616cfe3d9c8e0904']\n# \t\tshortener = Shortener(tokens=tokens_pool, max_cache_size=128)\n# \t\turl = []\n# \t\tget_url = request.POST.get('link_short_input')\n# \t\tprint(\"-This is get_url----------\",get_url)\n# \t\tif get_url == \"\":\n# \t\t\tmessages.error(request,\"Please enter url in given field\")\n# \t\telse:\n# \t\t\turl.append(get_url)\n# \t\t\tprint('THIS IS URL-------',url)\n# \t\t\tsort_url = shortener.shorten_urls(url)\n# \t\t\tfor value in sort_url:\n# \t\t\t\tprint(\"sort url------------------------------------: \",value)\n# \t\t\t\tif value.startswith('htt'):\n# \t\t\t\t\t# url_contain = LinkCONTAINER(long_url=get_url,short_url=value)\n# \t\t\t\t\tcheck_link_first = LinkCONTAINER.objects.filter(long_url=get_url).first()\n# \t\t\t\t\tif check_link_first:\n# \t\t\t\t\t\tmessages.error(request,\"Url already present in Database\")\n# \t\t\t\t\t\treturn render(request,'index.html',{'check_link_first':check_link_first})\n# \t\t\t\t\telse:\n# \t\t\t\t\t\turl_contain.save()\n# \t\t\t\t\t\tmessages.success(request,\"Url shorten successfully\")\n# \treturn render(request,'index.html')","repo_name":"amitK691/Bitly","sub_path":"crul/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"1903321931","text":"import numpy as np\n\n#lista Python\nl = [20, 30, 10, 40]\n#converter lista para array\na = np.array(l)\n\n#print (l)\n#print (a)\nb = a[:]\nc = a.copy()\nc[1] = 999\na[1] = 1000\nprint (a)\nprint (b)\nprint (c)\n#print(type(a))\n#print(type(b))\n\n#print(a[1])\n#print(b[1])\n\n#b[1] = 789\n#print (a)\n#print (b)\n#print(a[1])\n#print(b[1])\n","repo_name":"Sunshine199438/CC6o-Python","sub_path":"Nunpy/Aula 04/Mat05.py","file_name":"Mat05.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"7465894921","text":"__license__ = 'GPL 3'\n__copyright__ = '2006, Ed Summers '\n__docformat__ = 'restructuredtext en'\n\n\nclass URL:\n '''\n Class for representing a URL in an opensearch v1.1 query\n '''\n\n def __init__(self, type='', template='', method='GET'):\n self.type = type\n self.template = template\n self.method = 'GET'\n self.params = []\n","repo_name":"kovidgoyal/calibre","sub_path":"src/calibre/utils/opensearch/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":17176,"dataset":"github-code","pt":"65"}
+{"seq_id":"2366679886","text":"# http://oj.leetcode.com/problems/3sum/\n# Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.\n\n# Note:\n# Elements in a triplet (a,b,c) must be in non-descending order. (ie, a ≤ b ≤ c)\n# The solution set must not contain duplicate triplets.\n# For example, given array S = {-1 0 1 2 -1 -4},\n\n# A solution set is:\n# (-1, 0, 1)\n# (-1, -1, 2)\n\nclass Solution:\n # @return a list of lists of length 3, [[val1,val2,val3]]\n def threeSum(self, num):\n if len(num) < 3:\n return []\n \n num.sort()\n result = []\n i = 0\n \n if len(num) < 3:\n return result\n \n while i < len(num):\n j = i + 1\n k = len(num) - 1\n while j < k:\n sum = num[i] + num[j] + num[k]\n if sum == 0:\n triplet = [num[i], num[j], num[k]]\n if self.notContain(result, triplet):\n result.append(triplet)\n j+=1\n elif sum > 0:\n k-=1\n else:\n j+=1\n \n i+=1\n return result\n \n def notContain(self, result, triplet):\n for r in result:\n if r == triplet:\n return False\n return True","repo_name":"chihungyu1116/leetcode","sub_path":"ThreeSumEqualZero.py","file_name":"ThreeSumEqualZero.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"28859295380","text":"def prov(chislo):\n kol_chet = 0\n while chislo != 0:\n if chislo % 10 > 4:\n kol_chet += 1\n chislo //= 10\n else:\n chislo //= 10\n return kol_chet\n\n\nspis = []\n\nfor chislo in range(5903, 174203+1):\n if len(list(map(str, str(chislo)))) == len(set(list(map(str,str(chislo))))) and prov(chislo) == 3:\n spis.append(chislo)\nprint(len(spis))\n\nfor prov in range(len(spis)):\n print(spis[prov])","repo_name":"KotisKotlyandii/lessons1","sub_path":"ege17/74.py","file_name":"74.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"30678859872","text":"import requests\n\n\n# /api\nclass MissionCenter:\n\n def __init__(self, base_url, headers, cookies):\n self.base_url = base_url\n self.headers = headers\n self.cookies = cookies\n\n def get_tasks(self, status, begin, end, **kwargs):\n base_url = self.base_url\n headers = self.headers\n cookies = self.cookies\n for k, v in kwargs.items():\n if k == 'base_url':\n base_url = v\n elif k == 'header':\n headers = v\n elif k == 'cookies':\n cookies = v\n return requests.get(f'{base_url}/api/tasks/{status}', headers=headers,\n params={'begin': begin, 'end': end}, cookies=cookies)\n\n def init_mission(self, **kwargs):\n base_url = self.base_url\n headers = self.headers\n cookies = self.cookies\n for k, v in kwargs.items():\n if k == 'base_url':\n base_url = v\n elif k == 'header':\n headers = v\n elif k == 'cookies':\n cookies = v\n return requests.post(f'{base_url}/api/task/init', headers=headers, cookies=cookies)\n\n def get_task(self, task_id, **kwargs):\n base_url = self.base_url\n headers = self.headers\n cookies = self.cookies\n for k, v in kwargs.items():\n if k == 'base_url':\n base_url = v\n elif k == 'header':\n headers = v\n elif k == 'cookies':\n cookies = v\n return requests.get(f'{base_url}/api/task/{task_id}', headers=headers, cookies=cookies)\n\n def get_numbers(self, task_id, begin, end, status, **kwargs):\n base_url = self.base_url\n headers = self.headers\n cookies = self.cookies\n for k, v in kwargs.items():\n if k == 'base_url':\n base_url = v\n elif k == 'header':\n headers = v\n elif k == 'cookies':\n cookies = v\n return requests.get(f'{base_url}/api/task/{task_id}/number/list', headers=headers, cookies=cookies,\n params={\n 'task_id': task_id,\n 'begin': begin,\n 'end': end,\n 'status': status\n })\n\n def start_mission(self, task_ids, **kwargs):\n base_url = self.base_url\n headers = self.headers\n cookies = self.cookies\n for k, v in kwargs.items():\n if k == 'base_url':\n base_url = v\n elif k == 'header':\n headers = v\n elif k == 'cookies':\n cookies = v\n return requests.put(f'{base_url}/api/tasks/start', headers=headers, cookies=cookies,\n data={\n 'open_diagnosis': 0,\n 'task_ids': task_ids\n })\n\n def submit_mission(self, task_id, task_setting, **kwargs):\n base_url = self.base_url\n headers = self.headers\n cookies = self.cookies\n for k, v in kwargs.items():\n if k == 'base_url':\n base_url = v\n elif k == 'header':\n headers = v\n elif k == 'cookies':\n cookies = v\n return requests.post(f'{base_url}/api/task/{task_id}/v2', headers=headers, cookies=cookies,\n data=task_setting)\n\n def add_numbers(self, task_id, contacts, **kwargs):\n base_url = self.base_url\n headers = self.headers\n cookies = self.cookies\n for k, v in kwargs.items():\n if k == 'base_url':\n base_url = v\n elif k == 'header':\n headers = v\n elif k == 'cookies':\n cookies = v\n return requests.put(f'{base_url}/api/task/{task_id}/numbers/add', headers=headers, cookies=cookies,\n data={\n 'contact_datas': contacts\n })\n\n def upload_numbers(self, task_id, excel_file, **kwargs):\n base_url = self.base_url\n headers = self.headers\n cookies = self.cookies\n for k, v in kwargs.items():\n if k == 'base_url':\n base_url = v\n elif k == 'header':\n headers = v\n elif k == 'cookies':\n cookies = v\n return requests.post(f'{base_url}/api/task/{task_id}/numbers/upload', headers=headers, cookies=cookies,\n files={'file': excel_file})\n\n\nif __name__ == \"__main__\":\n url = \"http://call-test.tangees.com\"\n header = {\n 'Cookie': 
'SecurityCenterDuId=IllPYTYvSllFMjBlRWFydTlGa1lDWWhjPSI.FSNHGQ.eghMoXas_yCVbgocZMvMGoOvyyE;accountCenterSessionId=.eJw9jk1rwzAQRP-Lzj2sVlqvlGMphEKd0kIo8cXoY0Xixi7EaV1c-t8rcuhxhnnD-1Gfs1z6U1Yb1aDVkbwXYiyAbAwl0szqTvXlIvNRbUo4z1LjbZ-5EYfWuASGcyheEzc6-xAFvMHK2sjeNtSkKOSiCxQxBV20WEBbtMmRMgoGp7MEECCEwMmVxghrK9HGWBK7rHMyjhyIDwZ8YUIL3lWvSST3c_iS_vrR5_gvON8E2_VgDkO7tOv-uhu6oTsBdNv98vT2aHdj7cbXscUX2673788P6bsepmOYJjlXeJGofv8A-dpVDg.FSNHIA.2sdkzLXiwHoI1MYtRFjVctafCUc'\n }\n cookie = {}\n test = MissionCenter(url, header, cookie)\n # get_tasks\n print(test.get_tasks('draft', '0', '1').json())\n # init_task\n init_task_res = test.init_mission().json()\n print(init_task_res)\n t_id = init_task_res['task_id']\n # get_task\n print(test.get_task(task_id=t_id).json())\n # get_numbers\n print(test.get_numbers(t_id, '0', '10', 'no_call').json())\n # start_mission\n print(test.start_mission(t_id).json())\n # submit_mission\n t_setting = {\n 'name': 'api_test',\n 'graph_id': '61849c29a3552266652dc7f5',\n 'version_id': '61a46d8aa35522703b36af89',\n 'call_line_model': 2,\n 'call_port_ids': '61d6a114a35522259f5fe5c7',\n 'robot_ids': '60e81728e285480159f317d7',\n 'smart_diagnose_trigger': 1,\n 'smart_diagnose_config': '{\"open_filter_by_number\":true,\"harass_rule\":{\"filter_by_call_result\":{\"trigger\":false},\"filter_by_intention_result\":{\"trigger\":false,\"options\":[]},\"filter_by_days_anti_harass\":{\"trigger\":false}}}',\n 'showRule': 'false',\n 'sms_trigger': 0,\n 'is_transfer': 0,\n 'smart_schedule_trigger': 1,\n 'is_timed_task': 0,\n 'redial_trigger': 0,\n 'label_trigger': 0,\n 'cc_assign_trigger': 0,\n }\n print(test.submit_mission(t_id, t_setting).json())\n # add_numbers\n print(test.add_numbers(t_id,\n '[{\"number\": \"18218644344\", \"contact\": \"jack\", \"enterprise\": \"jack\\'s\", \"vars\": {}}]').json())\n # upload_numbers\n e_file = open('C:/Users/TUNGEE/Desktop/数据流/random_phone_number/随机生成号码包20.xlsx', 'rb')\n print(test.upload_numbers(t_id, e_file).json())\n","repo_name":"Cchenyw/SmartCall","sub_path":"mission_center.py","file_name":"mission_center.py","file_ext":"py","file_size_in_byte":6916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"32589975754","text":"import sys\nfrom bisect import bisect_left, bisect_right\n\nN = int(input())\nproducts = list(map(int, sys.stdin.readline().rstrip().split(' ')))\nM = int(input())\nwant = list(map(int, sys.stdin.readline().rstrip().split(' ')))\n\n# 이진탐색으로 w가 product안에 있는지 확인해보자\nproducts.sort()\nfor w in want:\n if bisect_right(products, w) != bisect_left(products, w): # w가 존재함\n print('yes')\n else:\n print('no')\n","repo_name":"seoyeonhwng/algorithm","sub_path":"이것이_코딩테스트다/부품 찾기.py","file_name":"부품 찾기.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"36611732886","text":"import os, sys\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\nfrom TextUtils import TextUtils\nfrom XPath import XPath\nfrom Recipe import Recipe\nimport re\n\ndef parse(html_content, url=None):\n recipe = Recipe()\n xpath = XPath(html_content)\n\n # Title\n title = xpath.single_node_query('//meta[@itemprop=\"og:title/@content\"]', 'title')\n if title == None:\n title = xpath.single_node_query('//title/text()', 'title') \n if title == None:\n title = \"Recipe from {}\".format(url) \n\n recipe.title = title\n\n # Photo\n photo_url = xpath.single_node_query('//meta[@property=\"og:image\"]/@content', 'photo_url')\n if re.search(\"\\.(jpeg|jpg)\", photo_url, re.I):\n photo_url = TextUtils.url_relative_to_absolute(url, photo_url)\n recipe.photo_url = photo_url\n\n return recipe\n\n","repo_name":"stevechuck/recipe-parser","sub_path":"lib/parsers/General.py","file_name":"General.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"14098697867","text":"import datetime\nimport os\nimport wave\n\nimport numpy as np\n\nfrom .util import randName\n\n\nclass AudioMannger:\n def __init__(self,\n robot,\n frame_length=160,\n frame=10,\n data_width=2,\n vad_default=300):\n # 二进制 pcm 流 \n self.audios = b''\n self.asr_result = \"\"\n # Speech 核心主体\n self.robot = robot\n\n self.file_dir = \"source\"\n os.makedirs(self.file_dir, exist_ok=True)\n self.vad_deafult = vad_default\n self.vad_threshold = vad_default\n self.vad_threshold_path = os.path.join(self.file_dir,\n \"vad_threshold.npy\")\n\n # 10ms 一帧\n self.frame_length = frame_length\n # 10帧,检测一次 vad\n self.frame = frame\n # int 16, 两个bytes\n self.data_width = data_width\n # window\n self.window_length = frame_length * frame * data_width\n\n # 是否开始录音\n self.on_asr = False\n self.silence_cnt = 0\n self.max_silence_cnt = 4\n self.is_pause = False # 录音暂停与恢复\n\n def init(self):\n if os.path.exists(self.vad_threshold_path):\n # 平均响度文件存在\n self.vad_threshold = np.load(self.vad_threshold_path)\n\n def clear_audio(self):\n # 清空 pcm 累积片段与 asr 识别结果\n self.audios = b''\n\n def clear_asr(self):\n self.asr_result = \"\"\n\n def compute_chunk_volume(self, start_index, pcm_bins):\n # 根据帧长计算能量平均值\n pcm_bin = pcm_bins[start_index:start_index + self.window_length]\n # 转成 numpy\n pcm_np = np.frombuffer(pcm_bin, np.int16)\n # 归一化 + 计算响度\n x = pcm_np.astype(np.float32)\n x = np.abs(x)\n return np.mean(x)\n\n def is_speech(self, start_index, pcm_bins):\n # 检查是否没\n if start_index > len(pcm_bins):\n return False\n # 检查从这个 start 开始是否为静音帧\n energy = self.compute_chunk_volume(\n start_index=start_index, pcm_bins=pcm_bins)\n # print(energy)\n if energy > self.vad_threshold:\n return True\n else:\n return False\n\n def compute_env_volume(self, pcm_bins):\n max_energy = 0\n start = 0\n while start < len(pcm_bins):\n energy = self.compute_chunk_volume(\n start_index=start, pcm_bins=pcm_bins)\n if energy > max_energy:\n max_energy = energy\n start += self.window_length\n self.vad_threshold = max_energy + 100 if max_energy > self.vad_deafult else self.vad_deafult\n\n # 保存成文件\n np.save(self.vad_threshold_path, self.vad_threshold)\n print(f\"vad 阈值大小: {self.vad_threshold}\")\n print(f\"环境采样保存: {os.path.realpath(self.vad_threshold_path)}\")\n\n def stream_asr(self, pcm_bin):\n # 先把 pcm_bin 送进去做端点检测\n start = 0\n while start < len(pcm_bin):\n if self.is_speech(start_index=start, pcm_bins=pcm_bin):\n self.on_asr = True\n self.silence_cnt = 0\n print(\"录音中\")\n self.audios += pcm_bin[start:start + self.window_length]\n else:\n if self.on_asr:\n self.silence_cnt += 1\n if self.silence_cnt > self.max_silence_cnt:\n self.on_asr = False\n self.silence_cnt = 0\n # 录音停止\n print(\"录音停止\")\n # audios 保存为 wav, 送入 ASR\n if len(self.audios) > 2 * 16000:\n file_path = os.path.join(\n self.file_dir,\n \"asr_\" + datetime.datetime.strftime(\n datetime.datetime.now(),\n '%Y%m%d%H%M%S') + randName() + \".wav\")\n self.save_audio(file_path=file_path)\n self.asr_result = self.robot.speech2text(file_path)\n self.clear_audio()\n return self.asr_result\n else:\n # 正常接收\n print(\"录音中 静音\")\n self.audios += pcm_bin[start:start + self.window_length]\n start += self.window_length\n return \"\"\n\n def save_audio(self, file_path):\n print(\"保存音频\")\n wf = wave.open(file_path, 'wb') # 创建一个音频文件,名字为“01.wav\"\n wf.setnchannels(1) # 设置声道数为2\n wf.setsampwidth(2) # 设置采样深度为\n wf.setframerate(16000) # 设置采样率为16000\n # 将数据写入创建的音频文件\n wf.writeframes(self.audios)\n # 写完后将文件关闭\n wf.close()\n\n def end(self):\n # audios 保存为 wav, 送入 ASR\n 
file_path = os.path.join(self.file_dir, \"asr.wav\")\n self.save_audio(file_path=file_path)\n return self.robot.speech2text(file_path)\n\n def stop(self):\n self.is_pause = True\n self.audios = b''\n\n def resume(self):\n self.is_pause = False\n","repo_name":"PaddlePaddle/PaddleSpeech","sub_path":"demos/speech_web/speech_server/src/AudioManeger.py","file_name":"AudioManeger.py","file_ext":"py","file_size_in_byte":5396,"program_lang":"python","lang":"en","doc_type":"code","stars":9126,"dataset":"github-code","pt":"65"}
+{"seq_id":"11574350749","text":"# initial imports\r\nimport random\r\nfrom turtle import Turtle, Screen\r\nfrom snake import Snake\r\nfrom food import Food\r\nfrom scoreboard import ScoreBoard\r\nimport time\r\n\r\n# screen attributes and functions\r\nmain_screen = Screen()\r\nmain_screen.bgcolor(\"black\")\r\nmain_screen.setup(600, 600)\r\nmain_screen.title(\"Snake Game in Bilkent!!\")\r\nmain_screen.tracer(0)\r\n\r\nsnake = Snake()\r\n\r\nscore_board = ScoreBoard()\r\n\r\n\r\nx_rand = random.randint(-270, 270)\r\ny_rand = random.randint(-270, 270)\r\n\r\nfood = Food(x_rand, y_rand)\r\n\r\nmain_screen.onkey(snake.turn_left, \"Left\")\r\nmain_screen.onkey(snake.turn_right, \"Right\")\r\nmain_screen.listen()\r\n\r\n\r\n\r\n\r\n# game instances\r\ngame_on = True\r\n\r\nwhile game_on:\r\n\r\n score_board.create_board()\r\n main_screen.update()\r\n time.sleep(0.1)\r\n snake.follow()\r\n\r\n\r\n if abs(food.get_position() - snake.get_position()) < 20:\r\n x_rand = random.randint(-270, 270)\r\n y_rand = random.randint(-270, 270)\r\n food.set_position(x_rand, y_rand)\r\n snake.grow()\r\n score_board.increase_score()\r\n\r\n if snake.segments[0].xcor() > 300 or snake.segments[0].xcor() < -300 or snake.segments[0].ycor() > 300 or snake.segments[0].ycor() < -300:\r\n snake.collide()\r\n game_on = False\r\n score_board.final_result()\r\n\r\n for index in range(1, len(snake.segments)):\r\n if abs(snake.segments[0].xcor() - snake.segments[index].xcor()) < 1 and abs(snake.segments[0].ycor() - snake.segments[index].ycor()) < 1:\r\n snake.collide()\r\n game_on = False\r\n score_board.final_result()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"\r\ngame_on = True\r\nwhile game_on:\r\n main_screen.setup()\r\n time.sleep(0.1)\r\n\r\n move()\r\n follow()\r\n turn_left()\r\n follow()\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nmain_screen.listen()\r\n\r\n\r\nmain_screen.exitonclick()","repo_name":"farukbeygo/snake_game","sub_path":"snakeGame_finalVersion/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"71318081166","text":"# coding:utf-8\nimport numpy as np\nfrom cost_time import cost_time\n\n\n@cost_time\ndef binary_search(lst, item):\n first = 0\n last = len(lst) - 1\n found = False\n\n while first <= last and not found:\n midpoint = (first + last) // 2\n if lst[midpoint] == item:\n found = True\n else:\n if item < lst[midpoint]:\n last = midpoint - 1\n else:\n first = midpoint + 1\n return found\n\n\n# 递归版本\n@cost_time\ndef binary_search2(lst, item):\n if len(lst) == 0:\n return False\n else:\n midpoint = len(lst) // 2\n\n if lst[midpoint] == item:\n return True\n else:\n if item < lst[midpoint]:\n return binary_search2(lst[:midpoint], item)\n else:\n return binary_search2(lst[midpoint + 1:], item)\n\n\nif __name__ == '__main__':\n unordered_list = np.random.randint(1, 20001, size=10000).tolist()\n ordered_list = sorted(unordered_list)\n print(ordered_list)\n item = 255\n print(binary_search(ordered_list, item))\n print(binary_search2(ordered_list, item))\n","repo_name":"articuly/data_structure_algorithms","sub_path":"二分搜索.py","file_name":"二分搜索.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"14419644383","text":"file = open('day20-input.txt', 'r')\r\nlines = file.readlines()\r\n\r\n\r\ndef calculate_coordinate(key: int, mix_count: int) -> int:\r\n values = [int(line.strip()) * key for line in lines]\r\n indices = [i for i in range(len(values))]\r\n\r\n for _ in range(mix_count):\r\n for index in range(len(indices)):\r\n source_index = indices.index(index)\r\n destination_index = (source_index + values[source_index]) % (len(values) - 1)\r\n\r\n index_pop = indices.pop(source_index)\r\n value_pop = values.pop(source_index)\r\n\r\n indices.insert(destination_index, index_pop)\r\n values.insert(destination_index, value_pop)\r\n\r\n # print(values)\r\n\r\n coordinates = 0\r\n value_0_index = values.index(0)\r\n for index in range(value_0_index + 1000, value_0_index + 4000, 1000):\r\n coordinates += values[index % len(values)]\r\n\r\n return coordinates\r\n\r\n\r\nprint(f\"Coordinates is {calculate_coordinate(1, 1)}\")\r\n\r\n# Part 2\r\nprint(f\"Coordinates is {calculate_coordinate(811589153, 10)}\")\r\n","repo_name":"jejbr84/advent-of-code-2022","sub_path":"day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"21161809417","text":"import multiprocessing as mp\nimport os\nimport re\nimport sys\nfrom random import random\nfrom select import select\nfrom time import sleep\nimport pytest\nfrom _pytest.main import ExitCode\n\nfrom . import mpsing # pylint: disable=relative-beyond-top-level\n\n\nclass PytestScheduler(mpsing.MultiprocessSingleton):\n \"\"\"A pretty custom test execution scheduler.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the scheduler.\n\n Not to be called directly, since this is a singleton. Use\n `PytestScheduler.instance()` to get the scheduler object.\n \"\"\"\n super().__init__()\n self._mp_singletons = [self]\n self.session = None\n\n def register_mp_singleton(self, mp_singleton):\n \"\"\"Register a multi-process singleton object.\n\n Since the scheduler will be handling the main testing loop, it needs\n to be aware of any multi-process singletons that must be serviced\n during the test run (i.e. polled and allowed to handle method\n execution in the server context).\n \"\"\"\n self._mp_singletons.append(mp_singleton)\n\n @staticmethod\n def do_pytest_addoption(parser):\n \"\"\"Pytest hook. Add concurrency command line option.\"\"\"\n avail_cpus = len(os.sched_getaffinity(0))\n # Defaulting to a third of the available (logical) CPUs sounds like a\n # good enough plan.\n default = max(1, int(avail_cpus / 3))\n parser.addoption(\n \"--concurrency\",\n \"--concurrency\",\n dest=\"concurrency\",\n action=\"store\",\n type=int,\n default=default,\n help=\"Concurrency level (max number of worker processes to spawn).\"\n )\n\n def pytest_sessionstart(self, session):\n \"\"\"Pytest hook. Called at pytest session start.\n\n This will execute in the server context (before the tests are\n executed).\n \"\"\"\n self.session = session\n\n def pytest_runtest_logreport(self, report):\n \"\"\"Pytest hook. Called whenever a new test report is ready.\n\n This will execute in the worker / child context.\n \"\"\"\n self._add_report(report)\n\n def pytest_runtestloop(self, session):\n \"\"\"Pytest hook. 
The main test scheduling and running loop.\n\n Called in the server process context.\n \"\"\"\n # Don't run tests on test discovery\n if session.config.option.collectonly:\n return True\n\n # max_concurrency = self.session.config.option.concurrency\n schedule = [\n {\n # Performance batch: tests that measure performance, and need\n # to be run in a non-cuncurrent environment.\n 'name': 'performance',\n 'concurrency': 1,\n 'patterns': [\n \"/performance/.+\",\n ],\n 'items': []\n },\n {\n # Unsafe batch: tests that, for any reason, are not\n # concurrency-safe, and therefore need to be run sequentially.\n 'name': 'unsafe',\n 'concurrency': 1,\n 'patterns': [\n \"/functional/test_initrd.py\",\n \"/functional/test_max_vcpus.py\",\n \"/functional/test_rate_limiter.py\",\n \"/functional/test_signals.py\",\n \"/build/test_coverage.py\"\n ],\n 'items': []\n },\n {\n # Safe batch: tests that can be run safely in a concurrent\n # environment.\n 'name': 'safe',\n # FIXME: we still have some framework concurrency issues\n # which prevent us from successfully using `max_concurrency`.\n # 'concurrency': max_concurrency,\n 'concurrency': 1,\n 'patterns': [\n \"/functional/.+\",\n \"/build/.+\",\n \"/security/.+\"\n ],\n 'items': []\n },\n {\n # Unknown batch: a catch-all batch, scheduling any tests that\n # haven't been categorized to run sequentially (since we don't\n # know if they are concurrency-safe).\n 'name': 'unknown',\n 'concurrency': 1,\n 'patterns': [\".+\"],\n 'items': []\n }\n ]\n\n # Go through the list of tests and assign each of them to its\n # corresponding batch in the schedule.\n for item in session.items:\n # A test can match any of the patterns defined by the batch,\n # in order to get assigned to it.\n next(\n # Found a matching batch. No need to look any further.\n batch['items'].append(item) for batch in schedule\n if re.search(\n \"|\".join([\"({})\".format(x) for x in batch['patterns']]),\n \"/\".join(item.listnames()),\n ) is not None\n )\n\n # Filter out empty batches.\n schedule = [batch for batch in schedule if batch['items']]\n\n for batch in schedule:\n self._raw_stdout(\n \"\\n[ \",\n self._colorize('yellow', batch['name']),\n \" | \",\n \"{} tests\".format(len(batch['items'])),\n \" | \",\n \"{} worker(s)\".format(batch['concurrency']),\n \" ]\\n\"\n )\n self._run_batch(batch)\n\n return \"stahp\"\n\n @pytest.mark.tryfirst\n # pylint: disable=unused-argument\n # pylint: disable=no-self-use\n def pytest_sessionfinish(self, session, exitstatus):\n \"\"\"Pytest hook. Wrap up the whole testing session.\n\n Since the scheduler is more or less mangling the test session in order\n to distribute test items to worker processes, the main pytest process\n can become unaware of test failures and errors. 
Using this session\n wrap-up hook to set the correct exit code.\n \"\"\"\n trep = session.config.pluginmanager.getplugin(\"terminalreporter\")\n if \"error\" in trep.stats:\n session.exitstatus = ExitCode.INTERNAL_ERROR\n if \"failed\" in trep.stats:\n session.exitstatus = ExitCode.TESTS_FAILED\n\n def _run_batch(self, batch):\n \"\"\"Run the tests in this batch, spread across multiple workers.\n\n Called in the server process context.\n \"\"\"\n max_workers = batch['concurrency']\n items_per_worker = max(1, int(len(batch['items']) / max_workers))\n workers = []\n while batch['items']:\n # Pop `items_per_worker` out from this batch and send them to\n # a new worker.\n worker_items = batch['items'][-items_per_worker:]\n del batch['items'][-items_per_worker:]\n\n # Avoid storming the host with too many workers started at once.\n _delay = random() + len(workers) / 5.0 if max_workers > 1 else 0\n\n # Create the worker process and start it up.\n worker = mp.Process(\n target=self._worker_main,\n args=(worker_items, _delay)\n )\n workers.append(worker)\n worker.start()\n\n # Main loop, reaping workers and processing IPC requests.\n while workers:\n rlist, _, _ = select(self._mp_singletons, [], [], 0.1)\n for mps in rlist:\n mps.handle_ipc_call()\n _ = [w.join() for w in workers if not w.is_alive()]\n workers = [w for w in workers if w.is_alive()]\n\n def _worker_main(self, items, startup_delay=0):\n \"\"\"Execute a bunch of test items sequentially.\n\n This is the worker process entry point and main loop.\n \"\"\"\n sys.stdin.close()\n # Sleeping here to avoid storming the host when many workers are\n # started at the same time.\n #\n # TODO: investigate storming issue;\n # Not sure what the exact problem is, but worker storms cause an\n # elevated response time on the API socket. Since the reponse\n # time is measured by our decorators, it also includes the\n # Python libraries overhead, which might be non-negligible.\n sleep(startup_delay if startup_delay else 0)\n\n # Restrict the session to this worker's item list only.\n # I.e. 
make pytest believe that the test session is limited to this\n # worker's job.\n self.session.items = items\n\n # Disable the terminal reporting plugin, so it doesn't mess up\n # stdout, when run in a multi-process context.\n # The terminal reporter plugin will remain enabled in the server\n # process, gathering data via worker calls to `_add_report()`.\n trep = self.session.config.pluginmanager.get_plugin(\"terminalreporter\")\n self.session.config.pluginmanager.unregister(trep)\n\n for item, nextitem in zip(\n self.session.items,\n self.session.items[1:] + [None]\n ):\n item.ihook.pytest_runtest_protocol(item=item, nextitem=nextitem)\n\n @mpsing.ipcmethod\n def _add_report(self, report):\n \"\"\"Send a test report to the server process.\n\n A report is generated for every test item, and for every test phase\n (setup, call, and teardown).\n \"\"\"\n # Translation matrix from (when)x(outcome) to pytest's\n # terminalreporter plugin stats (dictionary) key.\n key_xlat = {\n \"setup.passed\": \"\",\n \"setup.failed\": \"error\",\n \"setup.skipped\": \"skipped\",\n \"call.passed\": \"passed\",\n \"call.failed\": \"failed\",\n \"call.skipped\": \"skipped\",\n \"teardown.passed\": \"\",\n \"teardown.failed\": \"error\",\n \"teardown.skipped\": \"\"\n }\n stats_key = key_xlat[\"{}.{}\".format(report.when, report.outcome)]\n\n trep = self.session.config.pluginmanager.get_plugin(\"terminalreporter\")\n if trep:\n if stats_key not in trep.stats:\n trep.stats[stats_key] = []\n trep.stats[stats_key].append(report)\n\n if stats_key:\n self._report_progress(report.nodeid, stats_key)\n\n def _report_progress(self, nodeid, outcome):\n \"\"\"Show the user some nice progress indication.\"\"\"\n outcome_cols = {\n \"passed\": \"green\",\n \"failed\": \"red\",\n \"error\": \"red\",\n \"skipped\": \"yellow\"\n }\n if outcome not in outcome_cols:\n return\n\n color = outcome_cols[outcome]\n self._raw_stdout(\n \" \",\n self._colorize(color, \"{:10}\".format(outcome.upper())),\n self._colorize(color, nodeid)\n if outcome in [\"error\", \"failed\"]\n else nodeid,\n \"\\n\"\n )\n\n @staticmethod\n def _colorize(color, msg):\n \"\"\"Add an ANSI / terminal color escape code to `msg`.\n\n If stdout is not a terminal, `msg` will just be encoded into a byte\n stream, without adding any ANSI decorations.\n Note: the returned value will always be a stream of bytes, not a\n string, since the result needs to be sent straight to the\n terminal.\n \"\"\"\n if not isinstance(msg, bytes):\n msg = str(msg).encode(\"utf-8\")\n if not sys.stdout.isatty():\n return msg\n term_codes = {\n 'red': b\"\\x1b[31m\",\n 'yellow': b\"\\x1b[33m\",\n 'green': b\"\\x1b[32m\",\n 'reset': b\"\\x1b(B\\x1b[m\"\n }\n return term_codes[color] + msg + term_codes['reset']\n\n @staticmethod\n def _raw_stdout(*words):\n \"\"\"Send raw-byte output to stdout.\n\n All arguments are concatenated and, if necessary, encoded into raw\n byte streams, before being written to stdout.\n \"\"\"\n byte_words = [\n w if isinstance(w, bytes) else str(w).encode(\"utf-8\")\n for w in words\n ]\n buf = b\"\".join(byte_words)\n os.write(sys.stdout.fileno(), buf)\n","repo_name":"ucsdsysnet/faasnap-firecracker","sub_path":"tests/framework/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":12136,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"65"}
+{"seq_id":"14781947030","text":"# Name: Ofir Cohen\n# ID: 312255847\n# Date: 22/4/2020\n\nimport numpy as np\nimport matplotlib.pylab as plt\n\n\ndef sigmoid(w, x):\n\tz = np.dot(x, w)\n\treturn 1 / (1 + np.exp(-z))\n\ndef log_likelihod(x, y, w):\n\tz = np.dot(x, w)\n\treturn np.sum(y * z - np.log(1 + np.exp(z)))\n\n\ndef logistic_regression(x, y, num_steps, learning_rate, add_interxept=False):\n\tif(add_interxept):\n\t\tintercept = np.ones((x.shape[0], 1))\n\t\tx = np.hstack((intercept, x))\n\t\n\tw = np.zeros(x.shape[1])\n\t\n\tfor step in range(num_steps):\n\t\twx = np.dot(w, x.T)\n\t\test_y = sigmoid(w, x)\n\t\terr = y - est_y\n\t\tgradient = np.dot(x.T, err)\n\t\tw += learning_rate * gradient\n\t\tif step % 10000 == 0:\n\t\t\tprint(log_likelihod(x, y, w))\n\t\t\n\treturn w\n\n\ndef main():\n\tnp.random.seed(12)\n\tnum_observation = 5000\n\tmeans = [[1,4], [1,3], [1,2], [1,1]]\n\n\tfor mean in means:\n\t\tx1 = np.random.multivariate_normal([0, 0], [[1 , 0.75], [0.75, 1]], num_observation)\n\t\tx2 = np.random.multivariate_normal(mean, [[1 ,0.75], [0.75, 1]], num_observation)\n\t\tx = np.vstack((x1, x2)).astype(np.float32)\n\t\ty = np.hstack((np.zeros(num_observation), np.ones(num_observation)))\n\t\tplt.figure(figsize=(12, 4))\n\t\tplt.scatter(x[:, 0], x[:, 1], c=y, alpha=0.4)\n\t\tplt.title(\"mean: {}\".format(mean))\n\t\tplt.show()\n\t\t\n\t\tweights = logistic_regression(x, y, num_steps=100000, learning_rate=5e-5, add_interxept=True)\n\t\tprint(weights)\n\t\t\n\t\tdata_with_intercept = np.hstack((np.ones((x.shape[0], 1)), x))\n\t\tpreds = np.round(sigmoid(weights, data_with_intercept))\n\t\t\n\t\tprint(\"Accuracy from scratch {0}\".format((preds == y).sum().astype(float) / len(preds)))\n\t\t\n\t\tplt.figure(figsize=(12, 8))\n\t\tplt.scatter(x[:, 0], x[:, 1], c=(preds == y)-1, alpha=.8, s=50)\n\t\tplt.show()\n\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"ofircohen205/DeepLearning","sub_path":"ex1/ex1a.py","file_name":"ex1a.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"32830262765","text":"from django.urls import path\nfrom . import views\nfrom.feed import latest_feed\n\nurlpatterns = [\n path('',views.index, name=\"home\"),\n path('samp/',views.samp, name=\"entry\"),\n path('login1/',views.login1, name=\"login\"),\n path('register/',views.register, name=\"register\"),\n path('logout/',views.logout, name=\"logout\"),\n path('feed/',latest_feed()),\n ]","repo_name":"Rajeev1127/Rajeev1127","sub_path":"trends2022/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"29783530957","text":"# heap_median_maintenance.py\r\n\"\"\"\r\nWith the following text file: Median.txt\r\n\r\nThe goal of this problem is to implement the \"Median Maintenance\" \r\nalgorithm (covered in the Week 5 lecture on heap applications). The \r\ntext file contains a list of the integers from 1 to 10000 in \r\nunsorted order; you should treat this as a stream of numbers, arriving\r\none by one. Letting x-subscript i denote the i-th number of the file, \r\nthe k-th median m-subscript k is defined as the median of the numbers \r\nx-subscript 1,..., x-subscript k. (So, if k is odd, then m-subscript k \r\nis ((k+1)/2)th smallest number among x-subscript 1,...,x-subscript k; \r\nif k is even, then m-subscript k is the (k/2)th smallest number among\r\nx-subscript 1,...,x-subscript k.)\r\n\r\nFind the sum of these 10000 medians, modulo 10000 \r\n(i.e., only the last 4 digits). That is, you should compute\r\n(m-subscript 1 + m-subscript 2 + ... + m-subscript 10000) mod 10000.\r\n\r\nOPTIONAL EXERCISE: Compare the performance achieved by heap-based and \r\nsearch-tree-based implementations of the algorithm.\r\n\"\"\"\r\n\r\nimport heap as h\r\n\r\ndef main():\r\n\r\n # Used to store highest half of elements\r\n high_half_heap = h.Heap()\r\n # Used to store lowest half of elements\r\n low_half_heap = h.Heap(\"max\")\r\n\r\n median, median_total = 0, 0\r\n\r\n with open(\"Median.txt\") as f:\r\n # Exclude the newline character, \\n\r\n for line in f:\r\n x = int(line.rstrip('\\n'))\r\n\r\n # Put x into the correct heap\r\n # If x > median, place x in high_half_heap\r\n if x > median:\r\n high_half_heap.insert(x)\r\n else:\r\n low_half_heap.insert(x)\r\n \r\n # Check if heaps need to be rebalanced\r\n rebalance(high_half_heap, low_half_heap)\r\n\r\n # Calculate the median given the new x\r\n median = find_median(high_half_heap, low_half_heap)\r\n\r\n # Add the median to the median_total\r\n median_total += median\r\n \r\n # Once the file is exhausted return_value = median_total%10000\r\n output = find_last_four(median_total)\r\n print(\"The last four digits of the sums of the medians is {}.\".format(output))\r\n\r\ndef rebalance(h_high, h_low):\r\n \"\"\"\r\n rebalance prevents one heap from hoarding all the values by \r\n keeping the difference in the number of items between each \r\n heap less than 2.\r\n \"\"\"\r\n\r\n if abs(h_high.length() - h_low.length()) > 1:\r\n if h_high.length() > h_low.length():\r\n root, prs = h_high.extract_root()\r\n if prs:\r\n h_low.insert(root)\r\n else:\r\n root, prs = h_low.extract_root()\r\n if prs:\r\n h_high.insert(root)\r\n rebalance(h_high, h_low)\r\n\r\ndef find_median(h_high, h_low):\r\n \"\"\"\r\n find_median determines the median value between the two heaps.\r\n \"\"\"\r\n\r\n count = h_high.length() + h_low.length()\r\n # r is the median's index\r\n r = 0\r\n if count%2 == 0:\r\n r = count//2\r\n else:\r\n r = (count + 1)//2\r\n \r\n if r > h_low.length():\r\n median, _ = h_high.peek()\r\n else:\r\n median, _ = h_low.peek()\r\n \r\n return median\r\n\r\ndef find_last_four(total):\r\n return total%10000\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"jeffvswanson/DataStructuresAndAlgorithms","sub_path":"Stanford/ProgrammingQuestion6_HeapsSearchTreesAndHashTables/Question2a_HeapMedianMaintenance/heap_median_maintenance.py","file_name":"heap_median_maintenance.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"}
+{"seq_id":"12419443767","text":"# @author : Buddhadeb Mondal\n\n# Import Libraries\nimport mediapipe as mp\nimport cv2\nimport numpy as np\nimport uuid\nimport os\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# Draw Hands\nmp_drawing = mp.solutions.drawing_utils\nmp_hands = mp.solutions.hands\n\n# Load class names\nf = open('gesture.names', 'r')\nclassNames = f.read().split('\\n')\nf.close()\nprint(classNames)\n\n# Video Capture\ncap = cv2.VideoCapture(0)\nwith mp_hands.Hands(min_detection_confidence=0.8, min_tracking_confidence=0.5) as hands:\n while cap.isOpened():\n ret, frame = cap.read()\n # cv2.imshow(\"Hand Tracking\", frame)\n\n # Changing BGR to RGB to make it work in Media Pipe\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n # Flip on horizontal\n image = cv2.flip(image, 1)\n # Set flag\n image.flags.writeable = False\n # Detections\n results = hands.process(image)\n # Set flag to true\n image.flags.writeable = True\n # RGB 2 BGR\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n # Detections\n # print(results)\n # logging.info(results)\n\n logging.info(results.multi_hand_landmarks)\n\n if results.multi_hand_landmarks:\n for num, hand in enumerate(results.multi_hand_landmarks):\n mp_drawing.draw_landmarks(image, hand, mp_hands.HAND_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(121, 22, 76), thickness=4, circle_radius=4),\n mp_drawing.DrawingSpec(color=(250, 44, 250), thickness=2, circle_radius=2))\n logging.info(mp_hands.HAND_CONNECTIONS)\n\n # write image\n # os.mkdir('Output Images')\n\n # cv2.imwrite(os.path.join('Output Images', '{}.jpg'.format(uuid.uuid1())), image)\n\n cv2.imshow(\"Hand Gesture Tracking\", image)\n\n if cv2.waitKey(1) == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"buddhadeb33/Hand_gesture_recognition","sub_path":"Hand_Gesture_recognition_2.py","file_name":"Hand_Gesture_recognition_2.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"30545559688","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nclass Node(object):\n def __init__(self, data, pnext=None):\n self.data = data\n self.next = pnext\n\n def __repr__(self):\n return repr(self.data)\n\n\nclass LinkList(object):\n def __init__(self):\n self.size = 0\n self.header = None\n\n def __repr__(self):\n link_list = []\n item = self.header\n while item.next:\n link_list.append(item.data)\n item = self._next(item, 1)\n return repr(link_list)\n\n def _last(self, item):\n # if item.next is None:\n # return item\n # else:\n # return self._last(item.next)\n return item if item.next is None else self._last(item.next)\n\n def append(self, item):\n if isinstance(item, Node):\n pass\n else:\n item = Node(item)\n if not self.header:\n self.header = item\n self.size += 1\n else:\n last = self._last(self.header)\n last.next = item\n self.size += 1\n\n @staticmethod\n def _next(item, count):\n if not isinstance(item, Node):\n raise Exception(\"need a Node\")\n while count:\n count -= 1\n item = item.next\n return item\n\n def update(self, index, item):\n if isinstance(item, Node):\n pass\n else:\n item = Node(item)\n update_item = self._next(self.header, index)\n update_item.data = item\n\n def insert(self, index, item):\n if isinstance(item, Node):\n pass\n else:\n item = Node(item)\n insert_item = self._next(self.header, index)\n item.next = insert_item.next\n insert_item.next = item\n self.size += 1\n\n def getitem(self, index):\n item = self._next(self.header, index)\n return item.data\n\n def delete(self, index):\n if index == 0:\n self.header = self.header.next\n self.size -= 1\n else:\n item = self._next(self.header, index - 1)\n item.next = item.next.next\n self.size -= 1\n\n def is_empty(self):\n return self.size == 0\n\n\nif __name__ == \"__main__\":\n link_list = LinkList()\n for i in range(5):\n link_list.append(i)\n print(link_list)\n print(link_list.getitem(2))\n link_list.insert(2, 10)\n print(link_list)\n link_list.delete(1)\n print(link_list)\n","repo_name":"iamdavidzeng/iadz","sub_path":"python/src/iadz/built_in/class/linklist_demo.py","file_name":"linklist_demo.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"}
+{"seq_id":"37764153024","text":"import datetime\nimport os.path\nimport pickle\nimport sys\n\nimport backtrader as bt\nfrom backtrader.indicators import EMA\n\n\nclass TestStrategy(bt.Strategy):\n def log(self, txt, dt=None):\n \"\"\" Logging function fot this strategy\"\"\"\n dt = dt or self.datas[0].datetime.date(0)\n # print('%s, %s' % (dt.isoformat(), txt))\n\n @staticmethod\n def percent(today, yesterday):\n return float(today - yesterday) / today\n\n def __init__(self):\n self.dataclose = self.datas[0].close\n self.volume = self.datas[0].volume\n\n self.order = None\n self.buyprice = None\n self.buycomm = None\n\n me1 = EMA(self.data, period=12)\n me2 = EMA(self.data, period=26)\n self.macd = me1 - me2\n self.signal = EMA(self.macd, period=9)\n\n bt.indicators.MACDHisto(self.data)\n\n def notify_order(self, order):\n if order.status in [order.Submitted, order.Accepted]:\n return\n if order.status in [order.Completed]:\n if order.isbuy():\n self.log(\n \"BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f\"\n % (order.executed.price, order.executed.value, order.executed.comm)\n )\n\n self.buyprice = order.executed.price\n self.buycomm = order.executed.comm\n self.bar_executed_close = self.dataclose[0]\n else:\n self.log(\n \"SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f\"\n % (order.executed.price, order.executed.value, order.executed.comm)\n )\n self.bar_executed = len(self)\n\n elif order.status in [order.Canceled, order.Margin, order.Rejected]:\n self.log(\"Order Canceled/Margin/Rejected\")\n\n self.order = None\n\n def notify_trade(self, trade):\n if not trade.isclosed:\n return\n\n self.log(\"OPERATION PROFIT, GROSS %.2f, NET %.2f\" % (trade.pnl, trade.pnlcomm))\n\n def next(self):\n self.log(\"Close, %.2f\" % self.dataclose[0])\n if self.order:\n return\n\n if not self.position:\n condition1 = self.macd[-1] - self.signal[-1]\n condition2 = self.macd[0] - self.signal[0]\n if condition1 < 0 and condition2 > 0:\n self.log(\"BUY CREATE, %.2f\" % self.dataclose[0])\n self.order = self.buy()\n\n else:\n condition = (self.dataclose[0] - self.bar_executed_close) / self.dataclose[\n 0\n ]\n if condition > 0.1 or condition < -0.1:\n self.log(\"SELL CREATE, %.2f\" % self.dataclose[0])\n self.order = self.sell()\n\n\ndef run_cerebro(stock_file, result):\n \"\"\"\n 运行策略\n :param stock_file: 股票数据文件位置\n :param result: 回测结果存储变量\n \"\"\"\n\n cerebro = bt.Cerebro()\n\n cerebro.addstrategy(TestStrategy)\n\n # 加载数据到模型中\n data = bt.feeds.GenericCSVData(\n dataname=stock_file,\n fromdate=datetime.datetime(2010, 1, 1),\n todate=datetime.datetime(2020, 4, 25),\n dtformat=\"%Y%m%d\",\n datetime=2,\n open=3,\n high=4,\n low=5,\n close=6,\n volume=10,\n reverse=True,\n )\n cerebro.adddata(data)\n\n # 本金10000,每次交易100股\n cerebro.broker.setcash(10000)\n cerebro.addsizer(bt.sizers.FixedSize, stake=100)\n\n # 万五佣金\n cerebro.broker.setcommission(commission=0.0005)\n\n # 运行策略\n cerebro.run()\n\n # 剩余本金\n money_left = cerebro.broker.getvalue()\n\n # 获取股票名字\n stock_name = stock_file.split(\"\\\\\")[-1].split(\".csv\")[0]\n\n # 将最终回报率以百分比的形式返回\n result[stock_name] = float(money_left - 10000) / 10000\n\n\nfiles_path = \"stocks/\"\nresult = {}\n\n# 遍历所有股票数据\nfor stock in os.listdir(files_path):\n modpath = os.path.dirname(os.path.abspath(sys.argv[0]))\n datapath = os.path.join(modpath, files_path + stock)\n print(datapath)\n try:\n run_cerebro(datapath, result)\n except Exception as e:\n print(e)\n\n\nf = open(\"./batch_macd_result.txt\", \"wb\")\npickle.dump(result, 
f)\nf.close()\n","repo_name":"UFund-Me/Qbot","sub_path":"pytrader/doc/03.macd_in_A_market/batch_macd.py","file_name":"batch_macd.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","stars":4844,"dataset":"github-code","pt":"65"}
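For reference, the indicator arithmetic the strategy above relies on (MACD = EMA12 - EMA26, signal = EMA9 of the MACD line) can be reproduced without backtrader; a minimal numpy sketch, assuming the conventional smoothing factor 2/(period+1):

import numpy as np

def ema(values, period):
    # exponential moving average with alpha = 2 / (period + 1)
    alpha = 2.0 / (period + 1)
    out = np.empty(len(values), dtype=float)
    out[0] = values[0]
    for i in range(1, len(values)):
        out[i] = alpha * values[i] + (1 - alpha) * out[i - 1]
    return out

def macd(closes, fast=12, slow=26, smooth=9):
    line = ema(closes, fast) - ema(closes, slow)
    signal = ema(line, smooth)
    return line, signal  # the strategy buys when `line` crosses above `signal`

closes = np.array([10.0, 10.5, 10.2, 10.8, 11.0, 10.9, 11.3])
line, signal = macd(closes)
print(line[-1] - signal[-1])  # positive -> MACD currently above its signal line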
+{"seq_id":"74189138768","text":"from tkinter import *\nfrom tkinter import filedialog\n\nroot= Tk()\n\ndef abrirFichero():\n fichero=filedialog.askopenfilename(title=\"Abrir\", initialdir=\"C:\", filetypes=((\"Ficheros de Excel\", \"*.xlsx\"),(\"Ficheros de texto\",\"*.txt\"),\n (\"Todos los ficheros\",\"*.*\"))) \n #initialdir=\"C:\" el directorio donde quiero iniciar la busqueda\n\n print(fichero)\n\n\nButton(root, text=\"Abrir fichero\", command=abrirFichero).pack()\n\nroot.mainloop()","repo_name":"yeisonCh/Python_primeros_pasos","sub_path":"Graficos/prueba_abrir_archivo.py","file_name":"prueba_abrir_archivo.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"31873978376","text":"from config import get_env\nimport os\nimport json\n\nclass LangHelper:\n def __init__(self, lang='en', bootcamp=\"\"):\n path = get_env('SETTING_FILE').rsplit('/', 1)\n filename = os.path.join(path[0], bootcamp, path[1])\n\n with open(filename) as f:\n data = json.load(f)[\"lang\"][lang]\n self.helper = data[\"helper\"]\n self.err_nbarg = data[\"err_nbarg\"]\n self.err_fmtarg = data[\"err_fmtarg\"]\n self.not_logged = data[\"not_logged\"]\n self.not_registered = data[\"not_registered\"]\n self.allready_registered = data[\"allready_registered\"]\n self.registration_success = data[\"registration_success\"]\n self.unregistration_success = data[\"unregistration_success\"]\n self.not_available = data[\"not_available\"]\n self.subject_success = data[\"subject_success\"]\n self.notdwl_subject = data[\"notdwl_subject\"]\n self.already_inpool = data[\"already_inpool\"]\n self.correctionmatch_success = data[\"correctionmatch_success\"]\n self.already_matched = data[\"already_matched\"]\n self.info = data[\"info\"]\n self.match_message = data[\"match_message\"]\n \n ","repo_name":"42-AI/sir-hiss","sub_path":"app/utils/langhelper.py","file_name":"langhelper.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"28649007464","text":"#Python Implementation Insertion Sort\n\ndef InsertionSort(list):\n\n #We consider the first element to be already sorted\n\n for i in range (1, len(list)):\n\n key = list[i]\n j = i-1\n\n while key=0:\n\n list[j], list[j+1] = list[j+1], list[j]\n \n j -= 1\n \n\n return list\n\n \nlist = [2, 13, 7, 4, 9, 3, 6]\n\nprint(InsertionSort(list))","repo_name":"nguyentungg/Algorithms","sub_path":"Sorting/Insertion Sort/InsertionSort.py","file_name":"InsertionSort.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"}
+{"seq_id":"42931096947","text":"import rospy\nfrom autoware_msgs.msg import Lane\nfrom visualization_msgs.msg import MarkerArray, Marker\nfrom geometry_msgs.msg import Point, Vector3\nfrom std_msgs.msg import ColorRGBA\n\ndef got_waypoints(lane):\n markers_array = MarkerArray()\n marker = Marker()\n markers_array.markers.append(marker)\n marker.header.frame_id = '/map'\n marker.ns = 'waypoint_path'\n marker.id = 0\n marker.action = Marker.MODIFY\n marker.type = Marker.LINE_STRIP\n marker.scale = Vector3(x=0.2, y=0.2, z=0.2)\n\n for waypoint in lane.waypoints:\n marker.points.append(waypoint.pose.pose.position)\n #marker.colors.append(ColorRGBA(r=waypoint.cost, g=1-waypoint.cost, b=0, a=1))\n marker.colors.append(ColorRGBA(r=0.2, g=0.2, b=1, a=1))\n \n pub.publish(markers_array)\n\nrospy.init_node('waypoint_path_marker')\nrospy.Subscriber('/final_waypoints', Lane, got_waypoints)\npub = rospy.Publisher('/local_waypoints_mark', MarkerArray, queue_size=10)\nrospy.spin()","repo_name":"Propeng/planning","sub_path":"path_marker.py","file_name":"path_marker.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"4765274801","text":"import PIL\nimport numpy as np\nmean=(91.4953, 103.8827, 131.0912)\n\ndef load_data(path=''):\n\n img = PIL.Image.open(path)\n im_shape = np.array(img.size) # in the format of (width, height, *)\n img = img.convert('RGB')\n x = np.array(img) # image has been transposed into (height, width)\n x = x[:, :, ::-1] - mean\n return x\n\n","repo_name":"aascode/FG_2020","sub_path":"Video/video_utils/VGG_face2/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"65"}
+{"seq_id":"15811001298","text":"import json\nimport logging\n\nfrom .method import Method, create_method_class\n\nclass Object(object):\n _log = logging.getLogger(\"caffa-object\")\n\n _methods = []\n\n def __init__(self, json_object=\"\", client=None, local=False):\n if isinstance(json_object, dict):\n self._fields = json_object\n else:\n self._fields = json.loads(json_object)\n\n self._client = client\n self._local = local\n\n if not self._local:\n assert self._client is not None\n\n self._method_list = []\n for method in self.__class__._methods:\n method_instance = method(self_object = self)\n setattr(self, method.static_name(), method_instance)\n self._method_list.append(method_instance)\n \n\n @classmethod\n def create(cls, **kwargs):\n return cls(json_object=kwargs, client=None, local=True)\n\n @property\n def keyword(self):\n return self._fields[\"keyword\"]\n\n def client(self):\n return self._client\n\n def to_dict(self):\n content = {}\n for key in self._fields:\n value = self.get(key)\n if isinstance(value, Object):\n value = value.to_dict()\n content[key] = value\n return content\n\n def to_json(self):\n return json.dumps(self.to_dict())\n\n def field_keywords(self):\n keywords = []\n for keyword in self._fields:\n keywords.append(keyword)\n return keywords\n\n def get(self, field_keyword):\n value = None\n if not self._local:\n value = json.loads(self._client.get_field_value(self._fields[\"uuid\"], field_keyword))\n elif self._fields and field_keyword in self._fields:\n value = self._fields[field_keyword]\n\n if value is None:\n raise Exception(\"Field \" + field_keyword + \" did not exist in object\")\n\n if isinstance(value, dict):\n keyword = value[\"keyword\"]\n schema = self._client.schema(keyword)\n cls = create_class(keyword, schema)\n value = cls(value, self._client)\n return value\n\n def set(self, field_keyword, value):\n if isinstance(value, Object):\n value = object.to_json()\n if not self._local:\n self._client.set_field_value(self.uuid, field_keyword, value)\n else:\n self._fields[field_keyword][\"value\"] = value\n\n def create_field(self, keyword, type, value):\n self._fields[keyword] = {\"type\": type, \"value\": value}\n\n def set_fields(self, **kwargs):\n for key, value in kwargs.items():\n self.set(key, value)\n\n def execute(self, object_method, arguments):\n return self.client().execute(self.uuid, object_method.name(), arguments)\n\n def methods(self):\n return self._method_list\n\n def dump(self):\n return json.dumps(self.to_json())\n\ndef make_read_lambda(property_name):\n return lambda self: self.get(property_name)\n\ndef make_write_lambda(property_name):\n return lambda self, value: self.set(property_name, value)\n\n\ndef create_class(name, schema):\n def __init__(self, json_object=\"\", client=None, local=False): \n Object.__init__(self, json_object, client, local)\n \n newclass = type(name, (Object,),{\"__init__\": __init__})\n \n if \"properties\" in schema:\n for property_name in schema[\"properties\"]:\n if property_name != \"keyword\" and property_name != \"methods\":\n setattr(newclass, property_name, property(fget=make_read_lambda(property_name), fset=make_write_lambda(property_name)))\n elif property_name == \"methods\":\n for method_name, method_schema in schema[\"properties\"][\"methods\"][\"properties\"].items():\n method_schema = method_schema[\"properties\"]\n newclass._methods.append(create_method_class(method_name, method_schema))\n\n return 
newclass\n","repo_name":"lindkvis/caffa-python","sub_path":"object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"}
+{"seq_id":"44734844168","text":"N = int(input())\ndice = list(list(map(int,input().split())) for i in range(N))#여기까진 인풋을 받는과정\ndef find_top(i):#i가 바닥인데 바닥의 인덱스를 받으면 위로 오는 부분의 인덱스를 반환하는 함수\n if i == 0:\n return 5\n if i == 1:\n return 3\n if i == 2:\n return 4\n if i == 3:\n return 1\n if i == 4:\n return 2\n if i == 5:\n return 0\nans = 0\nfor bottom in range(6):#바닥이 하나가 정해지면 나머진 다 정해져있는 경우기 때문에 첫 주사위의 각 면을 바닥으로 하고 그 떄의 결과들을 비교해서 최대값을 찾는 부분\n top = find_top(bottom) #1번주사위의 꼭대기 면\n top_num = dice[0][top] #1번주사위의 꼭대기 숫자가 2번주사위의 꼭대기 숫자와 같아야하니까\n smaller = min(top,bottom) #바닥과 꼭대기 중에서 더 작은 인덱스 값을 가지는거\n larger = max(top,bottom) #더 큰 인덱스 가지는거죠\n temp = max(dice[0][:smaller]+dice[0][smaller+1:larger]+dice[0][larger+1:])#옆면의 최대값을 찾습니다.\n for i in range(1,N): #0번주사위는 했으니까 1번주사위부터 N-1번주사위까지 올라가면서 옆면의 최대값을 찾아서 temp에 더해줍니다.\n for k in range(6):\n if dice[i][k] == top_num:#바로 아래있는 주사위의 윗면 값이랑 같은 면을 찾아서 바닥에 둡니다.\n nb = k#nb-> nowbottom\n break\n\n top = find_top(nb)#꼭대기로 갈 면을 찾고\n top_num = dice[i][top]# 새로운 꼭대기를 업데이트\n smaller = min(top, nb)\n larger = max(top, nb)\n temp += max(dice[i][:smaller] + dice[i][smaller + 1:larger] + dice[i][larger + 1:])#i번쨰 주사위의 옆면중 가장 큰값\n if temp>ans:#모든 주사위를 다 쌓고 옆면의 최대값을더해준 ���이 현재까지의 최대값보다 크다면\n ans = temp#갱신\nprint(ans)\n","repo_name":"YoonSeok-Woo/studygroup","sub_path":"yoonseok/0826/BOJ_15703.py","file_name":"BOJ_15703.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"4000970484","text":"import flask\nfrom oslo_log import log as logging\n\nfrom designate.central import rpcapi as central_rpcapi\n\n\nLOG = logging.getLogger(__name__)\ncentral_api = central_rpcapi.CentralAPI()\nblueprint = flask.Blueprint('quotas', __name__)\n\nKEYS_TO_SWAP = {\n 'zones': 'domains',\n 'zone_records': 'domain_records',\n 'zone_recordsets': 'domain_recordsets',\n 'recordset_records': 'recordset_records',\n 'api_export_size': 'api_export_size',\n}\n\nKEYS_TO_SWAP_REVERSE = {\n 'domains': 'zones',\n 'domain_records': 'zone_records',\n 'domain_recordsets': 'zone_recordsets',\n 'recordset_records': 'recordset_records',\n 'api_export_size': 'api_export_size',\n}\n\n\ndef swap_keys(quotas, reverse=False):\n\n if reverse:\n quotas = {KEYS_TO_SWAP_REVERSE[k]: quotas[k] for k in quotas}\n else:\n quotas = {KEYS_TO_SWAP[k]: quotas[k] for k in quotas}\n return quotas\n\n\n@blueprint.route('/quotas/', methods=['GET'])\ndef get_quotas(tenant_id):\n context = flask.request.environ.get('context')\n\n quotas = central_api.get_quotas(context, tenant_id)\n\n quotas = swap_keys(quotas)\n\n return flask.jsonify(quotas)\n\n\n@blueprint.route('/quotas/', methods=['PUT', 'POST'])\ndef set_quota(tenant_id):\n context = flask.request.environ.get('context')\n values = flask.request.json\n\n values = swap_keys(values, reverse=True)\n\n for resource, hard_limit in values.items():\n central_api.set_quota(context, tenant_id, resource, hard_limit)\n\n quotas = central_api.get_quotas(context, tenant_id)\n quotas = swap_keys(quotas)\n\n return flask.jsonify(quotas)\n\n\n@blueprint.route('/quotas/', methods=['DELETE'])\ndef reset_quotas(tenant_id):\n context = flask.request.environ.get('context')\n\n central_api.reset_quotas(context, tenant_id)\n\n return flask.Response(status=200)\n","repo_name":"gongwayne/Openstack","sub_path":"designate/api/v1/extensions/quotas.py","file_name":"quotas.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"}
+{"seq_id":"52233331","text":"\nfrom .RedBlackTreeNodeABC import MutableRedBlackTreeNodeABC\nfrom .OrderedUnbalancedMultiWayTreeNodeABC import OrderedBinaryTreeNodeABC\n\nclass OrderedMutableRedBlackTreeNodeABC(MutableRedBlackTreeNodeABC,\n OrderedBinaryTreeNodeABC):\n # should avoid call insert_entity_at_leaf directly since \"ordered\"\n # using insert_entity_as_first/insert_entity_as_last\n\n \n def iter_entities(self):\n yield self.entity\n def iter_reversed_entities(self):\n yield self.entity\n\n # root -> root\n # should follow methods be tree methods instead of node methods?\n def insert_entity_as_first(self, entity):\n assert self.is_root()\n key = self.entity2key(entity)\n leaf = self.subtree_find_begin_leaf(key)\n return leaf.insert_entity_at_leaf(entity)\n def insert_entity_as_last(self, entity):\n assert self.is_root()\n key = self.entity2key(entity)\n leaf = self.subtree_find_end_leaf(key)\n return leaf.insert_entity_at_leaf(entity)\n\n\n def subtree_contains(self, key):\n node = self.subtree_find_maybe_first_nonleaf(key)\n return node.is_nonleaf()\n def subtree_get_first_entity(self, key):\n node = self.subtree_find_maybe_first_nonleaf(key)\n if node.is_leaf():\n raise KeyError(key)\n return node.entity\n def subtree_get_last_entity(self, key):\n node = self.subtree_find_maybe_last_nonleaf(key)\n if node.is_leaf():\n raise KeyError(key)\n return node.entity\n def subtree_find_maybe_first_nonleaf(self, key):\n # if return leaf, then not found\n leaf = self.subtree_find_begin_leaf(key)\n try:\n nonleaf = leaf.leaf_inorder_succ_nonleaf()\n except StopIteration:\n return leaf\n k = self.entity2key(nonleaf.entity)\n if self.key_lt(key, k):\n # key < k\n return leaf\n return nonleaf\n def subtree_find_maybe_last_nonleaf(self, key):\n # if return leaf, then not found\n leaf = self.subtree_find_end_leaf(key)\n try:\n nonleaf = leaf.leaf_inorder_prev_nonleaf()\n except StopIteration:\n return leaf\n k = self.entity2key(nonleaf.entity)\n if self.key_lt(k, key):\n # k < key\n return leaf\n return nonleaf\n \n \n def subtree_remove_first_entity(self, key):\n node = self.subtree_find_maybe_first_nonleaf(key)\n if node.is_leaf():\n raise KeyError(key)\n return node.remove_entity_at_nonleaf()\n \n def subtree_remove_last_entity(self, key):\n node = self.subtree_find_maybe_last_nonleaf(key)\n if node.is_leaf():\n raise KeyError(key)\n return node.remove_entity_at_nonleaf()\n\n\n\n\n","repo_name":"edt-yxz-zzd/python3_src","sub_path":"nn_ns/data_structure/RedBlackTree-OrderedSet-backup-20180506/RedBlackTree/backup-20160810/OrderedMutableRedBlackTreeNodeABC.py","file_name":"OrderedMutableRedBlackTreeNodeABC.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"}
+{"seq_id":"12876144044","text":"# from pprint import pprint as print\n\n# addend = []\n\n# with open('data.in') as f:\n# data = f.readlines()\n# repeat = len(data)\n# data = list(map(list, data))\n# for d in data:\n# addend.append(3 * d[:-1])\n\n# count = 0\n# tree = '#'\n\n# row, column = 0, 0\n\n# while True:\n# if row == 0:\n# row += 1\n# column += 3\n# continue\n# try:\n# check = addend[row][column]\n# row += 1\n# column += 3\n# if check == tree:\n# count += 1\n# except IndexError:\n# break\n\n# print(count)\n\nwith open('data.in') as f:\n biome = f.readlines()\n biome = list(map(lambda s: s[:-1], biome))\n\ntree = '#'\ncount = 0\ncheck = 0\n\nfor row in biome[1:]:\n if check >= 31:\n check -= 31\n if row[check] == tree:\n count += 1\n check += 3\n\nprint(count)\n","repo_name":"IgnisDa/learning","sub_path":"daily-practice/advent-of-code/day-3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"29505791151","text":"from .reader import Reader\nfrom .puzzle import Puzzle\n\nclass SudokuSolver:\n\n def __init__(this):\n this.puzzle = Puzzle()\n\n def read_from_file(this, path):\n '''\n Read a puzzle from a file\n '''\n reader = Reader()\n this.puzzle = reader.read_from_file(path)\n this.initial_state = reader.read_from_file(path)\n\n def read_from_lines(this, lines):\n\n this.puzzle = Puzzle()\n this.initial_state = Puzzle()\n this.puzzle.parse_from_lines(lines)\n this.initial_state.parse_from_lines(lines)\n \n def validate_puzzle(this):\n '''\n Will check each row, col and square to see if the puzzle is valid.\n '''\n for i in range(0,9):\n if not this.puzzle.validate_column(i):\n return False\n if not this.puzzle.validate_row(i):\n return False\n\n for i in range(0, 3):\n for j in range(0,3):\n if not this.puzzle.validate_square(i*3, j*3):\n return False\n\n def get_puzzle(this):\n '''\n Will return a puzzle in list of lists format.\n '''\n\n return this.puzzle.board\n\n\n\n \n \n \n","repo_name":"TomJimkesGit/SudokuSolver","sub_path":"sudoku_solver/src/sudoku_solver.py","file_name":"sudoku_solver.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"22615237793","text":"import json\nimport sys\nimport numpy\n\nfilename = sys.argv[1]\nfile_read = open(filename, \"r\")\nfile_write = open(filename[:len(filename)-4]+\".json\", \"w\") \n\ncoordinates = []\nlongitude_list = []\nmedian_list = []\nnum_entries = 0\nmean_count = 0\nmean_sum = 0\nnum_output = 0\nmin_entry = 1000000\nmax_entry = 0\n\nlines = file_read.read().splitlines()\n\n#find mean, median, max, and min value\nfor line in lines[1:]:\n\tinfo = line.split(\",\")\n\tfor data in info[1:]:\n\t\ttest = float(data)\t\n\t\tif (test == 99999.0):\n\t\t\tnum_entries += 1\n\t\t\tcontinue\n\t\tmean_sum += test\n\t\tmean_count += 1\n\t\tnum_entries += 1\n\t\tmedian_list.append(test)\n\t\tif (test < min_entry):\n\t\t\tmin_entry = test\n\t\tif (test > max_entry):\n\t\t\tmax_entry = test\n\nmean = float(mean_sum/mean_count)\nmedian = numpy.median(numpy.array(median_list))\n\n#create list of longitudes\nlongitudes = lines[0].split(\",\")\nfor longitude in longitudes[1:]:\n\tlongitude_list.append(longitude)\n\n#create dictionary for every coordinate \nfor line in lines[1:]:\n\tinfo = line.split(\",\")\n\tlatitude = info[0]\n\tlongitude_index = 0\n\tfor data in info[1:]:\n\t\tvalue = float(data)\n\t\tif (value == 99999.0 or value < mean):\n\t\t\tlongitude_index += 1\n\t\t\tcontinue\n\t\twrite_value = value/max_entry\n\t\tcurr_coordinate = {}\n\t\tcurr_coordinate[\"latitude\"] = latitude\n\t\tcurr_coordinate[\"value\"] = str(write_value) \n\t\tcurr_coordinate[\"longitude\"] = longitude_list[longitude_index]\n\t\t#add to list of coordinates \n\t\tcoordinates.append(curr_coordinate)\n\t\tlongitude_index += 1\n\t\tnum_output += 1\n\n#dump list of coordinates into file as JSON format\njson.dump(coordinates, file_write, indent=0)\n\nprint(\"Number of Entries: \" + str(num_entries))\nprint(\"Mean: \" + str(mean))\nprint(\"Median: \" + str(median))\nprint(\"Maximum: \" + str(max_entry))\nprint(\"Minimum: \" + str(min_entry))\nprint(\"Number of Output: \" + str(num_output))\nprint(\"Removed Entries: \" + str(num_entries - num_output))\n\nfile_read.close()\nfile_write.close()","repo_name":"douglashuang/EarthFace-NASA","sub_path":"csv2json.py","file_name":"csv2json.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"29163513694","text":"from setuptools import setup, Extension, find_packages\n\narrays_module = Extension('matropy.arrays', sources = ['matropy/src/arrays/array_py.c', 'matropy/src/arrays/array.c'])\nrand_module = Extension('matropy.rand', sources = ['matropy/src/rand/rand_py.c', 'matropy/src/rand/rand.c'])\n\nsetup(\n name = \"matropy\",\n version = \"0.0.0\",\n description = 'The new C Optimized Numerical Library',\n author = 'Somiparno Chattopadhyay',\n ext_modules = [arrays_module, rand_module],\n packages = ['matropy']\n)\n","repo_name":"clueless-skywatcher/matropy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"40734691195","text":"from tkinter import *\n\nclass SampleApp(Tk):\n\n def __init__(self, s):\n\n Tk.__init__(self)\n self.title(\"GUI\")\n x = 200\n y = s*30\n self.geometry(str(x) + \"x\" + str(y))\n\n for i in range(1, s+1):\n\n self.button = Label(self, text=\"Button \" + str(i))\n self.button.pack()\n\napp = SampleApp(7)\napp.mainloop()","repo_name":"Shrutigoyal1201/AI-pythonproject","sub_path":"minor projects/dynamicbutton.py","file_name":"dynamicbutton.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"40545202285","text":"import ffmpeg\nimport cv2\nimport os\nimport ffmpy\n\n\n\n# # 需要转换格式的视频文件,文件真实存在\n# source_file = r\"/Volumes/T7/IMG_9985.MOV\"\n# # 转换成功后的视频文件,文件夹真实存在,不会自动创建\n# sink_file = r\"/Volumes/T7/IMG_9985.mp4\"\n#\n# ff = ffmpy.FFmpeg(\n# inputs = {source_file: None},\n# outputs = {sink_file: None})\n# ff.run()\n\n\n#\n# cameraCapture = cv2.VideoCapture('/Users/liufucong/Downloads/97a9d0beeab55b14047c4ddfd02bda35_0_1683612198.mp4')\n#\n#\n# print(cameraCapture.get(cv2.CAP_PROP_FPS))\n# print(cameraCapture.get(cv2.CAP_PROP_FRAME_COUNT))\n# print(cameraCapture.get(cv2.CAP_PROP_POS_AVI_RATIO))\n# print(cameraCapture.get(cv2.CAP_PROP_FRAME_WIDTH))\n# print(cameraCapture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\ncap = cv2.VideoCapture('/Users/liufucong/Downloads/97a9d0beeab55b14047c4ddfd02bda35_0_1683612198.mp4')\n# 用于保存视频的VideoWriter\n# fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n# fourcc = cv2.VideoWriter_fourcc('X','2','6','4')\n# fourcc = cv2.VideoWriter_fourcc('M', 'P', '4', '2')\nout = cv2.VideoWriter('/Users/liufucong/Downloads/test_cv2_zhenlv30.mp4', 0x31637661, 30, (int(cap.get(3)), int(cap.get(4))))\nwhile cap.isOpened():\n (ret, frame) = cap.read()\n if ret == True:\n # with torch.no_grad():\n # frame = cv2.cvtColor(frame)\n # result, names = aa.detect([frame])\n # new_img = result[0][0]\n\n frame = cv2.resize(frame, (int(cap.get(3)), int(cap.get(4))))\n out.write(frame)\n else:\n break\ncap.release()\nout.release()\ncv2.destroyAllWindows()","repo_name":"jeepmeng/all_test_file","sub_path":"mov_mp4.py","file_name":"mov_mp4.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18325396179","text":"import sys\n\nsys.setrecursionlimit(10 ** 7)\ninput = sys.stdin.readline\nf_inf = float('inf')\nmod = 10 ** 9 + 7\n\n\ndef resolve():\n a, b = map(int, input().split())\n if a < 10 and b < 10:\n print(a * b)\n else:\n print(-1)\n\n\nif __name__ == '__main__':\n resolve()\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02879/s585354286.py","file_name":"s585354286.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"17936935969","text":"import sys\nimport os\nimport re\nimport datetime\nimport bisect\n\n####### main #######\n\nN = int(input())\n\npartsNumA = sorted(map(int, input().split()))\npartsNumB = sorted(map(int, input().split()))\npartsNumC = sorted(map(int, input().split()))\n\n# Bに対してAの個数のBによるループ\npartsNumSumBa = []\nsumA = 0\nfor curPartsB in partsNumB:\n indexA = bisect.bisect_left(partsNumA, curPartsB)\n numA = indexA\n sumA += numA\n partsNumSumBa.append(sumA)\n\n# Cに対してBの個数のCによるループ\nsumBAll = 0\nfor curPartsC in partsNumC:\n indexB = bisect.bisect_left(partsNumB, curPartsC)\n if indexB == 0:\n continue\n numB = partsNumSumBa[indexB - 1]\n sumBAll += numB\n\nprint(sumBAll)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03559/s388927111.py","file_name":"s388927111.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"16373572377","text":"#web scraping for airport TSA times\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nurl = 'https://orlandoairports.net/'\r\npage = requests.get(url)\r\nsoup = BeautifulSoup(page.content, 'html.parser')\r\nwait_time = soup.find(\"div\", {\"class\": 'wait-time'}).get_text()\r\n#result = soup.find(\"span\", {\"class\": 'gate1'}).get_text()\r\nprint(wait_time.strip())\r\n#print (result)\r\n\r\ndef log_error(e):\r\n \"\"\"Prints out error if occurs\"\"\"\r\n print(e)\r\n\r\n","repo_name":"jenny-tru/brunch","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"930438550","text":"\"\"\"\nGiven an array of n positive integers.\n Write a program to find the sum of maximum sum subsequence \n of the given array such that the integers in the subsequence are sorted in increasing order.\ninput is {1, 101, 2, 3, 100, 4, 5},\n then output should be 106 (1 + 2 + 3 + 100), if the input array is ,\"\"\"\nnums = [1, 101, 2, 3, 100, 4, 5]\ndp = [0 for i in range(len(nums))]\ndp[0] = nums[0]\nfor i in range(1,len(nums)):\n dp[i] = nums[i]\n for j in range(i):\n if nums[j] < nums[i]:\n dp[i] = max(dp[i] , dp[j] + nums[i])\nprint(max(dp))","repo_name":"xavifeds8/competative-programming","sub_path":"dynamic programiming/maximum sum increasing subsequence.py","file_name":"maximum sum increasing subsequence.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"72604513257","text":"from django.conf.urls import url\n\nimport pages.views as views\nimport pages.admin_views as admin_views\n\nurlpatterns = [\n url(r'^classes/(?P\\w+).json$', views.ApeClassView.as_view(), name='ape_class'),\n url(r'^events/(?P\\w+).json$', views.EventView.as_view(), name='event'),\n url(r'^people/(?P\\w+).json$', views.PersonView.as_view(), name='person'),\n url(r'^house_teams/(?P\\w+).json$', views.HouseTeamView.as_view(), name='house_team'),\n\n url(r'^(?P\\d+).json', views.PageView.as_view(), name=\"page\"),\n url(r'^(?P[a-zA-Z]\\w*).json$', views.PageView.as_view(), name=\"page\"),\n\n url(r'^admin/generic_object_lookup/', admin_views.GenericObjectLookup.as_view(), name='generic_object_lookup'),\n]","repo_name":"zachcalvert/the_ape_theater","sub_path":"the_ape/pages/api_urls.py","file_name":"api_urls.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"}
+{"seq_id":"74946850855","text":"# -*- coding: utf-8 -*-\n\"\"\"\nProblem 172 - Investigating numbers with few repeated digits\n\nHow many 18-digit numbers n (without leading zeros) are there such that no digit\noccurs more than three times in n?\n\"\"\"\nfrom common import memoize\n\n\n@memoize\ndef numbers(length, occur3, occur2, occur1):\n \"\"\"\n Return the number of `length`-digit numbers with:\n\n - `occur3` digits that can be used at most 3 times\n - `occur2` digits that can be used at most 2 times\n - `occur1` digits that can be used at most 1 time\n \"\"\"\n if length == 1:\n return occur3 + occur2 + occur1\n\n count = 0\n if occur3:\n count += occur3*numbers(length-1, occur3-1, occur2+1, occur1)\n if occur2:\n count += occur2*numbers(length-1, occur3, occur2-1, occur1+1)\n if occur1:\n count += occur1*numbers(length-1, occur3, occur2, occur1-1)\n\n return count\n\n\ndef solution():\n return 9*numbers(17, occur3=9, occur2=1, occur1=0)\n\n\nif __name__ == '__main__':\n print(solution())\n","repo_name":"yred/euler","sub_path":"python/problem_172.py","file_name":"problem_172.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"10427943380","text":"#!/usr/bin/env python3\n\nimport sys\nimport click\nimport os\nimport ujson\nimport psycopg2\nimport subprocess\nimport gzip\nimport datetime\nfrom time import time, sleep\nfrom threading import Thread, Lock\nfrom time import time\nfrom collections import defaultdict\nfrom psycopg2.errors import OperationalError, DuplicateTable, UntranslatableCharacter\nfrom psycopg2.extras import execute_values\nimport config\n\nNUM_THREADS = 5 \nNUM_CACHE_ENTRIES = NUM_THREADS * 2\nUPDATE_INTERVAL = 500000\nBATCH_SIZE = 2000\n\nCREATE_LISTEN_TABLE_QUERIES = [\n\"\"\"\n CREATE TABLE listen (\n listened_at BIGINT NOT NULL,\n track_name TEXT NOT NULL,\n user_name TEXT NOT NULL,\n created TIMESTAMP WITH TIME ZONE DEFAULT NOW(),\n data JSONB NOT NULL\n )\n\"\"\",\n\"SELECT create_hypertable('listen', 'listened_at', chunk_time_interval => 432000)\",\n\"GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO listenbrainz_ts\",\n\"CREATE OR REPLACE FUNCTION unix_now() returns BIGINT LANGUAGE SQL STABLE as $$ SELECT extract(epoch from now())::BIGINT $$\",\n\"SELECT set_integer_now_func('listen', 'unix_now')\",\n\"\"\"\nCREATE VIEW listen_count\n WITH (timescaledb.continuous, timescaledb.refresh_lag=43200, timescaledb.refresh_interval=3600)\n AS SELECT time_bucket(bigint '86400', listened_at) AS listened_at_bucket, user_name, count(listen)\n FROM listen group by time_bucket(bigint '86400', listened_at), user_name;\nCREATE VIEW listened_at_max\n WITH (timescaledb.continuous, timescaledb.refresh_lag=43200, timescaledb.refresh_interval=3600)\n AS SELECT time_bucket(bigint '86400', listened_at) AS listened_at_bucket, user_name, max(listened_at) AS max_value\n FROM listen group by time_bucket(bigint '86400', listened_at), user_name;\nCREATE VIEW listened_at_min\n WITH (timescaledb.continuous, timescaledb.refresh_lag=43200, timescaledb.refresh_interval=3600)\n AS SELECT time_bucket(bigint '86400', listened_at) AS listened_at_bucket, user_name, min(listened_at) AS min_value\n FROM listen group by time_bucket(bigint '86400', listened_at), user_name;\n\"\"\",\n\"GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO listenbrainz_ts;\"\n]\n\nCREATE_INDEX_QUERIES = [\n \"CREATE INDEX listened_at_user_name_ndx_listen ON listen (listened_at DESC, user_name)\",\n \"CREATE UNIQUE INDEX listened_at_track_name_user_name_ndx_listen ON listen (listened_at DESC, track_name, user_name)\"\n]\n\ndef key_count(listen):\n ''' Return the count of top level keys and track_metadata keys as a rough measure as to which listen\n has more \"information\".\n '''\n return len(listen.keys()) + len(listen['track_metadata'].keys())\n\n\ndef remove_empty_keys(listen):\n\n if \"track_metadata\" in listen:\n listen[\"track_metadata\"] = {k: v for k, v in listen[\"track_metadata\"].items() if v }\n if \"additional_info\" in listen[\"track_metadata\"]:\n listen[\"track_metadata\"][\"additional_info\"] = {k: v for k, v in listen[\"track_metadata\"][\"additional_info\"].items() if v }\n\n return listen\n\n\nclass ListenWriter(Thread):\n\n def __init__(self, li, conn):\n Thread.__init__(self)\n\n self.conn = conn\n self.done = False\n self.li = li\n\n\n def exit(self):\n self.done = True\n\n\n def write_listens(self, listens):\n\n with self.conn.cursor() as curs:\n query = \"INSERT INTO listen (listened_at, track_name, user_name, created, data) VALUES %s\"\n try:\n t0 = time()\n execute_values(curs, query, listens, template=None)\n self.conn.commit()\n t1 = time()\n except psycopg2.OperationalError as err:\n print(\"failed to 
insert rows\", err)\n return\n\n dt = datetime.datetime.fromtimestamp(listens[0][0])\n print(\"Inserted %d rows in %.3f, %d rows/s, ts %d %d-%02d\" % (len(listens), t1-t0, int(len(listens)/(t1-t0)), listens[0][0], dt.year, dt.month))\n\n\n def run(self):\n\n while not self.done:\n batch = self.li.get_batch()\n if batch:\n self.write_listens(batch)\n else:\n sleep(.05)\n\n\n\nclass ListenImporter(object):\n\n def __init__(self, conn):\n self.total = 0\n self.conn = conn\n self.batches = []\n self.lock = Lock()\n self.total = 0\n self.t0 = 0\n self.html = None\n\n self.exact_dup_count = 0\n self.counts = defaultdict(int)\n\n\n def create_tables(self):\n\n with self.conn.cursor() as curs:\n while True:\n try:\n for query in CREATE_LISTEN_TABLE_QUERIES:\n curs.execute(query)\n self.conn.commit()\n print(\"created tables\")\n break\n\n except DuplicateTable as err:\n self.conn.rollback()\n print(\"dropped old table\")\n curs.execute(\"DROP VIEW listen_count CASCADE\")\n curs.execute(\"DROP TABLE listen CASCADE\")\n self.conn.commit()\n\n\n\n def create_indexes(self):\n\n print(\"create indexes\")\n with self.conn.cursor() as curs:\n for query in CREATE_INDEX_QUERIES:\n print(query)\n curs.execute(query)\n\n\n def num_batches(self):\n self.lock.acquire()\n batches = len(self.batches)\n self.lock.release()\n\n return batches\n\n\n def get_batch(self):\n\n self.lock.acquire()\n if len(self.batches):\n listens = self.batches.pop(0)\n self.lock.release()\n return listens\n\n self.lock.release()\n return None\n\n\n def add_batch(self, listens):\n\n\n if not self.t0:\n self.t0 = time()\n\n while True:\n self.lock.acquire()\n if len(self.batches) >= NUM_CACHE_ENTRIES:\n self.lock.release()\n sleep(.01)\n continue\n\n self.batches.append(listens)\n self.lock.release()\n\n self.total += len(listens)\n if self.total % UPDATE_INTERVAL == 0:\n print(\"queued %d listens. %d rows/s\" % (self.total, int(UPDATE_INTERVAL / (time() - self.t0))))\n self.t0 = time()\n\n return\n\n\n\n def cleanup_listen(self, listen):\n\n tm = listen['track_metadata']\n\n # Clean up null characters in the data\n if tm['artist_name']:\n tm['artist_name'] = tm['artist_name'].replace(\"\\u0000\", \"\")\n if tm['track_name']:\n tm['track_name'] = tm['track_name'].replace(\"\\u0000\", \"\")\n if tm['release_name']:\n tm['release_name'] = tm['release_name'].replace(\"\\u0000\", \"\")\n\n return remove_empty_keys(listen)\n\n\n def output_duplicate_resolution(self, test, chosen, listen_0, listen_1):\n return 0\n\n self.html.write(\"
\" % (\"chosen\" if chosen == 1 else \"rejected\"))\n self.html.write(ujson.dumps(listen_1, indent=4, sort_keys=True))\n with open(\"/tmp/a.json\", \"w\") as f:\n f.write(ujson.dumps(listen_0, indent=4, sort_keys=True))\n with open(\"/tmp/b.json\", \"w\") as f:\n f.write(ujson.dumps(listen_1, indent=4, sort_keys=True))\n diff = subprocess.run([\"diff\", \"/tmp/a.json\", \"/tmp/b.json\"], capture_output=True) \n self.html.write(\"
diff
%s
\" % diff.stdout.decode(\"utf-8\"))\n\n\n def check_for_duplicates(self, listen, lookahead):\n ''' \n Check for verious types of duplicate tracks. If this track should be inserted\n into the DB, return True. If it should be skipped (e.g. because there is a better \n match in the lookahead), return False\n '''\n\n if not len(lookahead):\n return\n\n # there is weird shit at the start of last.fm. Start checking in 2007\n if listen['listened_at'] > 1167609600:\n tdiff = lookahead[-1]['listened_at'] - listen['listened_at']\n if tdiff <= 2:\n print(lookahead[-1]['listened_at'], listen['listened_at'])\n\n if tdiff <= 2: \n print(\"Possible lookahead underflow, less than 2 seconds in buffer! All good if the process is done! lookahead len: %d\" % len(lookahead))\n\n reached_end_of_la = True\n for i, la_listen in enumerate(lookahead):\n # check for exact duplicate, skip this listen if duplicate\n tm = listen['track_metadata']\n la_tm = la_listen['track_metadata']\n\n# print(\"0 %d %s %30s %30s\" % (listen['listened_at'],\n# listen['recording_msid'][:6], \n# tm['track_name'][:29],\n# listen['user_name']))\n# print(\"1 %d %s %30s %30s\" % (la_listen['listened_at'], \n# la_listen['recording_msid'][:6], \n# la_tm['track_name'][:29], \n# la_listen['user_name']))\n\n # Check to see if recording_msid is the same -- if so, it is a true duplicate\n # which should never happen.\n if listen['listened_at'] == la_listen['listened_at'] and \\\n listen['recording_msid'] == la_listen['recording_msid'] and \\\n listen['user_name'] == la_listen['user_name']:\n\n self.output_duplicate_resolution(\"recording_msid\", 1, listen, la_listen)\n self.counts['msid_dup_count'] += 1\n# print(\"keep 1\")\n\n return 1\n\n # Check track_name based duplicates and pick best listen to keep\n if listen['listened_at'] == la_listen['listened_at'] and \\\n listen['user_name'] == la_listen['user_name'] and \\\n tm['track_name'].lower().replace(\" \", \"\") == la_tm['track_name'].lower().replace(\" \", \"\"):\n\n\n # If we have a dedup tag in the lookahead listen, it seems to have more\n # infomation, so remove the dedup_tag field and keep that version\n if 'dedup_tag' in la_tm[\"additional_info\"]:\n self.counts['dedup_tag_count'] += 1\n del lookahead[i]\n self.output_duplicate_resolution(\"dedup_tag 0\", 1, listen, la_listen)\n# print(\"keep 0\")\n return 0\n if 'dedup_tag' in tm[\"additional_info\"]:\n self.counts['dedup_tag_count'] += 1\n self.output_duplicate_resolution(\"dedup_tag 1\", 0, listen, la_listen)\n# print(\"keep 1\")\n return 1\n\n self.counts['track_name_dup_count'] += 1\n if key_count(listen) > key_count(la_listen):\n self.output_duplicate_resolution(\"track_name\", 0, listen, la_listen)\n del lookahead[i]\n# print(\"keep 0\")\n return 0\n\n self.output_duplicate_resolution(\"track_name\", 1, listen, la_listen)\n# print(\"keep 1\")\n return 1\n\n # Check to see if two listens have a listen timestamps less than 3 seconds apart\n if abs(listen['listened_at'] - la_listen['listened_at']) <= 3 and \\\n listen['user_name'] == la_listen['user_name'] and \\\n tm['track_name'].lower().replace(\" \", \"\") == la_tm['track_name'].lower().replace(\" \", \"\"):\n\n self.counts['fuzzy_dup_count'] += 1\n if key_count(listen) > key_count(la_listen):\n self.output_duplicate_resolution(\"fuzzy timestamp\", 0, listen, la_listen)\n del lookahead[i]\n# print(\"keep 0\")\n return 0\n\n self.output_duplicate_resolution(\"fuzzy timestamp\", 1, listen, la_listen)\n# print(\"keep 1\")\n return 1\n\n\n if la_listen['listened_at'] > 
listen['listened_at'] + 5:\n break\n\n# print(\"keep both\")\n\n return 2\n\n\n def import_dump_file(self, filename):\n\n self.html = open(\"output.html\", \"w\")\n self.html.write('')\n self.html.write('')\n self.html.write(\"\\n\")\n self.html.write('')\n self.html.write('\\n')\n\n threads = []\n for i in range(NUM_THREADS):\n with psycopg2.connect(config.DB_CONNECT) as conn:\n lw = ListenWriter(self, conn)\n lw.start()\n threads.append(lw)\n \n\n print(\"import \", filename)\n NUM_LOOKAHEAD_LINES = 5000 \n lookahead = []\n listens = []\n with gzip.open(filename, \"rb\") as f:\n while True:\n while len(lookahead) < NUM_LOOKAHEAD_LINES:\n line = f.readline()\n if not line:\n break\n \n ts, jsdata = line.decode('utf-8').split('-', 1)\n listen = self.cleanup_listen(ujson.loads(jsdata))\n \n # Check for invalid timestamps (last.fm got started in 2004 or so!)\n if listen['listened_at'] < 1136073600: # Jan 1 2006\n continue\n\n lookahead.append(listen)\n \n if not len(lookahead):\n break\n\n listen = lookahead.pop(0)\n ret = self.check_for_duplicates(listen, lookahead)\n if ret == 0:\n lookahead.insert(0, listen)\n elif ret == 2:\n ts = listen['listened_at']\n un = listen['user_name']\n try:\n created = listen['inserted_timestamp']\n del listen['inserted_timestamp']\n except KeyError:\n created = datetime.datetime.utcfromtimestamp(0)\n \n del listen['user_name']\n del listen['listened_at']\n del listen['recording_msid']\n listens.append([\n ts,\n listen['track_metadata']['track_name'],\n un,\n created,\n ujson.dumps(listen)])\n \n if len(listens) == BATCH_SIZE:\n self.add_batch(listens)\n listens = []\n\n\n assert(len(lookahead) == 0)\n if len(listens):\n self.add_batch(listens)\n\n print(\"Wait for batches to write\")\n while self.num_batches() > 0:\n sleep(1)\n \n print(\"Wait for threads to finish.\")\n for t in threads:\n t.exit()\n for t in threads:\n t.join()\n\n print(\"wrote %d listens.\" % self.total)\n self.html.write(\"
Counts:
%s
\\n\" % ujson.dumps(self.counts, indent=4, sort_keys=True))\n self.html.write(\"\")\n self.html.close()\n\n\n@click.command()\n@click.argument(\"listens_file\", nargs=1)\ndef import_listens(listens_file):\n with psycopg2.connect(config.DB_CONNECT) as conn:\n li = ListenImporter(conn)\n try:\n li.create_tables()\n except IOError as err:\n print(err)\n return\n except OSError as err:\n print(err)\n return\n except psycopg2.errors.UntranslatableCharacter:\n print(err)\n return\n\n try:\n files = li.import_dump_file(listens_file)\n except IOError as err:\n print(err)\n return\n except OSError as err:\n print(err)\n return\n except psycopg2.errors.UntranslatableCharacter:\n print(err)\n return\n\n try:\n li.create_indexes()\n except IOError as err:\n print(err)\n return\n except OSError as err:\n print(err)\n return\n except psycopg2.errors.UntranslatableCharacter:\n print(err)\n return\n\n\n\ndef usage(command):\n with click.Context(command) as ctx:\n click.echo(command.get_help(ctx))\n\n\nif __name__ == \"__main__\":\n import_listens()\n sys.exit(0)\n","repo_name":"mayhem/timescale-testing","sub_path":"import_dump.py","file_name":"import_dump.py","file_ext":"py","file_size_in_byte":16764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"18452105939","text":"n = int(input())\na = input()\nb = input()\nc = input()\n\nans = 0\nfor i in range(n):\n h = [a[i], b[i], c[i]]\n h.sort()\n if h[0] != h[1] and h[1] != h[2]:\n ans += 2\n elif h[0] != h[2]:\n ans += 1\n \nprint(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03140/s636354727.py","file_name":"s636354727.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18305632745","text":"from django.urls import path, include\nfrom django.conf.urls import url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.http import HttpResponse\nfrom rest_framework.decorators import api_view\n\nfrom items import views\n\nurlpatterns = [\n path('', views.index, name='item_view'),\n path('search', views.search, name='search_item'),\n url(r'^robots.txt', lambda x: HttpResponse(\"User-Agent: *\\n\"\n \"Disallow: /*?\\n\"\n \"Disallow: /admin/\\n\"\n \"Sitemap: https://albionprofits.tk/static/items/data/sitemap.xml\",\n content_type=\"text/plain\"), name=\"robots_file\"),\n url('api/albion/prices/cities', views.two_city_compare, name='two_cities'),\n path('info', views.info, name='info_page'),\n path('api', views.development, name='api_info'),\n path('changelog', views.changelog, name='changelog_page'),\n path('accounts/profile', views.profile, name='account_profile')\n]","repo_name":"Vzhukov642/albionprofits","sub_path":"items/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"17177336456","text":"import pprint\nf = open(\"input\", \"r\")\ndata = [x.rstrip(\"\\n\") for x in f if x.rstrip(\"\\n\") != \"\"]\ndata = [int(i) for i in data[0].split(\",\")]\n\nlanternfish = data\nfor i in range(256):\n newfish = []\n fishs = []\n for fish in lanternfish:\n fish -= 1\n if fish < 0:\n fishs.append(6)\n newfish.append(8)\n else:\n fishs.append(fish)\n fishs.extend(newfish)\n lanternfish = fishs\nprint(len(lanternfish))\n","repo_name":"feiming/adventofcode2021","sub_path":"day6/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"11748515566","text":"from netmiko import ConnectHandler\n\nwith open('devices.txt') as routers:\n for IP in routers:\n Router = {\n 'device_type': 'cisco_ios',\n 'ip': IP,\n 'username': 'roger',\n 'password': 'cisco'\n }\n\n net_connect = ConnectHandler(**Router)\n hostname = net_connect.send_command('show run | include host')\n hostname.split(\" \")\n hostname,device = hostname.split(\" \")\n print(\"Backing up \" + deivce)\n\n filename = \"$HOME/Git/py-neng/backups/\" + device + \".txt\"\n\n showrun = net_connect.send_command(\"show run\")\n showlvan = net_connect.send_command(\"show vlan\")\n showver = net_connect.send_command(\"show ver\")\n logfile = open(filename, \"a\")\n logfile.write(showrun)\n logfile.write(\"\\n\")\n logfile.write(showvlan)\n logfile.write(\"\\n\")\n logfile.write(showver)\n logfile.write(\"\\n\")\n\n net_connect.disconnect()\n","repo_name":"KidScripto/py-neng","sub_path":"Scripts/multi_config_bak.py","file_name":"multi_config_bak.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"24936100610","text":"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\n------------------------------------------------------------------------------\r\nThis script will delete all the files in the path.\r\nIntended to be run after the face/lips roi images have been extracted and saved\r\nin a seperate location.\r\nAfterwards this script can be run to delete the original unwanted images\r\n------------------------------------------------------------------------------\r\nCreated on Fri Oct 11 11:11:11 2019\r\n@author: Ahmad Hassan Mirza - ahmadhassan.mirza@gmail.com\r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\n#import cv2\r\n#import dlib\r\nimport os\r\nfrom os import walk\r\n\r\ndef initilizeDirList(dirListFile):\r\n # Open the file with read only permit\r\n f = open(dirListFile, \"r\")\r\n # use readlines to read all lines in the file\r\n # The variable \"lines\" is a list containing all lines in the file\r\n dirList = f.readlines()\r\n # close the file after reading the lines.\r\n f.close()\r\n return dirList\r\n \r\ndef loadImagesFromDir(path):\r\n fileList=[]\r\n #imgList=[]\r\n #images=[]\r\n for (dirpath, dirnames, filenames) in walk(path):\r\n #genDirectoryList.extend(dirnames)\r\n fileList.extend(filenames)\r\n return fileList\r\n \r\nglobal dirList\r\ninputFile = r'C:\\Users\\MIR6SI\\Desktop\\data_prep\\config_files\\image_paths.txt'\r\n\r\ndirList = initilizeDirList(inputFile)\r\n####### Code for OpenCV #####\r\npathIndex=0\r\nfor path in dirList: \r\n pathIndex += 1\r\n path = path.replace(\"\\\\\", \"\\\\\\\\\").strip()\r\n if os.path.exists(path):\r\n #print(path)\r\n totalDirs = len(dirList)\r\n fileList = loadImagesFromDir(path)\r\n percentComplete = (pathIndex/totalDirs) * 100\r\n count = 0\r\n for file in fileList:\r\n count +=1\r\n try:\r\n os.remove(os.path.join(path,file))\r\n except:\r\n pass\r\n print(\"Progress = \" + str(round(percentComplete,3))+ \"%\")","repo_name":"ahmadhmirza/Avatar-ROS","sub_path":"Remote_Module/DataPrepUtilities/Data_PreProcessing_Scripts/helper_scripts/DeleteFiles.py","file_name":"DeleteFiles.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"73964869735","text":"from unittest.mock import Mock, patch, PropertyMock\n\nfrom python_testing_learnings.test_patch_decorators import MyClass\n\nmock = Mock()\nmock.return_value = \"hello\"\nprint(mock())\n\n# mock return value on constructor\nmock = Mock(return_value=\"hello\")\nprint(mock())\n\n\n# side effect with callable\n\nclass Response:\n def __init__(self, code):\n self.code = code\n\n\ndef get_response(*args, **kwargs):\n return Response(200)\n\n\nmock = Mock()\nmock.side_effect = get_response\nresponse = mock()\nprint(response.code)\n\n# side effect with callable lambda\n\nsum = lambda v1, v2: v1 + v2\nmock = Mock(side_effect=sum)\nprint(mock(1, 1)) # prints 2\nprint(mock(-1, 1)) # prints 0\n\n\n# Propertymocks demo for @property types\n\nclass Foo:\n @property\n def foo(self):\n return 'something'\n\n\nwith patch('__main__.Foo.foo', new_callable=PropertyMock) as mock_foo:\n mock_foo.return_value = 'mockity-mock'\n this_foo = Foo()\n print(this_foo.foo)\n\n# note the below does not work\nwith patch('__main__.Foo', new_callable=PropertyMock) as mock_foo:\n mock_foo.foo.return_value = 'mockity-mock'\n this_foo = Foo()\n print(this_foo.foo)\n\n\n\n# check if you are mocking the right class\nclass SomeClass:\n def __init__(self):\n pass\n\n@patch('__main__.SomeClass')\ndef function(normal_argument, mock_class):\n print(mock_class is SomeClass)\n\nfunction(None)\n\n\n\n#mocking method when only patching the class\nclass Class:\n def method(self):\n pass\n\n\nwith patch('__main__.Class') as MockClass:\n instance = MockClass.return_value\n instance.method.return_value = 'foo'\n assert Class() is instance\n assert Class().method() == 'foo'\n\n","repo_name":"libincheeran/pythonProject_learnings","sub_path":"python_testing_learnings/test_mocks.py","file_name":"test_mocks.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18498263649","text":"H,W=map(int, input().split())\nA=[]\nfor i in range(H):\n a=list(map(int, input().split()))\n A.append(a)\n\n\nans=[]\ncount=0\nfor h in range(H):\n for w in range(W):\n if h%2==1:\n w=W-1-w\n if h==0 and w==0:\n px,py=0,0\n pre=0\n if A[h][w]%2==0:\n continue\n else:\n A[h][w]-=1\n pre=1\n else:\n if pre>0:\n A[h][w]+=pre\n pre=0\n ans.append([px+1,py+1,h+1,w+1])\n count+=1\n if A[h][w]%2==1:\n A[h][w]-=1\n pre=1\n px,py=h,w\n\n#print(ans)\nprint(count)\nfor a in ans:\n print(*a)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03263/s837874791.py","file_name":"s837874791.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18161957159","text":"# input here\n_INPUT = \"\"\"\\\n5 3\n1 2\n3 4\n5 1\n\"\"\"\n\"\"\"\nK = int(input())\nH, W, K = map(int, input().split())\na = list(map(int, input().split()))\nxy = [list(map(int, input().split())) for i in range(N)]\np = tuple(map(int,input().split()))\n\"\"\"\nclass UnionFind():\n def __init__(self, n):\n self.n = n\n self.parents = [-1] * n\n\n def find(self, x):\n if self.parents[x] < 0:\n return x\n else:\n self.parents[x] = self.find(self.parents[x])\n return self.parents[x]\n\n def union(self, x, y):\n x = self.find(x)\n y = self.find(y)\n\n if x == y:\n return\n\n if self.parents[x] > self.parents[y]:\n x, y = y, x\n\n self.parents[x] += self.parents[y]\n self.parents[y] = x\n\n def size(self, x):\n return -self.parents[self.find(x)]\n\n\n\ndef main():\n n, m = map(int, input().split())\n xy = [list(map(int, input().split())) for i in range(m)]\n\n uf = UnionFind(n)\n\n for i in range(len(xy)):\n uf.union(xy[i][0]-1,xy[i][1]-1)\n \n print(-(min(uf.parents)))\n\n \n \n \n\n \nif __name__ == '__main__':\n import io\n import sys\n import math\n import itertools\n from collections import deque\n\n # sys.stdin = io.StringIO(_INPUT)\n main()","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02573/s114466338.py","file_name":"s114466338.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"6706696911","text":"import logging\n\nlogging.basicConfig(filename=\"example.log\", level=logging.INFO)\n\ndef logger(func):\n def log_func(*args, **kwargs):\n logging.info(\"Running function: {} with args: {} and kwargs: {}\".format(func.__name__, args, kwargs))\n print(func(*args, **kwargs))\n return log_func\n\ndef add(*args):\n sum=0\n for i in args:\n sum+=i\n return sum\n\ndef sub(a,b):\n return a-b\n\ndef person_details(**kwargs):\n #print(kwargs)\n #print(kwargs.keys())\n try:\n print(\"I'm {name}. I'm {age} years old\".format(**kwargs))\n except KeyError as e:\n print(\"Missing key {}\".format(e))\n\nfadd = logger(add)\nfadd(1,2,3,4)\n\nfsub = logger(sub)\nfsub(5,2)\n\nfdetails = logger(person_details)\nfdetails(name=\"Deva\", age=30)\n\n\n\n\n","repo_name":"devashish89/PluralsightPythonIntermediate","sub_path":"ClosureEx2.py","file_name":"ClosureEx2.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"42750249017","text":"from __future__ import unicode_literals\n\nimport codecs\nimport os\nfrom optparse import make_option\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.core.management.utils import find_command, popen_wrapper\n\n\nclass Command(BaseCommand):\n \"\"\"\n Command taken from the standard compilemessages in Django with\n some extras added for handling multiple tenants.\n\n https://github.com/django/django/blob/1.6.8/django/core/management/commands/compilemessages.py\n \"\"\"\n help = 'Compiles .po files to .mo files for use with builtin gettext support.'\n\n requires_model_validation = False\n leave_locale_alone = True\n\n def add_arguments(self, parser):\n parser.add_argument('--locale', '-l', dest='locale', action='append',\n help='locale(s) to process (e.g. de_AT). Default is to process all. Can be used multiple times.'),\n parser.add_argument('--tenant', dest='tenant', default=None,\n help=\"Compile .po files for tenant.\"),\n\n def handle(self, **options):\n locale = options.get('locale')\n tenant = options.get('tenant')\n compile_messages(self.stdout, locale=locale, tenant=tenant)\n\n\ndef compile_messages(stdout, locale=None, tenant=None):\n \"\"\"\n Standard compile_messages updated to handle compiling po files for\n multiple tenants if MULTI_TENANT_DIR settings defined.\n \"\"\"\n program = 'msgfmt'\n if find_command(program) is None:\n raise CommandError(\"Can't find %s. Make sure you have GNU gettext tools 0.15 or newer installed.\" % program)\n\n basedirs = [os.path.join('conf', 'locale'), 'locale']\n if os.environ.get('DJANGO_SETTINGS_MODULE'):\n from django.conf import settings\n basedirs.extend(settings.LOCALE_PATHS)\n\n # Check for tenant translations\n tenant_dir = getattr(settings, 'MULTI_TENANT_DIR', None)\n if tenant and os.path.isdir(os.path.join(tenant_dir, tenant)):\n basedirs += [os.path.join(tenant_dir, tenant, 'locale')]\n else:\n # Compile all tenants\n basedirs += [os.path.join(tenant_dir, d) for d in os.listdir(tenant_dir) if os.path.isdir(os.path.join(tenant_dir, d))]\n\n # Gather existing directories.\n basedirs = set(map(os.path.abspath, filter(os.path.isdir, basedirs)))\n\n if not basedirs:\n raise CommandError(\"This script should be run from the Django Git checkout or your project or app tree, or with the settings module specified.\")\n\n for basedir in basedirs:\n _compile(stdout, locale, basedir, program)\n\n\ndef _compile(stdout, locale, basedir, program):\n if locale:\n dirs = [os.path.join(basedir, l, 'LC_MESSAGES') for l in locale]\n else:\n dirs = [basedir]\n for ldir in dirs:\n for dirpath, dirnames, filenames in os.walk(ldir):\n for f in filenames:\n if not f.endswith('.po'):\n continue\n stdout.write('processing file %s in %s\\n' % (f, dirpath))\n fn = os.path.join(dirpath, f)\n pf = os.path.splitext(fn)[0]\n args = [program, '--check-format', '-o', pf + '.mo', pf + '.po']\n output, errors, status = popen_wrapper(args)\n if status:\n if errors:\n msg = \"Execution of %s failed: %s\" % (program, errors)\n else:\n msg = \"Execution of %s failed\" % program\n raise CommandError(msg)\n","repo_name":"onepercentclub/django-tenant-extras","sub_path":"tenant_extras/management/commands/compilepo.py","file_name":"compilepo.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"90"}
+{"seq_id":"7687110607","text":"import numpy as np\nimport yaml\nimport argparse\nfrom easydict import EasyDict\n\n# from pipeline import Pipeline\nfrom adv_package import Logger\nfrom adv_package.config import AttackManager, DefenseManager\n\nimport os\n\n\ndef load_config(config_path):\n with open(config_path, 'r', encoding='utf-8') as file:\n config = EasyDict(yaml.safe_load(file))\n \n return config\n\n\nmanagerDict = {\n 'ATTACK': AttackManager,\n 'DEFENSE': DefenseManager\n}\n\n\ndef main():\n parser = argparse.ArgumentParser(description='PyTorch Adversarial Attack package.')\n \n parser.add_argument('--config', required=True, type=str, help='Path to .yaml configuration file.')\n parser.add_argument('--gpus', default=\"0,1\", type=str, help='GPU devices to use (0-7) (default: 0,1)')\n parser.add_argument('--debug', action='store_true')\n\n \n args = parser.parse_args()\n \n config = load_config(args.config)\n \n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\n\n pipeline = managerDict[config.TYPE](config)\n\n loss, acc1, acc5 = pipeline.run_pipeline()\n\n if not args.debug:\n # Save history results\n if not os.path.exists(config.PATHS.RESULTS):\n os.makedirs(config.PATHS.RESULTS)\n\n np.save(config.PATHS.RESULTS + 'loss.npy', loss)\n np.save(config.PATHS.RESULTS + 'accuracy.npy', acc1)\n\n # Log best results\n logManager = Logger(config.LOGGER, config.TYPE, args.config)\n logManager.update((loss, acc1, acc5))\n\nif __name__ == \"__main__\":\n main()","repo_name":"AlbertMillan/adversarial--package","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"5261074925","text":"from turtle import *\nfrom time import sleep\n\n\ndef draw_triangle(side_length):\n # iterate 3 times, each time moving foward side_length \n # before turning right 120 degrees\n for _ in range(3):\n forward(side_length)\n right(120)\n\n\ndef draw_hexagon(side_length):\n # call the draw_triangle function 6 times\n # but the turtle must turn an extra 60 degrees at the end of each iteration \n # in order to draw seperate triangles\n for _ in range(6):\n draw_triangle(side_length)\n right(60)\n\n\ndef check_equality(string, number):\n return int(string) == number\n\n\ndef main():\n # read in the user input for the side length\n side_length = int(input(\"How long should each shape be: \"))\n draw_hexagon(side_length)\n sleep(5)\n\n # collect user input for the check_equality function\n user_string = input(\"Enter a number: \")\n user_number = int(input(\"Enter another number: \"))\n check_equality(user_string, user_number)\n\nmain()\n","repo_name":"marvelman3284/computerScience24","sub_path":"lessons/styling/good.py","file_name":"good.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"35615642791","text":"import oiffile\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport copy\nimport math\nimport common_functions as comfun\nimport os\n\nmetadata_keys = [{\"name\":\"Acquisition Parameters Common\",\"keys\":[\"Acquisition Device\",\"ImageCaputreDate\",\"LaserTransmissivity01\",\"LaserWavelength01\",\"ScanMode\",\"ZoomValue\"]},\n {\"name\":\"Axis Parameter Common\",\"keys\":[\"AxisCount\",\"AxisOrder\"]},\n {\"name\":\"Axis 0 Parameters Common\",\"keys\":[\"AbsolutePosition\",\"AxisCode\",\"AxisName\",\"CalibrateValueA\",\"CalibrateValueB\",\"ClipPosition\",\"EndPosition\",\"GUI MaxSize\",\"MaxSize\",\"PixUnit\",\"StartPosition\",\"UnitName\"]},\n {\"name\":\"Axis 1 Parameters Common\",\"keys\":[\"AbsolutePosition\",\"AxisCode\",\"AxisName\",\"CalibrateValueA\",\"CalibrateValueB\",\"ClipPosition\",\"EndPosition\",\"GUI MaxSize\",\"MaxSize\",\"PixUnit\",\"StartPosition\",\"UnitName\"]},\n {\"name\":\"Axis 2 Parameters Common\",\"keys\":[\"AbsolutePosition\",\"AxisCode\",\"AxisName\",\"CalibrateValueA\",\"CalibrateValueB\",\"EndPosition\",\"GUI MaxSize\",\"MaxSize\",\"PixUnit\",\"StartPosition\",\"UnitName\"]},\n {\"name\":\"Axis 3 Parameters Common\",\"keys\":[\"AbsolutePosition\",\"AxisCode\",\"AxisName\",\"AxisZControlUnit\",\"PSU\",\"CalibrateValueA\",\"CalibrateValueB\",\"EndPosition\",\"GUI MaxSize\",\"Interval\",\"MaxSize\",\"Piezo Z Slice\",\"Piezo Z Start Position\",\"Piezo Z Step\",\"PixUnit\",\"Start Absolute Position\",\"StartPosition\",\"Stop Absolute Position\",\"UnitName\"]},\n {\"name\":\"Axis 4 Parameters Common\",\"keys\":[\"AbsolutePosition\",\"AxisCode\",\"AxisName\",\"CalibrateValueA\",\"CalibrateValueB\",\"EndPosition\",\"GUI MaxSize\",\"Interval\",\"MaxSize\",\"PixUnit\",\"StartPosition\",\"UnitName\"]},\n {\"name\":\"Bleach GUI Parameters Common\",\"keys\":[\n \"Comb 0 Activation Time Per Point\",\n \"ImageHeight\",\n \"ImageWidth\"]},\n {\"name\":\"Bleach GUI Parameters Common\",\"keys\":[\n \"Number Of Point\",\n \"Point 0 Number Of Pixel\",\n \"Point 0 Position X\",\n \"Point 0 Position Y\",\n \"Point 1 Number Of Pixel\",\n \"Point 1 Position X\",\n \"Point 1 Position Y\",\n \"Point 2 Number Of Pixel\",\n \"Point 2 Position X\",\n \"Point 2 Position Y\",\n \"Point 3 Number Of Pixel\",\n \"Point 3 Position X\",\n \"Point 3 Position Y\",\n \"Point 4 Number Of Pixel\",\n \"Point 4 Position X\",\n \"Point 4 Position Y\",\n \"Point 5 Number Of Pixel\",\n \"Point 5 Position X\",\n \"Point 5 Position Y\",\n ]},\n {\"name\":\"Bleach Laser 6 parameters\",\"keys\":[\"LaserTransmissivity\",\"LaserWavelength\"]},\n {\"name\":\"GUI Channel 5 Parameters\",\"keys\":[\"AnalogPMTGain\",\"AnalogPMTOffset\",\"AnalogPMTVoltage\",\"CH Name\",\"EmissionWavelength\",\"ExcitationWavelength\"]},\n {\"name\":\"GUI Channel 6 Parameters\",\"keys\":[\"AnalogPMTGain\",\"AnalogPMTOffset\",\"AnalogPMTVoltage\",\"CH Name\",\"EmissionWavelength\",\"ExcitationWavelength\"]},\n {\"name\":\"Laser 5 Parameters\",\"keys\":[\"LaserTransmissivity\",\"LaserWavelength\"]},\n {\"name\":\"Reference Image Parameter\",\"keys\":[\"HeightConvertValue\",\"HeightUnit\",\"ImageDepth\",\"ImageHeight\",\"ImageWidth\",\"PixConvertValue\",\"PixUnit\",\"ValidBitCounts\",\"WidthConvertValue\",\"WidthUnit\"]},\n {\"name\":\"Version Info\",\"keys\":[\"FileVersion\",\"SystemName\",\"SystemVersion\"]}]\n\nclass OlympusImageClass:\n def __init__(self,fname):\n self.fname = fname\n self.read_metadata_main(metadata_keys) # read main metadata from the file\n # 
show_metadata(self.metadata)\n self.read_data_main() # read main data from the file\n # self.read_attachments() # read attachment data and metadata\n \n def read_metadata_main(self,metadata_keys):\n self.metadata={}\n with oiffile.OifFile(self.fname) as oib:\n # print(oib.mainfile)\n # print(dir(oib))\n if(not oib.is_oib):\n print(\"Program exit: Not an Olympus oib file!\")\n for elm in metadata_keys:\n metaname = elm[\"name\"]\n if (metaname in oib.mainfile.keys()):\n # print(oib.mainfile[metaname].keys())\n for key in elm[\"keys\"]:\n if key in oib.mainfile[metaname].keys():\n value = comfun.string_convert(oib.mainfile[metaname][key])\n # print('{:<70}{:<20}{:<15}'.format(metaname+\" \"+key,oib.mainfile[metaname][key],str(type(value))))\n self.metadata[key] = value\n \n else:\n print(\"**** Warning ***** key: \" +metaname + \" not found!\")\n\n def show_metadata(self):\n # print metadata keys and values\n metadata = self.metadata\n for key in metadata:\n print(key,metadata[key])\n \n def read_data_main(self):\n self.img = oiffile.imread(self.fname)\n print(self.img.shape)\n scanmode = self.metadata['ScanMode'] if 'ScanMode' in self.metadata.keys() else \"\"\n bitsize = int(self.metadata['ValidBitCounts']) if 'ValidBitCounts' in self.metadata.keys() else 0\n sizex = int(self.metadata['ImageWidth']) if 'ImageWidth' in self.metadata.keys() else 0\n sizey = int(self.metadata['ImageHeight']) if 'ImageHeight' in self.metadata.keys() else 0\n sizec = self.img.shape[0]\n if(scanmode == \"XY\"):\n # single frame scan\n self.img_type = \"single framescan\"\n print('Found a {}'.format(self.img_type))\n # reduce pixel data format to 8 bits\n # if (bitsize>8):\n # self.img = np.uint8((self.img/(pow(2,bitsize)-1))*(pow(2,8)-1))\n # reorder dimensions: XYZC\n self.img = np.moveaxis(self.img,0,-1)\n # add missing channels for RGB image\n self.img = np.concatenate([self.img,np.zeros((int(sizex),int(sizey),int(3-sizec)),dtype=np.uint8)],axis=-1)\n print('img.shape = ',self.img.shape)\n if(scanmode == \"XYZ\"):\n print(self.img.shape)\n # zstack\n self.img_type = \"zstack\"\n print('Found a {}'.format(self.img_type))\n \n \n def display_image_with_markers(self,channels=[],title='',savepath=''):\n # display an olympus image\n # savepath = path to save the file with name as 'title'.png\n nmarkers = int(self.metadata['Number Of Point']) if 'Number Of Point' in self.metadata.keys() else 0\n markers = [{'x':None,'y':None} for marker in range(nmarkers)]\n for i in range(nmarkers):\n xkey = \"\".join((\"Point \",str(i),' Position X'))\n ykey = \"\".join((\"Point \",str(i),' Position Y'))\n markers[i][\"x\"] = int(self.metadata[xkey]) if xkey in self.metadata.keys() else 0\n markers[i][\"y\"] = int(self.metadata[ykey]) if ykey in self.metadata.keys() else 0\n print(markers)\n fh = plt.figure()\n ah1 = plt.subplot(111)\n ah1.imshow(self.img[0,0,:,:],cmap=\"hot\")\n # ah1.imshow(self.img)\n # display markers\n for marker in markers:\n print(marker)\n ah1.plot(marker['x'],marker['y'],'o',markersize=5,color='blue')\n fh.tight_layout()\n # ah1.imshow(img[:,:,channels],interpolation='nearest',origin='lower',aspect='equal')\n # adjust coordinates to reflect image orientation\n # ah1.set_xlim([0,len(img)])\n # ah1.set_ylim([0,len(img)])\n ah1.set_title(title)\n if (os.path.isdir(savepath) and len(title)>0):\n print('Path to save found!')\n print('Saving as:', savepath+'/'+title+'.png')\n plt.savefig(savepath+'/'+title+'.png')\n else:\n print('Path to save or title not found! 
Image not saved!')\n plt.show()\n return(fh,ah1)\n\n # def diplay_zprojection(self):\n # # display the z-projected image of a zstack\n \nclass OlympusLineClass(OlympusImageClass):\n pass\n\nclass OlympusFrameClass(OlympusImageClass):\n pass\n","repo_name":"anupgp/image_analysis","sub_path":"olympus_image_file.py","file_name":"olympus_image_file.py","file_ext":"py","file_size_in_byte":8391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"26314120362","text":"# 先把API com元件初始化\nimport os\n\n# 第一種讓群益API元件可導入讓Python code使用的方法 win32com\n#import win32com.client \n#from ctypes import WinDLL,byref\n#from ctypes.wintypes import MSG\n#SKCenterLib = win32com.client.Dispatch(\"{AC30BAB5-194A-4515-A8D3-6260749F8577}\")\n#SKOrderLib = win32com.client.Dispatch(\"{54FE0E28-89B6-43A7-9F07-BE988BB40299}\")\n#SKReply = win32com.client.Dispatch(\"{72D98963-03E9-42AB-B997-BB2E5CCE78DD}\")\n#SKQuote = win32com.client.Dispatch(\"{E7BCB8BB-E1F0-4F6F-A944-2679195E5807}\")\n#SKOSQuote = win32com.client.Dispatch(\"{E3CB8A7C-896F-4828-85FC-8975E56BA2C4}\")\n#SKOOQuote = win32com.client.Dispatch(\"{853EC706-F437-46E2-80E0-896901A5B490}\")\n\n# 第二種讓群益API元件可導入Python code內用的物件宣告 comtypes\nimport comtypes.client\ncomtypes.client.GetModule(os.path.split(os.path.realpath(__file__))[0] + r'\\SKCOM.dll')\nimport comtypes.gen.SKCOMLib as sk\nskC = comtypes.client.CreateObject(sk.SKCenterLib,interface=sk.ISKCenterLib)\nskO = comtypes.client.CreateObject(sk.SKOrderLib,interface=sk.ISKOrderLib)\n#skR = comtypes.client.CreateObject(sk.SKReplyLib,interface=sk.ISKReplyLib)\n#skQ = comtypes.client.CreateObject(sk.SKQuoteLib,interface=sk.ISKQuoteLib)\n#skOSQ = comtypes.client.CreateObject(sk.SKOSQuoteLib,interface=sk.ISKOSQuoteLib)\n#skOOQ = comtypes.client.CreateObject(sk.SKOOQuoteLib,interface=sk.ISKOOQuoteLib)\n\n# 畫視窗用物件\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom tkinter import messagebox\n\n# 數學計算用物件\nimport math\n\n# 載入其他物件\nimport StockOrder\nimport FutureOrder\nimport OptionOrder\nimport SeaFutureOrder\nimport SeaOptionOrder\nfrom StopLossOrder import StopLossOrderGui\nimport ForeignStockOrder\nimport MessageControl\nimport Config\n#----------------------------------------------------------------------------------------------------------------------------------------------------\n\n# 上半部登入框\nclass __FrameLogin(Frame):\n def __init__(self, master = None):\n Frame.__init__(self, master)\n self.__oMsg = MessageControl.MessageControl()\n\n self.group = LabelFrame(master, text=\"Center\", style=\"Pink.TLabelframe\")\n self.group.grid(column = 0, row = 0, padx = 10, pady = 10)\n\n self.__CreateWidget()\n\n def __CreateWidget(self):\n frame = Frame(self.group, style=\"Pink.TFrame\")\n frame.grid(column = 0, row = 0, padx = 10, pady = 5, sticky = 'ew')\n frame.grid_columnconfigure(0, minsize = 620)\n frame.grid_rowconfigure(1, minsize = 40)\n\n # 帳號\n Label(frame, style=\"Pink.TLabel\", text = \"帳號:\").grid(column=1, row=0)\n # 輸入框\n self.textID = Entry(frame, width = 20)\n self.textID.grid(column = 2, row = 0)\n #self.textID.insert(0, \"F128521428\")\n\n # 密碼\n Label(frame, style=\"Pink.TLabel\", text = \"密碼:\").grid(column = 1, row = 1)\n # 輸入框\n self.textPassword = Entry(frame, width = 20)\n self.textPassword['show'] = '*'\n self.textPassword.grid(column = 2, row = 1)\n #self.textPassword.insert(0, \"Abc123\")\n\n # 伺服器\n self.__chbVar = IntVar()\n Checkbutton(frame, style=\"Pink.TCheckbutton\", text='模擬平台', variable = self.__chbVar, onvalue = 1, offvalue = 0).grid(column = 3, row = 0)\n \n # 按鈕\n Button(frame, style = \"Pink.TButton\", text = \"登入\", command = self.__buttonlogin_Click).grid(column = 3, row = 1, padx=10)\n\n # ID\n lbID = Label(frame, style=\"Pink.TLabel\", text = \"<>\")\n lbID.grid(column = 4, row = 1)\n\n # 訊息欄\n self.listInformation = Listbox(frame, height = 5)\n self.listInformation.grid(column = 0, columnspan = 5, row = 2, sticky = 'ew')\n\n sb = Scrollbar(frame)\n self.listInformation.config(yscrollcommand = 
sb.set)\n sb.config(command = self.listInformation.yview)\n sb.grid(row = 2, column = 5, sticky = 'ns')\n\n sb = Scrollbar(frame, orient = 'horizontal')\n self.listInformation.config(xscrollcommand = sb.set)\n sb.config(command = self.listInformation.xview)\n sb.grid(row = 3, column = 0, columnspan = 5, sticky = 'ew')\n\n # global variable\n global GlobalListInformation, Global_ID\n GlobalListInformation = self.listInformation\n Global_ID = lbID\n\n def __buttonlogin_Click(self):\n try:\n self.__obj = dict(\n # 證券\n stock = StockOrder.StockOrder(information = self.listInformation),\n # 期貨\n future = FutureOrder.FutureOrder(information = self.listInformation),\n # 選擇權\n option = OptionOrder.OptionOrder(information = self.listInformation),\n # 海期\n sea_future = SeaFutureOrder.SeaFutureOrder(information = self.listInformation),\n # 海選\n sea_option = SeaOptionOrder.SeaOptionOrder(information = self.listInformation),\n # 停損\n stop_loss = StopLossOrderGui.StopLossOrderGui(information = self.listInformation),\n # 複委託\n foreign_stock = ForeignStockOrder.ForeignStockOrder(information = self.listInformation),\n )\n\n skC.SKCenterLib_SetLogPath(os.path.split(os.path.realpath(__file__))[0] + \"\\\\CapitalLog_Order\")\n if self.__chbVar.get() == 1:\n skC.SKCenterLib_ResetServer(\"morder1.capital.com.tw\")\n\n m_nCode = skC.SKCenterLib_login(self.textID.get().replace(' ',''),self.textPassword.get().replace(' ',''))\n if(m_nCode == 0):\n Global_ID[\"text\"] = self.textID.get().replace(' ','')\n for _ in 'stock', 'future', 'option', 'sea_future', 'sea_option', 'stop_loss', 'foreign_stock':\n self.__obj[_].SetID( Global_ID[\"text\"] )\n\n self.__oMsg.WriteMessage(\"【 登入成功 】\", self.listInformation)\n else:\n self.__oMsg.SendReturnMessage(\"Login\", m_nCode, \"Login\", self.listInformation)\n\n except Exception as e:\n messagebox.showerror(\"error!\",e)\n\n# 下半部-下單\nclass __FrameOrder(Frame):\n def __init__(self, master = None):\n Frame.__init__(self, master)\n self.__obj = dict(\n msg = MessageControl.MessageControl(),\n )\n\n self.__CreateWidget()\n\n def __CreateWidget(self):\n frame = Frame(self, style=\"Pink.TFrame\")\n frame.grid(column = 0, row = 0)\n\n self.__AddTab(frame)\n self.__FOrder(frame)\n self.__FAccount(frame)\n\n def __AddTab(self, master):\n tab = Notebook(master, style=\"Pink.TNotebook\")\n tab.grid(column = 0, row = 2, sticky = 'ew', padx = 10, pady = 10)\n # 證券\n self.__obj['stock'] = StockOrder.StockOrder(information = GlobalListInformation)\n tab.add(self.__obj['stock'], text=\"證券 \")\n\n # 期貨\n self.__obj['future'] = FutureOrder.FutureOrder(information = GlobalListInformation)\n tab.add(self.__obj['future'], text=\"期貨 \")\n\n # 選擇權\n self.__obj['option'] = OptionOrder.OptionOrder(information = GlobalListInformation)\n tab.add(self.__obj['option'], text=\"選擇權 \")\n\n # 海期\n self.__obj['sea_future'] = SeaFutureOrder.SeaFutureOrder(information = GlobalListInformation)\n tab.add(self.__obj['sea_future'], text=\"海期 \")\n\n # 海選\n self.__obj['sea_option'] = SeaOptionOrder.SeaOptionOrder(information = GlobalListInformation)\n tab.add(self.__obj['sea_option'], text=\"海選 \")\n\n # 停損\n self.__obj['stop_loss'] = StopLossOrderGui.StopLossOrderGui(information = GlobalListInformation)\n tab.add(self.__obj['stop_loss'], text=\"智動單-停損 \")\n\n # 複委託\n self.__obj['foreign_stock'] = ForeignStockOrder.ForeignStockOrder(information = GlobalListInformation)\n tab.add(self.__obj['foreign_stock'], text=\"複委託 \")\n\n def __FOrder(self, master):\n frame = Frame(master, style=\"Pink.TFrame\")\n 
frame.grid(column = 0, row = 0, sticky = 'ew', padx = 10, pady = 10)\n\n for i in range(2, 6, 3):\n frame.grid_columnconfigure(i, minsize = 25)\n\n # 初始化\n lbInitialize = Label(frame, style=\"Pink.TLabel\", text = \"1.下單物件初始\")\n lbInitialize.grid(column = 0, row = 1)\n # 按鈕\n btnInitialize = Button(frame, style = \"Pink.TButton\", text = \"下單初始設定\")\n btnInitialize[\"command\"] = self.__btnInitialize_Click\n btnInitialize.grid(column = 1, row = 1, padx = 5)\n\n # 讀取憑證\n lbReadCert = Label(frame, style=\"Pink.TLabel\", text = \"2.讀取憑證\")\n lbReadCert.grid(column = 3, row = 1)\n # 按鈕\n btnReadCert = Button(frame, style = \"Pink.TButton\", text = \"讀取憑證\")\n btnReadCert[\"command\"] = self.__btnReadCert_Click\n btnReadCert.grid(column = 4, row = 1, padx = 5)\n\n # 讀取憑證\n lbGetAccount = Label(frame, style=\"Pink.TLabel\", text = \"3.取得下單帳號\")\n lbGetAccount.grid(column = 6, row = 1)\n # 按鈕\n btnGetAccount = Button(frame, style = \"Pink.TButton\", text = \"載入帳號\")\n btnGetAccount[\"command\"] = self.__btnGetAccount_Click\n btnGetAccount.grid(column = 7, row = 1, padx = 5)\n\n # 按鈕\n group = LabelFrame(frame, text=\"(請先連海期主機) 4.海期選下單設定\", style=\"Pink.TLabelframe\")\n group.grid(column = 8, row = 1, padx = 30)\n\n frame = Frame(group, style=\"Pink.TFrame\")\n frame.grid(column = 0, row = 0, sticky = 'ew', padx = 10, pady = 10)\n\n Button(frame, style = \"Pink.TButton\", text = \"下載海期商品檔\", command = self.__btnLoadOSCommodity_Click).grid(column = 0, row = 0, padx = 5)\n\n Button(frame, style = \"Pink.TButton\", text = \"下載海選商品檔\", command = self.__btnLoadOOCommodity_Click).grid(column = 1, row = 0, padx = 5)\n\n # 選帳號\n def __FAccount(self, master):\n frame = Frame(master, style=\"Pink.TFrame\")\n frame.grid(column = 0, row = 1, sticky = 'ew', padx = 10, pady = 10)\n\n # 證券\n lbStockAccount = Label(frame, style=\"Pink.TLabel\", text = \"證券帳號\")\n lbStockAccount.grid(column = 0, row = 2, pady = 5)\n # 輸入框\n self.boxStockAccount = Combobox(frame, state='readonly')\n self.boxStockAccount.grid(column = 0, row = 3, padx = 10)\n self.boxStockAccount.bind(\"<>\", lambda _ : self.__obj['stock'].SetAccount(self.boxStockAccount.get()))\n\n def __FutureCallBack(even):\n self.__obj['future'].SetAccount(self.boxFutureAccount.get())\n self.__obj['option'].SetAccount(self.boxFutureAccount.get())\n self.__obj['stop_loss'].SetAccount(self.boxFutureAccount.get())\n # 期貨\n lbFutureAccount = Label(frame, style=\"Pink.TLabel\", text = \"期貨帳號\")\n lbFutureAccount.grid(column = 1, row = 2, pady = 5)\n # 輸入框\n self.boxFutureAccount = Combobox(frame, state='readonly')\n self.boxFutureAccount.grid(column = 1, row = 3, padx = 10)\n self.boxFutureAccount.bind(\"<>\", __FutureCallBack )\n\n def __SeaFutureCallBack(even):\n self.__obj['sea_future'].SetAccount(self.boxSeaFutureAccount.get())\n self.__obj['sea_option'].SetAccount(self.boxSeaFutureAccount.get())\n # 海期\n lbSeaFutureAccount = Label(frame, style=\"Pink.TLabel\", text = \"海期帳號\")\n lbSeaFutureAccount.grid(column = 2, row = 2, pady = 5)\n # 輸入框\n self.boxSeaFutureAccount = Combobox(frame, state='readonly')\n self.boxSeaFutureAccount.grid(column = 2, row = 3, padx = 10)\n self.boxSeaFutureAccount.bind(\"<>\", __SeaFutureCallBack )\n\n # 複委託\n lbForeignStockAccount = Label(frame, style=\"Pink.TLabel\", text = \"複委託帳號\")\n lbForeignStockAccount.grid(column = 3, row = 2, pady = 5)\n # 輸入框\n self.boxForeignStockAccount = Combobox(frame, state='readonly')\n self.boxForeignStockAccount.grid(column = 3, row = 3, padx = 10)\n self.boxForeignStockAccount.bind(\"<>\", lambda 
_ : self.__obj['foreign_stock'].SetAccount(self.boxForeignStockAccount.get()))\n\n # global variable\n global GlobalboxStockAccount, GlobalboxFutureAccount, GlobalboxSeaFutureAccount, GlobalboxForeignStockAccount\n GlobalboxStockAccount = self.boxStockAccount\n GlobalboxFutureAccount = self.boxFutureAccount\n GlobalboxSeaFutureAccount = self.boxSeaFutureAccount\n GlobalboxForeignStockAccount = self.boxForeignStockAccount\n\n # 下單function\n # 1.下單物件初始\n def __btnInitialize_Click(self):\n try:\n m_nCode = skO.SKOrderLib_Initialize()\n self.__obj['msg'].SendReturnMessage(\"Order\", m_nCode, \"SKOrderLib_Initialize\", GlobalListInformation)\n except Exception as e:\n messagebox.showerror(\"error!\", e) \n\n # 2.讀取憑證 \n def __btnReadCert_Click(self):\n try:\n m_nCode = skO.ReadCertByID(Global_ID[\"text\"])\n self.__obj['msg'].SendReturnMessage(\"Order\", m_nCode, \"ReadCertByID\", GlobalListInformation)\n except Exception as e:\n messagebox.showerror(\"error!\", e)\n\n # 3.取得下單帳號\n def __btnGetAccount_Click(self):\n try:\n m_nCode = skO.GetUserAccount()\n self.__obj['msg'].SendReturnMessage(\"Order\", m_nCode, \"GetUserAccount\", GlobalListInformation)\n except Exception as e:\n messagebox.showerror(\"error!\", e)\n\n # 4.下載海期商品檔\n def __btnLoadOSCommodity_Click(self):\n try:\n m_nCode = skO.SKOrderLib_LoadOSCommodity()\n self.__obj['msg'].SendReturnMessage(\"Order\", m_nCode, \"SKOrderLib_LoadOSCommodity\", GlobalListInformation)\n except Exception as e:\n messagebox.showerror(\"error!\", e) \n\n # 4.下載海選商品檔\n def __btnLoadOOCommodity_Click(self):\n try:\n m_nCode = skO.SKOrderLib_LoadOOCommodity()\n self.__obj['msg'].SendReturnMessage(\"Order\", m_nCode, \"SKOrderLib_LoadOOCommodity\", GlobalListInformation)\n except Exception as e:\n messagebox.showerror(\"error!\", e) \n\nclass SKOrderLibEvent:\n __account_list = dict(\n stock = [],\n future = [],\n sea_future = [],\n foreign_stock = [],\n )\n\n def OnAccount(self, bstrLogInID, bstrAccountData):\n strValues = bstrAccountData.split(',')\n strAccount = strValues[1] + strValues[3]\n\n #GlobalboxForeignStockAccount['values'] = '123'\n\n if strValues[0] == 'TS':\n SKOrderLibEvent.__account_list['stock'].append(strAccount)\n GlobalboxStockAccount['values'] = SKOrderLibEvent.__account_list['stock']\n elif strValues[0] == 'TF':\n SKOrderLibEvent.__account_list['future'].append(strAccount)\n GlobalboxFutureAccount['values'] = SKOrderLibEvent.__account_list['future']\n elif strValues[0] == 'OF':\n SKOrderLibEvent.__account_list['sea_future'].append(strAccount)\n GlobalboxSeaFutureAccount['values'] = SKOrderLibEvent.__account_list['sea_future']\n elif strValues[0] == 'OS':\n SKOrderLibEvent.__account_list['foreign_stock'].append(strAccount)\n GlobalboxForeignStockAccount['values'] = SKOrderLibEvent.__account_list['foreign_stock']\n\n#win32com使用此方式註冊callback\n#SKOrderLibEventHandler = win32com.client.WithEvents(SKOrderLib, SKOrderLibEvent)\n\n#comtypes使用此方式註冊callback\nSKOrderEvent = SKOrderLibEvent()\nSKOrderLibEventHandler = comtypes.client.GetEvents(skO, SKOrderEvent)\n\nif __name__ == '__main__':\n root = Tk()\n root.title(\"PythonExampleOrder\")\n root[\"background\"] = \"#ffdbdb\"\n\n s = Style()\n\n for _ in \"Pink.TFrame\", \"Pink.TLabelframe\", \"Pink.TNotebook\":\n s.configure(_, background = \"#ffdbdb\")\n\n for _ in \"Pink.TLabel\", \"Pink.TRadiobutton\", \"Pink.TCheckbutton\":\n s.configure(_, font = 1, foreground = \"#6d6d6d\", background = \"#ffdbdb\")\n\n s.configure(\"Pink.TButton\", font = 1, foreground = \"#0f900a\")\n 
s.configure(\"PinkFiller.TLabel\", font = 1, foreground = \"#ffdbdb\", background = \"#ffdbdb\")\n\n # Center\n __FrameLogin(master = root)\n \n # OrderTab\n root.TabControl = Notebook(root, style=\"Pink.TNotebook\")\n root.TabControl.grid(column = 0, row = 2, sticky = 'ew', padx = 10, pady = 10)\n root.TabControl.add(__FrameOrder(master = root), text=\"下單\")\n\n root.mainloop()\n","repo_name":"RxAdd/Python_Trade","sub_path":"PythonExample/order_service/Order.py","file_name":"Order.py","file_ext":"py","file_size_in_byte":16765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"34869159644","text":"import json\nfrom django.conf import settings\nfrom django.contrib.auth import authenticate, login\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect\n\nfrom app.functions import getStudent\nfrom facebook.backend import FacebookBackend\nfrom facebook.models import CreateGroupToken, ClassGroup, UserAccessToken\n\n\ndef facebook_login(request):\n\t\"\"\"\n\tLogin the User with the facebook authentication backend\n\t\"\"\"\n\n\tif request.user.is_authenticated():\n\t\treturn redirect('app:home')\n\n\t# Get the facebook access token from the code returned\n\tfacebook_code = request.GET['code']\n\taccess_token = UserAccessToken()\n\tredirect_uri = settings.FACEBOOK_SETTINGS['LOGIN_REDIRECT_URI']\n\n\t# Get the user information (especially the user_id)\n\t# Like this the user_id is not passing via the client\n\taccess_token.fetchAccessToken(facebook_code, redirect_uri)\n\tuser_info = access_token.fetchUserInfo()\n\n\tuser = authenticate(\n\t\tfb_id=user_info['facebook_id']\n\t)\n\n\tif user is not None:\n\t\tlogin(request, user)\n\t\treturn redirect('app:home')\n\telse:\n\t\traise Http404('Vous ne vous êtes pas inscrit ou vous devez renseigner votre compte facebook')\n\n\ndef createClassroomGroup(request, tokenValue, facebookUserId):\n\t\"\"\" Creates a classroom group for the user that claimed it\n\t \t/!\\ The user is fetched by the token, that's why there is not decorator\"\"\"\n\n\t# Check if the token exists\n\ttry:\n\t\ttoken = CreateGroupToken.objects.get(value=tokenValue)\n\texcept CreateGroupToken.DoesNotExist:\n\t\traise Http404('Votre token n\\'existe pas !')\n\n\tstudent = getStudent(token.delegate)\n\tclassroom = student.classroom\n\n\tfbGroup = ClassGroup()\n\tfbGroup.groupId = fbGroup.createClassGroup(student, facebookUserId)\n\tfbGroup.name = classroom.name\n\tfbGroup.save()\n\n\ttoken.delete()\n\n\treturn HttpResponse(\n\t\tjson.dumps({'success': True,\n\t\t\t\t\t'id': fbGroup.groupId}),\n\t\tcontent_type='application/json'\n\t)","repo_name":"grodino/refiche","sub_path":"facebook/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"73679493415","text":"class Solution:\n def rotate(self, nums, k) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n self.rev(nums, 0, len(nums) - 1)\n self.rev(nums, 0, k - 1)\n self.rev(nums, k, len(nums) - 1)\n\n def rev(self, nums, s, e):\n \"\"\"\n reverse function\n \"\"\"\n while s < e:\n nums[s], nums[e] = nums[e], nums[s]\n s += 1\n e -= 1\n\n return nums\n\n\na = Solution()\na.rotate([1, 2], 3)","repo_name":"y7y1h13/Algo_Study","sub_path":"1일1알고리즘/8월/19일/Rotate Array.py","file_name":"Rotate Array.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18419091565","text":"import cv2\nimport numpy as np\nimport time\nimport HandDetectionModule as hdm\nimport math\n\n###############################\nwCam, hCam = 640, 480\n#wCam, hCam = 1920, 1080\nfingers = {\n \"THUMB\": 4,\n \"INDEX\": 8,\n \"MIDDLE\": 12,\n \"RING\": 16,\n \"PINKY\": 20\n}\n###############################\ncapture = cv2.VideoCapture(0)\ncapture.set(3, wCam) # Set Display width\ncapture.set(4, hCam) # Set Display height\npTime = 0\nstartTime = -1\ndetector = hdm.HandDetector(detectionCon=0.75, trackCon=0.65)\n\ncurrTime = time.time()\nwhile True:\n success, img = capture.read()\n img = cv2.flip(img, 1)\n # img = cv2.resize(img, (wCam, hCam))\n # Draws the hand\n detector.findHands(img, draw=False)\n lmlist = detector.findPosition(img, draw=False)\n if(len(lmlist) != 0):\n with open('../SharedMem.txt', 'w') as f:\n # turn 5 finger's positions into string\n output_string = \"{\\n\"\n for key in fingers:\n output_string += \"\\t{}:\\tX:{} Y:{}\\n\".format(key,lmlist[fingers[key]][1], lmlist[fingers[key]][2])\n output_string += \"}\\n\"\n\n f.write(output_string)\n print(output_string)\n\n x1, y1 = lmlist[fingers[\"THUMB\"]][1],lmlist[fingers[\"THUMB\"]][2]\n x2, y2 = lmlist[fingers[\"INDEX\"]][1],lmlist[fingers[\"INDEX\"]][2]\n cx, cy = (x1 + x2) // 2, (y1 + y2) // 2\n\n cv2.circle(img, (x1, y1), 10, (255, 0, 0), cv2.FILLED)\n cv2.circle(img, (x2, y2), 10, (255, 0, 0), cv2.FILLED)\n\n # Draws a line between thumb and index and puts a dot on the center of the line\n cv2.line(img, (x1, y1), (x2, y2), (255, 255, 0), 10)\n cv2.circle(img, (cx, cy), 10, (255, 0, 0), cv2.FILLED)\n\n length = math.hypot(x2-x1, y2-y1)\n # print(length)\n\n if(length < 30):\n cv2.circle(img, (cx, cy), 10, (0, 255, 0), cv2.FILLED)\n if (startTime == -1):\n startTime = time.time()\n # print(\"This is wroking\")\n elif(startTime != -1):\n endTime = time.time()\n\n cTime = time.time()\n fps = 1/ (cTime - pTime)\n pTime = cTime\n\n # FPS Display\n cv2.putText(img, f'FPS: {int(fps)}', (40, 70), cv2.FONT_HERSHEY_COMPLEX,\n 1, (255, 0, 0), 2)\n\n cv2.imshow(\"Img\", img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n","repo_name":"ThePatrickHe/EE475-Capstone-Wi23","sub_path":"CV/BrightnessHandControl.py","file_name":"BrightnessHandControl.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"73211785578","text":"#\n# @lc app=leetcode id=977 lang=python\n#\n# [977] Squares of a Sorted Array\n#\n\n# @lc code=start\nimport heapq\n\nclass Solution(object):\n def sortedSquares(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n # Heapify is O(N)\n ans = []\n nums = [nums[i]**2 for i in range(len(nums))]\n \n heapq.heapify(nums)\n \n while nums:\n ans.append(heapq.heappop(nums))\n \n return ans\n# @lc code=end\n\n","repo_name":"ashshekhar/leetcode-problems-solutions","sub_path":"977.squares-of-a-sorted-array.py","file_name":"977.squares-of-a-sorted-array.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"74068514217","text":"'''4 - Faça um Programa que verifique se uma letra digitada é vogal ou consoante'''\n\nletra = str(input(\"Digite uma letra para saber se e consoante ou vogal: \"))\n\nvogal = [\"a\",\"e\",\"i\",\"o\",\"u\",\"A\",\"E\",\"I\",\"O\",\"U\"]\nconsoante = [\"b\",\"c\",\"d\",\"f\",\"g\",\"h\",\"j\",\"l\",\"m\",\"n\",\"p\",\"q\",\"r\",\"s\",\"t\",\"v\",\"x\",\"y\",\"z\",\"B\",\"C\",\"D\",\"F\",\"G\",\"H\",\"J\",\"L\",\"M\",\"N\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"V\",\"X\",\"Y\",\"Z\"]\n\n\nif (letra in vogal):\n print(f\"A Letra digitada {letra} é uma vogal é uma vogal\")\nelif(letra in consoante):\n print(f\"A Letra digitada {letra} é uma consoante\")\nelse:\n print(\"Digite uma letra apenas!!!\")","repo_name":"xXRicardoX/Lista_de_Exercicio_Python","sub_path":"02 - Estrutura_de_Decisão/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"39016262779","text":"from fastapi import FastAPI, Response, status\nfrom pydantic import BaseModel\nfrom typing import List, Dict\nfrom time import time\nfrom calculators import _tdee_calculate, _bmr_calculate, test_run, _macro_per_meal\n\nclass UserData(BaseModel):\n name: str\n age: int\n sex: str\n weight: int\n height: int\n excercising_factor: float\n\n# metadata of the APIs\ntags_metadata = [\n {\n \"name\": \"check-status\",\n \"description\": \"Check the status of the service\"\n\n },\n {\n \"name\": \"bmr-calculate\",\n \"description\": \"Calculate the user's BMR (recommended daily calories intake)\"\n },\n {\n \"name\": \"tdee-calculate\",\n \"description\": \"Calculation of user's daily macro intake\"\n },\n {\n \"name\": \"macro-per-meal\",\n \"description\": \"Calculations of user's meally plan by macro nutrients\"\n }\n]\n\n# init app with exposing RESTAPI\napp = FastAPI(openapi_tags=tags_metadata)\n\n\n@app.get(\"/\", tags=[\"check-status\"], status_code=status.HTTP_200_OK)\ndef check_status(response: Response):\n _status, message = test_run()\n if _status == 1:\n return {'API Calories Index Calculator': {'Status': message}}\n elif _status == 0:\n response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n return {'API Calories Index Calculator': {'Status': message}}\n\n# BMR calculator API call: deprecated\n# @app.post(\"/bmr-calculate\", tags=[\"bmr-calculate\"], status_code=status.HTTP_200_OK)\n# async def bmr_calculate(user_data: UserData):\n \n# bmr_index = _bmr_calculate(user_data)\n# res = {\"name\": user_data.name, \"bmr_index\": bmr_index}\n\n# return res\n\n# TDEE calculator API call\n@app.post(\"/tdee-calculate\", tags=[\"tdee-calculate\"], status_code=status.HTTP_200_OK)\nasync def tdee_calculate(user_data: UserData):\n \n tdee_index = _tdee_calculate(user_data)\n res = {\"name\": user_data.name, \"breakfast\": tdee_index*0.3, \"lunch\":tdee_index*0.4, \"dinner\":tdee_index*0.3}\n \n return res\n\n@app.post(\"/macro-per-meal\", tags=[\"macro-per-meal\"], status_code=status.HTTP_200_OK)\nasync def macro_per_meal(user_data: UserData):\n \n macro_calculations = _macro_per_meal(user_data)\n res = {\n \"name\": user_data.name, \n \"daily_macro_in_gram\": macro_calculations\n }\n \n return res\n","repo_name":"capoolebugchat/nutrition-calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"22959561400","text":"import os.path\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__commit__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n\ntry:\n base_dir = os.path.dirname(os.path.abspath(__file__))\nexcept NameError:\n base_dir = None\n\n\n__title__ = \"dev_review\"\n__summary__ = \"guide to code review during research\"\n__uri__ = \"https://github.com/ResearchCodeReviewCommunity/dev-review\"\n\n__version__ = \"0.1.0\"\n\nif base_dir is not None and os.path.exists(os.path.join(base_dir, \".commit\")):\n with open(os.path.join(base_dir, \".commit\")) as fp:\n __commit__ = fp.read().strip()\nelse:\n __commit__ = None\n\n__author__ = \"Research Code Review Community\"\n__email__ = \"fergus.cooper@cs.ox.ac.uk\"\n\n__license__ = \"MIT\"\n__copyright__ = \"2020-present %s\" % __author__\n","repo_name":"ResearchCodeReviewCommunity/dev-review","sub_path":"src/dev_review/__about__.py","file_name":"__about__.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"90"}
+{"seq_id":"18165927599","text":"from collections import Counter\nh, w, m = map(int, input().split())\nblist = [list(map(int, input().split())) for _ in range(m)]\n\nblist_set = set(map(tuple, blist))\n\nrowdict = Counter([blist[i][0] for i in range(m)])\ncoldict = Counter([blist[i][1] for i in range(m)])\n\nmaxr = rowdict.most_common()[0][1]\nmaxr_keys = []\nfor i in rowdict.most_common():\n if i[1] == maxr:\n maxr_keys.append(i[0])\n else:\n break\nmaxc = coldict.most_common()[0][1]\nmaxc_keys = []\nfor i in coldict.most_common():\n if i[1] == maxc:\n maxc_keys.append(i[0])\n else:\n break\n\nfor i in maxr_keys:\n for j in maxc_keys:\n if (i, j) not in blist_set:\n print(maxr + maxc)\n break\n else:\n continue\n break\nelse:\n print(maxr + maxc - 1)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02580/s009111399.py","file_name":"s009111399.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"4429684617","text":"import queue\n\n# 1:up\n# 2:left\n# 3:right\n# 4 : down\n\nlst = list(map(int, input().split()))\nx_packman, y_packman = lst[0], lst[1]\nlst = list(map(int, input().split()))\nx_food, y_food = lst[0], lst[1]\nlst = list(map(int, input().split()))\nrow, col = lst[0], lst[1]\nmatrix = [[] for _ in range(row)]\nmatrix_path = [[-1 for x in range(col)] for y in range(row)]\ndef print_table():\n for i in range(row):\n for j in range(col):\n print(matrix_path[i][j],end=\" \")\n print(\"\")\n print(\"\")\ndef h(x, y):\n return abs(x - x_food) + abs(y - y_food)\n\ndef put_in_fringe(cost, x, y,direction):\n if x >= row or y >= col or x < 0 or y < 0 or matrix[x][y] == '%' or matrix_path[x][y] != -1:\n return\n fringe.put((cost+h(x,y) ,cost,x,y,direction))\ndef expand():\n v = fringe.get()\n cost, direction, x, y = v[1], v[4], v[2], v[3]\n matrix_path[x][y] = direction\n if x == x_food and y == y_food:\n return True\n put_in_fringe(cost + 1, x - 1, y,1)\n put_in_fringe(cost + 1, x, y - 1,2)\n put_in_fringe(cost + 1, x, y + 1,3)\n put_in_fringe(cost + 1, x + 1, y,4)\n return False\ndef print_path(x,y):\n direction = matrix_path[x][y]\n if direction == 0:\n print(x,y)\n return\n if direction == 1:\n print_path(x+1,y)\n elif direction == 2:\n print_path(x,y+1)\n elif direction == 3:\n print_path(x,y-1)\n else:\n print_path(x-1,y)\n print(x,y)\nfor f in range(row):\n matrix[f] = input()\nfringe = queue.PriorityQueue()\nheurestic = h(x_packman, x_food)\nfringe.put((heurestic, 0, x_packman, y_packman,0))\nwhile not fringe.empty():\n if expand() == True:\n print_path(x_food,y_food)\n break\n","repo_name":"ahbagheri01/Artificial-Intelligence","sub_path":"Computer Assignments/P1/A*/A2.py","file_name":"A2.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"22890848041","text":"import json\nfrom difflib import get_close_matches\n\n#loading data from json file\ndata = json.load(open('dictionary.json'))\n\ndef translate(w):\n #convert to lower case\n w = w.lower()\n \n if w in data :\n return data[w]\n #for getting close matches\n elif len(get_close_matches(w , data.keys())) >0:\n yn = input(\"did you meman %s instead ? Enter Y if yes , or N if no\")\n yn = yn.lower()\n if yn==\"y\":\n return data[get_close_matches(w , data.keys())[0]]\n elif yn==\"n\":\n return \"the word does not exist . please double check it\"\n else:\n return\"we didn't understand your entry\"\n else:\n return\"The word does not exist.\"\n \n# driver code\n\nword = input(\"enter word : \")\noutput = translate(word)\n\nif type(output) == list:\n for item in output:\n print(item)\nelse:\n print(output)\ninput(\"pres ENTER to exit\")","repo_name":"Rakshita-S/english---dictionary","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"33381275069","text":"from pathlib import Path\n\nimport numpy as np\nimport torch\nimport pyproj\nimport pandas as pd\nfrom pathlib import Path\n\nfrom malpolon.data.datasets.torchgeo_datasets import RasterTorchGeoDataset\n\nDATA_PATH = Path(\"malpolon/tests/data/\")\n\n\ndef test_patch_query_torchgeo():\n class Sentinel2(RasterTorchGeoDataset):\n filename_glob = DATA_PATH / \"torchgeo_sentinel2_test_sample.tif\"\n is_image = True\n separate_files = True\n all_bands = [\"B08\"]\n rgb_bands = [\"B08\"]\n\n class MicroLifeClef(RasterTorchGeoDataset):\n filename_glob = DATA_PATH / \"torchgeo_mlc_test_sample.tif\"\n is_image = True\n separate_files = True\n all_bands = [\"bio_1\"]\n rgb_bands = [\"bio_1\"]\n dataset_s2, dataset_mlc = Sentinel2('./'), MicroLifeClef('./')\n patch1_s2 = dataset_s2[{'lon': 3.87075, 'lat': 43.61135, 'crs': dataset_s2.crs_pyproj.geodetic_crs, 'units': 'pixel', 'size': (100, 100)}][0][0]\n patch2_s2 = dataset_s2[{'lon': 570265.8337376957, 'lat': 4829076.115471331, 'crs': dataset_s2.crs_pyproj, 'units': 'm', 'size': 1000}][0][0]\n mlc_center = (dataset_mlc.bounds[1]+dataset_mlc.bounds[0])/2, (dataset_mlc.bounds[3]+dataset_mlc.bounds[2])/2\n patch_mlc = dataset_mlc[{'lon': mlc_center[0], 'lat': mlc_center[1], 'crs': dataset_mlc.crs_pyproj, 'units': 'pixel', 'size': 200}][0][0]\n expected_patch_s2 = torch.load(DATA_PATH / 'torchgeo_sentinel2_expected.raw')\n expected_patch_mlc = torch.load(DATA_PATH / 'torchgeo_mlc_expected.raw')\n assert tuple(patch1_s2.shape) == expected_patch_s2.shape\n assert tuple(patch2_s2.shape) == expected_patch_s2.shape\n assert tuple(patch_mlc.shape) == expected_patch_mlc.shape\n np.testing.assert_allclose(patch1_s2, expected_patch_s2)\n np.testing.assert_allclose(patch2_s2, expected_patch_s2)\n np.testing.assert_allclose(patch_mlc, expected_patch_mlc)\n\ndef test_load_observation_data() -> None:\n class Sentinel2(RasterTorchGeoDataset):\n filename_glob = DATA_PATH / \"torchgeo_sentinel2_test_sample.tif\"\n is_image = True\n separate_files = True\n all_bands = [\"B08\"]\n rgb_bands = [\"B08\"]\n dataset_s2 = Sentinel2('./')\n df = dataset_s2._load_observation_data(root=DATA_PATH,\n obs_fn='sentinel2_raster_torchgeo.csv')\n assert type(df) is pd.DataFrame\n\ndef test_coords_transform() -> None:\n class Sentinel2(RasterTorchGeoDataset):\n filename_glob = DATA_PATH / \"torchgeo_sentinel2_test_sample.tif\"\n is_image = True\n separate_files = True\n all_bands = [\"B08\"]\n rgb_bands = [\"B08\"]\n dataset_s2 = Sentinel2('./')\n querys = {'lon': [3.87075, 3.87075, 570265.8337376957],\n 'lat': [43.61135, 43.61135, 4829076.115471331],\n 'input_crs': [pyproj.CRS.from_epsg(4326), \"4326\", \"self\"], # Sentinel-2 CRS is 32631\n 'output_crs': [pyproj.CRS.from_epsg(4326), \"4326\", \"self\"]}\n expected = {0: (3.87075, 43.61135),\n 1: (3.87075, 43.61135),\n 2: (570265.8337376957, 4829076.115471331)}\n for o in range(len(querys['output_crs'])):\n for i in range(len(querys['input_crs'])):\n coords = dataset_s2.coords_transform(querys['lon'][i], querys['lat'][i],\n querys['input_crs'][i],\n querys['output_crs'][o])\n np.testing.assert_allclose(round(coords[0], 5),\n round(expected[o][0], 5),\n rtol=1e-5)\n\ndef test_point_to_bbox() -> None:\n class MicroLifeClef(RasterTorchGeoDataset):\n filename_glob = DATA_PATH / \"torchgeo_mlc_test_sample.tif\"\n is_image = True\n separate_files = True\n all_bands = [\"bio_1\"]\n rgb_bands = [\"bio_1\"]\n dataset_mlc = MicroLifeClef('./')\n # Meters\n bbox = dataset_mlc.point_to_bbox(3.87075, # 
570265.8337376957\n 43.61135, # 4829076.115471331\n size=200,\n units='m',\n crs='4326')\n bbox = np.array([bbox.minx, bbox.maxx, bbox.miny, bbox.maxy])\n expected_bbox = np.array([3.8694875, 3.8719917, 43.6104583, 43.61224])\n np.testing.assert_allclose(bbox.round(5),\n expected_bbox.round(5),\n rtol=1e-4)\n # Pixels\n bbox = dataset_mlc.point_to_bbox(3.87075,\n 43.61135,\n size=50,\n units='pixel',\n crs='4326')\n bbox = np.array([bbox.minx, bbox.maxx, bbox.miny, bbox.maxy])\n expected_bbox = np.array([3.6624166666666667, 4.079083333333333, 43.403016666666666, 43.81968333333334])\n np.testing.assert_allclose(bbox.round(5),\n expected_bbox.round(5),\n rtol=1e-4)\n # CRS\n bbox = dataset_mlc.point_to_bbox(3.87075,\n 43.61135,\n size=0.02,\n units='crs',\n crs='4326')\n bbox = np.array([bbox.minx, bbox.maxx, bbox.miny, bbox.maxy])\n expected_bbox = np.array([3.86075, 3.88075, 43.60135, 43.62135])\n np.testing.assert_allclose(bbox.round(5),\n expected_bbox.round(5),\n rtol=1e-4)\n\ndef test_valid_query_point() -> None:\n class Sentinel2(RasterTorchGeoDataset):\n filename_glob = DATA_PATH / \"torchgeo_sentinel2_test_sample.tif\"\n is_image = True\n separate_files = True\n all_bands = [\"B08\"]\n rgb_bands = [\"B08\"]\n class MicroLifeClef(RasterTorchGeoDataset):\n filename_glob = DATA_PATH / \"torchgeo_mlc_test_sample.tif\"\n is_image = True\n separate_files = True\n all_bands = [\"bio_1\"]\n rgb_bands = [\"bio_1\"]\n dataset_s2, dataset_mlc = Sentinel2('./'), MicroLifeClef('./')\n query1 = {'lon': 3.87075, 'lat': 43.61135, 'crs': pyproj.CRS.from_epsg(4326), 'units': 'pixel', 'size': (100, 100)}\n query2 = {'lon': 570265.8337376957, 'lat': 4829076.115471331, 'crs': pyproj.CRS.from_epsg(32631), 'units': 'pixel', 'size': (100, 100)}\n assert dataset_s2._valid_query_point(query1)\n assert dataset_s2._valid_query_point(query2)\n assert dataset_mlc._valid_query_point(query1)\n assert dataset_mlc._valid_query_point(query2)\n\ndef test_format_label_to_task() -> None:\n class Sentinel2(RasterTorchGeoDataset):\n filename_glob = DATA_PATH / \"torchgeo_sentinel2_test_sample.tif\"\n is_image = True\n separate_files = True\n all_bands = [\"B08\"]\n rgb_bands = [\"B08\"]\n dataset_s2 = Sentinel2('./')\n\n dataset_s2.task = 'classification_binary'\n dataset_s2.binary_positive_classes = [1, 2]\n labels_raw = [2, 3]\n labels_formatted = dataset_s2._format_label_to_task(labels_raw)\n assert labels_formatted == 1\n\n dataset_s2.task = 'classification_multiclass'\n labels_raw = [3]\n labels_formatted = dataset_s2._format_label_to_task(labels_raw)\n assert labels_formatted == 3\n \n dataset_s2.task = 'classification_multilabel'\n dataset_s2.unique_labels = [1, 2, 3, 4, 5]\n labels_raw = [1, 4]\n labels_expected = np.zeros(5, dtype=np.float32)\n labels_expected[0] = 1\n labels_expected[-2] = 1\n labels_formatted = dataset_s2._format_label_to_task(labels_raw)\n assert all(labels_formatted == labels_expected)","repo_name":"plantnet/malpolon","sub_path":"malpolon/tests/test_torchgeo_datasets.py","file_name":"test_torchgeo_datasets.py","file_ext":"py","file_size_in_byte":7633,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"}
+{"seq_id":"14393678726","text":"# #author wangzhaoyang\r\n# li=['i'+'thanks' for i in range(10)]\r\n# print(li)\r\n# dict = {1:\"wang\",2:\"zhao\",3:\"yang\"}\r\n# print(dict.get(5))\r\n# print(dict.items())\r\ns=set([1,2,3,4,5])\r\ns2=set([4,5,6,7])\r\nif 3 in s:\r\n print('wangzhaoyang')\r\n\r\n","repo_name":"mydearlove/python_dev","sub_path":"python/day2/字典列表生成式.py","file_name":"字典列表生成式.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"28364903891","text":"\"\"\" Tests functions in covid_news_handling.py\"\"\"\nfrom time import time\nfrom shared_vars_funcs import scheduler_assertions\nfrom covid_news_handling import remove_news_articles , removed_articles\nfrom covid_news_handling import news_API_request\nfrom covid_news_handling import update_news\nfrom covid_news_handling import news_articles\nfrom covid_news_handling import schedule_news_updates\n\ndef article_assertions() -> None:\n \"\"\" Checks non-emptyness, type and content of news_articles\"\"\"\n assert news_articles\n assert isinstance(news_articles,list)\n for article in news_articles:\n assert isinstance(article,dict)\n assert 'title' in article\n assert 'content' in article\n\ndef test_news_API_request():\n \"\"\" Tests output of news_API_request\"\"\"\n assert news_API_request()\n assert news_API_request('Covid COVID-19 coronavirus') == news_API_request()\n article_assertions()\n\ndef test_update_news():\n \"\"\" Check that update_news works correctly\"\"\"\n # clears news articles\n news_articles.clear()\n update_news('test')\n scheduler_assertions('test')\n article_assertions()\n\ndef test_schedule_news_update():\n \"\"\" Schedules update and tests it with scheduler_assertions\"\"\"\n correct_sched_time = time() + 100\n schedule_news_updates(update_interval = 100,\n update_name = 'update test')\n scheduler_assertions(update_name = 'update test',\n event_time = correct_sched_time)\n\ndef test_remove_news_articles():\n \"\"\" Checks that the function can remove news articles\"\"\"\n news_articles.clear()\n test_article = {\n 'title': 'test title',\n 'content': 'test content'\n }\n prev_removed_article = {\n 'title': 'previously removed',\n 'content': 'previously removed'\n }\n news_articles.append(test_article)\n news_articles.append(prev_removed_article)\n removed_articles.append(prev_removed_article)\n # This should only remove the test article, because update=False\n remove_news_articles(article2remove = 'test title', update = False)\n assert test_article not in news_articles\n # Since update=True this should remove prev_removed_article from\n # news_articles, but not from removed_articles\n remove_news_articles(update = True)\n assert prev_removed_article not in news_articles\n # Finally since prev_removed_article in not in news_articles and\n # update=True, prev_removed_article is removed from removed_articles\n remove_news_articles(update = True)\n assert prev_removed_article not in removed_articles\n","repo_name":"NicholasJAlexander/ECM1400-Assessment","sub_path":"test_covid_news_handling.py","file_name":"test_covid_news_handling.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"73823281258","text":"from tritonclient.utils import np_to_triton_dtype\nimport numpy as np\nimport soundfile as sf\n\nclass SpeechClient(object):\n def __init__(self, triton_client, model_name, protocol_client):\n self.triton_client = triton_client\n self.protocol_client = protocol_client\n self.model_name = model_name\n\n def recognize(self, wav_file, idx=0):\n waveform, sample_rate = sf.read(wav_file)\n samples = np.array([waveform], dtype=np.float32)\n lengths = np.array([[len(waveform)]], dtype=np.int32)\n sequence_id = 10086 + idx\n result = ''\n inputs = [\n self.protocol_client.InferInput(\"WAV\", samples.shape,\n np_to_triton_dtype(samples.dtype)),\n self.protocol_client.InferInput(\"WAV_LENS\", lengths.shape,\n np_to_triton_dtype(lengths.dtype))\n ]\n inputs[0].set_data_from_numpy(samples)\n inputs[1].set_data_from_numpy(lengths)\n outputs = [self.protocol_client.InferRequestedOutput(\"TRANSCRIPTS\")]\n response = self.triton_client.infer(self.model_name,\n inputs,\n request_id=str(sequence_id),\n outputs=outputs)\n result = response.as_numpy(\"TRANSCRIPTS\")[0].decode(\"utf-8\")\n return [result]\n","repo_name":"dingyuqing05/trt2022_wenet","sub_path":"wenet/runtime/server/x86_gpu/client/offline_client.py","file_name":"offline_client.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"90"}
+{"seq_id":"31538538987","text":"import re\nimport inspect\nimport numpy as np\nimport ast\nimport phylanx.execution_tree\nfrom phylanx import PhylanxSession\nfrom .physl_db import db\n\n\n# find name of file that imported this file\n_name_of_importing_file = None\n\nif __name__ != '__main__':\n if _name_of_importing_file is None:\n for frame in inspect.stack()[1:]:\n if frame.filename[0] != '<':\n _name_of_importing_file = frame.filename\n\n\ndef physl_zip(loop):\n def define(i, idx):\n return ['define', (i, ['slice', ('__physl_iterator', str(idx))])]\n\n if isinstance(loop, ast.For):\n targets = [it.id for it in loop.target.elts]\n args = [arg.id for arg in loop.iter.args]\n elif isinstance(loop, list):\n if isinstance(loop[0], list):\n targets = loop[0][1]\n else:\n targets = loop[0]\n args = loop[1][1]\n\n lambda_ = ['lambda', (*targets, ['list', (*targets, )])]\n fmap = ['fmap', (lambda_, *args)]\n iterators = tuple(define(i, idx) for idx, i in enumerate(targets))\n return (fmap, iterators)\n\n\nmapped_methods = {\n \"add\": \"__add\",\n \"array\": \"hstack\",\n \"det\": \"determinant\",\n \"diagonal\": \"diag\",\n \"divide\": \"__div\",\n \"matmul\": \"__mul\",\n \"multiply\": \"__mul\",\n \"negative\": \"__minus\",\n \"print\": \"cout\",\n \"subtract\": \"__sub\",\n \"len\": \"__len\"\n}\n\nnumpy_constants = {\n \"inf\": 'inf',\n \"Inf\": 'inf',\n \"Infinity\": 'inf',\n \"PINF\": 'inf',\n \"infty\": 'inf',\n \"NINF\": 'ninf',\n \"nan\": 'nan',\n \"NaN\": 'nan',\n \"NAN\": 'nan',\n \"PZERO\": 'PZERO',\n \"NZERO\": 'NZERO',\n \"e\": 'euler',\n \"euler_gamma\": 'euler_gamma',\n \"pi\": 'pi',\n \"float\": 'float',\n \"int\": 'int',\n \"bool\": 'bool'\n}\n\nmethods_supporting_dtype = [\n 'linearmatrix',\n 'linspace',\n 'power'\n]\n\n\ndef create_array(array_tree, kwargs):\n symbol_info = []\n\n hstack_symbol = 'hstack'\n vstack_symbol = 'vstack'\n dstack_symbol = 'dstack'\n\n def extract_data(arr):\n if isinstance(arr, tuple):\n if not arr:\n return []\n elif isinstance(arr[0], str):\n return [i for i in arr]\n else:\n current_dim = []\n for entry in arr:\n current_dim.append(extract_data(entry))\n return current_dim\n elif isinstance(arr, list):\n symbol_info.append('$' + arr[0].split('$', 1)[1])\n return extract_data(arr[1])\n\n data = extract_data(array_tree)\n\n if not symbol_info:\n if kwargs:\n args = (['list', tuple(data)], kwargs)\n else:\n args = (['list', tuple(data)], )\n return [hstack_symbol, args]\n\n data = np.array(*extract_data(array_tree))\n num_dim = len(data.shape)\n\n if 3 == num_dim:\n columns = [data[:, :, i] for i in range(data.shape[-1])]\n dstacks = []\n for i, column in enumerate(columns):\n dstacks.append([])\n if kwargs:\n [dstacks[i].append((['list', tuple(data)], kwargs)) for data in column]\n else:\n [dstacks[i].append((['list', tuple(data)], )) for data in column]\n\n outer_symbol = '' if not symbol_info else symbol_info.pop(0)\n arr = []\n for d in dstacks:\n vstack = []\n for hstacks in d:\n vstack.append([hstack_symbol + symbol_info.pop(0), hstacks])\n sym_info = '' if not symbol_info else symbol_info.pop(0)\n if kwargs:\n args = (['list', tuple(vstack)], kwargs)\n else:\n args = (['list', tuple(vstack)], )\n vstack = [vstack_symbol + sym_info, args]\n arr.append(vstack)\n if kwargs:\n args = (['list', tuple(arr)], kwargs, )\n else:\n args = (['list', tuple(arr)], )\n arr = [dstack_symbol + outer_symbol, args]\n elif 2 == num_dim:\n arr = []\n for hstacks in data:\n sym_info = '' if not symbol_info else symbol_info.pop(0)\n if kwargs:\n args = 
(['list', tuple(hstacks)], kwargs)\n else:\n args = (['list', tuple(hstacks)], )\n arr.append([hstack_symbol + sym_info, args])\n sym_info = '' if not symbol_info else symbol_info.pop(0)\n if kwargs:\n args = (['list', tuple(arr)], kwargs)\n else:\n args = (['list', tuple(arr)], )\n arr = [vstack_symbol + sym_info, args]\n elif 1 == num_dim:\n sym_info = '' if not symbol_info else symbol_info.pop(0)\n if kwargs:\n args = (['list', tuple(data)], kwargs)\n else:\n args = (['list', tuple(data)], )\n arr = [hstack_symbol + sym_info, args]\n else:\n raise ValueError(\"Phylanx supports arrays with 3 dimensions or less.\")\n return (arr,)\n\n\ndef primitive_name(method_name):\n \"\"\"Given a method_name, returns the corresponding Phylanx primitive.\n\n This is primarily used for mapping NumPy mapped_methods to Phylanx primitives,\n but there are also other functions in Python that map to primitives\n with a different name in Phylanx, e.g., `print` is mapped to `cout`.\n \"\"\"\n\n primitive_name = mapped_methods.get(method_name)\n if primitive_name:\n return primitive_name\n\n constant_name = numpy_constants.get(method_name)\n if constant_name:\n return constant_name\n\n return method_name\n\n\ndef print_physl_src(src, with_symbol_info=False, tab=4):\n \"\"\"Pretty print PhySL source code.\"\"\"\n\n # Remove line number info\n src = re.sub(r'\\$\\d+', '', src)\n\n if with_symbol_info:\n print(src)\n return\n\n # The regex below matches one of the following three\n # things in order of priority:\n # 1: a quoted string, with possible \\\" or \\\\ embedded\n # 2: a set of balanced parentheses\n # 3: a single character\n pat = re.compile(r'\"(?:\\\\.|[^\"\\\\])*\"|\\([^()]*\\)|.')\n indent = 0\n for s in re.findall(pat, src):\n if s in \" \\t\\r\\b\\n\":\n pass\n elif s == '(':\n print(s)\n indent += 1\n print(\" \" * indent * tab, end=\"\")\n elif s == ')':\n indent -= 1\n print(\"\", sep=\"\")\n print(\" \" * indent * tab, end=\"\")\n print(s, end=\"\")\n elif s == ',':\n print(s)\n print(\" \" * indent * tab, end=\"\")\n else:\n print(s, end=\"\", sep=\"\")\n print(\"\", sep=\"\")\n\n\ndef get_symbol_info(symbol, name):\n \"\"\"Adds symbol info (line and column number) to the symbol.\"\"\"\n\n if name in numpy_constants.keys():\n return name\n else:\n return '%s$%d$%d' % (name, symbol.lineno, symbol.col_offset)\n\n\ndef remove_line(a):\n return re.sub(r'\\$.*', '', a)\n\n\ndef is_fun(func, ir):\n \"\"\"\n Check that the intermediate representation (ir) describes\n a function with name func.\n \"\"\"\n return type(ir) == list and type(ir[0]) == str and re.match(func + r'\\b', ir[0])\n\n\ndef check_noreturn(ir):\n \"\"\"\n Check that the intermediate representation (ir) passed\n to this routine does not contain a return statement.\n \"\"\"\n if type(ir) not in [list, tuple]:\n return\n if len(ir) == 0:\n return\n elif len(ir) == 1:\n check_noreturn(ir[0])\n elif is_fun('define', ir):\n check_hasreturn(ir)\n elif is_fun('return', ir):\n msg = \"Illegal return\"\n g = re.match(r'.*\\$(\\d+)\\$(\\d+)$', str(ir[0]))\n if g:\n msg += \": line=%s, col=%s\" % (g.group(1), g.group(2))\n raise NotImplementedError(msg)\n elif is_fun('.*', ir):\n check_noreturn(ir[1])\n elif type(ir) in [list, tuple]:\n for s in ir:\n check_noreturn(s)\n\n\ndef check_hasreturn(ir):\n \"\"\"\n Process the intermediate representation (ir) passed\n and ensure that if it has a return statement, it is\n at the end.\n \"\"\"\n if type(ir) not in [list, tuple]:\n return\n if len(ir) == 0:\n return\n elif len(ir) == 1:\n 
check_hasreturn(ir[0])\n elif is_fun('for_each', ir):\n check_noreturn(ir[1])\n elif is_fun('while', ir):\n check_noreturn(ir[1])\n elif is_fun('if', ir):\n for k in ir[1][1:]:\n check_hasreturn(k)\n elif is_fun('.*', ir):\n check_hasreturn(ir[1])\n else:\n if len(ir) == 0:\n return\n check_noreturn(ir[:-1])\n check_hasreturn([ir[-1]])\n\n\ndef check_return(ir):\n \"\"\"\n Process the intermediate representation (ir) passed\n and check that return statements are only used where\n allowed.\n \"\"\"\n if type(ir) not in [list, tuple]:\n return\n if len(ir) == 0:\n return\n elif len(ir) == 1:\n check_return(ir[0])\n elif is_fun('block', ir):\n check_hasreturn(ir[1])\n elif is_fun('while', ir):\n check_noreturn(ir[1])\n elif is_fun('if', ir):\n for k in ir[1][1:]:\n check_hasreturn(k)\n elif is_fun('.*', ir):\n check_return(ir[1])\n else:\n for s in ir:\n check_return(s)\n\n\nclass PhySLFunction:\n\n functions = []\n\n def __init__(self, physl):\n self.physl = physl\n\n def compile_function(self):\n self.physl._ensure_is_compiled()\n\n @staticmethod\n def compile():\n if PhySLFunction.functions:\n for func in PhySLFunction.functions:\n func.compile_function()\n PhySLFunction.functions = []\n\n\nclass PhySL:\n \"\"\"Python AST to PhySL Transducer.\"\"\"\n\n compiler_state = None\n\n def _ensure_compiler_state(self):\n \"\"\"Ensure the compiler state object has been created\"\"\"\n\n if PhySL.compiler_state is None:\n if \"compiler_state\" in self.kwargs:\n PhySL.compiler_state = self.kwargs['compiler_state']\n else:\n # the static method compiler_state is constructed only once\n PhySL.compiler_state = \\\n phylanx.execution_tree.global_compiler_state(\n self.wrapped_function.__name__, self.file_name)\n\n def _print_progress(self, msg):\n if self.kwargs.get(\"print_progress\"):\n msg += ' %s(%s)'\n print(msg % (self.wrapped_function.__name__, self.file_name))\n\n def _compile_or_load(self):\n \"\"\"Compile or load this function from database\"\"\"\n\n physl_db = None\n try:\n self._print_progress('physl: compiling')\n\n # create/open database representing the function in this file\n physl_db = db(self.file_name) # _name_of_importing_file)\n\n # check whether this Phylanx function is already in database\n self.__src__, self.__ast__ = physl_db.select(\n self.wrapped_function.__name__)\n\n if self.__src__ is None:\n self._print_progress('physl: not found in db')\n\n # this function is not in database, generate physl\n self.ir = self._apply_rule(self.python_tree.body[0])\n check_return(self.ir)\n\n if self.doc_src is None:\n self.__src__ = self._generate_physl(self.ir)\n else:\n self.__src__ = self.doc_src\n self.__ast__ = phylanx.ast.generate_ast(self.__src__)\n\n # now store the PhySL string and AST for this function\n physl_db.insert(\n self.wrapped_function.__name__, self.__src__, self.__ast__)\n\n physl_db.close()\n\n except Exception as e:\n # close database, if needed\n if physl_db is not None:\n physl_db.close()\n\n # assume something went wrong while handling the database, simply\n # compile things without db support\n self.ir = self._apply_rule(self.python_tree.body[0])\n check_return(self.ir)\n\n if self.doc_src is not None:\n if type(e) == RuntimeError and \"Incomplete parse\" in str(e):\n # simply re-raise the exception assuming the PhySL provided\n # by the doc string was invalid\n raise e\n\n self.__src__ = self.doc_src\n else:\n self.__src__ = self._generate_physl(self.ir)\n\n self.__ast__ = phylanx.ast.generate_ast(self.__src__)\n\n # now, print generated PhySL if required\n if 
self.kwargs.get(\"debug\"):\n print_physl_src(self.__src__)\n print(end=\"\", flush=\"\")\n\n self._print_progress('physl: compiled')\n\n def _ensure_global_state(self):\n \"\"\"Ensure global PhySL session has been initialized\"\"\"\n\n if not PhylanxSession.is_initialized:\n PhylanxSession.init(1)\n\n if not self.is_compiled:\n\n # compile all functions that have so far been collected without an\n # initialized session object\n PhySLFunction.compile()\n\n def _ensure_is_compiled(self):\n \"\"\"Ensure this function has been compiled, also compile all functions\n that have been collected so fart without being compiled\"\"\"\n\n if self.is_compiled:\n return\n\n # create compiler state\n self._ensure_compiler_state()\n\n # transduce the python code, generate AST\n self._compile_or_load()\n\n # compile this function\n phylanx.execution_tree.compile(\n PhySL.compiler_state, self.file_name,\n self.wrapped_function.__name__, self.__ast__)\n\n self.is_compiled = True\n\n def __init__(self, func, tree, kwargs):\n self.defined = set()\n self.numpy_aliases = {'numpy'}\n self.linalg_aliases = {'LA'}\n self.wrapped_function = func\n self.kwargs = kwargs\n self.is_compiled = False\n self.file_name = None\n self.__src__ = None\n self.__ast__ = None\n self.ir = None\n self.python_tree = tree\n if 'doc_src' in kwargs and kwargs['doc_src']:\n self.doc_src = func.__doc__\n else:\n self.doc_src = None\n\n if self.kwargs.get('fglobals'):\n self.fglobals = self.kwargs['fglobals']\n for key, val in self.fglobals.items():\n if type(val).__name__ == 'module':\n if val.__name__ == 'numpy':\n self.numpy_aliases.add(key)\n elif val.__name__ == 'linalg':\n self.numpy_aliases.add(key)\n else:\n for npname in self.numpy_aliases:\n if val.__name__ == '%s.linalg' % npname:\n self.linalg_aliases.add(key)\n self.file_name = self.fglobals.get('__file__')\n if not self.file_name:\n self.file_name = _name_of_importing_file\n\n self.performance = self.kwargs.get('performance', False)\n self.localities = self.kwargs.get('localities')\n self.__perfdata__ = (None, None, None)\n\n # Add arguments of the function to the list of discovered variables.\n if inspect.isfunction(self.python_tree.body[0]):\n for arg in self.python_tree.body[0].args.args:\n self.defined.add(arg.arg)\n else:\n PhySL.defined_classes = {}\n\n # compile this function if session was already initialized, otherwise\n # simply collect it for later compilation\n # always compile right away if debug=True\n if PhylanxSession.is_initialized or self.kwargs.get('debug', False):\n self._ensure_global_state()\n self._ensure_is_compiled()\n\n else:\n func = PhySLFunction(self)\n PhySLFunction.functions.append(func)\n\n def _generate_physl(self, ir):\n if len(ir) == 2 and isinstance(ir[0], str) and isinstance(\n ir[1], tuple):\n result = [self._generate_physl(i) for i in ir[1]]\n # Remove return statements when generating physl\n if re.match(r'return\\$.*', ir[0]):\n if len(result) == 1:\n return result[0]\n else:\n return '(' + ', '.join(result) + ')'\n else:\n return ir[0] + '(' + ', '.join(result) + ')'\n elif isinstance(ir, list):\n return ', '.join([self._generate_physl(i) for i in ir])\n elif isinstance(ir, tuple):\n # NOTE Phylanx does not support tuples at this point, therefore, we\n # unpack all tuples for now!\n return ', '.join([self._generate_physl(i) for i in ir])\n else:\n return ir\n\n def _apply_rule(self, node):\n \"\"\"Calls the corresponding rule, based on the name of the node.\"\"\"\n if node is not None:\n node_name = node.__class__.__name__\n return 
eval('self._%s' % node_name)(node)\n\n # Invocation support\n def lazy(self, *args, **kwargs):\n \"\"\"Compile a given function, return a variable binding the function to\n arguments\"\"\"\n\n self._ensure_global_state()\n self._ensure_is_compiled()\n\n if len(args) == 0 and len(kwargs) == 0:\n return phylanx.execution_tree.variable(\n phylanx.execution_tree.code_for(\n PhySL.compiler_state, self.file_name,\n self.wrapped_function.__name__))\n\n def map_wrapped(val):\n \"\"\"If a variable is passed as an argument to an invocation of a\n Phylanx function we need to extract the compiled execution tree\n and pass that along instead\"\"\"\n\n if isinstance(val, phylanx.execution_tree.variable):\n return val.value\n\n return val\n\n mapped_args = tuple(map(map_wrapped, args))\n kwitems = kwargs.items()\n mapped_kwargs = {k: map_wrapped(v) for k, v in kwitems}\n\n return phylanx.execution_tree.variable(\n phylanx.execution_tree.bound_code_for(\n PhySL.compiler_state, self.file_name,\n self.wrapped_function.__name__, *mapped_args, **mapped_kwargs))\n\n def call(self, *args, **kwargs):\n \"\"\"Invoke this Phylanx function, pass along the given arguments\"\"\"\n\n self._ensure_global_state()\n self._ensure_is_compiled()\n\n self.__perfdata__ = (None, None, None)\n self.performance_primitives = None\n\n if self.performance:\n self.performance_primitives = \\\n phylanx.execution_tree.enable_measurements(\n PhySL.compiler_state, True)\n\n result = phylanx.execution_tree.eval(\n PhySL.compiler_state, self.file_name,\n self.wrapped_function.__name__, *args, **kwargs)\n\n if self.performance:\n treedata = self.tree()\n\n self.__perfdata__ = (\n phylanx.execution_tree.retrieve_counter_data(\n PhySL.compiler_state), treedata[0], treedata[1])\n\n return result\n\n def tree(self):\n \"\"\"Return the tree data for this object\"\"\"\n\n self._ensure_global_state()\n self._ensure_is_compiled()\n\n return phylanx.execution_tree.retrieve_tree_topology(\n PhySL.compiler_state, self.file_name,\n self.wrapped_function.__name__)\n\n def get_physl_source(self):\n \"\"\"Return generated PhySL source string\"\"\"\n\n self._ensure_global_state()\n self._ensure_is_compiled()\n\n return self.__src__\n\n# #############################################################################\n# Transducer rules\n\n def _Add(self, node):\n \"\"\"Leaf node, returning raw string of the `add` operation.\"\"\"\n\n return '__add'\n\n def _And(self, node):\n \"\"\"Leaf node, returning raw string of the `and` operation.\"\"\"\n\n return '__and'\n\n def _arg(self, node):\n \"\"\"class arg(arg, annotation)\n\n A single argument in a list.\n\n `arg` is a raw string of the argument name.\n `annotation` is its annotation, such as a `Str` or `Name` node.\n\n TODO:\n add support for `annotation` which is ignored at this time. Maybe\n we can use this to let user provide type information!?!\n \"\"\"\n\n arg = get_symbol_info(node, node.arg)\n return arg\n\n def _arguments(self, node):\n \"\"\"class arguments(args, vararg, kwonlyargs, kwarg, defaults, kw_defaults)\n\n The arguments for a function.\n `args` and `kwonlyargs` are lists of arg nodes.\n `vararg` and `kwarg` are single arg nodes, referring to the *args,\n **kwargs parameters.\n `defaults` is a list of default values for arguments that can be passed\n positionally. If there are fewer defaults, they correspond to the last\n n arguments.\n `kw_defaults` is a list of default values for keyword-only arguments. 
If\n one is None, the corresponding argument is required.\n \"\"\"\n if node.vararg or node.kwarg:\n raise Exception(\"Phylanx does not support *args and **kwargs\")\n\n defaults = tuple(map(self._apply_rule, node.defaults))\n result = tuple()\n padded_defaults = (None, ) * (len(node.args) - len(defaults)) + defaults\n for arg, default in zip(node.args, padded_defaults):\n a = self._apply_rule(arg)\n symbol_name = re.sub(r'\\$\\d+', '', a)\n self.defined.add(symbol_name)\n if default is None:\n result = (*result, a)\n else:\n op = get_symbol_info(arg, '__arg')\n default_name = re.sub(r'\\$\\d+', '', default)\n if default_name in self.fglobals:\n default = '%s' % self.fglobals[default_name]\n result = (*result, [op, (a, default)])\n return result\n\n def _Assign(self, node):\n \"\"\"class Assign(targets, value)\n\n `targets` is a list of nodes which are assigned a value.\n `value` is a single node which gets assigned to `targets`.\n\n TODO:\n Add support for multi-target (a,b, ... = iterable) and chain (a = b = ... )\n assignments.\n \"\"\"\n\n if len(node.targets) > 1:\n raise Exception(\"Phylanx does not support chain assignments.\")\n if isinstance(node.targets[0], ast.Tuple):\n raise Exception(\n \"Phylanx does not support multi-target assignments.\")\n\n symbol = self._apply_rule(node.targets[0])\n # if lhs is not indexed.\n if isinstance(symbol, str):\n symbol_name = re.sub(r'\\$\\d+', '', symbol)\n if symbol_name in self.defined:\n op = get_symbol_info(node.targets[0], \"store\")\n else:\n op = get_symbol_info(node.targets[0], \"define\")\n # TODO:\n # For now `self.defined` is a set containing names of symbols\n # with no extra information. We may want to make it a\n # dictionary with the symbol names as keys and list of\n # symbol_infos to keep track of the symbol.\n self.defined.add(symbol_name)\n # lhs is a subscript.\n else:\n op = get_symbol_info(node.targets[0], \"store\")\n\n target = self._apply_rule(node.targets[0])\n value = self._apply_rule(node.value)\n return [op, (target, value)]\n\n def _Attribute(self, node):\n \"\"\"class Attribute(value, attr, ctx)\n\n `value` is an AST node.\n `attr` is a bare string giving the name of the attribute.\n \"\"\"\n\n method_name = get_symbol_info(node, primitive_name(node.attr))\n\n namespace = [node.attr]\n current_node = node.value\n while isinstance(current_node, ast.Attribute):\n namespace.insert(0, current_node.attr)\n current_node = current_node.value\n namespace.insert(0, current_node.id)\n\n if isinstance(current_node, ast.Name):\n if namespace[0] in self.numpy_aliases:\n return method_name\n elif namespace[0] in self.linalg_aliases:\n return method_name\n else:\n attr = '.'.join(namespace)\n raise NotImplementedError(\n 'Phylanx does not support non-NumPy member functions. '\n 'Cannot transform: %s' % attr)\n\n def _AugAssign(self, node):\n \"\"\"class AugAssign(target, op, value)\"\"\"\n\n symbol = get_symbol_info(node, 'store')\n op = get_symbol_info(node, self._apply_rule(node.op))\n target = self._apply_rule(node.target)\n value = self._apply_rule(node.value)\n\n return [symbol, (target, (op, (target, value)))]\n\n def _BinOp(self, node):\n \"\"\"class BinOp(left, op, right)\"\"\"\n\n op = get_symbol_info(node, self._apply_rule(node.op))\n left = self._apply_rule(node.left)\n right = self._apply_rule(node.right)\n return [op, (left, right)]\n\n def _BoolOp(self, node):\n \"\"\"class BoolOp(op, values)\"\"\"\n\n op = get_symbol_info(node, self._apply_rule(node.op))\n values = list(map(self._apply_rule, 
node.values))\n\n return [op, (values, )]\n\n def _Call(self, node):\n \"\"\"class Call(func, args, keywords, starargs, kwargs)\n\n TODO(?):\n Add support for keywords, starargs, and kwargs\n \"\"\"\n\n def __apply(self, k):\n kw = self._apply_rule(k.value)\n return (k.arg, kw)\n\n oplist = get_symbol_info(node.func, 'list')\n oparg = get_symbol_info(node.func, '__arg')\n\n symbol = self._apply_rule(node.func)\n args = tuple(self._apply_rule(arg) for arg in node.args)\n kwargs = tuple([oparg, __apply(self, k)] for k in node.keywords)\n\n dtype = ''\n\n # TODO: these are workarounds for the cases that Phylanx does not\n # follow NumPy functions' signatures.\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n if 'hstack' in symbol:\n return create_array(args, kwargs)\n\n if 'vstack' in symbol:\n if args and isinstance(args[0], tuple):\n args = ([oplist, (tuple(args), )],)\n return [symbol, args + kwargs]\n\n if 'dstack' in symbol:\n if args and isinstance(args[0], tuple):\n args = ([oplist, (tuple(args), )],)\n return [symbol, args + kwargs]\n\n if 'zeros_like' in symbol:\n symbol = symbol.replace('zeros_like', 'constant_like')\n return [symbol, ('0', args + kwargs)]\n\n if 'ones_like' in symbol:\n symbol = symbol.replace('ones_like', 'constant_like')\n return [symbol, ('1', args + kwargs)]\n\n if 'empty_like' in symbol:\n symbol = symbol.replace('empty_like', 'constant_like')\n return [symbol, (None, args + kwargs)]\n\n if 'zeros' in symbol:\n symbol = symbol.replace('zeros', 'constant')\n if isinstance(args[0], tuple):\n return [symbol, ('0', [oplist, args[0]]) + kwargs]\n else:\n return [symbol, ('0', args + kwargs)]\n\n if 'ones' in symbol:\n symbol = symbol.replace('ones', 'constant')\n if isinstance(args[0], tuple):\n return [symbol, ('1', [oplist, args[0]]) + kwargs]\n else:\n return [symbol, ('1', args + kwargs)]\n\n if 'empty' in symbol:\n symbol = symbol.replace('empty', 'constant')\n if isinstance(args[0], tuple):\n return [symbol, (None, [oplist, args[0]]) + kwargs]\n else:\n return [symbol, (None, args + kwargs)]\n\n method = [m for m in methods_supporting_dtype if symbol.find(m, 0) == 0]\n if len(method) == 1:\n symbol = symbol.replace(method[0], method[0] + dtype)\n\n return [symbol, args + kwargs]\n\n # def _ClassDef(self, node):\n # \"\"\"class ClassDef(name, bases, keywords, starargs, kwargs, body,\n # decorator_list)\n\n # `name` is a raw string for the class name.\n # `bases` is a list of nodes for explicitly specified base classes.\n # `keywords` is a list of keyword nodes, principally for `metaclass`.\n # Other keywords will be passed to the metaclass, as per PEP-3115.\n # `starargs` removed in python 3.5.\n # `kwargs` removed in Python 3.5.\n # `body` is a list of nodes representing the code within the class\n # definition.\n # `decorator_list` is the list of decorators to be applied, stored\n # outermost first (i.e. 
the first in the list will be applied last).\n # \"\"\"\n # PhySL.defined_classes[node.name] = {}\n # if node.bases:\n # raise NotImplementedError(\"Phylanx does not support inheritance.\")\n # class_body = list(self._apply_rule(m) for m in node.body)\n\n # return class_body\n\n def _Compare(self, node):\n \"\"\"class Compare(left, ops, comparators)\n\n A comparison of two or more values.\n `left` is the first value in the comparison\n `ops` is the list of operators\n `comparators` is the list of values after the first (`left`).\n \"\"\"\n\n if (len(node.ops) == 1):\n left = self._apply_rule(node.left)\n op = get_symbol_info(node, self._apply_rule(node.ops[0]))\n right = self._apply_rule(node.comparators[0])\n\n return [op, (left, right)]\n else:\n # if we're dealing with more than one comparison, we canonicalize the\n # comparisons into the form of chained logical ands. e.g., a < b < c\n # becomes: ([__and ((__lt b, c), (__lt a, b))])\n # TODO: Make sure to respect Python operator precedence.\n comparison = []\n for i in range(len(node.ops)):\n op = self._apply_rule(node.ops[-i])\n left = self._apply_rule(node.comparators[-i - 1])\n right = self._apply_rule(node.comparators[-i])\n if comparison:\n comparison = ['__and', (*comparison, (op, (left, right)))]\n else:\n comparison = [*comparison, (op, (left, right))]\n\n op = self._apply_rule(node.ops[0])\n left = self._apply_rule(node.left)\n right = self._apply_rule(node.comparators[0])\n if comparison:\n comparison = ['__and', (*comparison, (op, (left, right)))]\n else:\n comparison = [op, (left, right)]\n\n return comparison\n\n def _comprehension(self, node):\n \"\"\"class comprehension(target, iter, ifs, is_async)\n\n One `for` clause in a comprehension.\n `target` is the reference to use in each element: a `Name` or `Tuple`.\n `iter` is the object to iterate over.\n `ifs` is a list of test expressions (a for clause may have multiple ifs).\n `is_async` indicates that the comprehension is asynchronous.\n \"\"\"\n\n target = self._apply_rule(node.target)\n iteration_space = self._apply_rule(node.iter)\n\n comprehension = {\n 'target': target,\n 'iter': iteration_space\n }\n\n return comprehension\n\n def _Constant(self, node):\n \"\"\"A constant value.\"\"\"\n import sys\n if sys.version_info.minor <= 7:\n return self._apply_rule(node.value)\n\n # starting with V3.8 string and number literals are represented as _Constant\n # nodes\n if isinstance(node.value, str):\n return self._Str(node)\n\n # special integral values need special handling\n name_constants = {None: 'nil', False: 'false', True: 'true'}\n if isinstance(node.value, bool):\n return name_constants[node.value]\n\n if node.value is None:\n return name_constants[node.value]\n\n # everything that's not a string can be directly passed on\n return '%s' % node.value\n\n def _Div(self, node):\n \"\"\"Leaf node, returning raw string of the 'division' operation.\"\"\"\n\n return '__div'\n\n def _Eq(self, node):\n \"\"\"Leaf node, returning raw string of the 'equality' operation.\"\"\"\n\n return '__eq'\n\n def _Expr(self, node):\n \"\"\"class Expr(value)\n\n `value` holds one of the other nodes (rules).\n \"\"\"\n\n return self._apply_rule(node.value)\n\n def _ExtSlice(self, node):\n \"\"\"class ExtSlice(dims)\n\n Advanced slicing.\n `dims` holds a list of `Slice` and `Index` nodes.\n \"\"\"\n slicing = list(map(self._apply_rule, node.dims))\n return slicing\n\n def _block(self, node):\n \"\"\"Returns a map representation of a PhySL block.\"\"\"\n\n if isinstance(node, list):\n block = 
tuple(map(self._apply_rule, node))\n if len(node) == 1:\n return block\n else:\n return ['block', block]\n else:\n block = (self._apply_rule(node), )\n return block\n\n def _For(self, node):\n \"\"\"class For(target, iter, body, orelse)\n\n A for loop.\n `target` holds the variable(s) the loop assigns to, as a single Name,\n Tuple or List node.\n `iter` holds the item to be looped over, again as a single node.\n `body` contains the list of nodes to execute.\n `orelse` is the same as `body`, however, those in orelse are executed if the\n loop finishes normally, rather than via a break statement.\n \"\"\"\n\n # this lookup table helps us to choose the right mapping function based on the\n # type of the iteration space (list, range, or prange).\n mapping_function = {\n 'list': 'for_each',\n 'slice': 'for_each',\n 'range': 'for_each',\n 'prange': 'parallel_map'\n }\n\n target = self._apply_rule(node.target)\n # TODO: **MAP**\n # target_name = target.split('$', 1)[0]\n # self.defined.add(target_name)\n iteration_space = self._apply_rule(node.iter)\n if isinstance(iteration_space, list) and iteration_space[0].startswith('zip'):\n iter_space, indices = physl_zip(node)\n symbol = get_symbol_info(node, 'for_each')\n body = self._block(node.body)\n body = ['block', (*indices, *body)]\n op = get_symbol_info(node, 'lambda')\n return [symbol, ([op, ('__physl_iterator', body)], iter_space)]\n\n # extract the type of the iteration space, used as the lookup key in\n # `mapping_function` dictionary above.\n if isinstance(iteration_space, list):\n symbol_name = mapping_function[iteration_space[0].split('$', 1)[0]]\n symbol = get_symbol_info(node, symbol_name)\n # replace keyword `prange` with `range` for compatibility with Phylanx.\n iteration_space[0] = iteration_space[0].replace('prange', 'range')\n else:\n symbol = get_symbol_info(node, 'for_each')\n\n body = self._block(node.body)\n # orelse = self._block(node.orelse)\n op = get_symbol_info(node, 'lambda')\n return [symbol, ([op, (target, body)], iteration_space)]\n # return [symbol, (target, iteration_space, body, orelse)]\n\n def _FunctionDef(self, node):\n \"\"\"class FunctionDef(name, args, body, decorator_list, returns)\n\n `name` is a raw string of the function name.\n `args` is an arguments node.\n `body` is the list of nodes inside the function.\n `decorator_list` is the list of decorators to be applied, stored\n outermost first (i.e. 
the first in the list will be applied last).\n `returns` is the return annotation (Python 3 only).\n\n Notes:\n We ignore decorator_list and returns.\n \"\"\"\n\n op = get_symbol_info(node, 'define')\n symbol = get_symbol_info(node, node.name)\n args = self._apply_rule(node.args)\n body = self._block(node.body)\n lambda_op = get_symbol_info(node, 'lambda')\n\n if (args):\n return [op, (symbol, args, body)]\n else:\n return [op, (symbol, (lambda_op, (body,)))]\n\n def _Gt(self, node):\n \"\"\"Leaf node, returning raw string of the 'greater than' operation.\"\"\"\n\n return '__gt'\n\n def _GtE(self, node):\n \"\"\"Leaf node, returning raw string of the 'greater than or equal' operation.\"\"\"\n\n return '__ge'\n\n def _If(self, node):\n \"\"\"class If(test, body, orelse)\n\n `test` holds a single node, such as a Compare node.\n `body` and `orelse` each hold a list of nodes.\n \"\"\"\n\n symbol = get_symbol_info(node, 'if')\n test = self._apply_rule(node.test)\n body = self._block(node.body)\n orelse = self._block(node.orelse)\n\n return [symbol, (test, body, orelse)]\n\n def _IfExp(self, node):\n \"\"\"class IfExp(test, body, orelse)\n\n `test` holds a single node, such as a Compare node.\n `body` and `orelse` each hold a list of nodes.\n \"\"\"\n\n symbol = get_symbol_info(node, 'if')\n test = self._apply_rule(node.test)\n body = self._block(node.body)\n orelse = self._block(node.orelse)\n\n return [symbol, (test, body, orelse)]\n\n def _In(self, node):\n raise Exception(\"`In` operator is not defined in Phylanx.\")\n\n def _Index(self, node):\n \"\"\"class Index(value)\n\n Simple subscripting with a single value.\n \"\"\"\n\n # tuple index shouldn't be transformed to list here\n if isinstance(node.value, ast.Tuple):\n elements = tuple(map(self._apply_rule, node.value.elts))\n return (*elements, )\n return self._apply_rule(node.value)\n\n def _Is(self, node):\n raise Exception(\"`Is` operator is not defined in Phylanx.\")\n\n def _IsNot(self, node):\n raise Exception(\"`IsNot` operator is not defined in Phylanx.\")\n\n def _Lambda(self, node):\n \"\"\"class Lambda(args, body)\n\n `body` is a single node.\n \"\"\"\n symbol = get_symbol_info(node, 'lambda')\n args = self._apply_rule(node.args)\n body = self._block(node.body)\n if args:\n return [symbol, (args, body)]\n else:\n return [symbol, (body)]\n\n def _List(self, node):\n \"\"\"class List(elts, ctx)\"\"\"\n\n op = get_symbol_info(node, 'list')\n elements = tuple(map(self._apply_rule, node.elts))\n return [op, (*elements, )]\n\n def _ListComp(self, node):\n \"\"\"class ListComp(elt, generators)\n\n `elt` (or key and value) is a single node representing the part that\n will be evaluated for each item.\n `generators` is a list of comprehension nodes.\n \"\"\"\n\n if len(node.generators) > 1:\n raise NotImplementedError(\"Nested comprehensions are not yet supported!\")\n\n elt = self._apply_rule(node.elt)\n loop = self._apply_rule(node.generators[0])\n\n target = loop['target']\n iter_space = loop['iter']\n if isinstance(iter_space, list) and iter_space[0].startswith('zip'):\n iter_space, iterators = physl_zip([target, iter_space])\n symbol = get_symbol_info(node, 'fmap')\n body = ['block', (*iterators, elt)]\n op = get_symbol_info(node, 'lambda')\n return [symbol, ([op, ('__physl_iterator', body)], iter_space)]\n\n lambda_ = ['lambda', (target, elt)]\n fmap = ['fmap', (lambda_, iter_space)]\n\n return fmap\n\n def _Lt(self, node):\n \"\"\"Leaf node, returning raw string of the 'less than' operation.\"\"\"\n\n return '__lt'\n\n def 
_LtE(self, node):\n \"\"\"Leaf node, returning raw string of the 'less than or equal' operation.\"\"\"\n\n return '__le'\n\n def _Mod(self, node):\n \"\"\"Leaf node, returning raw string of the `mod` operation.\"\"\"\n\n return '__mod'\n\n def _Module(self, node):\n \"\"\"Root node of the Python AST.\"\"\"\n module = list(self._apply_rule(m) for m in node.body)\n\n return module\n\n def _Mult(self, node):\n \"\"\"Leaf node, returning raw string of the 'multiplication' operation.\"\"\"\n\n return '__mul'\n\n def _Name(self, node):\n \"\"\"class Name(id, ctx)\n\n A variable name.\n `id` holds the name as a string.\n `ctx` is one of `Load`, `Store`, `Del`.\n \"\"\"\n\n symbol = get_symbol_info(node, primitive_name(node.id))\n return symbol\n\n def _NameConstant(self, node):\n name_constants = {None: 'nil', False: 'false', True: 'true'}\n return name_constants[node.value]\n\n def _Not(self, node):\n \"\"\"Leaf node, returning raw string of the 'not' operation.\"\"\"\n\n return '__not'\n\n def _NotEq(self, node):\n \"\"\"Leaf node, returning raw string of the 'not equal' operation.\"\"\"\n\n return '__ne'\n\n def _NotIn(self, node):\n raise Exception(\"`NotIn` operator is not defined in Phylanx.\")\n\n def _Num(self, node):\n \"\"\"class Num(n)\"\"\"\n\n return str(node.n)\n\n def _Or(self, node):\n \"\"\"Leaf node, returning raw string of the 'or' operation.\"\"\"\n\n return '__or'\n\n def _Pass(self, node):\n \"\"\"Empty function.\"\"\"\n\n return 'nil'\n\n def _Pow(self, node):\n \"\"\"Leaf node, returning raw string of the 'power' operation.\"\"\"\n\n return 'power'\n\n def _Return(self, node):\n \"\"\"class Return(value)\n\n TODO:\n implement return-from primitive (see section Function Return Values on\n https://goo.gl/wT6X4P). At this time Phylanx only supports returns from the\n end of the function!\n \"\"\"\n\n symbol = get_symbol_info(node, \"return\")\n\n if type(node.value) == ast.Tuple:\n return [symbol, (self._apply_rule(node.value),)]\n\n value = self._apply_rule(node.value)\n if value is None:\n value = get_symbol_info(node, \"nil\")\n\n return [symbol, (value,)]\n\n def _Slice(self, node):\n \"\"\"class Slice(lower, upper, step)\"\"\"\n\n symbol = 'list'\n\n lower = self._apply_rule(node.lower)\n if lower is None:\n lower = 'nil'\n\n upper = self._apply_rule(node.upper)\n if upper is None:\n upper = 'nil'\n\n step = self._apply_rule(node.step)\n if step is None:\n slice_ = self._generate_physl([symbol, (lower, upper)])\n else:\n slice_ = self._generate_physl([symbol, (lower, upper, step)])\n\n return slice_\n\n def _Str(self, node):\n \"\"\"class Str(s)\"\"\"\n\n return '\"' + re.sub(r'([\"|\\\\])', r'\\\\\\1', node.s) + '\"'\n\n def _Sub(self, node):\n \"\"\"Leaf node, returning raw string of the 'subtraction' operation.\"\"\"\n\n return '__sub'\n\n def _Subscript(self, node):\n \"\"\"class Subscript(value, slice, ctx)\"\"\"\n\n def _NestedSubscript(node):\n \"\"\"Handles the subscripts of dimensions higher than 1\"\"\"\n\n if isinstance(node.value, ast.Subscript):\n raise NotImplementedError(\n 'Phylanx only supports 1 and 2 dimensional arrays.')\n # value = _NestedSubscript(node.value)\n else:\n op = '%s' % get_symbol_info(node, 'slice')\n value = self._apply_rule(node.value)\n op = '%s' % get_symbol_info(node, 'slice')\n # if isinstance(node.ctx, ast.Load):\n # slice_ = self._apply_rule(node.slice)\n # return [value, [slice_]]\n\n # if isinstance(node.ctx, ast.Store):\n slice_ = self._apply_rule(node.slice)\n return [op, (value, [slice_])]\n\n op = get_symbol_info(node, 
'slice')\n slice_ = self._apply_rule(node.slice)\n if isinstance(node.value, ast.Subscript):\n value = _NestedSubscript(node.value)\n return [op, (value, slice_)]\n else:\n value = self._apply_rule(node.value)\n # TODO: **SLICING**\n # if isinstance(node.slice, ast.Index) and isinstance(slice_, str) \\\n # or isinstance(node.slice, ast.Slice):\n # # return [op, (value, slice_, 'nil')]\n # else:\n # return [op, (value, slice_)]\n return [op, (value, slice_)]\n\n def _With(self, node):\n if 0 < len(node.items) and type(node.items[0]) == ast.withitem:\n withitem = node.items[0]\n if type(withitem.context_expr) == ast.Attribute:\n attribute = withitem.context_expr\n if attribute.attr == \"parallel\":\n if self.fglobals[attribute.value.id].parallel.is_parallel_block():\n return [\"parallel_block\", tuple(map(self._apply_rule, node.body))]\n elif type(withitem.context_expr) == ast.Name:\n if withitem.context_expr.id == \"parallel\":\n if self.fglobals[\"parallel\"].is_parallel_block():\n return [\"parallel_block\", tuple(map(self._apply_rule, node.body))]\n raise Exception(\"Unsupported use of 'With'\")\n\n def _Dict(self, node):\n res = []\n for i in range(len(node.keys)):\n key = self._apply_rule(node.keys[i])\n val = self._apply_rule(node.values[i])\n res += [[\"list\", (key, val)]]\n return [\"dict\", ([\"list\", tuple(res)],)]\n\n def _Tuple(self, node):\n \"\"\"class Tuple(elts, ctx)\"\"\"\n\n op = get_symbol_info(node, 'list')\n expr = tuple(map(self._apply_rule, node.elts))\n return [op, expr]\n\n def _Assert(self, node):\n \"\"\"class Assert(test, msg)\n\n `test`: holds a single expression to evaluate\n `msg`: an optional expression to evaluate (and print) if test fails.\n \"\"\"\n symbol = '%s' % get_symbol_info(node, 'assert')\n test = self._apply_rule(node.test)\n if node.msg:\n msg = self._apply_rule(node.msg)\n return [symbol, (test, msg)]\n\n return [symbol, (test,)]\n\n def _Global(self, node):\n \"\"\"class Global(names)\n\n `names`: holds a list of identifiers to make global\n \"\"\"\n\n result = ()\n for name in node.names:\n if name not in self.defined:\n # TODO:\n # For now `self.defined` is a set containing names of symbols\n # with no extra information. We may want to make it a\n # dictionary with the symbol names as keys and list of\n # symbol_infos to keep track of the symbol.\n op = get_symbol_info(node, \"define_global\")\n self.defined.add(name)\n\n result = result + ([op, (name, 'nil')],)\n return result\n\n def _UAdd(self, node):\n \"\"\"\n\n TODO:\n Make sure we do not have the equivalent of this in PhySL. Otherwise, add support.\n *** For now we never get here (see :func:_UnaryOp)\n \"\"\"\n\n raise Exception(\"`UAdd` operation is not defined in PhySL.\")\n\n def _UnaryOp(self, node):\n \"\"\"class UnaryOp(op, operand)\"\"\"\n\n operand = self._apply_rule(node.operand)\n if isinstance(node.op, ast.UAdd):\n return [(operand, )]\n\n op = get_symbol_info(node, self._apply_rule(node.op))\n return [op, (operand, )]\n\n def _USub(self, node):\n \"\"\"Leaf node, returning raw string of the 'negative' operation.\"\"\"\n\n return '__minus'\n\n def _While(self, node):\n \"\"\"class While(test, body, orelse)\n\n TODO:\n Figure out what `orelse` attribute may contain. 
From my experience this is always\n an empty list!\n \"\"\"\n\n symbol = get_symbol_info(node, 'while')\n test = self._block(node.test)\n body = self._block(node.body)\n return [symbol, (test, body)]\n\n\n# #############################################################################\n","repo_name":"STEllAR-GROUP/phylanx","sub_path":"python/phylanx/ast/physl.py","file_name":"physl.py","file_ext":"py","file_size_in_byte":47785,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"90"}
+{"seq_id":"3140004398","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nPackage: iads\nFichier: utils.py\nAnnée: semestre 2 - 2018-2019, Sorbonne Université\n\"\"\"\n\n# ---------------------------\n# Fonctions utiles pour les TDTME de 3i026\n\n# import externe\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# importation de LabeledSet\nfrom . import LabeledSet as ls\nfrom . import Classifiers as cl\n\ndef plot2DSet(set):\n \"\"\" LabeledSet -> NoneType\n Hypothèse: set est de dimension 2\n affiche une représentation graphique du LabeledSet\n remarque: l'ordre des labels dans set peut être quelconque\n \"\"\"\n S_pos = set.x[np.where(set.y == 1),:][0] # tous les exemples de label +1\n S_neg = set.x[np.where(set.y == -1),:][0] # tous les exemples de label -1\n plt.scatter(S_pos[:,0],S_pos[:,1],marker='o') # 'o' pour la classe +1\n plt.scatter(S_neg[:,0],S_neg[:,1],marker='x') # 'x' pour la classe -1\n\ndef plot_frontiere(set,classifier,step=50):\n \"\"\" LabeledSet * Classifier * int -> NoneType\n Remarque: le 3e argument est optionnel et donne la \"résolution\" du tracé\n affiche la frontière de décision associée au classifieur\n \"\"\"\n mmax=set.x.max(0)\n mmin=set.x.min(0)\n x1grid,x2grid=np.meshgrid(np.linspace(mmin[0],mmax[0],step),np.linspace(mmin[1],mmax[1],step))\n grid=np.hstack((x1grid.reshape(x1grid.size,1),x2grid.reshape(x2grid.size,1)))\n\n # calcul de la prediction pour chaque point de la grille\n res=np.array([classifier.predict(grid[i,:]) for i in range(len(grid)) ])\n res=res.reshape(x1grid.shape)\n # tracer des frontieres\n plt.contourf(x1grid,x2grid,res,colors=[\"red\",\"cyan\"],levels=[-1000,0,1000])\n\n# ------------------------\n\ndef createGaussianDataset(positive_center, positive_sigma, negative_center, negative_sigma, nb_points):\n \"\"\"\n rend un LabeledSet 2D généré aléatoirement.\n Arguments:\n - positive_center (vecteur taille 2): centre de la gaussienne des points positifs\n - positive_sigma (matrice 2*2): variance de la gaussienne des points positifs\n - negative_center (vecteur taille 2): centre de la gaussienne des points négative\n - negative_sigma (matrice 2*2): variance de la gaussienne des points négative\n - nb_points (int): nombre de points de chaque classe à générer\n\"\"\"\n#TODO: A Compléter\n x, y = np.random.multivariate_normal(positive_center, positive_sigma, nb_points).T\n a, b = np.random.multivariate_normal(negative_center, negative_sigma, nb_points).T\n label = ls.LabeledSet(2)\n for i in range(len(a)):\n label.addExample(np.array([x[i], y[i]]), 1)\n label.addExample(np.array([a[i], b[i]]),-1)\n return label\n\n raise NotImplementedError(\"Please Implement this method\")\n\n# Exemple d'utilisation de utils\n\nthe_set = createGaussianDataset(np.array([1,1]),np.array([[1,0],[0,1]]),np.array([-1,-1]),np.array([[1,0],[0,1]]),100)\n#_Entrainement\n\ndef entrainement(n, label, perceptron, pourcentage=60, show=False) :\n\n mean = 0\n meanList = []\n for i in range(n) :\n train, test = split(label, pourcentage)\n perceptron.train(train)\n acc = perceptron.accuracy(test)\n mean += acc\n if(show):\n print(str(i) + \" entrainement\")\n print(\"Accuracy \"+str(acc)+\"%\\n\")\n\n meanList.append(acc)\n mean = mean /n\n vari = np.var(meanList)\n print(\"Mean accuracy\",str(mean))\n print(\"Variance accuracy\", str(vari))\n\n return (mean, vari)\n\n# Super_entrainement\n\ndef super_entrainement(n, label, perceptron, pourcentage=60, show=False) :\n x = []\n y = []\n mean = 0\n meanList = []\n for i in range(n) :\n train, test = 
split(label, pourcentage)\n perceptron.train(train)\n acc = perceptron.accuracy(test)\n mean += acc\n if(show):\n print(str(i) + \" entrainement\")\n print(\"Accuracy \"+str(acc)+\"%\\n\")\n\n meanList.append(acc)\n mean = mean /n\n vari = np.var(meanList)\n print(\"Mean accuracy\",str(mean))\n print(\"Variance accuracy\", str(vari))\n\n return (mean, vari)\n\n# Super training\n\ndef super_entrainement(n, label, perceptron, pourcentage=60, show=False) :\n x = []\n y = []\n mean = 0\n meanList = []\n for i in range(n) :\n train, test = split(label, pourcentage)\n perceptron.train(train)\n acc = perceptron.accuracy(test)\n mean += acc\n if(show):\n print(str(i) + \" entrainement\")\n print(\"Accuracy \"+str(acc)+\"%\\n\")\n y.append(acc)\n x.append(i)\n meanList.append(acc)\n mean = mean /n\n vari = np.var(meanList)\n print(\"Mean accuracy\",str(mean))\n print(\"Variance accuracy\", str(vari))\n plt.plot(x,y)\n plt.xlabel('N')\n plt.ylabel('Accuracy')\n plt.title('performances accuracy')\n plt.legend()\n plt.show()\n plot_frontiere(test,perceptron)\n plot2DSet(test)\n return (mean, vari)\n\n\n # Function to display the LabeledSet\ndef affiche_base(LS):\n \"\"\" LabeledSet\n displays the contents of LS\n \"\"\"\n for i in range(0,LS.size()):\n print(\"Exemple \"+str(i))\n print(\"\\tdescription : \",LS.getX(i))\n print(\"\\tlabel : \",LS.getY(i))\n return\n\ndef split(label,pourcentage=60) :\n\n size = label.size()\n\n label_train = ls.LabeledSet(label.getInputDimension())\n label_test = ls.LabeledSet(label.getInputDimension())\n\n indice = np.arange(label.size())\n temoin = np.random.permutation(indice)\n\n for i in range (temoin.size) :\n if (i < pourcentage*len(temoin)/100):\n label_train.addExample(label.getX(temoin[i]), label.getY(temoin[i]))\n else :\n label_test.addExample(label.getX(temoin[i]), label.getY(temoin[i]))\n\n return (label_train, label_test)\n\n\n\ndef bestClassifier(en, method, caracteristics,learningRate, criterion):\n # Order: 0 = KNN, 1: PerceptronKernel, 2: Gradient Stochastique, 3: Gradient Stochastique Kernel\n nb = 0\n while(nb < 4 ):\n if nb == 0 :\n classifier = \"KNN\"\n elif nb == 1:\n classifier = \"Classifier PerceptronKernel\"\n elif nb == 2 :\n classifier = \"Classifier Gradient Stochastique\"\n elif nb == 3 :\n classifier = \"Classifier Gradient Stochastique Kernel\"\n print(\"init\",classifier,\"\\n\\n\")\n df = en.toDataFrame(method,criterion)\n lis = np.arange(len(df))\n for c1 in range(len(caracteristics)):\n for c2 in range(c1+1, len(caracteristics)):\n une_base = ls.LabeledSet(2)\n ca1 = caracteristics[c1]\n ca2 = caracteristics[c2]\n\n indice = np.random.permutation(lis)\n indice = indice[:1000]\n for i in indice:\n une_base.addExample([df.iloc[i][ca1], df.iloc[i][ca2]]\n , df.iloc[i]['target'])\n if nb == 0 :\n cla = cl.ClassifierKNN(une_base.getInputDimension(),3)\n elif nb == 1 :\n k= cl.KernelPoly()\n cla= cl.ClassifierPerceptronKernel(6,learningRate,k)\n elif nb == 2 :\n cla = cl.ClassifierGradientStochastique(une_base.getInputDimension(), learningRate)\n elif nb == 3 :\n k = cl.KernelPoly()\n cla = cl.ClassifierGradientStochastiqueKernel(6, learningRate, k)\n if ( (c1 == 0) and (c2 == 1) and(nb == 0) ):\n maxi_mean, mini_vari = entrainement(25, une_base,cla, 40)\n minica1 = ca1\n minica2 = ca2\n criterion = caracteristics[c1]\n clamini = cla\n\n mean, vari = entrainement(25, une_base,cla, 40)\n if ( (vari < mini_vari) and (mean> maxi_mean) ):\n mini_vari = vari\n maxi_mean = mean\n minica1 = ca1\n minica2 = ca2\n clamini = cla\n classifiermini = classifier\n if( (c1==len(caracteristics) -1) and (c2 == len(caracteristics)-1) ):\n print(\"\\n\",classifier,\"done\")\n print(\"\\n\\n\",classifier,\"done\\n\\n\")\n nb+=1\n print(\"\\n\\nClassifier chosen\",classifiermini,\"Chosen criterion\",criterion, \"\\nParams :\", minica1, \"and\", minica2, \"\\nMean :\",maxi_mean,\n \"\\nVariance\", mini_vari)\n df = en.toDataFrame(method,criterion)\n une_base = ls.LabeledSet(2)\n for i in range(1000):\n une_base.addExample([df.iloc[i][minica1], 
df.iloc[i][minica2]]\n , df.iloc[i]['target'])\n mean, vari = super_entrainement(25, une_base,clamini, 40)\n\n\n\ndef bestRegressor(eng, method, caracteristics, learningRate, criterion):\n nb = 0\n while(nb < 2):\n if nb == 0:\n classifier = \"Classifier Gradient Batch\"\n else :\n classifier = \"Classifier Gradient Batch Kernel\"\n print(\"init\", classifier, \"\\n\\n\")\n\n df = eng.toDataFrame(method, criterion)\n lis = np.arange(len(df))\n for c1 in range(len(caracteristics)):\n for c2 in range(c1+1, len(caracteristics)):\n une_base = ls.LabeledSet(2)\n ca1 = caracteristics[c1]\n ca2 = caracteristics[c2]\n\n indice = np.random.permutation(lis)\n indice = indice[:1000]\n\n\n for i in indice :\n une_base.addExample([df.iloc[i][ca1], df.iloc[i][ca2]], df.iloc[i]['target'])\n\n\n if nb == 0 :\n cla = cl.ClassifierGradientBatch(une_base.getInputDimension(), learningRate)\n print(\"ok cgb\")\n else :\n k = cl.KernelPoly()\n cla = cl.ClassifierGradientBatchKernel(6, learningRate, k)\n\n if( (c1 == 0) and (c2== 1) and (nb == 0)):\n maxi_mean , mini_var = entrainement(100, une_base, cla, 50)\n minica1 = ca1\n minica2 = ca2\n clamini = cla\n classifiermini = classifier\n\n mean, var = entrainement(100, une_base, cla, 50)\n\n if( (mini_var > var) and (maxi_mean < mean) ):\n mini_var = var\n maxi_mean = mean\n minica1 = ca1\n minica2 = ca2\n clamini = cla\n classifiermini = classifier\n\n\n print('\\n\\n',classifier, \"done\\n\\n\")\n nb += 1\n print(\"\\n\\n Classifier chosen\", classifiermini, \"\\nParameters :\", minica1,\",\", minica2, \"\\nMean \", maxi_mean,\n \"\\nVariance :\", mini_var)\n une_base = ls.LabeledSet(2)\n indice = np.random.permutation(lis)\n indice = indice[:1000]\n for i in indice:\n une_base.addExample([df.iloc[i][minica1], df.iloc[i][minica2]], df.iloc[i]['target'])\n mean , var = super_entrainement(100, une_base, clamini, 40)\n","repo_name":"ohouens/3i026","sub_path":"iads/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"34980415505","text":"#!/usr/bin/env python\n# http://mrjob.readthedocs.io/en/latest/guides/writing-mrjobs.html#multi-step-jobs\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\nimport json\nfrom collections import Counter\nfrom collections import defaultdict\n\n\nclass MRMostUsedWord(MRJob):\n\n def mapper_get_words(self, _, line):\n \"\"\"Take in stdin and return each hashtag with the number one\"\"\"\n\n for tweet in line.split('/n'):\n try:\n entity = json.loads(tweet)\n try:\n entity = entity['entities']\n if entity is not None and (entity['hashtags'] is not None\n or len(entity['hashtags']) != 0):\n for hashtags in entity['hashtags']:\n yield(hashtags['text'].lower().encode('utf-8'), 1)\n else:\n pass\n except KeyError:\n continue\n except ValueError:\n continue\n\n def combiner_count_words(self, word, counts):\n \"\"\"sum the hashtags we've seen so far\"\"\"\n yield (word, sum(counts))\n\n def reducer_count_words(self, word, counts):\n \"\"\"send all (num_occurrences, word) pairs to the same reducer\n with a null key\"\"\"\n yield None, (sum(counts), word)\n\n def reducer_find_top_hashtags(self, _, word_count_pairs):\n \"\"\" return the top ten hashtags from the word_count_pairs generator\"\"\"\n top_hashtags = defaultdict(int)\n for hashtag_number in list(word_count_pairs):\n top_hashtags[hashtag_number[1].encode('utf-8')] += hashtag_number[0]\n print(top_hashtags)\n for hash_t, num in Counter(top_hashtags).most_common(20):\n print(\"{}\\t{}\".format(hash_t, num))\n\n def steps(self):\n return [\n MRStep(mapper=self.mapper_get_words,\n combiner=self.combiner_count_words,\n reducer=self.reducer_count_words),\n MRStep(reducer=self.reducer_find_top_hashtags)\n ]\n\n\nif __name__ == '__main__':\n MRMostUsedWord.run()\n","repo_name":"dannypaz/class","sub_path":"dsci-6007/5.1 - Functional Programming/lab-5.1-Jonathan-H-Michael-Seeber-MrJob.py","file_name":"lab-5.1-Jonathan-H-Michael-Seeber-MrJob.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"}
+{"seq_id":"14525009754","text":"\"\"\" \n Main window functionality\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport cProfile\nimport logging\nimport os\nimport pstats\nimport sys\n\nfrom .version import PROGRAM_NAME, PROGRAM_VERSION, PROGRAM_URL, DEBUGGING\nfrom .qt import Qt, QtCore, QtGui, QtWidgets, APPLICATION_INSTANCE\n\nfrom .statstablemodel import StatsTableModel\nfrom .statstableview import StatsTableView\n\n\nlogger = logging.getLogger(__name__)\n\n\n\ndef createBrowser(fileName = None, selfProfFile=None, **kwargs):\n \"\"\" Opens an MainWindow window\n \"\"\"\n # Assumes qt.getQApplicationInstance() has been executed.\n browser = MainWindow(**kwargs)\n browser.show()\n\n if sys.platform.startswith('darwin'):\n browser.raise_()\n\n QtWidgets.QApplication.instance().processEvents()\n\n #profFileName = None\n\n if selfProfFile:\n profiler = cProfile.Profile()\n profiler.enable()\n\n if fileName is not None:\n browser.openStatsFile(fileName)\n\n if selfProfFile:\n logger.info(\"Saving profiling information to {}\".format(selfProfFile))\n profStats = pstats.Stats(profiler)\n profStats.dump_stats(selfProfFile)\n\n QtWidgets.QApplication.instance().processEvents()\n sys.exit(3)\n\n return browser\n \n \ndef execute():\n \"\"\" Executes all browsers by starting the Qt main application\n \"\"\" \n logger.info(\"Starting the browser(s)...\")\n app = APPLICATION_INSTANCE\n exit_code = app.exec_()\n logger.info(\"Browser(s) done...\")\n return exit_code\n\n\ndef browse(fileName = None, **kwargs):\n \"\"\" Opens and executes a main window\n \"\"\"\n _object_browser = createBrowser(fileName = fileName, **kwargs)\n exit_code = execute()\n return exit_code\n\n \n# The main window inherits from a Qt class, therefore it has many \n# ancestors public methods and attributes.\n# pylint: disable=R0901, R0902, R0904, W0201 \n\n\nclass MainWindow(QtWidgets.QMainWindow):\n \"\"\" pepyeye main application window.\n \"\"\"\n _nInstances = 0\n \n def __init__(self, reset = False):\n \"\"\" Constructor\n :param reset: If true the persistent settings, such as column widths, are reset. 
\n \"\"\"\n super(MainWindow, self).__init__()\n\n MainWindow._nInstances += 1\n self._InstanceNr = self._nInstances \n \n # Model\n self._statsTableModel = StatsTableModel(parent=self)\n\n # Views\n self.__setupActions()\n self.__setupMenu()\n self.__setupViews()\n self.setWindowTitle(\"{}\".format(PROGRAM_NAME))\n app = QtWidgets.QApplication.instance()\n app.lastWindowClosed.connect(app.quit) \n\n self.filterLineEdit.textChanged.connect(self._statsTableModel.filterRows)\n self._statsTableModel.modelReset.connect(self.updateOccursLabel)\n\n self._readViewSettings(reset=reset)\n \n logger.debug(\"MainWindow constructor finished\")\n \n\n def __setupActions(self):\n \"\"\" Creates the main window actions.\n \"\"\"\n pass\n \n \n def __setupMenu(self):\n \"\"\" Sets up the main menu.\n \"\"\"\n fileMenu = self.menuBar().addMenu(\"&File\")\n openAction = fileMenu.addAction(\"&Open...\", self.openStatsFile)\n openAction.setShortcut(\"Ctrl+O\")\n self.reloadAction = fileMenu.addAction(\"&Reload\", self.reloadStatsFile)\n self.reloadAction.setShortcut(\"Ctrl+R\")\n self.reloadAction.setEnabled(False)\n fileMenu.addSeparator()\n fileMenu.addAction(\"C&lose\", self.closeWindow, \"Ctrl+W\")\n fileMenu.addAction(\"E&xit\", self.quitApplication, \"Ctrl+Q\")\n if DEBUGGING is True:\n fileMenu.addSeparator()\n fileMenu.addAction(\"&Test\", self.myTest, \"Ctrl+T\")\n \n self.menuBar().addSeparator()\n help_menu = self.menuBar().addMenu(\"&Help\")\n help_menu.addAction('&About', self.about)\n\n\n def __setupViews(self):\n \"\"\" Creates the UI widgets. \n \"\"\"\n #self.mainWidget = QtWidgets.QWidget(self)\n #self.setCentralWidget(self.mainWidget)\n \n self.mainSplitter = QtWidgets.QSplitter(self, orientation = QtCore.Qt.Vertical)\n self.setCentralWidget(self.mainSplitter)\n\n self.mainWidget = QtWidgets.QWidget()\n self.mainLayout = QtWidgets.QVBoxLayout()\n self.mainWidget.setLayout(self.mainLayout)\n self.mainSplitter.addWidget(self.mainWidget)\n\n # Filter\n self.filterLineEdit = QtWidgets.QLineEdit()\n self.filterLineEdit.setFixedWidth(400)\n self.filterLineEdit.setPlaceholderText(\"Filter on path or function name...\")\n self.filterLayout = QtWidgets.QHBoxLayout()\n self.filterLayout.addWidget(self.filterLineEdit)\n\n self.occursLabel = QtWidgets.QLabel(\"\")\n self.filterLayout.addWidget(self.occursLabel)\n self.filterLayout.addStretch()\n self.mainLayout.addLayout(self.filterLayout)\n\n # Table view\n self.tableView = StatsTableView(self._statsTableModel)\n self.mainLayout.addWidget(self.tableView)\n\n self.label = QtWidgets.QLabel(\"Hi there\")\n self.mainSplitter.addWidget(self.label)\n\n\n # End of setup_methods\n\n def reloadStatsFile(self):\n \"\"\" Reloads the currently open stats file\n \"\"\"\n if self._fileName is not None:\n self.loadStatsFile(self._fileName)\n self._statsTableModel._sortAndFilter()\n else:\n logger.warning(\"No current file to be reloaded.\")\n\n\n \n def loadStatsFile(self, fileName):\n \"\"\" Loads a pstats file and updates the table model\n \"\"\"\n assert fileName is not None, \"fileName undefined\"\n logger.debug(\"Loading file: {}\".format(fileName))\n\n self._fileName = fileName\n self.setWindowTitle(\"{} -- {}\".format(os.path.basename(fileName), PROGRAM_NAME))\n pStats = pstats.Stats(fileName)\n self._statsTableModel.setStats(statsObject=pStats)\n #pStats.strip_dirs()\n #pStats.calc_callees()\n\n self.reloadAction.setEnabled(True)\n \n\n def openStatsFile(self, fileName=None):\n \"\"\" Lets the user select a pstats file and opens it.\n \"\"\"\n if not 
fileName:\n fileName = QtWidgets.QFileDialog.getOpenFileName(self,\n caption = \"Choose a pstats file\", directory = '', \n filter='All files (*);;Profile statistics (*.prof; *.pro)')\n fileName = fileName[0]\n\n if fileName:\n logger.info(\"Loading data from: {!r}\".format(fileName))\n try:\n self.loadStatsFile(fileName)\n except Exception as ex:\n if DEBUGGING:\n raise\n else:\n logger.error(\"Error opening file: %s\", ex)\n QtWidgets.QMessageBox.warning(self, \"Error opening file\", str(ex))\n \n\n\n\n def _settingsGroupName(self, prefix):\n \"\"\" Creates a setting group name based on the prefix and instance number\n \"\"\"\n settingsGroup = \"window{:02d}-{}\".format(self._InstanceNr, prefix)\n logger.debug(\" settings group is: {!r}\".format(settingsGroup))\n return settingsGroup \n \n \n def _readViewSettings(self, reset=False):\n \"\"\" Reads the persistent program settings\n \n :param reset: If True, the program resets to its default settings\n \"\"\" \n pos = QtCore.QPoint(20 * self._InstanceNr, 20 * self._InstanceNr)\n windowSize = QtCore.QSize(1024, 700)\n \n if reset:\n logger.debug(\"Resetting persistent view settings\")\n else:\n logger.debug(\"Reading view settings for window: {:d}\".format(self._InstanceNr))\n settings = QtCore.QSettings()\n settings.beginGroup(self._settingsGroupName('view'))\n pos = settings.value(\"main_window/pos\", pos)\n windowSize = settings.value(\"main_window/size\", windowSize)\n splitter_state = settings.value(\"main_splitter/state\")\n if splitter_state:\n self.mainSplitter.restoreState(splitter_state)\n self.tableView.readViewSettings('table/header_state', settings, reset) \n settings.endGroup()\n \n logger.debug(\"windowSize: {!r}\".format(windowSize))\n self.resize(windowSize)\n self.move(pos)\n\n\n def _writeViewSettings(self):\n \"\"\" Writes the view settings to the persistent store\n \"\"\" \n logger.debug(\"Writing view settings for window: {:d}\".format(self._InstanceNr))\n \n settings = QtCore.QSettings()\n settings.beginGroup(self._settingsGroupName('view'))\n self.tableView.writeViewSettings(\"table/header_state\", settings)\n settings.setValue(\"main_splitter/state\", self.mainSplitter.saveState()) \n settings.setValue(\"main_window/pos\", self.pos())\n settings.setValue(\"main_window/size\", self.size())\n settings.endGroup()\n\n\n def updateOccursLabel(self):\n \"\"\" Updates the occurs label from the amount of rows in the table model.\n \"\"\"\n if self.filterLineEdit.text():\n self.occursLabel.setText(\"occurs in {} of {} rows\"\n .format(self._statsTableModel.rowCount(),\n self._statsTableModel.unfilteredRowCount()))\n else:\n self.occursLabel.setText('in {} rows'.format(\n self._statsTableModel.unfilteredRowCount()))\n\n\n def myTest(self):\n \"\"\" Function for testing \"\"\"\n logger.debug(\"myTest\")\n logger.debug(\"row height: {}\".format(self.tableView.rowHeight(0)))\n \n def about(self):\n \"\"\" Shows the about message window. \"\"\"\n message = u\"{} version {}\\n\\n{}\"\"\".format(PROGRAM_NAME, PROGRAM_VERSION, PROGRAM_URL)\n QtWidgets.QMessageBox.about(self, \"About {}\".format(PROGRAM_NAME), message)\n\n def closeWindow(self):\n \"\"\" Closes the window \"\"\"\n self.close()\n \n def quitApplication(self):\n \"\"\" Closes all windows \"\"\"\n app = QtWidgets.QApplication.instance()\n app.closeAllWindows()\n\n def closeEvent(self, event):\n \"\"\" Close all windows (e.g. 
the L0 window).\n \"\"\"\n logger.debug(\"closeEvent\")\n self._writeViewSettings()\n self.close()\n event.accept()\n \n\n","repo_name":"titusjan/pepeye","sub_path":"libpepeye/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":10441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
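browse() above blocks until the Qt event loop exits, and openStatsFile() expects a pstats dump on disk. A hedged sketch of producing such a dump with the standard library and viewing it; the libpepeye import path is inferred from the record's sub_path and is an assumption:

import cProfile

# Dump standard pstats data for an arbitrary workload to example.prof.
cProfile.run('sum(x * x for x in range(10**6))', 'example.prof')

from libpepeye.mainwindow import browse  # assumed package layout
browse(fileName='example.prof')          # opens the stats table on the dump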
+{"seq_id":"19517910754","text":"from APES import *\nimport numpy as np\nfrom time import time\nimport pylab as pl\nfrom IPython import display\nSettings.SetBlockSize(100)\n\n# Complex Example.\ninitalization_time = time()\n#Add pictures for items\nSettings.AddImage('Wall','APES/Pics/wall.jpg')\nSettings.AddImage('Food','APES/Pics/food.jpg')\n\n#Create Probability distribution matrices (PDMs)\nobs_pdm = np.zeros(Settings.WorldSize)\nagnts_pdm = np.zeros(Settings.WorldSize)\nfood_pdm = np.zeros(Settings.WorldSize)\n\n# Obstacles can appear from 3rd to 7th row and 5th column\nobs_pdm[3:8,5] = 1 \nagnts_pdm[2,[0,10]] = 1\nfood_pdm[:,4:7] = 1\n\n#Add PDMs to Settings\nSettings.AddProbabilityDistribution('Obs_pdm',obs_pdm) \nSettings.AddProbabilityDistribution('agnts_pdm',agnts_pdm)\nSettings.AddProbabilityDistribution('food_pdm',food_pdm)\n\n#Create World Elements\n#Create vertical obastacle with length 4\nobshape = np.array([[1],[1],[1],[1]]) \nobs = Obstacles('Wall',Shape=obshape,PdstName='Obs_pdm')\n\n#Create two agents\nragnt = Agent(Fname='APES/Pics/red.jpg',PdstName='agnts_pdm')\nbagnt = Agent(Fname='APES/Pics/blue.jpg',PdstName='agnts_pdm')\nfood = Foods('Food',PdstName='food_pdm')\n\n#Reward food by 10, time step by -0.1\ngame = World(RewardsScheme=[0,10,-0.1])\n\n#Adding Agents in Order of Following the action\ngame.AddAgents([ragnt,bagnt])\ngame.AddObstacles([obs])\ngame.AddFoods([food])\ninitalization_time = initalization_time-time()\n\n#Execute at the beginning of every episode\nworld_generating = time()\ngame.GenerateWorld()\nworld_generating= time()-world_generating\n\n#Execute every time step\ncounter=0\nsteps_time=0\nwhile not game.Terminated[0]:\n # Agents taking action\n bagnt.RandomAction()\n ragnt.RandomAction()\n counter+=1\n\n step_time = time()\n game.Step()\n steps_time += time()-step_time\navg = steps_time/counter\nprint('initialization time:{}\\nGenerating world:{}\\nAvg step time:{}\\nAvg steps per second:{}'.format(initalization_time,world_generating,avg,1/avg))\n","repo_name":"aqeel13932/APES","sub_path":"Performance.py","file_name":"Performance.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"90"}
+{"seq_id":"739010078","text":"import pygame\n\nclass Hitbox:\n def __init__(self, surface, pixX, pixY, pixW, pixH, index) -> None:\n self.x = pixX\n self.y = pixY\n self.w = pixW\n self.h = pixH\n self.rect = pygame.Rect(self.x, self.y, self.w, self.h)\n self.surface = surface\n\n self.index = index\n def draw(self) -> None:\n pygame.draw.ellipse(self.surface,(255,255,255),(self.x-self.w+75/2, self.y+self.h-37.5/2, 75, 37.5))\n\n def is_colliding(self, point) -> bool:\n if self.rect.collidepoint(point):\n return True\n else:\n return False","repo_name":"DragonOfD00M/Dungeon-Auto-Fighters","sub_path":"classes/hitbox.py","file_name":"hitbox.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"23241658923","text":"from sys import stdin\r\n\r\n\r\ndef CalcBlock(n: int) -> int:\r\n result: int = 0\r\n for i in range(1, n + 1):\r\n result += i\r\n return result\r\n\r\n\r\ndef solution():\r\n while True:\r\n num: int = int(stdin.readline().rstrip())\r\n if num == 0:\r\n break\r\n print(CalcBlock(num))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n solution()\r\n","repo_name":"anothel/CodeKata","sub_path":"백준/Bronze/5341. Pyramids/Pyramids.py","file_name":"Pyramids.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"73775697895","text":"#coding: utf-8\n\n\"\"\"\n 无压单对账明细\n\"\"\"\n\nfrom finance_pub import *\n\ndef get_unpending_data(start_date, end_date, order_type, cp_type, payment_type, finance_result, order_state, ver, channel, app, daojia_cp_type):\n if not start_date:\n start_date = None\n if not end_date:\n end_date = None\n if not app:\n app = None\n if not ver:\n ver = None\n if not channel:\n channel = None\n if not order_type:\n order_type = None\n if not order_state:\n order_state = None\n if not finance_result:\n finance_result = None\n if not cp_type:\n cp_type = None\n if not payment_type:\n payment_type = None\n if order_type:\n if int(order_type) == 110:\n if daojia_cp_type:\n cp_type = daojia_cp_type;\n cursor = connections['report'].cursor()\n cursor.execute(\"call `SP_T_RP_D_ACCOUNT_CHECKING`(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\",\n [start_date, end_date, order_type, cp_type, payment_type, finance_result, order_state, ver, channel, app])\n objs = cursor.fetchall()\n data = []\n for obj in objs:\n data.append(\n [\n str(obj[0]),\n str(obj[1]),\n str(obj[2]),\n str(obj[3]),\n str(obj[4]),\n str(obj[5]),\n str(obj[6]),\n str(obj[7]),\n str(obj[8]),\n str(obj[9]),\n str(obj[10]),\n str(obj[11]),\n str(obj[12]),\n str(obj[13]),\n str(obj[14]),\n str(obj[15]),\n str(obj[16]),\n str(obj[17]),\n str(obj[18]),\n str(obj[19]),\n\t\t\t\tstr(obj[20]),\n ]\n )\n if not data:\n data.append([Const.NONE] * 21)\n else:\n data.sort(key=lambda o: o[0], reverse=True)\n return data\n\n@login_required\n@permission_required(u'man.%s' % FinanceConst.FINANCE_DETAIL_NO_OV_ORDER_DETAIL, raise_exception=True)\n@add_common_var\ndef unpending_order(request, template_name):\n app = request.GET.get(\"app\")\n report_check_app(request, app)\n vers = get_app_versions(app)\n channels = get_app_channels(app)\n products = get_order_types()\n order_states = get_order_states()\n finance_results = get_finance_result_type()\n cp_types = get_cp_types()\n daojia_cp_types = get_full_cp_names()\n payment_types = get_payment_types()\n cur_order_type = None\n cur_cp_type = None\n cur_daojia_cp_type = None\n try:\n cur_order_type = int(request.GET.get(\"order_type\"))\n except:\n pass\n try:\n cur_cp_type = int(request.GET.get(\"cp_type\"))\n except:\n pass\n try:\n if int(cur_order_type) == 110:\n if cur_cp_type:\n cur_daojia_cp_type = cur_cp_type\n except:\n pass\n return report_render(request, template_name, {\n \"currentdate\": get_datestr(1, \"%Y-%m-%d\"),\n \"vers\": vers,\n \"channels\": channels,\n \"products\": products,\n \"order_states\":order_states,\n \"finance_results\":finance_results,\n \"cp_types\":cp_types,\n \"payment_types\":payment_types,\n \"cur_order_type\":cur_order_type,\n \"cur_cp_type\":cur_cp_type,\n \"cur_daojia_cp_type\":cur_daojia_cp_type,\n \"daojia_cp_types\":daojia_cp_types,\n })\n\n@login_required\n@permission_required(u'man.%s' % FinanceConst.FINANCE_DETAIL_NO_OV_ORDER_DETAIL, raise_exception=True)\ndef unpending_order_ajax(request):\n start_date = request.POST[\"start_date\"]\n end_date = request.POST[\"end_date\"]\n order_type = request.POST[\"ot\"]\n app = request.POST.get(\"app\")\n report_check_app(request, app)\n if app:\n app = \"^%s$\" % app\n else:\n app = get_user_apps(request.user)\n ver = request.POST.get(\"ver\")\n channel = request.POST.get(\"channel\")\n order_state = request.POST.get(\"order_state\")\n finance_result = request.POST.get(\"finance_result\")\n cp_type = request.POST.get(\"cp_type\")\n daojia_cp_type = request.POST.get(\"daojia_cp_type\")\n 
payment_type = request.POST.get(\"payment_type\")\n result = get_unpending_data(start_date, end_date, order_type, cp_type, payment_type, finance_result, order_state, ver, channel, app, daojia_cp_type)\n return HttpResponse(json.dumps(result))\n\n\n@login_required\n@permission_required(u'man.%s' % FinanceConst.FINANCE_DETAIL_NO_OV_ORDER_DETAIL, raise_exception=True)\ndef unpending_order_csv(request):\n print(\"unpending_order_csv\")\n start_date = request.GET.get(\"start_date\")\n end_date = request.GET.get(\"end_date\")\n order_type = request.GET.get(\"ot\")\n app = request.GET.get(\"app\")\n report_check_app(request, app)\n if app:\n app = \"^%s$\" % app\n else:\n app = get_user_apps(request.user)\n ver = request.GET.get(\"ver\")\n channel = request.GET.get(\"channel\")\n order_state = request.GET.get(\"order_state\")\n finance_result = request.GET.get(\"finance_result\")\n cp_type = request.GET.get(\"cp_type\")\n daojia_cp_type = request.GET.get(\"daojia_cp_type\")\n payment_type = request.GET.get(\"payment_type\")\n if order_type:\n name = \"%s无压单对账明细表\" % str(TongjiPayProduct.objects.get(type=order_type).name)\n else:\n name = \"无压单对账明细表\"\n filename = '%s(%s-%s).csv' % (name, str(start_date), str(end_date))\n csv_data = [[\"订单号\",\n \"订单创建时间\",\n \"商品名字\",\n \"内容商名称\",\n \"支付方式\",\n \"商品定价(应收款)\",\n \"商品成本价(应付款)\",\n \"营销策略编号\",\n \"营销减免金额\",\n \"优惠券编号\",\n \"优惠券面值\",\n \"优惠券消耗价格\",\n \"交易状态\",\n \"应退款\",\n \"实收款\",\n \"实际退款\",\n \"交易服务费\",\n \"实际付款\",\n \"是否异常\",\n \"异常原因\",\n\t\t\t\t \"订单来源\"]]\n csv_data.extend(get_unpending_data(start_date, end_date, order_type, cp_type, payment_type, finance_result, order_state, ver, channel, app, daojia_cp_type))\n return get_csv_response(filename, csv_data)","repo_name":"cash2one/pt","sub_path":"boss/finance/views/unpending_order.py","file_name":"unpending_order.py","file_ext":"py","file_size_in_byte":6347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18071675377","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# michael a.g. aïvázis \n# (c) 1998-2023 all rights reserved\n\n# aws support\nimport boto3\n\n# set the path to the file\nbucket = \"parasim-ros3eu\"\nname = \"README.txt\"\n\n# start a session\ns3 = boto3.Session(profile_name=\"parasim\").client(\"s3\")\n\n# download\ns3.download_file(bucket, name, f\"{bucket}/{name}\")\n\n# show me\nprint(f\"downloaded '{name}'\")\n\n\n# end of file\n","repo_name":"aivazis/s3","sub_path":"parasim/bin/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"2222803316","text":"from termcolor import colored\n\nfrom collections import defaultdict\nimport csv\n\ndef askQuesion(hname, fdict):\n favorite_restaurant = input(\"{inputname} さんどこのレストランが好きですか?\\n\".format(inputname=hname))\n fdict[favorite_restaurant.title()] += 1\n\n\nhumanName = input(colored('こんにちは、あなたのお名前は?\\n', 'green'))\n\n# 好みを登録しているデータがあるかを確認する\nrestaurantDict = defaultdict(int)\ntry:\n with open('section9.csv', 'r') as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n restaurantDict[row['NAME']] = int(row['COUNT'])\n \nexcept FileNotFoundError as exc:\n pass\nfinally:\n pass\n\n# if 好みのデータが一個でも登録されているか?\nif len(restaurantDict) > 0:\n #一番多い登録数でソートする\n sortRestaurantDict = sorted(restaurantDict.items(), key=lambda x:x[1], reverse=True)\n #mostPopulraRestaurant = next(iter(sortRestaurantDict))\n for restaurantName in sortRestaurantDict:\n print(\"私のおすすめは{restaurant}です\".format(restaurant=restaurantName[0]))\n choice = input(\"このレストランは好きですか? [Yes/No]\")\n if choice in ['Yes','YES','y', 'ye', 'yes']:\n #好きな場合はカウントを加算する\n restaurantDict[restaurantName[0].title()] += 1\n break\n else:\n #改めて好きなレストランを聞く\n #favorite_restaurant = input(\"{inputname} さんどこのレストランが好きですか?\\n\".format(inputname=humanName))\n #restaurantDict[favorite_restaurant.title()] += 1\n askQuesion(humanName,restaurantDict)\nelse:\n #データが1個も登録されいていない\n askQuesion(humanName,restaurantDict)\n # favorite_restaurant = input(\"{inputname} さんどこのレストランが好きですか?\\n\".format(inputname=humanName))\n # restaurantDict[favorite_restaurant.title()] += 1\n\nprint(colored(\"\"\"\nRoboco: Junさん、ありがとうございました\n良い一日を!\n\"\"\",'green'))\n\nwith open('section9.csv', 'w') as csv_file:\n fieldnames = ['NAME', 'COUNT']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n for key,val in restaurantDict.items():\n print(key, val)\n writer.writerow({'NAME': key, 'COUNT':val})","repo_name":"mogubess/python_s","sub_path":"section9/roboter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"38269680101","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tindeers', '0004_auto_20150117_0425'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='userprofile',\n name='email',\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='gender',\n field=models.CharField(default=b'E', max_length=1, choices=[(b'M', b'male'), (b'F', b'female'), (b'E', b'Eunuch')]),\n preserve_default=True,\n ),\n ]\n","repo_name":"TylerLubeck/PennApps2015","sub_path":"tindeers/migrations/0005_auto_20150117_0849.py","file_name":"0005_auto_20150117_0849.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"10812210037","text":"import cv2\nimport numpy as np\n\nfrom src.deep_sort.detector.detection import Detection\nfrom src.deep_sort.detector.detections_provider import DetectionsProvider\nfrom src.utils.geometry.rect import Rect\n\n\nclass HogDetectionsProvider(DetectionsProvider):\n \"\"\"\n Detects people using Histogram of Oriented Gradients (HOG) approach.\n For classification is using SVM. Detections are happening in real time.\n \"\"\"\n\n def __init__(self, svm_people_detector=cv2.HOGDescriptor_getDefaultPeopleDetector()):\n self.__hog = cv2.HOGDescriptor()\n self.__hog.setSVMDetector(svm_people_detector)\n\n def load_detections(self,\n image: np.ndarray,\n frame_id: int,\n min_height: int = 0) -> list[Detection]:\n \"\"\"Creates detections for given image.\n \"\"\"\n detection_list = []\n\n scale_factor = 0.6\n width = int(image.shape[1] * scale_factor)\n height = int(image.shape[0] * scale_factor)\n dim = (width, height)\n\n resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\n (humans, _) = self.__hog.detectMultiScale(resized, winStride=(24, 24))\n\n # loop over all detected humans\n for (x, y, w, h) in humans:\n real_width = w / scale_factor\n real_height = h / scale_factor\n\n x /= scale_factor\n y /= scale_factor\n\n if real_height < min_height:\n continue\n\n bbox_origin = Rect(left=x, top=y, width=real_width, height=real_height)\n detection_list.append(Detection(bbox_origin, 1.0))\n\n return detection_list\n","repo_name":"st235/HSE.DeepLearning","sub_path":"src/deep_sort/detector/hog_detections_provider.py","file_name":"hog_detections_provider.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"12396361641","text":"import os\nimport pandas as pd\nimport numpy as np\nimport math\n\nimport sys\nsys.path.append(\"../..\")\nfrom src.load_datasets import load_dataset, load_rankings, load_train_data\nimport src.evaluate_regression\n\nimport src.encoding\nfrom src.feature_engineering import normalize_train_data, normalize_test_data\nfrom src.meta_information import add_dataset_meta_information\nfrom src.evaluate_regression import custom_spearmanr_scorer\n\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.metrics import make_scorer, matthews_corrcoef\n\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import mutual_info_regression, f_regression\n\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\nfrom sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nimport xgboost\n\n\ndef get_pearson_correlated_features(data=None, threshold=0.7):\n \"\"\"\n Calculates the pearson correlation of all features in the dataframe and returns a set of features with a\n correlation greater than the threshold.\n\n :param data: The input dataframe.\n :type data: pd.DataFrame\n :param threshold: The threshold for the correlation coefficient in the range of [0.0, 1.0].\n :type threshold: float,optional(default=0.7)\n\n :return: The set of features with a correlation greater than the threshold.\n :rtype: set\n \"\"\"\n # Calculate correlation matrix\n corr_matrix = data.corr()\n\n # Get the set of correlated features\n correlated_features = set()\n for i in range(len(corr_matrix.columns)):\n for j in range(i):\n if abs(corr_matrix.iloc[i, j]) > threshold:\n colname = corr_matrix.columns[i]\n correlated_features.add(colname)\n\n return correlated_features\n\n\n# Define variables for ranking\nfactors = [\"dataset\", \"model\", \"tuning\", \"scoring\"]\nnew_index = \"encoder\"\ntarget = \"rank\"\n\n# Load data\ndf_train = load_dataset(\"./data/raw/dataset_rank_train.csv\")\n\nif \"cv_score\" in df_train.columns:\n df_train = df_train.drop(\"cv_score\", axis=1)\n\nX_train = df_train.drop(target, axis=1)\ny_train = df_train[target]\n\n\n# Create indices for cv\ncv_indices = src.evaluate_regression.custom_cross_validated_indices(pd.concat([X_train, y_train], axis=1), \n factors, \n target, \n n_splits=5, \n shuffle=True, \n random_state=1444)\n# Preprocessing\n# OHE encoding \nX_train, ohe = src.encoding.ohe_encode_train_data(X_train=X_train,\n cols_to_encode=[\"model\", \"tuning\", \"scoring\"],\n verbosity=2)\n\n# Encoder encoding: Poincare Embeddings for feature \"encoder\"\nX_train, _ = src.encoding.poincare_encoding(path_to_graph=\"./data/raw/graph.adjlist\",\n path_to_embeddings=\"./data/preprocessed/embeddings.csv\",\n data=X_train,\n column_to_encode=\"encoder\",\n encode_dim=15,\n explode_dim=True,\n epochs=5000,\n dim_reduction=None,\n verbosity=2)\n\n# Add meta information\nX_train = add_dataset_meta_information(df=X_train,\n path_to_meta_df=\"./data/preprocessed/dataset_agg.csv\",\n nan_threshold=0.4,\n replacing_strategy=\"median\")\n\n# Normalize data\nX_train, scaler = normalize_train_data(X_train=X_train, \n method=\"minmax\",\n verbosity=2)\n\n# Get correlated features\nprint(\"Drop correlated features...\")\ncorrelated_features = get_pearson_correlated_features(data=X_train)\n#print(f\"Correlated features: 
{correlated_features}\")\n\n# Filter out some features\ncorrelated_features = [f for f in correlated_features if not f.startswith(\"enc_dim_\")]\ncorrelated_features = [f for f in correlated_features if not f.startswith(\"model_\")]\ncorrelated_features = [f for f in correlated_features if not f.startswith(\"tuning_\")]\ncorrelated_features = [f for f in correlated_features if not f.startswith(\"scoring_\")]\n\n# Drop features\nX_train = X_train.drop(correlated_features, axis=1)\n\n\n# Feature selection\nprint(\"Feature selection...\")\nfs = SelectKBest(score_func=f_regression, k='all') # or mutual_info_regression\nfs.fit(X_train, y_train.ravel())\n\n# Select columns based on mask\nmask = [x >= np.quantile(fs.scores_, 0.4) for x in fs.scores_] # 0.4\nX_train_fs = X_train.loc[:, mask]\nselected_features = list(X_train_fs.columns)\nsf = list(X_train.columns)\nsf = [f for f in sf if f not in selected_features or not f.startswith(\"enc_dim_\") or not f.startswith(\"tuning_\") or not f.startswith(\"scoring_\") or not f.startswith(\"model_\")]\nprint(sf)\n#print(selected_features)\n\nX_train = X_train[sf]\n\n# Classification\n# Use the labels as they are\n# Define models\nrf = RandomForestClassifier(random_state=42, n_jobs=-1)\ndt = DecisionTreeClassifier(random_state=42)\net = ExtraTreeClassifier(random_state=42)\nets = ExtraTreesClassifier(random_state=42, n_jobs=-1)\nknn = KNeighborsClassifier(n_jobs=-1)\nsvc = LinearSVC(random_state=42, multi_class=\"crammer_singer\")\nrnc = RadiusNeighborsClassifier(n_jobs=-1, radius=5)\ngpc = GaussianProcessClassifier(random_state=42, multi_class=\"one_vs_rest\") # \"one_vs_one\"\n# XGBoost from phase-1\nxgb = xgboost.XGBClassifier(colsample_bytree=0.27972729119255346,\n learning_rate=0.1228007619140701,\n max_depth=23,\n n_estimators=144,\n reg_alpha=1e-09,\n reg_lambda=18.935672936151313,\n subsample=1.0,\n random_state=42,\n n_jobs=-1)\n\n# Remove other models, because of performance\nmodels = [rf, dt, et, ets, knn]\n\nscoring = {\n 'spearman': custom_spearmanr_scorer,\n 'MCC' : make_scorer(matthews_corrcoef)\n}\n\n\n# ToDo: More models\n# Traverse models and score\nfor model in models:\n print(model)\n cv_results = cross_validate(estimator=model, \n X=X_train,\n y=y_train,\n cv=cv_indices, \n scoring=scoring,\n n_jobs=-1, \n return_train_score=True)\n\n for scorer in list(scoring.keys()):\n print(f\"CV Test {scorer}: \\t{round(cv_results[f'test_{scorer}'].mean(), 4)} +/- {round(cv_results[f'test_{scorer}'].std(), 4)}\")\n print(\"\")\n","repo_name":"nina-prog/meta-learning-for-encoder-selection","sub_path":"notebooks/week11/week11-multiclass-classification.py","file_name":"week11-multiclass-classification.py","file_ext":"py","file_size_in_byte":7253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"7911074471","text":"from typing import List\n\nfrom fastapi import APIRouter, Depends\nfrom fastapi_utils.cbv import cbv\nfrom sqlalchemy.orm import Session\n\nfrom app.connectors.db_conn import get_db\nfrom app.core.constants import (DATA_CONFLICT_MESSAGE, DATA_ERROR_LISTALL,\n DATA_NOT_FOUND_MESSAGE, DB_ERROR,\n DEFAULT_LIMIT_COUNT, DEFAULT_OFFSET_COUNT)\nfrom app.core.exceptions import (InternalServerError, RecordAlreadyExists,\n RecordNotFound)\nfrom app.schemas.models import VoicemailBox\nfrom app.schemas.schemas import (ActionVoicemailBox, ActionVoicemailBoxUpdate,\n PaginatedVoicemailBox)\nfrom app.utils import auth\nfrom app.utils.utils import get_eastern_timezoneval\n\nv1_router = APIRouter(tags=[\"voicemailbox\"])\n\nclass VoicemailboxActions:\n def __init__(self):\n self.dataset = []\n\n def get_all_voicemailbox(\n self,\n session: Session,\n limit: int = DEFAULT_LIMIT_COUNT,\n offset: int = DEFAULT_OFFSET_COUNT,\n ) -> List[VoicemailBox]:\n ormqueryset = session.query(VoicemailBox).offset(offset).limit(limit).all()\n for item in ormqueryset:\n self.dataset.append({\"vmb_id\": item.vmb_id, \"vmb_name\": item.vmb_name})\n return self.dataset\n\n def create_voicemailbox(self, session: Session, voicemail_info: ActionVoicemailBox) -> VoicemailBox:\n voicemail_details = (\n session.query(VoicemailBox).filter(VoicemailBox.vmb_name == voicemail_info.vmb_name).first()\n )\n\n if voicemail_details is not None:\n raise RecordAlreadyExists\n final_data = dict((k, v) for k, v in voicemail_info.__dict__.items() if v!=None)\n new_voicemail_info = VoicemailBox(**final_data)\n new_voicemail_info.vmb_timestamp = get_eastern_timezoneval()\n session.add(new_voicemail_info)\n session.commit()\n session.refresh(new_voicemail_info)\n return new_voicemail_info\n\n def get_voicemailbox_info_by_id(self, session: Session, _id: int) -> VoicemailBox:\n voicemail_info = session.query(VoicemailBox).get(_id)\n if voicemail_info is None:\n raise RecordNotFound\n return voicemail_info\n\n def set_voicemailbox_info(self, session: Session, _id: int, info_update: ActionVoicemailBox) -> VoicemailBox:\n voicemail_info = self.get_voicemailbox_info_by_id(session, _id)\n if voicemail_info is None:\n raise RecordNotFound\n voicemail_info.vmb_name = info_update.vmb_name\n voicemail_info.vmb_status = info_update.vmb_status\n session.commit()\n session.refresh(voicemail_info)\n return voicemail_info\n\n def delete_voicemailbox_info(self, session: Session, _id: int):\n voicemail_info = self.get_voicemailbox_info_by_id(session, _id)\n if voicemail_info is None:\n raise RecordNotFound\n session.delete(voicemail_info)\n session.commit()\n return voicemail_info\n\n@cbv(v1_router)\nclass Voicemailbox:\n \"\"\"The following code base it imposes the routes of voicemail transaction table actions.\"\"\"\n session: Session = Depends(get_db)\n @v1_router.get(\n \"/list-voicemailbox\", response_model=PaginatedVoicemailBox, dependencies=[Depends(auth.JWTBearer())]\n )\n def list_voicemail_box(self, limit: int = DEFAULT_LIMIT_COUNT, offset: int = DEFAULT_OFFSET_COUNT):\n try:\n obj_voicemail_actions = VoicemailboxActions()\n voicemail_list = obj_voicemail_actions.get_all_voicemailbox(self.session, limit, offset)\n return PaginatedVoicemailBox(limit=limit, offset=offset, data=voicemail_list)\n except RecordNotFound:\n raise RecordNotFound(message=DATA_ERROR_LISTALL)\n except Exception:\n raise InternalServerError(message=DB_ERROR)\n\n @v1_router.post(\"/add-voicemailbox\", dependencies=[Depends(auth.JWTBearer())])\n def 
add_voicemail_box(self, voicemail_info: ActionVoicemailBoxUpdate):\n try:\n obj_voicemail_actions = VoicemailboxActions()\n voicemail_info = obj_voicemail_actions.create_voicemailbox(self.session, voicemail_info)\n return voicemail_info\n except RecordAlreadyExists:\n raise RecordAlreadyExists(message=DATA_CONFLICT_MESSAGE.format(\"voicemail box\", voicemail_info.vmb_name))\n except Exception:\n raise InternalServerError(message=DB_ERROR)\n\n @v1_router.get(\"/get-voicemailbox\", response_model=ActionVoicemailBox, dependencies=[Depends(auth.JWTBearer())])\n def get_voicemail_info(self, voicemail_id: int, session: Session = Depends(get_db)):\n try:\n obj_voicemail_actions = VoicemailboxActions()\n voicemail_info = obj_voicemail_actions.get_voicemailbox_info_by_id(session, voicemail_id)\n return ActionVoicemailBox(**voicemail_info.__dict__)\n except RecordNotFound:\n raise RecordNotFound(message=DATA_NOT_FOUND_MESSAGE.format(\"voicemail-box\", voicemail_id))\n except RecordAlreadyExists:\n raise RecordAlreadyExists(message=DATA_CONFLICT_MESSAGE.format(\"voicemail-box\", voicemail_id))\n except Exception:\n raise InternalServerError(message=DB_ERROR)\n\n @v1_router.put(\n \"/set-voicemailbox\", response_model=ActionVoicemailBoxUpdate, dependencies=[Depends(auth.JWTBearer())]\n )\n def update_voicemail(\n self, voicemail_id: int, new_info: ActionVoicemailBoxUpdate, session: Session = Depends(get_db)\n ):\n try:\n obj_voicemail_actions = VoicemailboxActions()\n voicemail_info = obj_voicemail_actions.set_voicemailbox_info(session, voicemail_id, new_info)\n return ActionVoicemailBox(**voicemail_info.__dict__)\n except RecordNotFound:\n raise RecordNotFound(message=DATA_NOT_FOUND_MESSAGE.format(\"voicemail-box\", voicemail_id))\n except RecordAlreadyExists:\n raise RecordAlreadyExists(message=DATA_CONFLICT_MESSAGE.format(\"voicemail-box\", voicemail_id))\n except Exception:\n raise InternalServerError(message=DB_ERROR)\n\n @v1_router.delete(\"/delete-voicemailbox\", dependencies=[Depends(auth.JWTBearer())])\n def delete_voicemail(self, voicemail_id: int, session: Session = Depends(get_db)):\n try:\n obj_voicemail_actions = VoicemailboxActions()\n voicemail_info = obj_voicemail_actions.delete_voicemailbox_info(session, voicemail_id)\n return ActionVoicemailBox(**voicemail_info.__dict__)\n except RecordNotFound:\n raise RecordNotFound(message=DATA_NOT_FOUND_MESSAGE.format(\"voicemail-box\", voicemail_id))\n except RecordAlreadyExists:\n raise RecordAlreadyExists(message=DATA_CONFLICT_MESSAGE.format(\"voicemail-box\", voicemail_id))\n except Exception:\n raise RecordNotFound(message=DATA_NOT_FOUND_MESSAGE.format(\"voicemail-box\", voicemail_id))","repo_name":"avinash-chaluvadi-dev/pratilipi-ana","sub_path":"soa-gateway/app/api/voicemail_box.py","file_name":"voicemail_box.py","file_ext":"py","file_size_in_byte":6961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"26156938434","text":"# -*- coding: utf-8 -*-\n# 링크 : https://arisel.notion.site/2786-95d348c9aa9a4399a5aafa1d191ec290\n\n\nfrom sys import stdin\n\n\nclass Restaurant(object):\n def __init__(self, n, arr):\n self.n = n\n self.arr = arr\n self.arr.sort(key=lambda x : (x[1]))\n self.res = []\n\n def _init_cost(self):\n cost, _min = [], arr[-1][0]\n for i in [i[0] for i in arr[::-1]]:\n _min = min(_min, i)\n cost.append(_min)\n return cost[::-1]\n \n def _find(self):\n now, _min = 0, 1000001\n for i in range(self.n):\n before = now\n now += self.arr[i][1]\n _min = min(_min, self.arr[i][0] - self.arr[i][1])\n self.res.append(min(before + self.cost[i], now + _min))\n\n def _pirnt(self):\n print(*self.res, sep=\"\\n\")\n\n def solve(self):\n self.cost = self._init_cost()\n self._find()\n self._pirnt()\n\n\nif __name__ == \"__main__\":\n n = int(stdin.readline())\n arr = [tuple(map(int, stdin.readline().split())) for _ in range(n)]\n\n Restaurant_problem = Restaurant(n, arr)\n Restaurant_problem.solve()\n","repo_name":"arisel117/BOJ","sub_path":"code/BOJ 2786.py","file_name":"BOJ 2786.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"17930832359","text":"cache = {}\n\ndef main():\n def lucas(x):\n if x == 0:\n return 2\n elif x == 1:\n return 1\n elif x in cache:\n return cache[x]\n else:\n cache[x] = lucas(x-1) + lucas(x-2)\n return cache[x]\n\n N = int(input())\n print(lucas(N))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03544/s829191636.py","file_name":"s829191636.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"10226734231","text":"# Task 9 : Bilangan Elegan\n# Suatu bilangan dikatakan elegant jika banyak faktor/pembaginya adalah 4. Sebagai contoh, 10\n# adalah bilangan elegant (karena ia punya 4 faktor yaitu 1, 2, 5, dan 10) sementara 9 bukan bilangan\n# elegan (karena ia punya 3 faktor yaitu 1, 3, dan 9).\n# Input Format\n# Masukan terdiri dari sebuah bilangan N\n# Output Format\n# Keluaran berupa pernyataan bahwa bilangan N tersebut bilangan elegan atau tidak\n# Sample Input\n# 10\n# Sample Output\n# Bilangan elegan\n# Sample Input\n# 9\n# Sample Output\n# Bukan Bilangan elegan\n\nbilangan = int(input())\njumlah_faktor_pembagi = 0\nfor i in range(1,bilangan+1):\n if bilangan % i == 0:\n jumlah_faktor_pembagi += 1\n\nprint('Bilangan elegan' if jumlah_faktor_pembagi == 4 else 'Bukan Bilangan elegan')\n","repo_name":"hasanqqsp/codebase-tpl1102","sub_path":"pertemuan-6/latihan-3.py","file_name":"latihan-3.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"11204019147","text":"from hpp.corbaserver.robot import Robot as Parent\n\n\nclass Robot(Parent):\n \"\"\"\n Control of robot Romeo in hpp\n\n This class implements a client to the corba server implemented in\n hpp-corbaserver. It derive from class hpp.corbaserver.robot.Robot.\n\n This class is also used to initialize a client to rviz in order to\n display configurations of the Romeo robot.\n\n At creation of an instance, the urdf and srdf files are loaded using\n idl interface hpp::corbaserver::Robot::loadRobotModel.\n \"\"\"\n\n # Information to retrieve urdf and srdf files.\n urdfFilename = (\n \"package://example-robot-data/robots/romeo_description/urdf/romeo.urdf\"\n )\n srdfFilename = (\n \"package://example-robot-data/robots/romeo_description/srdf/romeo.srdf\"\n )\n\n halfSitting = {\n \"LEyePitch\": 0,\n \"LWristYaw\": -0.3,\n \"root_joint\": (0, 0, 0.840252, 0, 0, 0, 1),\n \"LEyeYaw\": 0,\n \"RWristYaw\": -0.3,\n \"LHipYaw\": 0,\n \"RHipPitch\": -0.3490658,\n \"RElbowYaw\": 1.05,\n \"LShoulderYaw\": 0.6,\n \"TrunkYaw\": 0,\n \"RShoulderPitch\": 1.5,\n \"LShoulderPitch\": 1.5,\n \"LWristPitch\": -0.2,\n \"HeadRoll\": 0,\n \"LKneePitch\": 0.6981317,\n \"RAnkleRoll\": 0,\n \"LHipPitch\": -0.3490658,\n \"LElbowYaw\": -1.05,\n \"RHipYaw\": 0,\n \"LAnklePitch\": -0.3490658,\n \"RAnklePitch\": -0.3490658,\n \"LToePitch\": 0,\n \"RKneePitch\": 0.6981317,\n \"HeadPitch\": 0,\n \"LWristRoll\": -0.4,\n \"RShoulderYaw\": -0.6,\n \"RWristPitch\": -0.2,\n \"LElbowRoll\": -0.5,\n \"RWristRoll\": -0.4,\n \"LAnkleRoll\": 0,\n \"REyeYaw\": 0,\n \"NeckPitch\": 0,\n \"REyePitch\": 0,\n \"RToePitch\": 0,\n \"LHipRoll\": 0,\n \"RHipRoll\": 0,\n \"RElbowRoll\": 0.5,\n \"NeckYaw\": 0,\n \"LHand\": 0,\n \"RHand\": 0,\n # Here start romeo -full- specifics\n \"LFinger11\": 0,\n \"LFinger12\": 0,\n \"LFinger13\": 0,\n \"LFinger21\": 0,\n \"LFinger22\": 0,\n \"LFinger23\": 0,\n \"LFinger31\": 0,\n \"LFinger32\": 0,\n \"LFinger33\": 0,\n \"LThumb1\": 0,\n \"LThumb2\": 0,\n \"LThumb3\": 0,\n \"RFinger11\": 0,\n \"RFinger12\": 0,\n \"RFinger13\": 0,\n \"RFinger21\": 0,\n \"RFinger22\": 0,\n \"RFinger23\": 0,\n \"RFinger31\": 0,\n \"RFinger32\": 0,\n \"RFinger33\": 0,\n \"RThumb1\": 0,\n \"RThumb2\": 0,\n \"RThumb3\": 0,\n \"LEyeYaw\": 0,\n \"LEyePitch\": 0,\n \"REyeYaw\": 0,\n \"REyePitch\": 0,\n }\n\n leftHandClosed = {\n \"LFinger11\": 0.0,\n \"LFinger12\": 0.0,\n \"LFinger13\": 0.0,\n \"LFinger21\": 0.0,\n \"LFinger22\": 0.0,\n \"LFinger23\": 0.0,\n \"LFinger31\": 0.0,\n \"LFinger32\": 0.0,\n \"LFinger33\": 0.0,\n \"LThumb1\": 0.0,\n \"LThumb2\": 0.0,\n \"LThumb3\": 0.0,\n }\n leftHandOpen = {\n \"LFinger11\": 1.06,\n \"LFinger12\": 1.06,\n \"LFinger13\": 1.06,\n \"LFinger21\": 1.06,\n \"LFinger22\": 1.06,\n \"LFinger23\": 1.06,\n \"LFinger31\": 1.06,\n \"LFinger32\": 1.06,\n \"LFinger33\": 1.06,\n \"LThumb1\": -1.06,\n \"LThumb2\": 1.06,\n \"LThumb3\": 1.06,\n }\n rightHandClosed = {\n \"RFinger11\": 0.0,\n \"RFinger12\": 0.0,\n \"RFinger13\": 0.0,\n \"RFinger21\": 0.0,\n \"RFinger22\": 0.0,\n \"RFinger23\": 0.0,\n \"RFinger31\": 0.0,\n \"RFinger32\": 0.0,\n \"RFinger33\": 0.0,\n \"RThumb1\": 0.0,\n \"RThumb2\": 0.0,\n \"RThumb3\": 0.0,\n }\n rightHandOpen = {\n \"RFinger11\": 1.06,\n \"RFinger12\": 1.06,\n \"RFinger13\": 1.06,\n \"RFinger21\": 1.06,\n \"RFinger22\": 1.06,\n \"RFinger23\": 1.06,\n \"RFinger31\": 1.06,\n \"RFinger32\": 1.06,\n \"RFinger33\": 1.06,\n \"RThumb1\": -1.06,\n \"RThumb2\": 1.06,\n \"RThumb3\": 1.06,\n }\n\n def __init__(self, robotName, load=True):\n 
Parent.__init__(self, robotName, \"freeflyer\", load)\n self.tf_root = \"base_link\"\n self.leftAnkle = \"LAnkleRoll\"\n self.rightAnkle = \"RAnkleRoll\"\n\n def getInitialConfig(self):\n q = []\n for n in self.jointNames:\n dof = self.halfSitting[n]\n if type(dof) is tuple:\n q += dof\n else:\n q.append(dof)\n return q\n\n def getJointDofValue(self, jointName):\n i = 0\n for n in self.jointNames:\n if n == jointName:\n return self.getCurrentConfig()[i + 3]\n i += 1\n\n def getHandConfig(self, side, conf):\n q = []\n if side == \"left\":\n if conf == \"open\":\n q = self.getLeftHandOpenConfig()\n elif conf == \"closed\":\n q = self.getLeftHandClosedConfig()\n elif side == \"right\":\n if conf == \"open\":\n q = self.getRightHandOpenConfig()\n elif conf == \"closed\":\n q = self.getRightHandClosedConfig()\n elif side == \"both\":\n if conf == \"open\":\n q = self.getRightHandOpenConfig()\n self.setCurrentConfig(q)\n q = self.getLeftHandOpenConfig()\n elif conf == \"closed\":\n q = self.getRightHandClosedConfig()\n self.setCurrentConfig(q)\n q = self.getLeftHandClosedConfig()\n return q\n\n def getLeftHandClosedConfig(self):\n q = self.getCurrentConfig()\n for n in self.jointNames:\n if n in self.leftHandClosed:\n q[self.jointNames.index(n) + 3] = self.leftHandClosed[n]\n return q\n\n def getLeftHandOpenConfig(self):\n q = self.getCurrentConfig()\n for n in self.jointNames:\n if n in self.leftHandOpen:\n q[self.jointNames.index(n) + 3] = self.leftHandOpen[n]\n return q\n\n def getRightHandClosedConfig(self):\n q = self.getCurrentConfig()\n for n in self.jointNames:\n if n in self.rightHandClosed:\n q[self.jointNames.index(n) + 3] = self.rightHandClosed[n]\n return q\n\n def getRightHandOpenConfig(self):\n q = self.getCurrentConfig()\n for n in self.jointNames:\n if n in self.rightHandOpen:\n q[self.jointNames.index(n) + 3] = self.rightHandOpen[n]\n return q\n","repo_name":"humanoid-path-planner/hpp_romeo","sub_path":"src/hpp/corbaserver/romeo/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":6427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"36672113390","text":"def isEven(num):\n return num % 2 == 0\n\n\ndef partition(values, callback):\n evens = []\n odds = []\n for val in values:\n if callback(val):\n evens.append(val)\n else:\n odds.append(val)\n return [evens, odds]\n\n\nprint(partition([1, 2, 3, 4], isEven)) # [[2,4],[1,3]]\n","repo_name":"codafett/python","sub_path":"uncategorised/partition.py","file_name":"partition.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"25110445049","text":"# Import necessary libraries\nfrom flask import Flask, render_template, redirect\nfrom flask_pymongo import PyMongo\nimport scrape\n\n# Initiate Flask app\napp = Flask(__name__)\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/mars\"\n\n# Use flask_pymongo to set up mongo connection\nmongo = PyMongo(app)\n\n@app.route(\"/\")\ndef home():\n\n # Find data\n mars = mongo.db.mars.find_one()\n print(\"home: \", mars)\n return render_template(\"index.html\", mars=mars)\n\n@app.route(\"/scrape\")\ndef data_scrape():\n mars = mongo.db.mars\n mars_data = scrape.scrape()\n print(\"mars_data: \", mars_data)\n mars.update(\n {}, \n mars_data, \n upsert=True\n )\n return redirect(\"/\", code=302)\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"hconstancio/Web_Scrapping_Mission2Mars","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"15799531825","text":"from os import listdir, getcwd\nfrom os.path import join,isdir\n\nf = [x for x in listdir('.') if isdir(x)]\n\noutmk = open('mkdir.sh','w')\noutidx = open('putidx.sh','w')\noutidxm = open('putmidx.sh','w')\noutf = open('putfiles.sh','w')\n\ndirs = getcwd().split('/')[-1]+'/'\ntag = './'\n\noutmk.write('mkdir '+tag + '/\\n'+ 'cd ' + tag+'\\n')\noutmk.write('mkdir '+dirs + '/\\n'+ 'cd ' + dirs+'\\n')\noutidx.write('cd ' + tag+dirs+'\\n')\noutidxm.write('cd ' + tag+dirs+'\\n')\noutidxm.write('mput index.html\\n')\noutidxm.close()\noutf.write('cd ' +tag+ dirs+'\\n')\n\nfor l in f:\n outmk.write('mkdir '+l.strip()+'/\\n')\n outidx.write('mput '+l.strip()+'/index.html\\n')\n outf.write('mput '+l.strip()+'/*\\n')\n\noutmk.close()\noutidx.close()\noutf.close()\n","repo_name":"tibristo/BosonTagger","sub_path":"createput.py","file_name":"createput.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18548494019","text":"mini, maxi, K_ban = map(int, input().split())\n\nif (maxi - mini + 1 <= 2 * K_ban):\n ans = [i for i in range(mini, maxi + 1)]\n\nelse:\n left = [i for i in range(mini, mini + K_ban)]\n right = [i for i in range(maxi - K_ban + 1, maxi + 1)]\n \n ans = left + right\n \n\n[print(n) for n in ans]","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03386/s080328292.py","file_name":"s080328292.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"26251814658","text":"# 0부터 N까지의 정수 K개를 더해서 그 합이 N이 되는 경우의 수\n\nimport sys\ninput = sys.stdin.readline\n\nN, K = map(int,input().split())\n\ndp=[[0]*201 for i in range(201)]\n\nfor i in range(1, 201):\n dp[1][i]=1 # K=1이면 N 1개\n dp[2][i]=i+1 # K=2이면 N+1개 \n\nfor i in range(2, 201):\n dp[i][1]=i # K에 상관없이 N이 1인 경우에 합이 N이 되는 경우의 수는 K개\n for j in range(2, 201):\n dp[i][j]=(dp[i][j-1]+dp[i-1][j]) % 1000000000\n\nprint(dp[K][N])","repo_name":"MaxKim-J/hufs-algorithm-study","sub_path":"yunhee/2225 - 합분해.py","file_name":"2225 - 합분해.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"22125467720","text":"import random #for toss\n#board=['','','','','','','','','','']\ndef drawboard(board):\n\tprint(' | |')\n\n\tprint(' '+board[7]+ '|' +' '+ board[8]+ '|' + board[9])\n\tprint('------------')\n\tprint(' | |')\n\n\tprint(' '+board[6]+ '|' +' '+ board[5]+ '|' + board[6])\n\tprint('-------------')\n\tprint(' '+board[3]+ '|' +' '+ board[2]+ '|' + board[1])\n\tprint(' | |')\n\n# drawboard(board)\n\n#taking input from user\n\ndef input_player():\n\tletter = ''\n\t#while loop - if user mistakenly gives input other than X or O\n\twhile not(letter == 'X' or letter =='O'):\n\t\tprint('Do you want to be X or O?')\n\t\tletter = raw_input().upper()\n\n\tif letter == 'X':\n\t\treturn ['X' , 'O']\n\telse:\n\t\treturn['O', 'X']\n\n# print(input_player())\n\ndef first_move():\n\t\n\t\n\tprint('choose number for toss, 0 or 1?')\n\ttoss = raw_input()\n\n\tif random.randint(0,1) == toss:\n\t\treturn ' player'\n\telse:\n\t\treturn 'computer'\n\ndef playAgain():\n\tprint('do you want to play again? yes or no?')\n\treturn raw_input().lower().startswith('y')\n\ndef makeMove(board , letter , move):\n\tboard[move]=letter\n\ndef winner(board, letter):\n\treturn ((board[7]==letter and board[8]==letter and board[9]==letter )\n\t\tor (board[4]==letter and board[5]==letter and board[6]==letter)or\n\t\t\t\t(board[1]==letter and board[2]==letter and board[3]==letter)or\n\t\t\t\t(board[7]==letter and board[4]==letter and board[1]==letter)or\n\t\t\t\t(board[8]==letter and board[5]==letter and board[2]==letter)or\n\t\t\t\t(board[9]==letter and board[6]==letter and board[3]==letter)or\n\t\t\t\t(board[7]==letter and board[5]==letter and board[3]==letter) or \n\t\t\t\t(board[9]==letter and board[5]==letter and board[1]==letter)\n\t\t\t\t)\n\ndef board_copy(board):\n\n\tdupeboard = []\n\n\tfor i in board:\n\t\tdupeboard.append(i)\n\ndef free_space(board , move):\n\treturn board[move]==''\n\n#print(free_space(['','X','O','O','','','','','',''] ,3))\n\ndef player_move(board):\n\tmove = ' '\n\twhile move not in '1 2 3 4 5 6 7 8 9'.split() or not free_space(board, int(move)):\n\t\tmove = raw_input(' what is your next move?')\n\t\treturn int(move)\ndef random_moves(board , moves):\n\tpossibleMoves = []\n\tfor i in moves:\n\t\tif free_space(board , i):\n\t\t\tpossibleMoves.append(i)\n\n\tif len(possibleMoves) != 0:\n\t\treturn random.choice(possibleMoves)\n\n\telse:\n\t\treturn None\n\ndef getComputerMove(board , computerLetter):\n\tif computerLetter == 'X':\n\t\tplayerLetter = 'O'\n\telse:\n\t\tplayerLetter = 'X'\n# computer checking if it can win in one move\n\tfor i in range(1,10):\n\t\tcopy = board_copy(board)\n\t\tif free_space(copy, i):\n\t\t\tmakeMove(copy , computerLetter, i)\n\t\t\tif winner(copy , computerLetter):\n\t\t\t\treturn i \n#computer checking if player can win in one move\n\tfor i in range(1,10):\n\t\tcopy = board_copy(board)\n\t\tif free_space(copy , i):\n\t\t\tmakeMove(copy, playerLetter , i)\n\t\t\tif winner(copy,playerLetter):\n\t\t\t\treturn i\n#check available space in corner\n\tmove = random_moves(board , [ 1,3,7,9])\n\tif move != None:\n\t\treturn i\n#check available space in center\n\tmove = random_moves(board , [5])\n\tif move != None:\n\t\treturn 5\n\n#else check for side spaces\n\treturn random_moves(board , [2,4,6,8])\n\ndef full_board(board):\n\tfor i in range(1,10):\n\t\tif free_space(board, i):\n\t\t\treturn False\n\treturn True\n\nprint(' WELCOME TO TIC TAC TOE!')\n\nwhile True:\n\n\tboard = [' ']*10\n\tplayerLetter, computerLetter = input_player()\n\tturn = 
first_move()\n\tprint ('the '+turn+' will go first')\n\tgame_on = True\n\twhile game_on:\n\t\tif turn == 'player':\n\t\t\tdrawboard(board)\n\t\t\tmove = player_move(board)\n\t\t\tmakeMove(board , playerLetter , move)\n\n\t\t\tif winner(board, playerLetter):\n\t\t\t\tdrawboard(board)\n\t\t\t\tprint('player wins!')\n\t\t\t\tgame_on = False\n\t\t\telse:\n\t\t\t\tif full_board(board):\n\t\t\t\t\tdrawboard(board)\n\t\t\t\t\tprint('tie!')\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tturn = 'computer'\n\n\t\telse:\n\t\t\tmove = getComputerMove(board,computerLetter)\n\t\t\tmakeMove(board , computerLetter , move)\n\n\t\t\tif winner(board , computerLetter):\n\t\t\t\tdrawboard(board)\n\t\t\t\tprint('computer wins')\n\t\t\t\tgame_on = False\n\t\t\telse: \n\t\t\t\tif full_board(board):\n\t\t\t\t\tdrawboard(board)\n\t\t\t\t\tprint('tie!')\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tturn = 'player'\n\n\tif not playAgain():\n\t\tbreak\n\n\n\n# print(first_move())\n\n\n# print(winner(['','O','O','O','','','','','',''] ,'O'))\n\n# print(playAgain())","repo_name":"mephi007/AI-bot","sub_path":"tic-tac.py","file_name":"tic-tac.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"37496784074","text":"from word_container import WordContainer\nfrom tkinter import *\n\nCOLOR1=\"#f0f5f9\"\nCOLOR2=\"#c9d6df\"\nCOLOR3=\"#52616b\"\nCOLOR4=\"#1e2022\"\n\nBG_COLOR=COLOR1\nFRAME_BG_COLOR=COLOR2\nTEXT_COLOR1=COLOR3\nTEXT_COLOR2=COLOR4\nTEXT_COLOR3=\"#e84a5f\"\n\nFONT=(\"Helvetica\", 40)\nFONT_SMALL=(\"Helvetica\", 20)\n\nTYPING_SEC = 60\n\nclass TypingSpeedTestGUI(Frame):\n def __init__(self, master=None):\n super().__init__(master)\n master.title(\"Typing Speed Test\")\n master.config(padx=30, pady=30, bg=BG_COLOR)\n master.bind('', self.key_press)\n self.pack()\n self.init_widget()\n\n self.init_message()\n self.state = 'initial'\n self.wc = WordContainer()\n\n self.input_string = \"\"\n self.wordlist = []\n self.timer = None\n self.n_char = 0\n self.n_char_ok = 0\n self.n_ok = 0\n self.n_ng = 0\n\n def init_widget(self):\n self.top_label = self.top_label = Label(width=40, fg=TEXT_COLOR1, bg=BG_COLOR, font=FONT_SMALL, anchor=W)\n self.top_label.pack()\n self.frame = Frame(width=400, height=300, bg=FRAME_BG_COLOR)\n self.frame.pack(fill=BOTH)\n self.input_label = Label(width=20, fg=TEXT_COLOR1, bg=FRAME_BG_COLOR, font=FONT)\n self.input_label.pack(padx=10, pady=10)\n\n self.main_label = []\n for i in range(0, 5):\n self.main_label.append( Label(self.frame, fg=TEXT_COLOR1, bg=FRAME_BG_COLOR, font=FONT) )\n self.main_label[i].pack()\n\n def init_message(self):\n self.main_label[1].config(text=\"typing speed test for 60 sec.\", fg=TEXT_COLOR1)\n self.main_label[2].config(text=\"press SPACE to start\", fg=TEXT_COLOR1)\n self.input_label.config(text=\"input box\", fg=TEXT_COLOR1)\n\n def key_press(self, event):\n if event.keysym == \"Return\":\n key = \" \"\n else:\n key = event.char\n # print(f\"key pressed: keysym({event.keysym}), char({key})\")\n\n if ( self.state == 'initial' and key == \" \") or (self.state == 'result' and key == \"n\"):\n self.start_typing()\n\n elif self.state == 'typing':\n self.n_char += 1\n self.n_char_ok += 1\n if key == \" \":\n self.finish_input_string()\n else:\n self.update_input_string(key)\n\n header_text = f\"Type Count: {self.n_char}, Correct Word: {self.n_ok}\"\n self.top_label.config(text=header_text)\n\n def update_input_string(self, char):\n self.input_string += char\n self.input_label.config(text=self.input_string, fg=TEXT_COLOR2)\n\n if len(self.input_string) < len(self.wordlist[0]):\n if self.input_string != self.wordlist[0][:len(self.input_string)]:\n self.main_label[0].config(fg=TEXT_COLOR3)\n elif self.input_string == self.wordlist[0]:\n self.main_label[0].config(fg=TEXT_COLOR2)\n else:\n self.main_label[0].config(fg=TEXT_COLOR3)\n\n def finish_input_string(self):\n if self.input_string == self.wordlist[0]:\n self.count_ok()\n else:\n self.count_ng()\n\n self.input_string = \"\"\n self.input_label.config(text=self.input_string, fg=TEXT_COLOR2)\n\n self.wordlist.pop(0)\n self.wordlist.append(self.wc.get_next())\n self.update_main_label()\n\n def count_ok(self):\n print(\"count ok\")\n self.n_ok += 1\n\n def count_ng(self):\n print(\"count ng\")\n self.n_ng += 1\n self.n_char_ok -= len(self.input_string)\n\n def start_typing(self):\n self.n_char = 0\n self.n_char_ok = 0\n self.n_ok = 0\n self.n_ng = 0\n\n self.input_string = \"\"\n self.input_label.config(text=self.input_string, fg=TEXT_COLOR2)\n\n print(\"start typing\")\n self.state = 'typing'\n self.wc.init_list()\n self.wordlist = []\n for i in range(0, 5):\n self.wordlist.append(self.wc.get_next())\n self.update_main_label()\n\n self.timer = 
super().after(TYPING_SEC*1000, self.time_up)\n\n def update_main_label(self):\n for i in range(0, 5):\n self.main_label[i].config(text=self.wordlist[i], fg=TEXT_COLOR1)\n\n def time_up(self):\n print(\"time up\")\n super().after_cancel(self.timer)\n\n self.state = 'result'\n self.show_result()\n\n def show_result(self):\n self.main_label[0].config(text=f\"Total Type Count: {self.n_char}\", fg=TEXT_COLOR1)\n self.main_label[1].config(text=f\"Correct Type Count: {self.n_char_ok}\", fg=TEXT_COLOR1)\n self.main_label[2].config(text=f\"Correct Word: {self.n_ok}\", fg=TEXT_COLOR1)\n self.main_label[3].config(text=f\"Incorrect Word: {self.n_ng}\", fg=TEXT_COLOR1)\n self.main_label[4].config(text=\"press n to restart\", fg=TEXT_COLOR1)\n self.input_label.config(text=\"input box\", fg=TEXT_COLOR1)\n\n\nroot = Tk()\napp = TypingSpeedTestGUI(master=root)\napp.mainloop()\n","repo_name":"Chikara-Ohishi/typing-speed-test","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"5705126600","text":"class Solution(object):\r\n def largestRectangleArea(self, heights):\r\n \"\"\"\r\n :type heights: List[int]\r\n :rtype: int\r\n \"\"\"\r\n stack=[-1]\r\n heights.append(0)\r\n best=0\r\n for i in range(len(heights)):\r\n while heights[i] Path:\r\n \"\"\"\r\n The path to the resources folder for my_object.\r\n\r\n The data folder is defined as a sub-folder of the package folder named \"resources\"\r\n where my_object is defined. More information is available in the get_package_folder\r\n documentation.\r\n If no my_object is passed, returns the resources folder for the package from where\r\n the function is called.\r\n\r\n \"\"\"\r\n package_folder = get_package_folder()\r\n resources_folder = package_folder / \"resources\"\r\n assert resources_folder.exists()\r\n return resources_folder\r\n\r\n\r\ndef get_package_folder() -> Path:\r\n \"\"\"\r\n The path to the package folder from where the function is called, or where\r\n my_object is declared.\r\n\r\n If the package has been bundled in a .exe file, returns the folder of the\r\n application itself. Otherwise, the package folder is defined as the highest folder\r\n in the folder structure containing an __init__.py file.\r\n\r\n Warnings\r\n --------\r\n In the unfrozen case, it is assumed that all package and sub-package have an\r\n __init__.py file.\r\n\r\n \"\"\"\r\n package_path = _get_package_folder_from_caller()\r\n while _is_parent_a_package(package_path):\r\n package_path = package_path.parent\r\n return package_path\r\n\r\n\r\ndef _get_package_folder_from_caller() -> Path:\r\n index = 0\r\n caller = inspect.stack()[index].filename\r\n while caller == __file__:\r\n index += 1\r\n caller = inspect.stack()[index].filename\r\n return Path(caller)\r\n\r\n\r\ndef _is_parent_a_package(package_path: Path) -> bool:\r\n parent_path = package_path.parent\r\n init_file = parent_path / \"__init__.py\"\r\n return init_file.exists()\r\n","repo_name":"bebert64/git-api","sub_path":"git_api/commons/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"38804048754","text":"#This alphabet ditionary is used to map letters to integers to encrypt the text.\nalphabet={\n 'A':1,\n 'B':2,\n 'C':3,\n 'D':4,\n 'E':5,\n 'F':6,\n 'G':7,\n 'H':8,\n 'I':9,\n 'J':10,\n 'K':11,\n 'L':12,\n 'M':13,\n 'N':14,\n 'O':15,\n 'P':16,\n 'Q':17,\n 'R':18,\n 'S':19,\n 'T':20,\n 'U':21,\n 'V':22,\n 'W':23,\n 'X':24,\n 'Y':25,\n 'Z':0\n\n}\n\n#Second alphabet dictionary, used for mapping numbers to letters, used in decryption.\nalphabet2={\n 1:'A',\n 2:'B',\n 3:'C',\n 4:'D',\n 5:'E',\n 6:'F',\n 7:'G',\n 8:'H',\n 9:'I',\n 10:'J',\n 11:'K',\n 12:'L',\n 13:'M',\n 14:'N',\n 15:'O',\n 16:'P',\n 17:'Q',\n 18:'R',\n 19:'S',\n 20:'T',\n 21:'U',\n 22:'V',\n 23:'W',\n 24:'X',\n 25:'Y',\n 0:'Z'\n\n}\n\n\n\nclass RSAalgo:\n #Finds product of two primes\n def N(num1,num2):\n return (num1*num2)\n\n #Used to check if two numbers are prime\n def check_prime(num):\n check = False\n for i in range(num - 1):\n if i == 0:\n continue\n elif i == 1:\n continue\n if num % i == 0:\n return check\n check = True\n return check\n #generates a number which is equal to (primenumber-1)*(primenumber-2), and check if the numbers are prime.\n def generate_prime(number1,number2):\n one=RSAalgo.check_prime(number1)\n two=RSAalgo.check_prime(number2)\n if one ==False:\n return None\n elif two ==False:\n return None\n n = int(number1 - 1) * int(number2 - 1)\n return (n)\n #public key\n #generates a public key which is used to encrypt the data\n def generate_public(n):\n for i in range(100000000000):\n if i==0:\n continue\n elif (n%i)==0:\n\n continue\n else:\n print(i)\n e=i\n return e\n return('No number which is not a factor')\n\n #generates private key used to decrypt the data.\n def private_key(n,e):\n for i in range(1000000):\n if(i*e)%n==1:\n d=i\n print(d)\n return(d)\n #Encrypts the data by mapping each letter to integers then appending it to the list\n def encryption(string,e,N):\n word=list()\n for i in string:\n if i=='\\n':\n continue\n elif i==' ':\n word.append(i)\n continue\n #Takes the alphabet value of i and raises to the power of the public key with remainder of N\n p=(alphabet[i]**e)%N\n print(i)\n word.append(p)\n print(word)\n return word\n #Decrypts data by mapping each number to letter then appending it to string.\n def decryption(word,d,N):\n word=word.split(' ')\n print(word)\n letters=''\n for i in word:\n if i==' ':\n letters=letters+ ' '\n continue\n if i=='{':\n letters=letters+ ' '\n continue\n if i=='}':\n continue\n if i=='}\\n':\n continue\n p=(int(i)**d)%N\n letters=letters+alphabet2[p]\n print(letters)\n return letters\n\n\n#num1=3\n#num2=5\n#print(RSA.generate_prime(num1,num2))\n#N=RSA.N(num1,num2)\n#r=RSA.generate_prime(num1,num2)\n\n#e=RSA.generate_public(r)\n#d=RSA.private_key(r,e)\n\n#word=RSA.encryption('APPLE',e,N)\n#wordz=RSA.decryption(word,d,N)","repo_name":"HayyanKhokhar1234/newsuite","sub_path":"RSA.py","file_name":"RSA.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"37117356735","text":"import sqlite3\r\nimport chat_utils as utils\r\n\r\n\"\"\"\r\nThis file is only used for initiate database. To run it, set utils.NEED_INIT to True,\r\nthen run it (remember to set it back to False after initiation on database.).\r\n\r\n\"\"\"\r\n\r\nif utils.NEED_INIT:\r\n\r\n init_table = [\"\"\"\r\n CREATE TABLE users(\r\n user_id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n -- user_id is in [100000, 199999]\r\n username TEXT UNIQUE,\r\n password TEXT\r\n );\"\"\",\r\n\r\n \"\"\"\r\n -- add a test user\r\n INSERT INTO users \r\n VALUES (100000, 'test_user', '11111111');\r\n \"\"\",\r\n\r\n \"\"\"\r\n CREATE TABLE groups(\r\n group_id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n -- group_id is in [200000, 299999]\r\n group_name TEXT DEFAULT 'Unnamed Group'\r\n );\"\"\",\r\n\r\n \"\"\"\r\n -- add a test_group\r\n INSERT INTO groups \r\n VALUES (200000, 'test_group');\"\"\",\r\n\r\n \"\"\"\r\n CREATE TABLE messages(\r\n message_id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n send_date TEXT,\r\n send_from INT\r\n REFERENCES users(user_id)\r\n ON DELETE CASCADE,\r\n send_to INT\r\n REFERENCES groups(group_id)\r\n ON DELETE CASCADE,\r\n content TEXT\r\n );\"\"\",\r\n\r\n \"\"\"\r\n INSERT INTO messages \r\n VALUES (300000, '2021-11-24 20:16:43', 100000, 200000, 'Hello world!');\r\n \"\"\",\r\n\r\n \"\"\"\r\n CREATE TABLE in_group(\r\n user_id INTEGER,\r\n group_id INTEGER,\r\n FOREIGN KEY(user_id) REFERENCES users(user_id) ON DELETE CASCADE,\r\n FOREIGN KEY(group_id) REFERENCES groups(group_id) ON DELETE CASCADE\r\n );\"\"\",\r\n\r\n \"\"\"\r\n INSERT INTO in_group \r\n VALUES (100000, 200000);\"\"\"]\r\n\r\n db = sqlite3.connect(utils.DB_PATH)\r\n cursor = db.cursor()\r\n\r\n for sql in init_table:\r\n cursor.execute(sql)\r\n\r\n db.commit()\r\n cursor.close()\r\n db.close()","repo_name":"Xinyu-Li-123/Simple-Chatroom-ICS-Final-Project","sub_path":"init_database.py","file_name":"init_database.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"23197763507","text":"r = 1\nnumbp = 0\nnumbi = 0\nwhile r != 0:\n r = int(input('Digite um numero: '))\n if r != 0:\n if r % 2 == 0:\n numbp += 1\n else:\n numbi += 1\nprint('Você digitou {} par e {} impa'.format(numbp, numbi))","repo_name":"isacepifanioo/python","sub_path":"script/ex16.py","file_name":"ex16.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
+{"seq_id":"1297494425","text":"import matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport numpy as np\nimport math\nimport sys\n\n# time change (discrete time)\nT = 5475.0 #days\ndt = 1\n#time array\nt = np.linspace(0, T, int(T/dt))\n\n#first 5 years = 0-1825 days\n#second 5 years = 1826 - 3650 days\n#third 5 years = 3651 - 5475 days\n\nsupply_rate = np.empty(3650)\nsupply_rate.fill(0.04)\n\nyear11 = np.empty(365)\nyear11.fill(0.08)\nsupply_rate = np.append(supply_rate,year11)\n\nyear12 = np.empty(365)\nyear12.fill(0.11)\nsupply_rate = np.append(supply_rate,year12)\n\nyear13 = np.empty(365)\nyear13.fill(0.14)\nsupply_rate = np.append(supply_rate,year13)\n\nyear14 = np.empty(365)\nyear14.fill(0.17)\nsupply_rate = np.append(supply_rate,year14)\n\nyear15 = np.empty(365)\nyear15.fill(0.20)\nsupply_rate = np.append(supply_rate,year15)\n\nk_w = 0.02 #light attenuation by just water\nk_p = 0.001 #attenuation due to producers\n\n#depth array\ndeltaz = 0.5\ndepth = 100\nzetas = np.linspace(0,depth,int(depth/deltaz)) \n\n# array to store the solution if you were using Euler's method\nN = np.zeros((len(t),len(zetas)))\nP1 = np.zeros((len(t),len(zetas)))\nP2 = np.zeros((len(t),len(zetas)))\nC = np.zeros((len(t),len(zetas)))\nlight = np.zeros((len(t),len(zetas)))\nN[0,:] = 0.4\nP1[0,:] = 0.5\nP2[0,:] = 0.5\nC[0,:] = 0.5\n\n#initial light irradiance (constant)\nI_naughts = np.empty(len(t))\n\n#inital light irradiance (seasonal)\nI_max = 1000\nI_min = 300\n\ni = 0\nfor a in t:\n if a<=1825.0:\n I_naughts[i] = I_min\n else:\n I_naughts[i] = I_min + (I_max-I_min)/2.*(np.sin(((a)/365*2*np.pi-np.pi/2))+1)\n i = i + 1\n \n#parameter values\nphi1 = 0.1 #interaction strength for p1\nphi2 = 0.1 #interaction strength for p2\neps1 = 0.3 #transfer efficiency for p1\neps2 = 0.3 #transfer efficiency for p2\ndelta_c = 0.001 #consumer mortality\nk_w = 0.02 #light attenuation by just water\nk_p = 0.001 #attenuation due to producers\ndelta1 = 0.001 #death rate of producer 1\ndelta2 = 0.001 #death rate of producer 2\nmu1 = 1.0 #resource affinity parameter for p1\nmu2 = 0.8 #resource affinity parameter for p2\nalpha_n1 = 0.14 #saturation of nutrients for producer 1\nalpha_i1 = 0.017 #saturation of light for producer 1\nalpha_n2 = 0.14 #saturation of nutrients for producer 2\nalpha_i2 = 0.014 #saturation of light for producer 2\n \n#building 2 dimensional arrays for nutrients, producer, and consumer groups\ntotal_time_arr1 = []\ntotal_zeta_arr1 = []\ntotal_color_arr1 = []\ntotal_time_arr2 = []\ntotal_zeta_arr2 = []\ntotal_color_arr2 = []\nfor i in range(1, len(t)):\n #print(t[i])\n for j in range(0,len(zetas)):\n I = I_naughts[i] * np.exp(-(k_w*zetas[j]*(deltaz)+np.sum(k_p*(P1[0,:j]+P2[0,:j])*(deltaz))))\n\n if (N[i-1,j]/(N[i-1,j]+(mu1/alpha_n1)) > I/(I+(mu1/alpha_i1))):\n total_time_arr1.append(i)\n total_zeta_arr1.append(zetas[j])\n total_color_arr1.append('tab:red')\n else:\n total_time_arr1.append(i)\n total_zeta_arr1.append(zetas[j])\n total_color_arr1.append('tab:blue')\n \n if (N[i-1,j]/(N[i-1,j]+(mu2/alpha_n2)) > I/(I+(mu2/alpha_i2))):\n total_time_arr2.append(i)\n total_zeta_arr2.append(zetas[j])\n total_color_arr2.append('tab:red')\n else:\n total_time_arr2.append(i)\n total_zeta_arr2.append(zetas[j])\n total_color_arr2.append('tab:blue')\n\n mu1_growth = np.minimum((N[i-1,j]/(N[i-1,j]+(mu1/alpha_n1))), (I/(I+(mu1/alpha_i1))))\n mu2_growth = np.minimum((N[i-1,j]/(N[i-1,j]+(mu2/alpha_n2))), (I/(I+(mu2/alpha_i2))))\n dNdt = supply_rate[i] - 
(mu1*mu1_growth*P1[i-1,j]) - (mu2*mu2_growth*P2[i-1,j]) + (delta1*P1[i-1,j]) + (delta2*P2[i-1,j])\n dP1dt = (mu1*mu1_growth*P1[i-1,j]) - (phi1*P1[i-1,j]*C[i-1,j]) - (delta1*P1[i-1,j])\n dP2dt = (mu2*mu2_growth*P2[i-1,j]) - (phi2*P2[i-1,j]*C[i-1,j]) - (delta2*P2[i-1,j])\n dCdt = (eps1*phi1*P1[i-1,j]*C[i-1,j]) + (eps2*phi2*P2[i-1,j]*C[i-1,j]) - (delta_c*C[i-1,j])\n N[i,j] = N[i-1,j] + dNdt * dt\n P1[i,j] = P1[i-1,j] + dP1dt * dt\n P2[i,j] = P2[i-1,j] + dP2dt * dt\n C[i,j] = C[i-1,j] + dCdt * dt\n light[i,j] = I\n\n# print shape of arrays for nutrients, producer, and consumer groups\nprint('N shape = ', N.shape)\nprint('P1 shape = ', P1.shape)\nprint('P2 shape = ', P2.shape)\nprint('C shape = ', C.shape)\n\nN = N.T\nP1 = P1.T\nP2 = P2.T\nC = C.T\n\n# zetas has depth/deltaz = 200 grid points, so row 100 is half depth and row -1 is full depth\nfig, axs = plt.subplots(3,4,figsize=(30,12))\n\naxs[0,0].plot(t,np.log(N[0, :]), color = 'red')\naxs[0,0].set_title('Nutrients at surface')\naxs[0,0].set_xlabel('time (days)')\naxs[0,0].set_ylabel('Nutrient Concentration')\n\naxs[1,0].plot(t,np.log(N[100, :]), color = 'red')\naxs[1,0].set_title('Nutrients at half depth')\naxs[1,0].set_xlabel('time (days)')\naxs[1,0].set_ylabel('Nutrient Concentration')\n\naxs[2,0].plot(t,np.log(N[-1, :]), color = 'red')\naxs[2,0].set_title('Nutrients at full depth')\naxs[2,0].set_xlabel('time (days)')\naxs[2,0].set_ylabel('Nutrient Concentration')\n\naxs[0,1].plot(t,np.log(P1[0, :]), color = 'orange', linestyle = 'dashed')\naxs[0,1].set_title('Producer 1 at surface')\naxs[0,1].set_xlabel('time (days)')\naxs[0,1].set_ylabel('Producer Biomass')\n\naxs[1,1].plot(t,np.log(P1[100, :]), color = 'orange', linestyle = 'dashed')\naxs[1,1].set_title('Producer 1 at half depth')\naxs[1,1].set_xlabel('time (days)')\naxs[1,1].set_ylabel('Producer Biomass')\n\naxs[2,1].plot(t,np.log(P1[-1, :]), color = 'orange', linestyle = 'dashed')\naxs[2,1].set_title('Producer 1 at full depth')\naxs[2,1].set_xlabel('time (days)')\naxs[2,1].set_ylabel('Producer Biomass')\n\naxs[0,2].plot(t,np.log(P2[0, :]), color = 'blue')\naxs[0,2].set_title('Producer 2 at surface')\naxs[0,2].set_xlabel('time (days)')\naxs[0,2].set_ylabel('Producer Biomass')\n\naxs[1,2].plot(t,np.log(P2[100, :]), color = 'blue')\naxs[1,2].set_title('Producer 2 at half depth')\naxs[1,2].set_xlabel('time (days)')\naxs[1,2].set_ylabel('Producer Biomass')\n\naxs[2,2].plot(t,np.log(P2[-1, :]), color = 'blue')\naxs[2,2].set_title('Producer 2 at full depth')\naxs[2,2].set_xlabel('time (days)')\naxs[2,2].set_ylabel('Producer Biomass')\n\naxs[0,3].plot(t,np.log(C[0, :]), color = 'grey', linestyle = 'dotted')\naxs[0,3].set_title('Consumer at surface')\naxs[0,3].set_xlabel('time (days)')\naxs[0,3].set_ylabel('Consumer Biomass')\n\naxs[1,3].plot(t,np.log(C[100, :]), color = 'grey', linestyle = 'dotted')\naxs[1,3].set_title('Consumer at half depth')\naxs[1,3].set_xlabel('time (days)')\naxs[1,3].set_ylabel('Consumer Biomass')\n\naxs[2,3].plot(t,np.log(C[-1, :]), color = 'grey', linestyle = 'dotted')\naxs[2,3].set_title('Consumer at full depth')\naxs[2,3].set_xlabel('time (days)')\naxs[2,3].set_ylabel('Consumer Biomass')\n\n\nplt.show()\n","repo_name":"klinn1/bloom_succession","sub_path":"np2c_model.py","file_name":"np2c_model.py","file_ext":"py","file_size_in_byte":6704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"6063716975","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n#@Author : Aixiu\r\n\r\n\"\"\"\r\n生成器(generator):\r\n 生成器的本质就是迭代器\r\n \r\n 创建生成器的两种方案:\r\n 1.生成器函数\r\n 2.生成器表达式\r\n \r\n 生成器函数\r\n 特性\r\n 1.生成器函数中有一个关键字 yield\r\n 2.生成器函数执行的时候,得到的是生成器,\r\n \r\n yield:\r\n 作用:\r\n 1.可以返回数据\r\n 2.可以分段的执行函数中的内容,通过 __next__() 可以执行到下一个 yield的位置\r\n\"\"\"\r\n\r\n# def func():\r\n# print(123456)\r\n# yield 999 # yield 也有反回的意思\r\n\r\n# ret = func()\r\n# # print(ret) # \r\n\r\n# ret.__next__()\r\n# print(ret.__next__()) # yield 只有执行到 next的时候才会返回数据\r\n\r\n\r\n# def func():\r\n# print(123)\r\n# yield 666\r\n# print(456)\r\n# yield 999\r\n \r\n# ret = func()\r\n# print(ret.__next__())\r\n# print(ret.__next__())\r\n\r\n\r\n# 案例\r\n# 去工厂定制 1000 件衣服\r\n\r\n# def order():\r\n# lst = []\r\n# for i in range(1000):\r\n# lst.append(f'衣服{i}')\r\n# return lst\r\n\r\n# lst = order()\r\n# print(lst)\r\n\r\ndef order():\r\n lst = []\r\n for i in range(1000):\r\n lst.append(f'衣服{i+1}')\r\n if len(lst) == 50:\r\n yield lst\r\n lst = []\r\n \r\ngei = order()\r\nprint(gei.__next__())\r\nprint(gei.__next__())\r\n","repo_name":"aixiu/PythonABC","sub_path":"函数-装饰器详解/06_生成器.py","file_name":"06_生成器.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18110397929","text":"from sys import stdin\nfrom collections import deque\ninput = stdin.readline\n\n# 自作dequeでは実行時間的に通らないので...\n\nn = int(input())\nd: deque = deque([])\n\nfor _ in range(n):\n command = input().split()\n\n if command[0] == \"insert\":\n x = int(command[1])\n d.appendleft(x)\n\n elif command[0] == \"delete\":\n x = int(command[1])\n if d.count(x):\n d.remove(x)\n\n elif command[0] == \"deleteFirst\":\n tmp = d.popleft()\n\n elif command[0] == \"deleteLast\":\n tmp = d.pop()\n\nwhile d:\n tmp = d.popleft()\n\n if d:\n print(tmp, end=\" \")\n else:\n print(tmp)\n\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02265/s199158939.py","file_name":"s199158939.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"70402644136","text":"#Energy bill calculator Problem:\n# Outputs the raw cost of energy usage\ndef rawcost(prev, curr):\n unitsUsed = curr - prev\n calori3 = 39.3 / 3.6\n kWh = unitsUsed * 1.022 \n kWh = kWh * calori3\n return kWh * 2.84\n\n# Circle properties problem:\n# Returns the: radius, area, circumference and arc length of a circle\ndef properties(diameter, arcAngle):\n radius = diameter / 2\n radius2 = radius ** 2\n area = 3.14 * radius2\n circumference = 3.14 * diameter\n arcLength = circumference * arcAngle\n return radius, area, circumference, arcLength\n","repo_name":"TuttiFrutti1090/11D-Cs1-TIME","sub_path":"TIME 1/slide17.py","file_name":"slide17.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"4881865544","text":"# 정수를 저장하는 큐를 구현한 다음, 입력으로 주어지는 명령을 처리하는 프로그램을 작성하시오.\n\n# 명령은 총 여섯 가지이다.\n\n# push X: 정수 X를 큐에 넣는 연산이다.\n# pop: 큐에서 가장 앞에 있는 정수를 빼고, 그 수를 출력한다. 만약 큐에 들어있는 정수가 없는 경우에는 -1을 출력한다.\n# size: 큐에 들어있는 정수의 개수를 출력한다.\n# empty: 큐가 비어있으면 1, 아니면 0을 출력한다.\n# front: 큐의 가장 앞에 있는 정수를 출력한다. 만약 큐에 들어있는 정수가 없는 경우에는 -1을 출력한다.\n# back: 큐의 가장 뒤에 있는 정수를 출력한다. 만약 큐에 들어있는 정수가 없는 경우에는 -1을 출력한다.\n\nimport sys \nclass Queue:\n def __init__(self):\n self.queue = []\n def push(self, x):\n self.queue.append(x)\n def pop(self):\n if self.queue:\n self.queue.pop(0)\n def size(self):\n print(len(self.queue)) \n def empty(self):\n if self.queue:\n print(0)\n else:\n print(1)\n def front(self):\n if self.queue:\n print(self.queue[0])\n else:\n print(-1)\n def back(self):\n if self.queue:\n print(self.queue[-1])\n else:\n print(-1)\n\nif __name__ == \"__main__\":\n my_queue = Queue()\n N = int(sys.stdin.readline())\n for _ in range(N):\n command = list(sys.stdin.readline())\n if command[0] == \"push\":\n my_queue.push(int(command[1]))\n elif command[0] == \"pop\":\n my_queue.pop()\n elif command[0] == \"size\":\n my_queue.size()\n elif command[0] == \"empty\":\n my_queue.empty()\n elif command[0] == \"front\":\n my_queue.front()\n elif command[0] == \"back\":\n my_queue.back()\n ","repo_name":"yesj1234/Yangs_team_note","sub_path":"daily_coding/data_structure/queue/queue_class.py","file_name":"queue_class.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"35247096725","text":"from neopixel import NeoPixel\nimport board\nfrom time import sleep\n\npixels = NeoPixel(board.D18, 59, auto_write=False, bpp=4)\n\n\ncolors = [(255,0,0,0), (0,255,0,0), (0,0,255,0), (0,0,0,255)]\n\nwhile True:\n\n for i in range(3):\n pixels.fill(colors[i])\n pixels.show()\n print(pixels[0])\n sleep(2)","repo_name":"MagePhenix/piZeroRGB","sub_path":"piPixelTest.py","file_name":"piPixelTest.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"36278063439","text":"from colorama import Fore\nimport xml.etree.ElementTree as ET\nfrom Paciente import paciente\nfrom Paciente import lista_paciente\nnpacientes=lista_paciente()\n\n\ndef menu():\n print(Fore.YELLOW+\"------------Bienvenido usuario ♣--------------\")\n opcion=\"\"\n \n while opcion!=5:\n opcion_ejecucion=\"\"\n print()\n print(Fore.CYAN+\"------------Menú---------------\")\n print()\n print(Fore.CYAN+\"1. Cargar Archivo\")\n print(Fore.CYAN+\"2. Ver información de los pacientes cargados\")\n print(Fore.CYAN+\"3. Elegir paciente para realizar su respectivo análisis\")\n print(\"4. Generar archivo xml de salida\")\n print(Fore.CYAN+\"5. Salir\")\n print()\n opcion=int(input())\n if opcion==1:\n nombre_archivo=input(\"Introduzca la ruta del archivo \\n\")\n ruta=nombre_archivo\n cargar_archivo(ruta)\n print(\"Su archivo ha sido cargado con éxito :)\")\n\n if opcion==2:\n npacientes.print()\n\n if opcion==3:\n print(\"Escoja su paciente\")\n paciente=input()\n paciente_buscado=npacientes.buscar(paciente)\n if paciente_buscado is None:\n print(Fore.RED+(\"El paciente no ha sido encontrado\"))\n else:\n while opcion_ejecucion != 2:\n print(Fore.GREEN+\"1. Ejecutar períodos proporcionados por el xml\")\n print(Fore.GREEN+\"2. Salir\")\n opcion_ejecucion=int(input())\n \n \n if opcion_ejecucion == 1:\n print(\"Patrón inicial:\")\n \n paciente_buscado.paciente.celula.imprimir()\n x=1\n rejilla=paciente_buscado.paciente.celula.retornar_rejillas()\n paciente_buscado.paciente.rejilla.append(rejilla,x)\n paciente_buscado.paciente.celula.graficar(x)\n while x<=paciente_buscado.paciente.periodo:\n print()\n print(\"__________________________________________\\n\")\n print(\"Período No\",x)\n print()\n paciente_buscado.paciente.celula.periodos()\n rejilla=paciente_buscado.paciente.celula.retornar_rejillas()\n paciente_buscado.paciente.celula.imprimir()\n x+=1\n paciente_buscado.paciente.celula.graficar(x)\n paciente_buscado.paciente.rejilla.append(rejilla,x) \n paciente_buscado.paciente.rejilla.verificar_repeticion()\n print()\n print(\"Períodos finalizados\")\n print()\n print(\"---Aquí están los resultados---\")\n estado=paciente_buscado.paciente.rejilla.estado_paciente()\n print(\"Tipo de enfermedad:\",estado)\n periodo=paciente_buscado.paciente.rejilla.periodoinfectado()\n print(\"Periodo que se empieza a repetir:\",periodo)\n repeticiones=paciente_buscado.paciente.rejilla.repeticiones()\n print(\"Cada cuántos períodos se repite el patrón:\",repeticiones)\n inicial=paciente_buscado.paciente.rejilla.retornar_patron()\n paciente_buscado.paciente.celula.volver_inicial(inicial)\n paciente_buscado.paciente.rejilla.delete()\n print()\n\n if opcion==4:\n npacientes.generar_xml()\n print(\"Archivo generado con éxito\")\n\n \n \n \n\n \n\n\n\n\n\n\n \n\n \n\n\ndef cargar_archivo(ruta):\n try:\n tree = ET.parse(ruta)\n pacientes = tree.getroot()\n \n\n for nuevo_paciente in pacientes.findall(\"paciente\"):\n datos_personales=nuevo_paciente.find(\"datospersonales\")\n nombre=datos_personales.find(\"nombre\").text\n edad=datos_personales.find(\"edad\").text\n\n #covertir mis variables a enteros\n edad_entero=int(edad)\n tamano=nuevo_paciente.find(\"m\").text\n tamano_entero=int(tamano)\n periodo=nuevo_paciente.find(\"periodos\").text\n periodo_entero=int(periodo)\n\n #creación de un nuevo paciente\n paciente_nuevo=paciente(nombre,edad_entero,tamano_entero,periodo_entero)\n npacientes.append(paciente_nuevo)\n paciente_nuevo.celula.append()\n \n #creación de rejilla\n #lectura de filas 
y columnas contagiadas\n for celda in nuevo_paciente.iter(\"celda\"):\n if celda.attrib=={}:\n break\n fila_entero=int(celda.attrib[\"f\"])\n columna_entero=int(celda.attrib[\"c\"])\n paciente_nuevo.celula.cambio_celula(fila_entero,columna_entero)\n\n \n x=1\n rejilla=paciente_nuevo.celula.retornar_rejillas()\n paciente_nuevo.rejilla.append(rejilla,x)\n while x<=paciente_nuevo.periodo:\n paciente_nuevo.celula.periodos()\n rejilla=paciente_nuevo.celula.retornar_rejillas()\n x+=1\n paciente_nuevo.rejilla.append(rejilla,x) \n #se verifica el estado del paciente \n paciente_nuevo.rejilla.verificar_repeticion()\n estado=paciente_nuevo.rejilla.estado_paciente()\n paciente_nuevo.estado=estado\n periodo=paciente_nuevo.rejilla.periodoinfectado()\n paciente_nuevo.periodo_repetido=periodo\n repeticiones=paciente_nuevo.rejilla.repeticiones()\n paciente_nuevo.numero=repeticiones\n inicial=paciente_nuevo.rejilla.retornar_patron()\n paciente_nuevo.celula.volver_inicial(inicial)\n paciente_nuevo.rejilla.delete()\n except Exception as e:\n print(\"El archivo no fue cargado correctamente\") \n\n\n\n\n\n\n\n\n \n\n \n \n \n\n\nmenu()","repo_name":"luis2001xd/IPC2_Proyecto1_202003745","sub_path":"Proyecto/Menu.py","file_name":"Menu.py","file_ext":"py","file_size_in_byte":6289,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"37796546764","text":"#!/usr/bin/env python\n\nimport sys\n\nlangnum = 10\n\ndictl = {'0':1, '1':2, '2':3, '3':4, '4':5, '5':6, '6':7, '7':8, '8':9, '9':10}\n\n# Load scoring file and label.scp.\ndef Loaddata(fin):\n\tx = []\n\tfor i in range(langnum+1):\n\t\tx.append(0)\n\tfin = open(fin, 'r')\n\tlines = fin.readlines()\n\tfin.close()\n\n\n\tdata = []\n\n\tfor line in lines[1:]:\n\t\tpart = line.split()\n\t\tx[0] = part[0].split('g')[1].split('_')[0]\n\t\tfor i in range(langnum):\n\t\t\tx[i+1] = part[i + 1]\n\t\tdata.append(x)\n\t\tx = []\n\t\tfor i in range(langnum+1):\n\t\t\tx.append(0)\n\t\n\treturn data\n\n\n# Generate target trials and nontarget trials.\n# Prepare for plotting DET curves and computing EER / minDCF.\n# data: matrix for result scores.\ndef fun(data, targetf, nontargetf):\n\t\n\ttargetf = open(targetf, 'w')\n\tnontargetf = open(nontargetf, 'w')\n\tfor part in data:\n\t\tlan = part[0]\n\t\tfor j in range(langnum):\n\t\t\tif j + 1 == dictl[lan]:\n\t\t\t\ttargetf.write(part[j + 1] + '\\n')\n\t\t\telse:\n\t\t\t\tnontargetf.write(part[j + 1] + '\\n')\n\ttargetf.close()\n\tnontargetf.close()\n\n\nif __name__ == '__main__':\n '''\n if (len(sys.argv) != 3):\n print \"usage %s