content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
''' settings for Django ''' import os import django.conf.global_settings as DEFAULT_SETTINGS LOCALHOST = False DEBUG = False TEMPLATE_DEBUG = DEBUG DEBUG_PAYMENTS = DEBUG # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), os.pardir)) + '/' LIVE = 1 ADMINS = ( ('Madra David', 'madra@redcore.co.ug'), ) APP_EMAILS = { 'contact_us':'mandelashaban593@gmail.com', 'about_us':'mandelashaban593@gmail.com', 'info':'mandelashaban593@gmail.com', 'support':'mandelashaban593@gmail.com', } DEBUG_EMAILS = { 'madra@redcore.co.ug' , } APP_NAME = 'Useremit' DOMAIN_NAME = 'Remit' APP_TITLE = 'Remit | Send Money to Mobile Money in Uganda or Kenya | Pay utility bills online' MANAGERS = ADMINS USE_JUMIO = True BASE_URL = 'https://useremit.com/' BASE_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), os.pardir)) + '/' DATABASES = { 'default': { # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'ENGINE': 'django.db.backends.postgresql_psycopg2', # Or path to database file if using sqlite3. 'NAME': 'anenyuoe4', # The following settings are not used with sqlite3: 'USER': 'dqebbquaa4iba', 'PASSWORD': 'WMm8mq1ZYAOn', # Empty for localhost through domain sockets or '127.0.0.1' for # localhost through TCP. 'HOST': 'LOCALHOST', 'PORT': '', # Set to empty string for default. 'OPTIONS': {'autocommit': True, }, } } # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = ['www.useremit.com', 'http://useremit.com', 'https://useremit.com', 'https://useremit.com'] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. 
#TIME_ZONE = 'Africa/Nairobi' TIME_ZONE ='UTC' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = BASE_DIR + 'static/uploads/' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = BASE_URL + 'static/uploads/' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" #GEOIP_PATH = BASE_URL + 'geoip_data/' geo_dir = os.path.dirname(__file__) geo_rel_path = "geoip" GEOIP_PATH = os.path.join(geo_dir, geo_rel_path) EMAIL_TEMPLATE_DIR = BASE_DIR + 'templates/email/' AJAX_TEMPLATE_DIR = BASE_DIR + 'templates/ajax/' SMS_TEMPLATE_DIR = BASE_DIR + 'templates/sms/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. 
STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'ksx8+lq!5pzx&)xuqp0sc-rdgtd14gmix-eglq(iz%3+7h)f52' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'sslify.middleware.SSLifyMiddleware', 'django.middleware.common.CommonMiddleware', #'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', #'session_security.middleware.SessionSecurityMiddleware', # Uncomment the next line for simple clickjacking protection: 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'remit.urls' # Python dotted path to the WSGI application used by Django's runserver. 
WSGI_APPLICATION = 'remit.wsgi.application' TEMPLATE_DIRS = ( BASE_DIR + 'templates', BASE_DIR + 'remit_admin/templates/', BASE_DIR + 'remit_admin/templates/admin/', ) INSTALLED_APPS = ( #background tasks #'huey.djhuey', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'remit', 'social_widgets', 'accounts', #'south' 'landingapp', 'coverage', #'notification', 'nexmo', 'guardian', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', #'django_admin_bootstrapped.bootstrap3', #'django_admin_bootstrapped', # Uncomment the next line to enable the admin: 'remit_admin', 'session_security', 'gravatar', 'django_cron', 'django.contrib.humanize', 'django_extensions', #'django_bitcoin', 'btc', 'rest_framework', 'rest_framework.authtoken', 'api', 'seo', 'payments', 'background_task', 'django.contrib.admin', 'ipn', 'standard', 'crispy_forms', 'tinymce', #'django_twilio', ) PAYPAL_RECEIVER_EMAIL = "mandelashaban593@gmail.com" # Rest Framework REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', ), 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.IsAdminUser' ), 'DEFAULT_RENDERER_CLASSES': ( 'rest_framework.renderers.JSONRenderer', #'rest_framework.renderers.BrowsableAPIRenderer', ), # Use Django's standard `django.contrib.auth` permissions, 'DATETIME_FORMAT': '%Y-%m-%d %H:%M:%S' } SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer' # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } # Custom template processors # YOpay YOPAY_USERNAME = '100224720137' YOPAY_PASSWORD = 'jLQF-r1oa-OyIq-0zoQ-544O-7U1F-oGj5-YoyU' YOPAY_ENDPOINT = 'https://paymentsapi1.yo.co.ug/ybs/task.php' # Ipay LIVE = 1 IPAY_CALLBACK_URL = '%stransaction/confirm_payment/' % BASE_URL IPAY_USER = 'redcore' IPAY_MERCHANT = 'RedCore' IPAY_HASH_KEY = '0yiq0zoQ544O' # uba UBA_CALLBACK_URL = '' UBA_MERCHANT_ID = '' UBA_MERCHANT_KEY = '' #jumio JUMIO_URL="https://netverify.com/api/netverify/v2/initiateNetverify/" JUMIO_TOKEN="fcf1eec3-728d-4f8a-8811-5b8e0e534597" JUMIO_SECRET="9mnQyVj1ppiyVESYroDHZS23Z9OfQ9GS" JUMIO_USER_AGENT="MyCompany MyApp/1.0.0" USE_JUMIO = True """ JUMIO_SUCCESS_URL="https://simtransfer.com/jumiopass/" JUMIO_ERROR_URL="https://simtransfer.com/jumiofail/" """ JUMIO_SUCCESS_URL="https://simtransfer.com/idscanned/" JUMIO_ERROR_URL="https://simtransfer.com/idscanfailed/" JUMIO_CALLBACK="https://simtransfer.com/jumiodata/" # Mailgun ANONYMOUS_USER_ID = -1 AUTH_PROFILE_MODULE = 'accounts.Profile' LOGIN_URL = BASE_URL + 'login/' SIGNUP_URL = BASE_URL + 'signup/' LOGOUT_URL = BASE_URL + 'signout/' AUTHENTICATION_BACKENDS = ( 'accounts.backends.EmailVerificationBackend', 'remit.backends.EmailAuthBackend', 'guardian.backends.ObjectPermissionBackend', ) ACTIVATION_LINK = BASE_URL + 'activate/' EMAIL_USE_TLS = True EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 """ EMAIL_HOST_USER = '' EMAIL_HOST_PASSWORD = '' DEFAULT_FROM_EMAIL = '' DEFAULT_TO_EMAIL = '' """ #EMAIL_PORT = 587 ADMIN_USER='admin_key_user' ADMIN_USER_KEY='user_004_admin' # Mailgun settings DEFAULT_FROM_EMAIL = 'Remit.ug 
<noreply@remit.ug>' #EMAIL_USE_TLS = True #EMAIL_HOST = 'smtp.mailgun.org' #EMAIL_HOST_USER = 'postmaster@remit.ug' #EMAIL_HOST_PASSWORD = '25s0akinnuk8' #EMAIL_PORT = 25 # Mailgun settings EMAIL_BACKEND = 'django_mailgun.MailgunBackend' #EMAIL_TEMPLATE_DIR = '%stemplates/email/' % (BASE_DIR) # using sandbox account here , change later """ MAILGUN_ACCESS_KEY = 'key-159a0akhdauw79rtshe1rw-itl6t-0i6' MAILGUN_SERVER_NAME = 'remit.ug' MAILGUN_ACCESS_LINK = 'https://api.mailgun.net/v2/remit.ug/messages' """ MAILGUN_ACCESS_KEY = 'key-159a0akhdauw79rtshe1rw-itl6t-0i6' MAILGUN_SERVER_NAME = 'useremit.com' MAILGUN_ACCESS_LINK = 'https://api.mailgun.net/v3/useremit.com/messages' CONTACT_NO = '+256783877133' # Nexmo NEXMO_USERNAME = '8cede62f' NEXMO_PASSWORD = 'd4d43a29' NEXMO_FROM = 'Remit' #Nexmo App NEXMO_API_KEY = '8cede62fSecret' NEXMO_API_SECRET = 'd4d43a29' NEXMO_DEFAULT_FROM = 'Remit' #if set to zero we use twilio USE_NEXMO = 0 USE_TWILIO = True USE_SUKUMA = False USE_AFRICA_SMS = True TWILIO_ACCOUNT_SID='AC2a0de3ac9808d7bfa5c3d75853c073d6' TWILIO_AUTH_TOKEN='82b2ab8535255c8fd8d96bad96103ae7' TWILIO_DEFAULT_CALLERID = 'Remit' # Session security SESSION_EXPIRE_AT_BROWSER_CLOSE = True # cron jobs CRON_CLASSES = [ "remit.cron.UpdateRates", # ... ] # Paganation PAGNATION_LIMIT = 10 # Avatar GRAVATAR_URL = "https://www.gravatar.com/avatar.php?" 
# Bitcoin #BITCOIND_CONNECTION_STRING = "http://ubuntu:bitwa8bfede82llet@localhost:8332" BITCOIND_CONNECTION_STRING = "http://redcorebrpc:BKGyjwyNXzHumywcau3FubmyaJ8NypJtd1eSdTYCqSkJ@localhost:8332" # How many bitcoin network confirmations are required until we consider the transaction # as received BITCOIN_MINIMUM_CONFIRMATIONS = 3 # Use Django signals to tell the system when new money has arrived to your # wallets BITCOIN_TRANSACTION_SIGNALING = True from decimal import Decimal MAIN_ADDRESS = '12oaMnJZZJRx59kWyAshzmogHERo8y54Et' BITCOIN_PAYMENT_BUFFER_SIZE = 1 BITCOIN_ADDRESS_BUFFER_SIZE = 1 PAYMENT_VALID_HOURS = 1 BITCOIN_PRIVKEY_FEE = Decimal("0.0005") BITCOIN_TRANSACTION_CACHING = 1 #admin who processed transactions PROCESSED_BY = 1 #background tasks #HUEY_CONFIG = { # 'QUEUE': 'huey.backends.redis_backend.RedisBlockingQueue', # 'QUEUE_NAME': 'test-queue', # 'QUEUE_CONNECTION': { # 'host': 'localhost', # 'port': 6379, # }, # 'THREADS': 4, #} SESSION_COOKIE_SECURE = True CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True CSRF_FAILURE_VIEW = 'remit.views.csrf_failure_view' MTN_SDP = '172.25.48.43' MTN_TEST_BED = 0 MTN_SDP_USERNAME = 'remitug.sp1' MTN_SDP_PASS = 'Huawei2014' MTN_SDP_SERVICEID = '2560110001380' MTN_SDP_URL = 'http://172.25.48.43:8310/' MTN_VENDOR_CODE = 'REMIT' REVENUE_SHARE = 2.16 #disable email and sms sending DISABLE_COMMS = False #background tasks MAX_ATTEMPTS = 5 #need this for generating reports from sqlite IS_SQLITE = False OTHER_FEES = True OTHER_FEES = True SEND_KYC_SMS = True # Pesapot PESAPOT_URL = 'http://pesapot.com/api/' PESAPOT_TOKEN = '' PESAPOT_KEY = '' #paybill PAYBILL = False DISABLE_MTN = True ENABLE_TRADELANCE = True ENABLE_YO = False DISABLE_AIRTEL_MONEY = False DISABLE_MTN_MOBILE_MONEY = False #force Transaction id FORCE_TRANSACTION_ID = True # Localhost settings # Crispy forms tags settings CRISPY_TEMPLATE_PACK = 'bootstrap3' try: from local_settings import * except ImportError: pass STATIC_ROOT = BASE_DIR + 'static' # 
URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = BASE_URL + 'static/'
[ 7061, 6, 6460, 329, 37770, 705, 7061, 198, 11748, 28686, 198, 11748, 42625, 14208, 13, 10414, 13, 20541, 62, 33692, 355, 5550, 38865, 62, 28480, 51, 20754, 198, 29701, 1847, 39, 10892, 796, 10352, 198, 30531, 796, 10352, 198, 51, 3620, ...
2.358883
5,584
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import os import shutil import subprocess import sys from pex.pex_info import PexInfo from pex.testing import run_pex_command from pex.typing import TYPE_CHECKING from pex.variables import unzip_dir if TYPE_CHECKING: from typing import Any
[ 2, 15069, 33448, 41689, 1628, 20420, 357, 3826, 27342, 9865, 3843, 20673, 13, 9132, 737, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 3826, 38559, 24290, 737, 198, 11748, 28686, 198, 11748, 4423, 346, 198, 11748, ...
3.375
112
#!/usr/bin/python3 # # Copyright (c) 2017-2020, SUSE LLC # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. Redistributions # in binary form must reproduce the above copyright notice, this list of # conditions and the following disclaimer in the documentation and/or # other materials provided with the distribution. # # Neither the name of the SUSE Linux Products GmbH nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' Author: Bo Maryniuk <bo@suse.de> This tool helps to: 1. Format patches from Git the way it has a minimal impact on the changes in the future 2. Update patches to the current package source 3. Detect content differences, if the filename is still the same 4. 
Generate include message for .changes logfile ''' import os import sys import re import argparse import shutil ORDERING_FILE = 'patches.orders.txt' CHANGES_FILE = 'patches.changes.txt' def remove_order(filename): ''' Remove order of the patch filename. Git formats patches: XXXX-filename.patch This function removes the "XXXX-" part, if any. ''' ordnum = os.path.basename(filename).split('-')[0] if ordnum and not re.sub(r'[0-9]', '', ordnum): filename = os.path.join(os.path.dirname(filename), filename.split('-', 1)[-1]).lower() ordnum = int(ordnum) else: ordnum = None return ordnum, filename def remove_order_from_subject(src_file, dst_file, use_unique=False): ''' Remove subject inside the patch. Git format patches inside with the following subject format: Subject: [PATCH X/Y] ......... This function removes [PATCH X/Y] part, if any. In Git format-patches one can add "-N" flag, so then subject won't have these numbers, but just "[PATCH]". In this case we leave it out. ''' if os.path.exists(dst_file) and not use_unique: raise IOError('the file {0} exists'.format(dst_file)) if os.path.exists(dst_file) and use_unique: dst_file = unique(dst_file) dst = open(dst_file, 'w') for fline in open(src_file).read().split(os.linesep): fline_tk = re.split(r'\s+\[PATCH \d+/\d+\]\s+', fline) if len(fline_tk) == 2 and fline_tk[0] == 'Subject:': fline = ' [PATCH] '.join(fline_tk) dst.write('{0}\n'.format(fline)) dst.close() def git_format_patch(tag): ''' Formats patches from the given tag. ''' patches = 0 for patch in os.popen( 'git format-patch {0}'.format(tag)).read().split(os.linesep): if patch.split('.')[-1] == 'patch': patches += 1 print("Patches fetched: {0}".format(patches)) def get_diff_contents(data): ''' Get diff contents only. 
''' # Yes, I know about library https://github.com/cscorley/whatthepatch # But for now we go ultra-primitive to keep no deps data = '--'.join(data.split("--")[:-1]) contents = [] for chunk in re.split(r'@@.*?@@.*?\n', data)[1:]: contents.append(chunk.split('diff --git')[0]) return contents def unique(fname): ''' Change name to the unique, in case it isn't. :param fname: :param use: :return: ''' fname = fname.split('.') if '-' not in fname[0]: fname[0] = '{0}-{1}'.format(fname[0], 1) else: chnk = fname[0].split('-') try: fname[0] = '{0}-{1}'.format('-'.join(chnk[:-1]), int(chnk[-1]) + 1) except ValueError: # Filename is not in "str-int", but "str-str". fname[0] = '{0}-{1}'.format(fname[0], 1) return '.'.join(fname) def extract_spec_source_patches(specfile): ''' Extracts source patches from the .spec file to match existing comments, according to the https://en.opensuse.org/openSUSE:Packaging_Patches_guidelines :param: specfile :return: ''' patch_sec_start = False patch_sec_end = False head_buff = [] patch_section = [] for spec_line in open(specfile).read().split(os.linesep): if re.match(r'^[Pp]atch[0-9]+:', spec_line) and not patch_sec_start: patch_sec_start = True if not spec_line.startswith('#') and \ not re.match(r'^[Pp]atch[0-9]+:', spec_line) and \ patch_sec_start and \ not patch_sec_end: patch_sec_end = True if not patch_sec_start and not patch_sec_end: head_buff.append(spec_line) if patch_sec_start and not patch_sec_end: patch_section.append(spec_line) first_comment = [] for head_line in reversed(head_buff): if not head_line: break if head_line.startswith('#'): first_comment.append(head_line) patch_section.insert(0, os.linesep.join(first_comment)) patchset = {} curr_key = None for line in reversed(patch_section): if re.match(r'^[Pp]atch[0-9]+:', line): curr_key = re.sub(r'^[Pp]atch[0-9]+:', '', line).strip() patchset[curr_key] = [] continue if curr_key and line and line.startswith('#'): patchset[curr_key].append(line) return patchset def do_remix_spec(args): 
''' Remix spec file. :param args: :return: ''' if not os.path.exists(args.spec or ''): raise IOError('Specfile {0} is not accessible or is somewhere else'.format(args.spec)) if not os.path.exists(args.ordering or ''): args.ordering = './{0}'.format(ORDERING_FILE) if not os.path.exists(args.ordering): raise IOError('Ordering file is expected "./{0}" but is not visible'.format(ORDERING_FILE)) patchset = extract_spec_source_patches(args.spec) for o_line in open(args.ordering).read().split(os.linesep): if re.match(r'^[Pp]atch[0-9]+:', o_line): ref, pname = [_f for _f in o_line.split(' ') if _f] print(os.linesep.join(patchset.get(pname) or ['# Description N/A'])) print(ref.ljust(15), pname) def do_create_patches(args): ''' Create and reformat patches for the package. ''' current_dir = os.path.abspath('.') if not args.existing: if os.listdir(current_dir): print("Error: this directory has to be empty!") sys.exit(1) git_format_patch(args.format) else: if not [fname for fname in os.listdir(current_dir) if fname.endswith('.patch')]: print("Error: can't find a single patch in {0} to work with!".format(current_dir)) sys.exit(1) ord_fh = open(args.ordering or ORDERING_FILE, 'w') ord_fh.write('#\n#\n# This is pre-generated snippets of patch ordering\n#\n') ord_patches_p = [] patches = 0 for fname in os.listdir(current_dir): if fname.split('.')[-1] == 'patch': # Check if we should skip this patch in case subject starts with SKIP_TAG with open(fname) as patch_file: if any(re.match(r'^Subject: \[PATCH.*] {}'.format(re.escape(args.skip_tag)), i) for i in patch_file.readlines()): print("Skipping {}".format(fname)) os.unlink(fname) continue print("Preparing {}".format(fname)) order, nfname = remove_order(fname) if args.index is not None: order += args.index remove_order_from_subject(fname, nfname, use_unique=args.increment) os.unlink(fname) ord_fh.write('{patch}{fname}\n'.format(patch='Patch{0}:'.format(order).ljust(15), fname=nfname)) ord_patches_p.append(order) patches += 1 if 
ord_patches_p: ord_fh.write('#\n#\n# Patch processing inclusion:\n') for order in ord_patches_p: ord_fh.write('%patch{num} -p1\n'.format(num=order)) else: ord_fh.write('# Nothing here, folks... :-(\n') ord_fh.close() print("\nRe-formatted {0} patch{1}".format(patches, patches > 1 and 'es' or '')) def do_update_patches(args): ''' Update patches on the target package source. ''' print("Updating packages from {0} directory".format(args.update)) added = [] removed = [] changed = [] # Gather current patches current_patches = {} for fname in os.listdir(os.path.abspath(".")): if fname.endswith('.patch'): current_patches[os.path.basename(fname)] = True for fname in os.listdir(args.update): if fname.endswith('.patch'): fname = os.path.join(args.update, fname) if os.path.isfile(fname): current_patches[os.path.basename(fname)] = False n_fname = os.path.basename(fname) if not os.path.exists(n_fname): print("Adding {0} patch".format(fname)) shutil.copyfile(fname, os.path.join(os.path.abspath("."), n_fname)) added.append(n_fname) else: if get_diff_contents(open(fname).read()) != get_diff_contents(open(n_fname).read()): if args.changed: print("Replacing {0} patch".format(n_fname)) os.unlink(n_fname) shutil.copyfile(fname, os.path.join(os.path.abspath("."), n_fname)) changed.append(n_fname) else: print("WARNING: Patches {0} and {1} are different!".format(fname, n_fname)) for fname in sorted([patch_name for patch_name, is_dead in list(current_patches.items()) if is_dead]): print("Removing {0} patch".format(fname)) os.unlink(fname) removed.append(fname) # Generate an include for spec changes with open(CHANGES_FILE, "w") as changes: for title, data in [('Changed', changed), ('Added', added), ('Removed', removed)]: if not data: continue print("- {}:".format(title), file=changes) for fname in sorted(data): print(" * {}".format(fname), file=changes) print(file=changes) if not removed and not added and not changes: print("No files has been changed") def main(): ''' Main app. 
''' VERSION = '0.2' parser = argparse.ArgumentParser(description='Git patch formatter for RPM packages') parser.add_argument('-u', '--update', action='store', const=None, help='update current patches with the destination path') parser.add_argument('-f', '--format', action='store', const=None, help='specify tag or range of commits for patches to be formatted') parser.add_argument('-o', '--ordering', action='store', const=None, help='specify ordering spec inclusion file. Default: {0}'.format(ORDERING_FILE)) parser.add_argument('-x', '--index', action='store', const=None, help='specify start ordering index. Default: 0') parser.add_argument('-s', '--spec', action='store', const=None, help='remix spec file and extract sources with their comments to match new patch ordering') parser.add_argument('-i', '--increment', action='store_const', const=True, help='use increments for unique names when patch commits repeated') parser.add_argument('-c', '--changed', action='store_const', const=True, help='update also changed files with the content') parser.add_argument('-e', '--existing', action='store_const', const=True, help='work with already formatted patches from Git') parser.add_argument('-k', '--skip-tag', action='store', const=None, default='[skip]', help='skip commits starting with this tag. Default: [skip]') parser.add_argument('-v', '--version', action='store_const', const=True, help='show version') args = parser.parse_args() try: if args.index: try: args.index = int(args.index) except ValueError: raise Exception('Value "{0}" should be a digit'.format(args.index)) if args.version: print("Version: {0}".format(VERSION)) elif args.spec: do_remix_spec(args) elif args.update and not args.format: do_update_patches(args) elif (args.format and not args.update) or args.existing: do_create_patches(args) else: parser.print_help() sys.exit(1) except Exception as ex: print("Critical error:", ex, file=sys.stderr) if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 2, 198, 2, 15069, 357, 66, 8, 2177, 12, 42334, 11, 311, 19108, 11419, 198, 2, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 2297, 396, 3890, 290, 779, 287, 2723, 290, 13934, 5107, ...
2.271242
6,120
from dataclasses import dataclass import random from typing import List import numpy as np import torch from config import Config from domain import DispatchMode from models import DQN from modules.state import FeatureManager from objects import Area, Vehicle from objects.area import AreaManager from objects.vehicle import VehicleManager random.seed(1234) np.random.seed(1234) torch.manual_seed(1234) torch.cuda.manual_seed_all(1234) torch.backends.cudnn.deterministic = True def load_dispatch_component(dispatch_mode: DispatchMode, config: Config, is_train=False) -> DispatchModuleInterface: if dispatch_mode == DispatchMode.DQN: dispatch_module = DQNDispatch(config=config, is_train=is_train) return dispatch_module elif dispatch_mode == DispatchMode.RANDOM: dispatch_module = RandomDispatch() return dispatch_module elif dispatch_mode == DispatchMode.NOT_DISPATCH: return None else: raise NotImplementedError
[ 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 11748, 4738, 198, 6738, 19720, 1330, 7343, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 198, 6738, 4566, 1330, 17056, 198, 6738, 7386, 1330, 35934, 19076, 198, 6738,...
2.970149
335
# Standard Python modules # ======================= import weakref from abc import ABCMeta, abstractmethod, abstractproperty # External modules # ================ from vtk import vtkActor from vtk import vtkMapper from vtk import vtkPolyDataAlgorithm from vtk import vtkBoundingBox # DICE modules # ============ from dice_tools import wizard def set_selected(self, enable): if enable and not self.__selected: color = getattr(self, 'color', None) if color != None: self.__saved_color = color self.set_color([0.9, 0, 0]) self.__selected = True wizard.w_geometry_object_selection_state(self, True) elif not enable and self.__selected: self.__selected = False color = getattr(self, 'color', None) if color != None: self.set_color(self.__saved_color) self.__saved_color = None wizard.w_geometry_object_selection_state(self, False) wizard.subscribe(GeometryBase, 'w_geometry_objects_select')
[ 2, 8997, 11361, 13103, 198, 2, 36658, 50155, 198, 11748, 4939, 5420, 198, 6738, 450, 66, 1330, 9738, 48526, 11, 12531, 24396, 11, 12531, 26745, 198, 198, 2, 34579, 13103, 198, 2, 796, 25609, 18604, 198, 6738, 410, 30488, 1330, 410, 30...
2.386667
450
#!/usr/bin/env python # # Copyright 2011-2013 Colin Scott # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import sys import os sys.path.append(os.path.dirname(__file__) + "/../../..") simple_cfg = ''' from sts.control_flow.replayer import Replayer from sts.simulation_state import SimulationConfig simulation_config = SimulationConfig() control_flow = Replayer(simulation_config, "%s") ''' if __name__ == '__main__': unittest.main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 198, 2, 15069, 2813, 12, 6390, 18373, 4746, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428,...
3.410714
280
import os a=input("enter username:") if a.isalpha(): os.system("useradd "+a) os.system("passwd a")
[ 11748, 28686, 198, 64, 28, 15414, 7203, 9255, 20579, 25, 4943, 198, 361, 257, 13, 271, 26591, 33529, 198, 197, 418, 13, 10057, 7203, 7220, 2860, 43825, 64, 8, 198, 197, 418, 13, 10057, 7203, 6603, 16993, 257, 4943, 198 ]
2.525
40
"""Views.""" from django.conf import settings from django.contrib.auth.decorators import login_required from django.http import HttpResponse, Http404 from django.shortcuts import render, redirect from responsive_dashboard.dashboard import dashboards from responsive_dashboard.models import UserDashboard, UserDashlet # pylint: disable=no-member
[ 37811, 7680, 82, 526, 15931, 198, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 12501, 273, 2024, 1330, 17594, 62, 35827, 198, 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 3107...
3.52
100
import os from dataclasses import dataclass CONFIG = Config.create_from_env()
[ 11748, 28686, 198, 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 628, 198, 198, 10943, 16254, 796, 17056, 13, 17953, 62, 6738, 62, 24330, 3419, 198 ]
3.115385
26
import click import logging import multiprocessing import os import sys from atlas import __version__ from atlas.conf import make_config from atlas.parsers import refseq_parser from atlas.tables import merge_tables from atlas.workflows import download, run_workflow logging.basicConfig(level=logging.INFO, datefmt="%Y-%m-%d %H:%M", format="[%(asctime)s %(levelname)s] %(message)s") if __name__ == "__main__": cli()
[ 11748, 3904, 198, 11748, 18931, 198, 11748, 18540, 305, 919, 278, 198, 11748, 28686, 198, 11748, 25064, 198, 6738, 379, 21921, 1330, 11593, 9641, 834, 198, 6738, 379, 21921, 13, 10414, 1330, 787, 62, 11250, 198, 6738, 379, 21921, 13, 79...
2.772152
158
"""Test cases for the python_jsonapi.core.types.relationships module.""" from python_jsonapi.core.types.relationships import Relationship from python_jsonapi.core.types.relationships import RelationshipsMixin def test_relationship_init() -> None: """Can init a new relationships.""" sut = Relationship() assert sut is not None def test_mixin_init() -> None: """Can init a new mixin.""" sut = RelationshipsMixin() assert sut is not None relationship = Relationship() sut = RelationshipsMixin(relationships={"self": relationship}) assert sut is not None assert sut.relationships is not None assert sut.relationships["self"] == relationship def test_mixin_add_relationship() -> None: """Can add a new entry.""" sut = RelationshipsMixin() sut.add_relationship(key="relationship1", relationship=Relationship()) sut.add_relationship(key="relationship2", relationship=Relationship()) assert sut.relationships is not None assert sut.relationships["relationship1"] is not None assert sut.relationships["relationship2"] is not None
[ 37811, 14402, 2663, 329, 262, 21015, 62, 17752, 15042, 13, 7295, 13, 19199, 13, 39468, 5748, 8265, 526, 15931, 198, 6738, 21015, 62, 17752, 15042, 13, 7295, 13, 19199, 13, 39468, 5748, 1330, 39771, 198, 6738, 21015, 62, 17752, 15042, 13...
3.182081
346
from needlestack.indices.index import BaseIndex
[ 6738, 761, 32712, 441, 13, 521, 1063, 13, 9630, 1330, 7308, 15732, 198 ]
3.692308
13
from django.conf import settings from django.contrib.auth.models import User from django.core.mail import send_mail
[ 6738, 42625, 14208, 13, 10414, 1330, 6460, 201, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 201, 198, 6738, 42625, 14208, 13, 7295, 13, 4529, 1330, 3758, 62, 4529, 201, 198, 201, 198, 201, 198 ]
3.075
40
#! /usr/bin/python # -*- coding: utf-8 -*- __author__ = "Osman Baskaya" from nltk.stem.wordnet import WordNetLemmatizer import sys lmtzr = WordNetLemmatizer() for line in sys.stdin: print ' '.join(map(lmtzr.lemmatize, line.split()))
[ 2, 0, 1220, 14629, 14, 8800, 14, 29412, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 834, 9800, 834, 796, 366, 16748, 805, 347, 2093, 11729, 1, 198, 198, 6738, 299, 2528, 74, 13, 927, 13, 4775, 3262, ...
2.247706
109
# https://realpython.com/blog/python/lyricize-a-flask-app-to-create-lyrics-using-markov-chains/ from random import choice import sys text = "some sample text" text = """ An old man turned ninety-eight He won the lottery and died the next day It's a black fly in your Chardonnay It's a death row pardon two minutes too late And isn't it ironic... don't you think It's like rain on your wedding day It's a free ride when you've already paid It's the good advice that you just didn't take Who would've thought... it figures Mr. Play It Safe was afraid to fly He packed his suitcase and kissed his kids goodbye He waited his whole damn life to take that flight And as the plane crashed down he thought "Well isn't this nice..." And isn't it ironic... don't you think It's like rain on your wedding day It's a free ride when you've already paid It's the good advice that you just didn't take Who would've thought... it figures Well life has a funny way of sneaking up on you When you think everything's okay and everything's going right And life has a funny way of helping you out when You think everything's gone wrong and everything blows up In your face A traffic jam when you're already late A no-smoking sign on your cigarette break It's like ten thousand spoons when all you need is a knife It's meeting the man of my dreams And then meeting his beautiful wife And isn't it ironic...don't you think A little too ironic...and, yeah, I really do think... It's like rain on your wedding day It's a free ride when you've already paid It's the good advice that you just didn't take Who would've thought... 
it figures Life has a funny way of sneaking up on you Life has a funny, funny way of helping you out Helping you out I recommend getting your heart trampled on to anyone I recommend walking around naked in your living room Swallow it down (what a jagged little pill) It feels so good (swimming in your stomach) Wait until the dust settles You live you learn You love you learn You cry you learn You lose you learn You bleed you learn You scream you learn I recommend biting off more then you can chew to anyone I certainly do I recommend sticking your foot in your mouth at any time Feel free Throw it down (the caution blocks you from the wind) Hold it up (to the rays) You wait and see when the smoke clears You live you learn You love you learn You cry you learn You lose you learn You bleed you learn You scream you learn Wear it out (the way a three-year-old would do) Melt it down (you're gonna have to eventually anyway) The fire trucks are coming up around the bend You live you learn You love you learn You cry you learn You lose you learn You bleed you learn You scream you learn You grieve you learn You choke you learn You laugh you learn You choose you learn You pray you learn You ask you learn You live you learn """ # text = "For now, well generate sample text via the very scientific method of throwing a string directly into the code based on some copied & pasted Alanis Morisette lyrics." if __name__ == "__main__": generateText(text, int(sys.argv[1]), int(sys.argv[2]))
[ 2, 3740, 1378, 5305, 29412, 13, 785, 14, 14036, 14, 29412, 14, 306, 1173, 1096, 12, 64, 12, 2704, 2093, 12, 1324, 12, 1462, 12, 17953, 12, 306, 10466, 12, 3500, 12, 4102, 709, 12, 38861, 14, 198, 198, 6738, 4738, 1330, 3572, 198, ...
3.793902
820
""" Copyright 2021 The Johns Hopkins University Applied Physics Laboratory LLC Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the Software), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import gym import copy
[ 37811, 198, 15269, 220, 33448, 383, 25824, 21183, 2059, 27684, 23123, 18643, 11419, 198, 220, 198, 5990, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 4866, 220, 198, 1659, 428, 3788, 290, 3917, 10314, 369...
3.931271
291
# Adapted from Graham Neubig's Paired Bootstrap script # https://github.com/neubig/util-scripts/blob/master/paired-bootstrap.py import numpy as np from sklearn.metrics import f1_score, precision_score, recall_score from tqdm import tqdm EVAL_TYPE_ACC = "acc" EVAL_TYPE_BLEU = "bleu" EVAL_TYPE_BLEU_DETOK = "bleu_detok" EVAL_TYPE_PEARSON = "pearson" EVAL_TYPE_F1 = "f1" EVAL_TYPE_MACRO_F1 = "macro-f1" EVAL_TYPE_PREC = "precision" EVAL_TYPE_REC = "recall" EVAL_TYPE_AVG = "avg" EVAL_TYPES = [EVAL_TYPE_ACC, EVAL_TYPE_BLEU, EVAL_TYPE_BLEU_DETOK, EVAL_TYPE_PEARSON, EVAL_TYPE_F1, EVAL_TYPE_AVG, EVAL_TYPE_PREC, EVAL_TYPE_REC] def eval_preproc(data, eval_type='acc'): ''' Preprocess into the appropriate format for a particular evaluation type ''' if type(data) == str: data = data.strip() if eval_type == EVAL_TYPE_BLEU: data = data.split() elif eval_type == EVAL_TYPE_PEARSON: data = float(data) elif eval_type in [EVAL_TYPE_F1, EVAL_TYPE_MACRO_F1, EVAL_TYPE_PREC, EVAL_TYPE_REC]: data = float(data) elif eval_type == EVAL_TYPE_AVG: data = float(data) return data def eval_measure(gold, sys, eval_type='acc'): ''' Evaluation measure This takes in gold labels and system outputs and evaluates their accuracy. 
It currently supports: * Accuracy (acc), percentage of labels that match * Pearson's correlation coefficient (pearson) * BLEU score (bleu) * BLEU_detok, on detokenized references and translations, with internal tokenization :param gold: the correct labels :param sys: the system outputs :param eval_type: The type of evaluation to do (acc, pearson, bleu, bleu_detok) ''' if eval_type == EVAL_TYPE_ACC: return sum([1 if g == s else 0 for g, s in zip(gold, sys)]) / float(len(gold)) elif eval_type == EVAL_TYPE_BLEU: import nltk gold_wrap = [[x] for x in gold] return nltk.translate.bleu_score.corpus_bleu(gold_wrap, sys) elif eval_type == EVAL_TYPE_PEARSON: return np.corrcoef([gold, sys])[0,1] elif eval_type == EVAL_TYPE_BLEU_DETOK: import sacrebleu # make sure score is 0-based instead of 100-based return sacrebleu.corpus_bleu(sys, [gold]).score / 100. elif eval_type == EVAL_TYPE_F1: return f1_score(gold, sys) elif eval_type == EVAL_TYPE_MACRO_F1: return f1_score(gold, sys, average="macro") elif eval_type == EVAL_TYPE_PREC: return precision_score(gold, sys) elif eval_type == EVAL_TYPE_REC: return recall_score(gold, sys) elif eval_type == EVAL_TYPE_AVG: return np.mean(sys) else: raise NotImplementedError('Unknown eval type in eval_measure: %s' % eval_type) def eval_with_paired_bootstrap(gold, sys1, sys2, num_samples=10000, sample_ratio=0.5, eval_type='acc', return_results=False): ''' Evaluate with paired boostrap This compares two systems, performing a significance tests with paired bootstrap resampling to compare the accuracy of the two systems. 
:param gold: The correct labels :param sys1: The output of system 1 :param sys2: The output of system 2 :param num_samples: The number of bootstrap samples to take :param sample_ratio: The ratio of samples to take every time :param eval_type: The type of evaluation to do (acc, pearson, bleu, bleu_detok) ''' assert(len(gold) == len(sys1)) assert(len(gold) == len(sys2)) # Preprocess the data appropriately for they type of eval gold = [eval_preproc(x, eval_type) for x in gold] sys1 = [eval_preproc(x, eval_type) for x in sys1] sys2 = [eval_preproc(x, eval_type) for x in sys2] sys1_scores = [] sys2_scores = [] wins = [0, 0, 0] n = len(gold) ids = list(range(n)) for _ in tqdm(range(num_samples)): # Subsample the gold and system outputs np.random.shuffle(ids) reduced_ids = ids[:int(len(ids)*sample_ratio)] reduced_gold = [gold[i] for i in reduced_ids] reduced_sys1 = [sys1[i] for i in reduced_ids] reduced_sys2 = [sys2[i] for i in reduced_ids] # Calculate accuracy on the reduced sample and save stats sys1_score = eval_measure(reduced_gold, reduced_sys1, eval_type=eval_type) sys2_score = eval_measure(reduced_gold, reduced_sys2, eval_type=eval_type) if sys1_score > sys2_score: wins[0] += 1 elif sys1_score < sys2_score: wins[1] += 1 else: wins[2] += 1 sys1_scores.append(sys1_score) sys2_scores.append(sys2_score) # Print win stats wins = [x/float(num_samples) for x in wins] print('Win ratio: sys1=%.3f, sys2=%.3f, tie=%.3f' % (wins[0], wins[1], wins[2])) if wins[0] > wins[1]: print('(sys1 is superior with p value p=%.10f)\n' % (1-wins[0])) elif wins[1] > wins[0]: print('(sys2 is superior with p value p=%.10f)\n' % (1-wins[1])) # Print system stats sys1_scores.sort() sys2_scores.sort() print('sys1 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' % (np.mean(sys1_scores), np.median(sys1_scores), sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)])) print('sys2 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' % 
(np.mean(sys2_scores), np.median(sys2_scores), sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)])) if return_results: sys1_summary = (np.mean(sys1_scores), (sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)])) sys2_summary = (np.mean(sys2_scores), (sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)])) p_value_lose = 1-wins[0] p_value_win = 1-wins[1] return sys1_summary, sys2_summary, p_value_lose, p_value_win def eval_with_hierarchical_paired_bootstrap(gold, sys1_list, sys2_list, num_samples=10000, sample_ratio=0.5, eval_type='acc', return_results=False): ''' Evaluate with a hierarchical paired boostrap This compares two systems, performing a significance tests with paired bootstrap resampling to compare the accuracy of the two systems, with two-level sampling: first we sample a model, then we sample data to evaluate it on. :param gold: The correct labels :param sys1: The output of system 1 :param sys2: The output of system 2 :param num_samples: The number of bootstrap samples to take :param sample_ratio: The ratio of samples to take every time :param eval_type: The type of evaluation to do (acc, pearson, bleu, bleu_detok) ''' for sys1 in sys1_list: assert(len(gold) == len(sys1)) for sys2 in sys2_list: assert(len(gold) == len(sys2)) # Preprocess the data appropriately for they type of eval gold = [eval_preproc(x, eval_type) for x in gold] sys1_list = [[eval_preproc(x, eval_type) for x in sys1] for sys1 in sys1_list] sys2_list = [[eval_preproc(x, eval_type) for x in sys2] for sys2 in sys2_list] sys1_scores = [] sys2_scores = [] wins = [0, 0, 0] n = len(gold) ids = list(range(n)) for _ in tqdm(range(num_samples)): # Subsample the gold and system outputs np.random.shuffle(ids) reduced_ids = ids[:int(len(ids)*sample_ratio)] sys1_idx = np.random.choice(list(range(len(sys1_list)))) sys1 = sys1_list[sys1_idx] sys2_idx = np.random.choice(list(range(len(sys2_list)))) sys2 = sys2_list[sys2_idx] 
reduced_gold = [gold[i] for i in reduced_ids] reduced_sys1 = [sys1[i] for i in reduced_ids] reduced_sys2 = [sys2[i] for i in reduced_ids] # Calculate accuracy on the reduced sample and save stats sys1_score = eval_measure(reduced_gold, reduced_sys1, eval_type=eval_type) sys2_score = eval_measure(reduced_gold, reduced_sys2, eval_type=eval_type) if sys1_score > sys2_score: wins[0] += 1 elif sys1_score < sys2_score: wins[1] += 1 else: wins[2] += 1 sys1_scores.append(sys1_score) sys2_scores.append(sys2_score) # Print win stats wins = [x/float(num_samples) for x in wins] print('Win ratio: sys1=%.3f, sys2=%.3f, tie=%.3f' % (wins[0], wins[1], wins[2])) if wins[0] > wins[1]: print('(sys1 is superior with p value p=%.10f)\n' % (1-wins[0])) elif wins[1] > wins[0]: print('(sys2 is superior with p value p=%.10f)\n' % (1-wins[1])) # Print system stats sys1_scores.sort() sys2_scores.sort() print('sys1 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' % (np.mean(sys1_scores), np.median(sys1_scores), sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)])) print('sys2 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' % (np.mean(sys2_scores), np.median(sys2_scores), sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)])) if return_results: sys1_summary = (np.mean(sys1_scores), (sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)])) sys2_summary = (np.mean(sys2_scores), (sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)])) p_value_lose = 1-wins[0] p_value_win = 1-wins[1] return sys1_summary, sys2_summary, p_value_lose, p_value_win
[ 2, 30019, 276, 422, 11520, 3169, 549, 328, 338, 350, 9820, 18892, 26418, 4226, 198, 2, 3740, 1378, 12567, 13, 785, 14, 710, 549, 328, 14, 22602, 12, 46521, 14, 2436, 672, 14, 9866, 14, 8957, 1202, 12, 18769, 26418, 13, 9078, 198, ...
2.332578
3,972
from paginate_sqlalchemy import SqlalchemyOrmPage
[ 6738, 42208, 4559, 62, 25410, 282, 26599, 1330, 311, 13976, 282, 26599, 5574, 76, 9876, 628 ]
3.1875
16
import tensorflow as tf # import slim # conv layers layers = tf.contrib.layers arg_scope = tf.contrib.framework.arg_scope
[ 11748, 11192, 273, 11125, 355, 48700, 198, 198, 2, 1330, 18862, 198, 2, 3063, 11685, 198, 75, 6962, 796, 48700, 13, 3642, 822, 13, 75, 6962, 198, 853, 62, 29982, 796, 48700, 13, 3642, 822, 13, 30604, 13, 853, 62, 29982, 628 ]
2.952381
42
import datetime # Log parm(File_name)
[ 11748, 4818, 8079, 198, 198, 2, 5972, 1582, 76, 7, 8979, 62, 3672, 8, 628 ]
2.666667
15
#!/usr/bin/env python # ============================================================================= # MODULE DOCSTRING # ============================================================================= """ Utility classes to wrap command line tools. The module provides a class :class:`.CLITool` that provides boilerplate code to wrap command line tools and make them compatible to :class:`~tfep.utils.cli.Launcher`. """ # ============================================================================= # GLOBAL IMPORTS # ============================================================================= import abc import inspect import os # ============================================================================= # CLITOOL # ============================================================================= # ============================================================================= # CLI OPTIONS # =============================================================================
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 628, 198, 2, 38093, 25609, 198, 2, 33893, 37760, 18601, 2751, 198, 2, 38093, 25609, 198, 198, 37811, 198, 18274, 879, 6097, 284, 14441, 3141, 1627, 4899, 13, 198, 198, 464, 8265, 3769, 257,...
6.978873
142
# -*- coding: utf-8 -*- import facebook from allauth.socialaccount.models import SocialToken from django.core.exceptions import ObjectDoesNotExist
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 23960, 198, 6738, 477, 18439, 13, 14557, 23317, 13, 27530, 1330, 5483, 30642, 198, 6738, 42625, 14208, 13, 7295, 13, 1069, 11755, 1330, 9515, 13921, 3673, 3109, 396...
3.311111
45
import pprint import time import keras import numpy as np import joblib import dataset_loaders import selection_policy import augmentations import experiments import experiments_util import featurized_classifiers import visualization_util import matplotlib.pyplot as plt mem = joblib.Memory(cachedir="./cache", verbose=1) if __name__ == "__main__": main()
[ 11748, 279, 4798, 198, 11748, 640, 198, 198, 11748, 41927, 292, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 1693, 8019, 198, 198, 11748, 27039, 62, 2220, 364, 198, 11748, 6356, 62, 30586, 198, 11748, 35016, 602, 198, 11748, 10256, 1...
3.306306
111
from rest_framework import serializers from .models import *
[ 6738, 1334, 62, 30604, 1330, 11389, 11341, 198, 6738, 764, 27530, 1330, 1635, 628 ]
4.428571
14
# -*- coding: utf-8 -*- """ Created on 2014-5-13 @author: skycrab """ import json import time import random import string import urllib import hashlib import threading import traceback import xml.etree.ElementTree as ET import logging from urllib import request as urllib2 from functools import wraps from .config import WxPayConf, WxPayConf_shop try: import pycurl from cStringIO import StringIO except ImportError: pycurl = None try: import requests except ImportError: requests = None logger = logging.getLogger('control')
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 1946, 12, 20, 12, 1485, 198, 198, 31, 9800, 25, 6766, 6098, 397, 198, 37811, 198, 11748, 33918, 198, 11748, 640, 198, 11748, 4738, 198, 11748, ...
2.942408
191
from __future__ import annotations from contextlib import contextmanager from typing import TYPE_CHECKING from typing import Any from typing import Callable from typing import Dict from typing import List from typing import Optional from typing import Union from argo_dsl.api.io.argoproj.workflow import v1alpha1 if TYPE_CHECKING: from .template import Template Item = _Item() SERIALIZE_ARGUMENT_FUNCTION = Callable[[Any], str] SERIALIZE_ARGUMENT_METHOD = Callable[["Template", Any], str]
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 6738, 4732, 8019, 1330, 4732, 37153, 198, 6738, 19720, 1330, 41876, 62, 50084, 2751, 198, 6738, 19720, 1330, 4377, 198, 6738, 19720, 1330, 4889, 540, 198, 6738, 19720, 1330, 360, 713, 198, ...
3.357616
151
import unittest from app.models import Article
[ 11748, 555, 715, 395, 198, 6738, 598, 13, 27530, 1330, 10172, 628, 220, 220, 220, 220, 198 ]
3.117647
17
# coding: utf-8 ''' Created on 18.04.2013 @author: ''' import dals.os_io.io_wrapper as dal if __name__=='__main__': sets = dal.get_utf8_template() sets['name'] = 'test_import_to_jy.txt' readed = dal.file2list(sets) map(convert_one_line, readed) print 'Done'
[ 2, 19617, 25, 3384, 69, 12, 23, 201, 198, 7061, 6, 201, 198, 41972, 319, 1248, 13, 3023, 13, 6390, 201, 198, 201, 198, 31, 9800, 25, 220, 201, 198, 7061, 6, 201, 198, 201, 198, 11748, 288, 874, 13, 418, 62, 952, 13, 952, 62, ...
1.93125
160
__version__ = '1.3.0.a3'
[ 834, 9641, 834, 796, 705, 16, 13, 18, 13, 15, 13, 64, 18, 6, 198 ]
1.666667
15
import logging import math from emoji import emojize from peewee import DoesNotExist from telegram import ParseMode from telegram.ext import CommandHandler from telegram.ext import ConversationHandler from telegram.ext import Filters from telegram.ext import MessageHandler from telegram.ext import RegexHandler from constants import limit_status from handlers.shared import cancel_handler from handlers.shared import select_class_keyboard from models.class_model import ClassModel ASK_NAME, ASK_LIMIT = range(2) DELETING_CLASS = range(1)
[ 11748, 18931, 198, 11748, 10688, 198, 198, 6738, 44805, 1330, 795, 13210, 1096, 198, 198, 6738, 613, 413, 1453, 1330, 8314, 3673, 3109, 396, 198, 198, 6738, 573, 30536, 1330, 2547, 325, 19076, 198, 6738, 573, 30536, 13, 2302, 1330, 9455...
3.580645
155
""" - ` ` , ` , [] [] Run shift + alt + F10 [] ctrl + q """ """ """ # # print("") # print('hello') # print("""""") # print('''''') # : ctrl + shift + F10 # '' "" . ''' a = 7 7 a. ( ) a 7 . 7 . [ ] - + + _ - - - - ''' ''' import keyword print(keyword.kwlist) ''' ''' a = 7 # 7 a b = 7 # 7 b print(type(a)) # int print(a is 7) # true print(b is 7) # true print(a is b) # true print(id(a)) print(id(b)) print(id(7)) # id , a b 7 id ''' # a, b = 5, 10 print('a+b=', a+b) # (swapping) a, b = b, a print('a=', a, 'b=', b) # del b print(b)
[ 37811, 198, 220, 220, 220, 220, 220, 532, 220, 220, 198, 220, 220, 220, 220, 220, 220, 220, 4600, 220, 220, 220, 220, 220, 198, 220, 220, 220, 220, 220, 220, 220, 4600, 837, 220, 220, 220, 220, 220, 220, 220, 220, 198, 220, 220,...
1.532819
518
#!/usr/bin/env python3 import argparse import sys import io import os.path import shutil import requests from convert_file import convert_file from gooey import Gooey, GooeyParser if len(sys.argv) >= 2: if '--ignore-gooey' not in sys.argv: sys.argv.append('--ignore-gooey') if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 11748, 1822, 29572, 198, 11748, 25064, 198, 11748, 33245, 198, 11748, 28686, 13, 6978, 198, 11748, 4423, 346, 198, 11748, 7007, 198, 6738, 10385, 62, 7753, 1330, 10385, 62, 7753, 198...
2.542636
129
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. import os import pwd import random import traceback import tempfile import base64 import ansible.constants as C from ansible import utils from ansible import errors from ansible import module_common from ansible.runner.return_data import ReturnData
[ 2, 357, 66, 8, 2321, 11, 3899, 1024, 23303, 272, 1279, 76, 40302, 13, 2934, 3099, 272, 31, 14816, 13, 785, 29, 198, 2, 198, 2, 770, 2393, 318, 636, 286, 28038, 856, 198, 2, 198, 2, 28038, 856, 318, 1479, 3788, 25, 345, 460, 17...
3.636364
264
#!/usr/bin/env python import argparse import os import sys import matplotlib import numpy as np matplotlib.use('Agg') import matplotlib.pyplot as plt import os, sys, inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(currentdir) sys.path.insert(0, parentdir) from src import plots from src.utils import load_data from src.utils import argparse_parents # def main(): # parser = argparse.ArgumentParser(description="Plot purity and completeness per genome. Genomes can be sorted by completeness (default) or purity") # parser.add_argument('file', nargs='?', type=argparse.FileType('r'), help=argparse_parents.HELP_FILE) # parser.add_argument('-s','--sort_by', help='Sort by either purity or completeness (default: completeness)', choices=set(['purity','completeness'])) # parser.add_argument('-o','--out_file', help='Path to store image (default: only show image)') # args = parser.parse_args() # if not args.file and sys.stdin.isatty(): # parser.print_help() # parser.exit(1) # metrics = load_data.load_tsv_table(sys.stdin if not sys.stdin.isatty() else args.file) # if args.sort_by is not None: # plot_by_genome(metrics, args.out_file, args.sort_by) # else: # plot_by_genome(metrics, args.out_file) if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 1822, 29572, 198, 11748, 28686, 198, 11748, 25064, 198, 198, 11748, 2603, 29487, 8019, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6759, 29487, 8019, 13, 1904, 10786, 46384, ...
2.640152
528
from django.urls import reverse from rest_framework.test import APITestCase from testapp.models import ( A, B, C, Child, ChildProps, Container, Entry, MainObject, Parent, Tag, ) from .mixins import InclusionsMixin
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 9575, 198, 198, 6738, 1334, 62, 30604, 13, 9288, 1330, 3486, 2043, 395, 20448, 198, 198, 6738, 1332, 1324, 13, 27530, 1330, 357, 198, 220, 220, 220, 317, 11, 198, 220, 220, 220, 347, 11, 198, ...
2.411215
107
# ---------------------------------------------------------------------------- # Copyright (c) 2022, Bokulich Laboratories. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- import qiime2 as q2 import pandas as pd import threading from q2_fondue.utils import handle_threaded_exception from qiime2 import Artifact threading.excepthook = handle_threaded_exception
[ 2, 16529, 10541, 198, 2, 15069, 357, 66, 8, 33160, 11, 47390, 377, 488, 46779, 13, 198, 2, 198, 2, 4307, 6169, 739, 262, 2846, 286, 262, 40499, 347, 10305, 13789, 13, 198, 2, 198, 2, 383, 1336, 5964, 318, 287, 262, 2393, 38559, ...
4.349593
123
# vim: ts=8:sts=8:sw=8:noexpandtab # This file is part of python-markups module # License: 3-clause BSD, see LICENSE file # Copyright: (C) Dmitry Shachnev, 2012-2018 import markups.common as common from markups.abstract import AbstractMarkup, ConvertedMarkup
[ 2, 43907, 25, 40379, 28, 23, 25, 6448, 28, 23, 25, 2032, 28, 23, 25, 3919, 11201, 392, 8658, 198, 198, 2, 770, 2393, 318, 636, 286, 21015, 12, 4102, 4739, 8265, 198, 2, 13789, 25, 513, 12, 565, 682, 347, 10305, 11, 766, 38559, ...
2.955056
89
import pandas as pd from pm4py.objects.log.importer.xes import importer as xes_import from pm4py.objects.log.util import log as utils from pm4py.statistics.start_activities.log.get import get_start_activities from pm4py.statistics.end_activities.log.get import get_end_activities from pm4py.algo.filtering.log.end_activities import end_activities_filter from pm4py.visualization.petrinet import factory as vis_factory from pm4py.algo.discovery.alpha import factory as alpha_miner from pm4py.algo.discovery.heuristics import factory as heuristics_miner from pm4py.algo.discovery.inductive import factory as inductive_miner from pm4py.evaluation import factory as evaluation_factory from pm4py.algo.conformance.tokenreplay import factory as token_replay # logs traces # process model # 1. event log log = xes_import.apply('edited_hh110_labour.xes') trace_key_list = [] event_key_list = [] event_count = 0 # Counter event for trace in log: # keys trace key # trace_key_list . for trace_key in trace.attributes.keys(): if trace_key not in trace_key_list: trace_key_list.append(trace_key) for event in trace: # keys events for event_key in event.keys(): if event_key not in event_key_list: event_key_list.append(event_key) event_count += 1 # for events counter 1 # 2. trace event print("Trace keys : " + str(trace_key_list)) print("Event keys : " + str(event_key_list)) # 3. traces print("Number of traces : " + str(len(log))) # 4. events print("Number of events : " + str(event_count)) # 5. events event log unique_events = utils.get_event_labels(log,'concept:name') print("Events of log : " + str(unique_events)) # 6. # traces # start_activities = get_start_activities(log) print("Starting activities: " + str(start_activities)) # # traces end_activities = get_end_activities(log) print("End activities" + str(end_activities)) # 7. 
case id, activity name, transition (start # complete), timestamp # DataFrame log_df = pd.DataFrame(columns = ["Case ID" , "Activity Name" , "Transition" , "Timestamp"]) for trace_id, trace in enumerate(log): for event_index, event in enumerate(trace): # DataFrame # event, # row = pd.DataFrame({ "Case ID" : [trace.attributes["concept:name"]], "Activity Name" : [event["concept:name"]], "Transition" : [event["lifecycle:transition"]], "Timestamp" : [event["time:timestamp"]] }) # append DataFrame # log_df = log_df.append(row, ignore_index = True) print("Printing log table : \n") print(log_df) # dataframe # #print(log_df.to_string(index=False)) # log_df csv log_df.to_csv('log_table.csv', index = False) # 8. event log traces # "end" filtered_log = end_activities_filter.apply(log,["End"]) print("New log : \n " + str(filtered_log)) # size filtered_log # "End" print("Size of filtered log : " + str(len(filtered_log))) # - # filtered_log csv # 2 comments #filt_log_df = pd.DataFrame(filtered_log) #filt_log_df.to_csv('filtered_log.csv') # 9. 
# Alpha Miner # log net, initial_marking, final_marking = alpha_miner.apply(log) gviz = vis_factory.apply(net, initial_marking, final_marking) vis_factory.view(gviz) evaluation_result = evaluation_factory.apply(log, net, initial_marking,final_marking) print(evaluation_result) print_fit_traces(log, net, initial_marking, final_marking) #evaluation_df = pd.DataFrame(evaluation_result) #print(evaluation_df) #evaluation_df.to_csv('alpha_miner_log_evaluation.csv') # filtered log net, initial_marking, final_marking = alpha_miner.apply(filtered_log) gviz = vis_factory.apply(net, initial_marking, final_marking) vis_factory.view(gviz) evaluation_result = evaluation_factory.apply(filtered_log, net, initial_marking,final_marking) print(evaluation_result) print_fit_traces(log, net, initial_marking, final_marking) #evaluation_df = pd.DataFrame(evaluation_result) #print(evaluation_df) #evaluation_df.to_csv('alpha_miner_filtered_log_evaluation.csv') # Heuristics Miner # log net, initial_marking, final_marking = heuristics_miner.apply(log) gviz = vis_factory.apply(net, initial_marking, final_marking) vis_factory.view(gviz) evaluation_result = evaluation_factory.apply(log, net, initial_marking,final_marking) print(evaluation_result) print_fit_traces(log, net, initial_marking, final_marking) #evaluation_df = pd.DataFrame(evaluation_result) #print(evaluation_df) #evaluation_df.to_csv('heuristic_miner_log_evaluation.csv') #alignments = alignment.apply_log(log, net, initial_marking, final_marking) #pretty_print_alignments(alignments) # filtered log net, initial_marking, final_marking = heuristics_miner.apply(filtered_log) gviz = vis_factory.apply(net, initial_marking, final_marking) vis_factory.view(gviz) evaluation_result = evaluation_factory.apply(filtered_log, net, initial_marking,final_marking) print(evaluation_result) print_fit_traces(log, net, initial_marking, final_marking) #evaluation_df = pd.DataFrame(evaluation_result) #print(evaluation_df) 
#evaluation_df.to_csv('heuristic_miner_filtered_log_evaluation.csv') # Inductive Miner # log net, initial_marking, final_marking = inductive_miner.apply(log) gviz = vis_factory.apply(net, initial_marking, final_marking) vis_factory.view(gviz) evaluation_result = evaluation_factory.apply(log, net, initial_marking,final_marking) print(evaluation_result) print_fit_traces(log, net, initial_marking, final_marking) #evaluation_df = pd.DataFrame(evaluation_result) #print(evaluation_df) #evaluation_df.to_csv('inductive_miner_log_evaluation.csv') # filtered log net, initial_marking, final_marking = inductive_miner.apply(filtered_log) gviz = vis_factory.apply(net, initial_marking, final_marking) vis_factory.view(gviz) evaluation_result = evaluation_factory.apply(filtered_log, net, initial_marking,final_marking) print(evaluation_result) print_fit_traces(log, net, initial_marking, final_marking) #evaluation_df = pd.DataFrame(evaluation_result) #print(evaluation_df) #evaluation_df.to_csv('inductive_miner_filtered_log_evaluation.csv')
[ 11748, 19798, 292, 355, 279, 67, 201, 198, 6738, 9114, 19, 9078, 13, 48205, 13, 6404, 13, 320, 26634, 13, 48169, 1330, 848, 4337, 355, 2124, 274, 62, 11748, 201, 198, 6738, 9114, 19, 9078, 13, 48205, 13, 6404, 13, 22602, 1330, 2604,...
2.387345
2,734
#!/usr/bin/env python ############################################################################## # Copyright (c) 2014, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # Written by: # Nikhil Jain <nikhil.jain@acm.org> # Abhinav Bhatele <bhatele@llnl.gov> # Peer-Timo Bremer <ptbremer@llnl.gov> # # LLNL-CODE-678961. All rights reserved. # # This file is part of Damselfly. For details, see: # https://github.com/LLNL/damselfly # Please also read the LICENSE file for our notice and the LGPL. ############################################################################## # # Define k random distrobutions centered around random positions # Keep track of empty cells # For each set # Until you have placed everything # Randomly pull an empty cell # Compute the current PDF value of this cell for this distribution # sum-up the probability for all already occupied cells and then scale your # current p with 1 / (1-sum) # Pull uniform random number [0,1] # Accept or reject sample # from sys import argv,exit import numpy as np import struct from math import * import random from __builtin__ import True symbol = ["ro","g^","bs","yo","cs"] colors = ["r","g","b","y","c"] # Base class for are probability distribution def rank_to_coords(rank,groups,rows,columns,nodes_per_router,cores_per_node): dims = [0,0,0,0,rank] dims[4] = rank % cores_per_node; rank /= cores_per_node; dims[3] = rank % nodes_per_router; rank /= nodes_per_router; dims[2] = rank % columns; rank /= columns; dims[1] = rank % rows; rank /= rows; dims[0] = rank % groups; return dims if len(argv) < 10: print "Usage: %s <numGroups> <numRows> <numColumns> <numNodesPerRouter> <numCoresPerNode> [Binomial|Geometric] <p> <output filename> <#cores task 1> .... 
<#cores task N>" exit(0) # Parse the command line groups = int(argv[1]) rows = int(argv[2]) columns = int(argv[3]) nodes_per_router = int(argv[4]) cores_per_node = int(argv[5]) dist = argv[6] p = float(argv[7]) fileprefix = argv[8] # Compute the system size router_count = groups * rows *columns node_count = router_count * nodes_per_router cores_per_router = nodes_per_router * cores_per_node core_count = router_count * nodes_per_router * cores_per_node task_sizes = [int(arg) for arg in argv[9:]] # Create a list of tasks tasks = range(0,len(task_sizes)) # Shuffle the tasks to give everyone the opportunity to have an "empty" machine np.random.shuffle(tasks) # Adjust the order of sizes task_sizes = [task_sizes[i] for i in tasks] # Create random array of centers task_centers = np.random.random_integers(0,router_count-1,len(tasks)) # Create the corresponding distributions if dist == "Binomial": task_distributions = [Binomial(router_count,c,p) for c in task_centers] elif dist == "Geometric": task_distributions = [Geometric(router_count,c,p) for c in task_centers] # Slots cores = np.zeros(core_count) # List of empty router slots empty = list(xrange(0, router_count)) # List of empty nodes empty_nodes = list(xrange(0,node_count)) # Create scale down the task_sizes to leave some stragglers task_sizes_tight = list(task_sizes) for i,t in enumerate(task_sizes_tight): # How many routers would this job fill nr_rounters = t / cores_per_router if nr_rounters * cores_per_router < t: nr_rounters += 1 # Pick no more than about 3% of the routers to be left out task_sizes_tight[i] = (97*nr_rounters) / 100 * cores_per_router # For all tasks for t,size,dist in zip(tasks,task_sizes_tight,task_distributions): count = 0 while count < size: # Choose a random node elem = random.choice(empty) # Get a uniform random number test = np.random.uniform() # Get the current pmf value for the distribution current = dist.adjustedPMF(elem) if current < 0: print "Current ", current, " of ", elem, " tested 
against ", test print dist.pmf(elem), dist.fill_sum exit(0) # If we pass the test if test < current: #print "Picked node", elem, " ", (size-count)/cores_per_node, " left to pick" #print "Current ", current, dist.pmf(elem)," of ", elem, " tested against ", test # Now fill up all the cores as long as # we have tasks i = 0 while i<cores_per_node*nodes_per_router and count<size: cores[elem*cores_per_node*nodes_per_router + i] = t+1 i += 1 count += 1 # Remove the router from the empty list empty.remove(elem) # Remove the corresponding nodes (This assumine the sizes for this # loop are multiples of the core_per_router for i in xrange(0,nodes_per_router): empty_nodes.remove(elem*nodes_per_router + i) # Adjust all distributions to include another filled element for d in task_distributions: d.fillSlot(elem) # Now place the remaining cores of the tasks by uniformly picking # empty nodes for t,full,tight in zip(tasks,task_sizes,task_sizes_tight): size = full - tight count = 0 while count < size: # Choose a random node elem = random.choice(empty_nodes) i = 0 while i<cores_per_node and count<size: cores[elem*cores_per_node + i] = t+1 i += 1 count += 1 # Remove the router from the empty list empty_nodes.remove(elem) if False: pmfs = [] scale = 0 for d in task_distributions: pmfs.append([d.pmf(i) for i in xrange(0,router_count)]) scale = max(scale,max(pmfs[-1])) import matplotlib.pyplot as plt fig, ax = plt.subplots() for pmf,t in zip(pmfs,tasks): #print "Colors ", colors[t] ax.plot(xrange(0,cores_per_node*nodes_per_router*router_count,cores_per_node),pmf,colors[t]) #print "" for t in tasks: #print "Colors ", symbol[t] x = np.where(cores == t+1) ax.plot(x,[(t+1)*scale/len(tasks) ]*len(x),symbol[t]) #print x plt.show() # set up text and binary files csvfileall = open(fileprefix + ".csv", "w") binfileall = open(fileprefix + ".bin", "wb") csvfileall.write("g,r,c,n,core,jobid\n") for taskid in xrange(0,len(tasks)): x = np.where(cores == taskid+1) # Now find the size of the t's job 
loc = 0 while tasks[loc] != taskid: loc += 1 if x[0].shape[0] != task_sizes[loc]: print "Task assignment inconsistent for task ", taskid, ": found ", x[0].shape[0], " assigned cores but needed ", task_sizes[loc] exit(0) csvfile = open("%s-%d.csv" % (fileprefix, taskid), "w") binfile = open("%s-%d.bin" % (fileprefix, taskid), "wb") csvfile.write("g,r,c,n,core,jobid\n") # print x for rank in x[0]: dims = rank_to_coords(rank, groups, rows, columns, nodes_per_router, cores_per_node) csvfile.write("%d,%d,%d,%d,%d,0\n" % (dims[0],dims[1],dims[2],dims[3],dims[4])) csvfileall.write("%d,%d,%d,%d,%d,%d\n" % (dims[0],dims[1],dims[2],dims[3],dims[4],taskid)) binfile.write(struct.pack('6i', dims[0], dims[1], dims[2], dims[3], dims[4], 0)) binfileall.write(struct.pack('6i', dims[0], dims[1], dims[2], dims[3], dims[4], taskid)) csvfile.close() binfile.close() csvfileall.close() binfileall.close()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 29113, 29113, 7804, 4242, 2235, 198, 2, 15069, 357, 66, 8, 1946, 11, 13914, 45036, 3549, 2351, 4765, 11, 11419, 13, 198, 2, 21522, 771, 379, 262, 13914, 45036, 3549, 2351, 18643, ...
2.250724
3,454
#!/usr/bin/env python import shutil import itertools import random import sys import traceback from pathlib import Path import altair as alt import pandas as pd import plotly as plotly import plotly.express as px import plotly.graph_objects as go import streamlit as st from pandas.api.types import is_numeric_dtype from plotly.validators.scatter.marker import SymbolValidator from streamlit import cli as stcli import scml_vis.compiler as compiler from scml_vis.compiler import VISDATA_FOLDER from scml_vis.utils import ( add_selector, add_stats_display, add_stats_selector, load_data, plot_network, score_distribution, score_factors, ) __all__ = ["main"] MARKERS = SymbolValidator().values[2::3] MARKERS = [_ for _ in MARKERS if not any(_.startswith(x) for x in ("star", "circle", "square"))] random.shuffle(MARKERS) MARKERS = ["circle", "square"] + MARKERS DB_FOLDER = Path.home() / "negmas" / "runsdb" DB_NAME = "rundb.csv" BASE_FOLDERS = [ Path.home() / "negmas" / "logs" / "scml" / "scml2020", Path.home() / "negmas" / "logs" / "scml" / "scml2020oneshot", Path.home() / "negmas" / "logs" / "scml" / "scml2021oneshot", Path.home() / "negmas" / "logs" / "scml" / "scml2021", Path.home() / "negmas" / "logs" / "tournaments", Path.home() / "negmas" / "tournaments", ] WORLD_INDEX = 0 if __name__ == "__main__": import sys from streamlit import cli as stcli folder = None if len(sys.argv) > 1: folder = Path(sys.argv[1]) if st._is_running_with_streamlit: main(folder) else: sys.argv = ["streamlit", "run"] + sys.argv sys.exit(stcli.main())
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 4423, 346, 198, 11748, 340, 861, 10141, 198, 11748, 4738, 198, 11748, 25064, 198, 11748, 12854, 1891, 198, 6738, 3108, 8019, 1330, 10644, 198, 198, 11748, 5988, 958, 355, 5988, 19...
2.450808
681
import numpy as np from typing import List from math import sqrt from QFA.Automaton import Automaton from math import cos, sin, pi if __name__ == "__main__": example()
[ 11748, 299, 32152, 355, 45941, 198, 6738, 19720, 1330, 7343, 198, 6738, 10688, 1330, 19862, 17034, 198, 198, 6738, 1195, 7708, 13, 38062, 13951, 1330, 17406, 13951, 198, 6738, 10688, 1330, 8615, 11, 7813, 11, 31028, 628, 628, 628, 628, ...
3.12069
58
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.views.generic import TemplateView, ListView from django.shortcuts import render from rest_framework.authentication import TokenAuthentication from rest_framework import viewsets, mixins from rest_framework.response import Response from rest_framework import generics, status from rest_framework.views import APIView from .serializers import SearchSerializer from .sample_data import MOCK_DATA from .models import Search import requests import os # class SearchView(mixins.ListAPIMixin): # serializer_class = SearchSerializer # def get(self, request): # response = requests.get(MOCK_DATA) # if response.ok: # return response # else: # return None # class PostCollection(ListModelMixin, # CreateModelMixin, # GenericAPIView): # queryset = Post.objects.all() # serializer_class = PostSerializer # def get(self, request, *args, **kwargs): # return self.list(request, *args, **kwargs) # def post(self, request, *args, **kwargs): # return self.create(request, *args, **kwargs) # def delete(self, request, *args, **kwargs): # return self.destroy(request, *args, **kwargs) # return context # def home(request): # ip_address = request.META.get('HTTP_X_FORWARDED_FOR', '') # response = requests.get( # 'https://nasaapidimasv1.p.rapidapi.com/getAsteroidStats') # nasadata = response.json() # return render(request, 'home.html', { # 'ip': nasadata['ip'], # 'country': nasadata['country_name'], # 'latitude': nasadata['latitude'], # 'longitude': nasadata['longitude'], # 'api_key': os.environ.get('API_KEY', '') # }) # Create your views here.
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 6738, 42625, 14208, 13, 33571, 13, 41357, 1330, 37350, 7680, 11, 7343, 7680, 198, 6738, 42625, 14208, 13, ...
2.4753
749
#!/usr/bin/env python import rospy from geometry_msgs.msg import Vector3 from sensor_msgs.msg import Imu import numpy as np c_imu, c_angle = 0, 0 if __name__ == "__main__": rospy.init_node("freq_checker") imu_sub = rospy.Subscriber("/myo_raw/myo_ori", Vector3, cb_angle) ang_sub = rospy.Subscriber("/myo_raw/myo_imu", Imu, cb_imu) rospy.spin()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 686, 2777, 88, 198, 6738, 22939, 62, 907, 14542, 13, 19662, 1330, 20650, 18, 198, 6738, 12694, 62, 907, 14542, 13, 19662, 1330, 1846, 84, 198, 11748, 299, 32152, 355, 45941...
2.179012
162
# [START woosmap_http_zones_collection_request] import requests import json url = "https://api.woosmap.com/zones?private_key=YOUR_PRIVATE_API_KEY" payload = json.dumps({ "zones": [ { "zone_id": "ZoneA", "description": "Delivery Zone for Store A", "store_id": "STORE_ID_123456", "polygon": "POLYGON ((-122.496116 37.7648181,-122.4954079 37.751518,-122.4635648 37.7530788,-122.4618481 37.7514501,-122.4601315 37.7521288,-122.4565266 37.7513144,-122.4540375 37.7566755,-122.4528359 37.7583041,-122.4515485 37.7595934,-122.4546384 37.774656,-122.4718903 37.7731635,-122.472577 37.772485,-122.4755811 37.7725529,-122.4791001 37.7723493,-122.4793576 37.7713995,-122.4784993 37.769839,-122.4783276 37.7680071,-122.4774693 37.766718,-122.4772118 37.7652931,-122.496116 37.7648181))", "types": [ "delivery" ] }, { "zone_id": "ZoneB", "description": "Delivery Zone for Store B", "store_id": "STORE_ID_123456", "polygon": "POLYGON ((-122.4546384 37.774656,-122.4515485 37.7595934,-122.4354306 37.7602172,-122.4333707 37.7512596,-122.423071 37.7511239,-122.4242726 37.7687665,-122.4259893 37.7691736,-122.4289075 37.7732444,-122.4306241 37.7850483,-122.4472753 37.7830133,-122.445902 37.7759581,-122.4546384 37.774656))", "types": [ "delivery" ] }, { "zone_id": "ZoneC", "description": "Delivery Zone for Store C", "store_id": "STORE_ID_45678", "polygon": "POLYGON ((-122.4758889 37.7524995,-122.4751594 37.7321718,-122.4688079 37.7299995,-122.4648597 37.7261979,-122.4519851 37.7228035,-122.4483802 37.7215815,-122.4458053 37.726741,-122.4365356 37.7310857,-122.4315574 37.7324433,-122.4246909 37.7312214,-122.4219444 37.731493,-122.423071 37.7511239,-122.4333707 37.7512596,-122.4354306 37.7602172,-122.4515485 37.7595934,-122.4528628 37.7582744,-122.4540375 37.7566755,-122.4565266 37.7513144,-122.4601315 37.7521288,-122.4618481 37.7514501,-122.4635648 37.7530788,-122.4758889 37.7524995))", "types": [ "delivery" ] } ] }) headers = { 'content-type': 'application/json' } response = requests.request("POST", 
url, headers=headers, data=payload) print(response.text) # [END woosmap_http_zones_collection_request]
[ 2, 685, 2257, 7227, 266, 16426, 8899, 62, 4023, 62, 89, 1952, 62, 43681, 62, 25927, 60, 198, 11748, 7007, 198, 11748, 33918, 198, 198, 6371, 796, 366, 5450, 1378, 15042, 13, 86, 16426, 8899, 13, 785, 14, 89, 1952, 30, 19734, 62, 2...
1.992623
1,220
import win32com.client # Disable early binding: full of race conditions writing the cache files, # and changes the semantics since inheritance isn't handled correctly import win32com.client.gencache _savedGetClassForCLSID = win32com.client.gencache.GetClassForCLSID win32com.client.gencache.GetClassForCLSID = lambda x: None project = win32com.client.DispatchEx("Mga.MgaProject") project.Open("MGA=" + r'D:\Projects\META\development\models\DynamicsTeam\MasterInterpreter\MasterInterpreter.mga') # config_light = win32com.client.DispatchEx("CyPhyMasterInterpreter.ConfigurationSelectionLight") # # GME id, or guid, or abs path or path to Test bench or SoT or PET # config_light.ContextId = '{6d24a596-ec4f-4910-895b-d03a507878c3}' # print config_light.SelectedConfigurationIds # config_light.SetSelectedConfigurationIds(['id-0065-000000f1']) # #config_light.KeepTemporaryModels = True # #config_light.PostToJobManager = True # master = win32com.client.DispatchEx("CyPhyMasterInterpreter.CyPhyMasterInterpreterAPI") # master.Initialize(project) # results = master.RunInTransactionWithConfigLight(config_light) # It works only this way and does not worth the time to figure out the other way. # will run ALL configurations. focusobj = None try: project.BeginTransactionInNewTerr() focusobj = project.GetObjectByID('id-0065-00000635') finally: project.AbortTransaction() selectedobj=win32com.client.DispatchEx("Mga.MgaFCOs") interpreter = "MGA.Interpreter.CyPhyMasterInterpreter" launcher = win32com.client.DispatchEx("Mga.MgaLauncher") launcher.RunComponent(interpreter, project, focusobj, selectedobj, 128) project.Close()
[ 11748, 1592, 2624, 785, 13, 16366, 201, 198, 201, 198, 2, 31529, 1903, 12765, 25, 1336, 286, 3234, 3403, 3597, 262, 12940, 3696, 11, 201, 198, 2, 290, 2458, 262, 33815, 1201, 24155, 2125, 470, 12118, 9380, 201, 198, 11748, 1592, 2624,...
2.862944
591
import numpy as np import matplotlib.pyplot as plt from PIL import Image from PIL import ImageOps
[ 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 6738, 350, 4146, 1330, 7412, 198, 6738, 350, 4146, 1330, 7412, 41472, 628, 628, 628, 628, 628 ]
3.147059
34
import os
[ 11748, 28686, 198 ]
3.333333
3
from django.shortcuts import render from django.http import Http404 from .models import CarOwner
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 26429, 220, 198, 6738, 764, 27530, 1330, 1879, 42419, 628, 628 ]
3.607143
28
from .WtBtAnalyst import WtBtAnalyst from .WtCtaOptimizer import WtCtaOptimizer __all__ = ["WtBtAnalyst","WtCtaOptimizer"]
[ 6738, 764, 54, 83, 33, 83, 2025, 21470, 1330, 370, 83, 33, 83, 2025, 21470, 201, 198, 6738, 764, 54, 83, 34, 8326, 27871, 320, 7509, 1330, 370, 83, 34, 8326, 27871, 320, 7509, 201, 198, 201, 198, 834, 439, 834, 796, 14631, 54, 8...
2.172414
58
import math import sys import time from grove.adc import ADC Grove = GroveRotaryAngleSensor if __name__ == '__main__': main()
[ 11748, 10688, 198, 11748, 25064, 198, 11748, 640, 198, 6738, 7128, 303, 13, 324, 66, 1330, 49169, 628, 198, 198, 42921, 303, 796, 24144, 24864, 560, 13450, 293, 47864, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, ...
2.77551
49
# -*- coding: utf-8 -*- import string from flask_wtf import FlaskForm as Form from wtforms.fields.html5 import URLField, EmailField, TelField from wtforms import (ValidationError, HiddenField, TextField, HiddenField, PasswordField, SubmitField, TextAreaField, IntegerField, RadioField, FileField, DecimalField, BooleanField, SelectField, FormField, FieldList) from wtforms.validators import (Required, Length, EqualTo, Email, NumberRange, URL, AnyOf, Optional, IPAddress) from flask_login import current_user from ..user import User from ..widgets import ButtonField
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 4731, 198, 6738, 42903, 62, 86, 27110, 1330, 46947, 8479, 355, 5178, 198, 6738, 266, 83, 23914, 13, 25747, 13, 6494, 20, 1330, 10289, 15878, 11, 9570, 15878,...
3.222826
184
"""TLA+ parser and syntax tree.""" from .parser import parse, parse_expr
[ 37811, 51, 13534, 10, 30751, 290, 15582, 5509, 526, 15931, 198, 6738, 764, 48610, 1330, 21136, 11, 21136, 62, 31937, 198 ]
3.47619
21
import keras.backend as K import keras.layers import runai.mp from .keep import Keep from .parallelised import Parallelised Activation = Keep.create('Activation') Dropout = Keep.create('Dropout') Flatten = Keep.create('Flatten')
[ 11748, 41927, 292, 13, 1891, 437, 355, 509, 198, 11748, 41927, 292, 13, 75, 6962, 198, 198, 11748, 1057, 1872, 13, 3149, 198, 198, 6738, 764, 14894, 1330, 9175, 198, 6738, 764, 1845, 29363, 1417, 1330, 42945, 1417, 198, 198, 25526, 34...
3.12
75
from matplotlib import pyplot as plt import numpy as np
[ 6738, 2603, 29487, 8019, 1330, 12972, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198 ]
3.294118
17
# utility functions import ast import urllib import datetime import pytz import pylibmc # Import settings from django.conf import settings # API Models from apps.api.models import APIKey, Character, APITimer # Eve_DB Models from eve_db.models import MapSolarSystem # API Access Masks CHARACTER_API_ACCESS_MASKS = {'AccountBalance': 1, 'AssetList': 2, 'CalendarEventAttendees': 4, 'CharacterSheet': 8, 'ContactList': 16, 'ContactNotifications': 32, 'FacWarStats': 64, 'IndustryJobs': 128, 'KillLog': 256, 'MailBodies': 512, 'MailingLists': 1024, 'MailMessages': 2048, 'MarketOrders': 4096, 'Medals': 8192, 'Notifications': 16384, 'NotificationTexts': 32768, 'Research': 65536, 'SkillInTraining': 131072, 'SkillQueue': 262144, 'Standings': 524288, 'UpcomingCalendarEvents': 1048576, 'WalletJournal': 2097152, 'WalletTransactions': 4194304, 'CharacterInfo': 25165824, 'AccountStatus': 33554432, 'Contracts': 67108864, 'Locations': 134217728} def get_memcache_client(): """ Returns a ready-to-use memcache client """ return pylibmc.Client(settings.MEMCACHE_SERVER, binary=settings.MEMCACHE_BINARY, behaviors=settings.MEMCACHE_BEHAVIOUR) def dictfetchall(cursor): """ Returns all rows from a cursor as a dict """ desc = cursor.description return [ dict(zip([col[0] for col in desc], row)) for row in cursor.fetchall() ] def cast_empty_string_to_int(string): """ Casts empty string to 0 """ # Strip stuff only if it's a string if isinstance(string, str): string = string.strip() return int(string) if string else 0 def cast_empty_string_to_float(string): """ Casts empty string to 0 """ # Strip stuff only if it's a string if isinstance(string, str): string = string.strip() return float(string) if string else 0 def calculate_character_access_mask(sheets): """ Returns combined access mask for a list of API sheets. 
""" mask = 0 for sheet in sheets: mask += CHARACTER_API_ACCESS_MASKS[sheet] return mask def manage_character_api_timers(character): """ Adds and removes character APITimers for a given character depending on the character's key permissions. When we add more functions, we need to add them to the masks dictionary. """ key_mask = character.apikey.accessmask for sheet in CHARACTER_API_ACCESS_MASKS: mask = CHARACTER_API_ACCESS_MASKS[sheet] if ((mask & key_mask) == mask): # If we have permission, create timer if not already present try: APITimer.objects.get(character=character, apisheet=sheet) except APITimer.DoesNotExist: new_timer = APITimer(character=character, corporation=None, apisheet=sheet, nextupdate=pytz.utc.localize(datetime.datetime.utcnow())) new_timer.save() else: # If we are not permitted to do this, remove existent timers try: APITimer.objects.get(character=character, apisheet=sheet).delete except APITimer.DoesNotExist: pass def validate_characters(user, access_mask): """ Returns characters of a user that match a given minimum access mask. """ # Get keys keys = APIKey.objects.filter(user=user) characters = [] for key in keys: # Do a simple bitwise operation to determine if we have sufficient rights with this key. if ((access_mask & key.accessmask) == access_mask): # Get all chars from that key which have sufficient permissions. characters += list(Character.objects.filter(apikey=key)) return characters def find_path(start, finish, security=5, invert=0): """ Returns a list of system objects which represent the path. 
start: system_id of first system finish: system_id of last system security: sec level of system * 10 invert: if true (1), use security as highest seclevel you want to enter, default (0) seclevel is the lowest you want to try to use """ # Set params params = urllib.urlencode({'start': start, 'finish': finish, 'seclevel': security, 'invert': invert}) response = urllib.urlopen('http://localhost:3455/path', params) path_list = ast.literal_eval(response.read()) path = [] for waypoint in path_list: path.append(MapSolarSystem.objects.get(id=waypoint)) return path
[ 2, 10361, 5499, 198, 11748, 6468, 198, 11748, 2956, 297, 571, 198, 11748, 4818, 8079, 198, 11748, 12972, 22877, 198, 11748, 279, 2645, 571, 23209, 198, 198, 2, 17267, 6460, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 198, 2, ...
2.089078
2,582
from django.shortcuts import render from ..models import Field
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 198, 6738, 11485, 27530, 1330, 7663, 628, 198 ]
3.882353
17
"""mcpython - a minecraft clone written in python licenced under MIT-licence authors: uuk, xkcdjerry original game by forgleman licenced under MIT-licence minecraft by Mojang blocks based on 1.14.4.jar of minecraft, downloaded on 20th of July, 2019""" import globals as G import crafting.IRecipeType import json import ResourceLocator import item.ItemHandler import traceback import mod.ModMcpython G.craftinghandler = CraftingHandler() mod.ModMcpython.mcpython.eventbus.subscribe("stage:recipe:groups", load_recipe_providers, info="loading crafting recipe groups") mod.ModMcpython.mcpython.eventbus.subscribe("stage:recipes", G.craftinghandler.load, "minecraft", info="loading crafting recipes")
[ 37811, 76, 13155, 7535, 532, 257, 6164, 3323, 17271, 3194, 287, 21015, 3476, 5864, 739, 17168, 12, 677, 594, 198, 41617, 25, 334, 2724, 11, 2124, 74, 10210, 73, 6996, 198, 198, 14986, 983, 416, 5784, 293, 805, 3476, 5864, 739, 17168, ...
2.691525
295
# Checks number of concurrent connections from XCaches to MWT2 dCache. # Creates alarm if more than 200 from any server. # ==== # It is run every 30 min from a cron job. import json from datetime import datetime import requests from alerts import alarms config_path = '/config/config.json' with open(config_path) as json_data: config = json.load(json_data,) print('current time', datetime.now()) res = requests.get( 'http://graphite.mwt2.org/render?target=dcache.xrootd.*&format=json&from=now-2min') if (res.status_code == 200): data = res.json() print(data) print('recieved data on {} servers'.format(len(data))) else: print('problem in receiving connections!') ALARM = alarms('Virtual Placement', 'XCache', 'large number of connections') for server in data: serverIP = server['target'].replace('dcache.xrootd.', '').replace('_', '.') connections = server['datapoints'][-1][0] timestamp = server['datapoints'][-1][1] timestamp = datetime.fromtimestamp(timestamp) timestamp = timestamp.strftime("%Y-%m-%d %H:%M:%S") if not connections: print('n connections not retrieved... skipping.') continue if connections < 200: print('server {} has {} connections.'.format(serverIP, connections)) else: source = { "xcache": serverIP, "n_connections": connections, "timestamp": timestamp } print(source) ALARM.addAlarm( body='too many connections.', tags=[serverIP], source=source )
[ 2, 47719, 1271, 286, 24580, 8787, 422, 1395, 34, 3694, 284, 29961, 51, 17, 288, 30562, 13, 201, 198, 2, 7921, 274, 10436, 611, 517, 621, 939, 422, 597, 4382, 13, 201, 198, 2, 796, 18604, 201, 198, 2, 632, 318, 1057, 790, 1542, 9...
2.447289
664
import csv filename = "traingles.csv" with open(filename, 'r') as csv_file: csv_reader = csv.reader(csv_file) next(csv_reader) for line in csv_reader: if line: print(isInside(line))
[ 11748, 269, 21370, 628, 198, 34345, 796, 366, 9535, 278, 829, 13, 40664, 1, 198, 198, 4480, 1280, 7, 34345, 11, 705, 81, 11537, 355, 269, 21370, 62, 7753, 25, 198, 220, 220, 220, 269, 21370, 62, 46862, 796, 269, 21370, 13, 46862, ...
2.16
100
t = np.arange(0, 10, 0.1) # Time from 0 to 10 years in 0.1 steps with plt.xkcd(): p = np.exp(0.3 * t) fig = plt.figure(figsize=(6, 4)) plt.plot(t, p) plt.ylabel('Population (millions)') plt.xlabel('time (years)') plt.show()
[ 83, 796, 45941, 13, 283, 858, 7, 15, 11, 838, 11, 657, 13, 16, 8, 1303, 3862, 422, 657, 284, 838, 812, 287, 657, 13, 16, 4831, 198, 198, 4480, 458, 83, 13, 87, 74, 10210, 33529, 628, 220, 279, 796, 45941, 13, 11201, 7, 15, 1...
2.125
112
# Tests for code in squarepants/src/main/python/squarepants/plugins/copy_resources/tasks/copy_resource_jars # # Run with: # ./pants test squarepants/src/test/python/squarepants_test/plugins:copy_resources from pants.backend.jvm.targets.jar_dependency import JarDependency from pants.backend.jvm.targets.jar_library import JarLibrary from pants_test.tasks.task_test_base import TaskTestBase from squarepants.plugins.link_resources_jars.targets.resources_jar import ResourcesJar from squarepants.plugins.link_resources_jars.tasks.link_resources_jars import LinkResourcesJars
[ 2, 30307, 329, 2438, 287, 6616, 38895, 14, 10677, 14, 12417, 14, 29412, 14, 23415, 38895, 14, 37390, 14, 30073, 62, 37540, 14, 83, 6791, 14, 30073, 62, 31092, 62, 73, 945, 198, 2, 198, 2, 5660, 351, 25, 198, 2, 24457, 38895, 1332,...
3.217877
179
import os import sys # If installed to a custom prefix directory, binwalk may not be in # the default module search path(s). Try to resolve the prefix module # path and make it the first entry in sys.path. # Ensure that 'src/binwalk' becomes '.' instead of an empty string _parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) for _module_path in [ # from repo: src/scripts/ -> src/ _parent_dir, # from build dir: build/scripts-3.4/ -> build/lib/ os.path.join(_parent_dir, "lib"), # installed in non-default path: bin/ -> lib/python3.4/site-packages/ os.path.join(_parent_dir, "lib", "python%d.%d" % (sys.version_info[0], sys.version_info[1]), "site-packages") ]: if os.path.exists(_module_path) and _module_path not in sys.path: sys.path = [_module_path] + sys.path import binwalk import binwalk.modules if __name__ == "__main__": main()
[ 11748, 28686, 198, 11748, 25064, 198, 198, 2, 1002, 6589, 284, 257, 2183, 21231, 8619, 11, 9874, 11152, 743, 407, 307, 287, 198, 2, 262, 4277, 8265, 2989, 3108, 7, 82, 737, 9993, 284, 10568, 262, 21231, 8265, 198, 2, 3108, 290, 787,...
2.519789
379
#!/usr/bin/python3 # file: mini_frame.py # Created by Guang at 19-7-19 # description: # *-* coding:utf8 *-* import time
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 2, 2393, 25, 9927, 62, 14535, 13, 9078, 198, 2, 15622, 416, 36980, 379, 678, 12, 22, 12, 1129, 198, 2, 6764, 25, 198, 198, 2, 1635, 12, 9, 19617, 25, 40477, 23, 1635, 12, 9, 198, ...
2.45098
51
import time import pymongo import schedule from order import * from utils import * # MONGODB db = pymongo.MongoClient("mongodb://localhost:27017/")["ShaurmaBinanceTerminal"] order_db = db["orders"] JOB_INTERVAL = 10.0 # interval of updating jobs_pool = {} if __name__ == '__main__': # initialize_test_db() run_server()
[ 11748, 640, 198, 198, 11748, 279, 4948, 25162, 198, 11748, 7269, 198, 198, 6738, 1502, 1330, 1635, 198, 6738, 3384, 4487, 1330, 1635, 198, 198, 2, 337, 18494, 3727, 33, 198, 9945, 796, 279, 4948, 25162, 13, 44, 25162, 11792, 7203, 310...
2.704
125
""" Modu spiajcy wszystkie wyszukiwania w jedn klas - wszystkei dane dla adresu ip/domeny. Klasa bezporednio uywana w widoku Django. """ from rest_framework.reverse import reverse from typing import List, Dict import whois import socket from connectors.credential import CredentialsNotFoundError from api_searcher.search_engines.censys_engine.censys_host_search import CensysHostSearch from api_searcher.search_engines.shodan_engine.shodan_host_search import ShodanHostSearch from .dns.dns_searcher import DNSSearcher, DNSSearcherError
[ 37811, 198, 5841, 84, 599, 544, 73, 948, 266, 82, 7357, 301, 49375, 266, 893, 89, 11308, 86, 5411, 266, 474, 276, 77, 479, 21921, 532, 266, 82, 7357, 301, 365, 72, 288, 1531, 288, 5031, 512, 411, 84, 20966, 14, 3438, 28558, 13, ...
2.918478
184
# Python - 3.6.0 get_average = lambda marks: int(sum(marks) / len(marks))
[ 2, 11361, 532, 513, 13, 21, 13, 15, 198, 198, 1136, 62, 23913, 796, 37456, 8849, 25, 493, 7, 16345, 7, 14306, 8, 1220, 18896, 7, 14306, 4008, 198 ]
2.586207
29
import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import datasets, transforms from torch.utils.data import DataLoader import hawtorch import hawtorch.io as io from hawtorch import Trainer from hawtorch.metrics import ClassificationMeter from hawtorch.utils import backup import models import argparse parser = argparse.ArgumentParser() parser.add_argument('--config', type=str, default='configs.json') parser = parser.parse_args() config_file = parser.config args = io.load_json(config_file) logger = io.logger(args["workspace_path"]) names = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') if __name__ == "__main__": backup(args["workspace_path"]) trainer = create_trainer() trainer.train(args["epochs"]) trainer.evaluate()
[ 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 40085, 355, 6436, 198, 6738, 28034, 13, 40085, 1330, 300, 81, 62, 1416, 704, 18173, 198, 198, 11748, 28034, 10178, 198, 6738, 28034, 10178, 1330, 40522, 11...
3.088968
281
""" Example of how to make a Scatterplot with a time component """ import slayer as sly import pandas as pd DATA_URL = 'https://raw.githubusercontent.com/ajduberstein/sf_growth/master/public/data/business.csv' businesses = pd.read_csv(DATA_URL) FUCHSIA_RGBA = [255, 0, 255, 140] color_scale = sly.ColorScale( palette='random', variable_name='neighborhood', scale_type='categorical_random') s = sly.Slayer(sly.Viewport(longitude=-122.43, latitude=37.76, zoom=11)) +\ sly.Timer(tick_rate=0.75) + \ sly.Scatterplot( businesses, position=['lng', 'lat'], color=color_scale, radius=50, time_field='start_date') s.to_html('scatterplot.html', interactive=True)
[ 37811, 198, 16281, 286, 703, 284, 787, 257, 1446, 1436, 29487, 351, 257, 640, 7515, 198, 37811, 198, 11748, 1017, 2794, 355, 49822, 198, 11748, 19798, 292, 355, 279, 67, 628, 198, 26947, 62, 21886, 796, 705, 5450, 1378, 1831, 13, 1256...
2.433898
295
import dictondisk import random import pytest import os remove_keys = {0, (33, 12.23), "c", ""} vanilla_dict = { 0: 1337, 1: 3.14, 2: 2.71, 3: 1.61, "a": "", "b": "!", "c": "", "": "", (1, .5): "lorem", (33, 12.23): "ipsum", -1: ["one", "two", "three"] }
[ 11748, 8633, 623, 1984, 198, 11748, 4738, 198, 11748, 12972, 9288, 198, 11748, 28686, 198, 198, 28956, 62, 13083, 796, 1391, 15, 11, 357, 2091, 11, 1105, 13, 1954, 828, 366, 66, 1600, 13538, 92, 198, 198, 10438, 5049, 62, 11600, 796, ...
1.992701
137
from src.extentions import db from flask_login import UserMixin from werkzeug.security import generate_password_hash, check_password_hash import datetime as dt # https://help.twitter.com/en/managing-your-account/twitter-username-rules # https://office-hack.com/gmail/password/
[ 6738, 12351, 13, 2302, 298, 507, 1330, 20613, 198, 6738, 42903, 62, 38235, 1330, 11787, 35608, 259, 198, 6738, 266, 9587, 2736, 1018, 13, 12961, 1330, 7716, 62, 28712, 62, 17831, 11, 2198, 62, 28712, 62, 17831, 198, 11748, 4818, 8079, ...
3.119565
92
import copy import enum from typing import * import numpy as np from shapely.geometry import MultiPolygon as ShapelyMultiPolygon from shapely.geometry import Point as ShapelyPoint from shapely.geometry import Polygon as ShapelyPolygon from shapely.strtree import STRtree import commonroad.geometry.transform from commonroad.common.validity import * from commonroad.geometry.shape import Polygon, ShapeGroup, Circle, Rectangle, Shape from commonroad.scenario.intersection import Intersection from commonroad.scenario.obstacle import Obstacle from commonroad.scenario.traffic_sign import TrafficSign, TrafficLight from commonroad.visualization.drawable import IDrawable from commonroad.visualization.param_server import ParamServer from commonroad.visualization.renderer import IRenderer __author__ = "Christian Pek, Sebastian Maierhofer" __copyright__ = "TUM Cyber-Physical Systems Group" __credits__ = ["BMW CAR@TUM"] __version__ = "2022.1" __maintainer__ = "Sebastian Maierhofer" __email__ = "commonroad@lists.lrz.de" __status__ = "released" class Lanelet: """ Class which describes a Lanelet entity according to the CommonRoad specification. Each lanelet is described by a left and right boundary (polylines). Furthermore, lanelets have relations to other lanelets, e.g. an adjacent left neighbor or a predecessor. 
""" def __init__(self, left_vertices: np.ndarray, center_vertices: np.ndarray, right_vertices: np.ndarray, lanelet_id: int, predecessor=None, successor=None, adjacent_left=None, adjacent_left_same_direction=None, adjacent_right=None, adjacent_right_same_direction=None, line_marking_left_vertices=LineMarking.NO_MARKING, line_marking_right_vertices=LineMarking.NO_MARKING, stop_line=None, lanelet_type=None, user_one_way=None, user_bidirectional=None, traffic_signs=None, traffic_lights=None, ): """ Constructor of a Lanelet object :param left_vertices: The vertices of the left boundary of the Lanelet described as a polyline [[x0,y0],[x1,y1],...,[xn,yn]] :param center_vertices: The vertices of the center line of the Lanelet described as a polyline [[x0,y0],[x1,y1],...,[xn,yn]] :param right_vertices: The vertices of the right boundary of the Lanelet described as a polyline [[x0,y0],[x1,y1],...,[xn,yn]] :param lanelet_id: The unique id (natural number) of the lanelet :param predecessor: The list of predecessor lanelets (None if not existing) :param successor: The list of successor lanelets (None if not existing) :param adjacent_left: The adjacent left lanelet (None if not existing) :param adjacent_left_same_direction: True if the adjacent left lanelet has the same driving direction, false otherwise (None if no left adjacent lanelet exists) :param adjacent_right: The adjacent right lanelet (None if not existing) :param adjacent_right_same_direction: True if the adjacent right lanelet has the same driving direction, false otherwise (None if no right adjacent lanelet exists) :param line_marking_left_vertices: The type of line marking of the left boundary :param line_marking_right_vertices: The type of line marking of the right boundary :param stop_line: The stop line of the lanelet :param lanelet_type: The types of lanelet applicable here :param user_one_way: type of users that will use the lanelet as one-way :param user_bidirectional: type of users that will use the lanelet 
as bidirectional way :param traffic_signs: Traffic signs to be applied :param traffic_lights: Traffic lights to follow """ # Set required properties self._left_vertices = None self._right_vertices = None self._center_vertices = None self._lanelet_id = None self.lanelet_id = lanelet_id self.left_vertices = left_vertices self.right_vertices = right_vertices self.center_vertices = center_vertices # check if length of each polyline is the same assert len(left_vertices[0]) == len(center_vertices[0]) == len( right_vertices[0]), '<Lanelet/init>: Provided polylines do not share the same length! {}/{}/{}'.format( len(left_vertices[0]), len(center_vertices[0]), len(right_vertices[0])) # Set lane markings self._line_marking_left_vertices = line_marking_left_vertices self._line_marking_right_vertices = line_marking_right_vertices # Set predecessors and successors self._predecessor = None if predecessor is None: self._predecessor = [] else: self.predecessor = predecessor self._successor = None if successor is None: self._successor = [] else: self.successor = successor # Set adjacent lanelets self._adj_left = None self._adj_left_same_direction = None if adjacent_left is not None: self.adj_left = adjacent_left self.adj_left_same_direction = adjacent_left_same_direction self._adj_right = None self._adj_right_same_direction = None if adjacent_right is not None: self.adj_right = adjacent_right self.adj_right_same_direction = adjacent_right_same_direction self._distance = None self._inner_distance = None # create empty polygon self._polygon = Polygon(np.concatenate((self.right_vertices, np.flip(self.left_vertices, 0)))) self._dynamic_obstacles_on_lanelet = {} self._static_obstacles_on_lanelet = set() self._stop_line = None if stop_line: self.stop_line = stop_line self._lanelet_type = None if lanelet_type is None: self._lanelet_type = set() else: self.lanelet_type = lanelet_type self._user_one_way = None if user_one_way is None: self._user_one_way = set() else: self.user_one_way = 
user_one_way self._user_bidirectional = None if user_bidirectional is None: self._user_bidirectional = set() else: self.user_bidirectional = user_bidirectional # Set Traffic Rules self._traffic_signs = None if traffic_signs is None: self._traffic_signs = set() else: self.traffic_signs = traffic_signs self._traffic_lights = None if traffic_lights is None: self._traffic_lights = set() else: self.traffic_lights = traffic_lights def add_predecessor(self, lanelet: int): """ Adds the ID of a predecessor lanelet to the list of predecessors. :param lanelet: Predecessor lanelet ID. """ if lanelet not in self.predecessor: self.predecessor.append(lanelet) def remove_predecessor(self, lanelet: int): """ Removes the ID of a predecessor lanelet from the list of predecessors. :param lanelet: Predecessor lanelet ID. """ if lanelet in self.predecessor: self.predecessor.remove(lanelet) def add_successor(self, lanelet: int): """ Adds the ID of a successor lanelet to the list of successors. :param lanelet: Successor lanelet ID. """ if lanelet not in self.successor: self.successor.append(lanelet) def remove_successor(self, lanelet: int): """ Removes the ID of a successor lanelet from the list of successors. :param lanelet: Successor lanelet ID. """ if lanelet in self.successor: self.successor.remove(lanelet) def translate_rotate(self, translation: np.ndarray, angle: float): """ This method translates and rotates a lanelet :param translation: The translation given as [x_off,y_off] for the x and y translation :param angle: The rotation angle in radian (counter-clockwise defined) """ assert is_real_number_vector(translation, 2), '<Lanelet/translate_rotate>: provided translation ' \ 'is not valid! translation = {}'.format(translation) assert is_valid_orientation( angle), '<Lanelet/translate_rotate>: provided angle is not valid! 
angle = {}'.format(angle) # create transformation matrix t_m = commonroad.geometry.transform.translation_rotation_matrix(translation, angle) # transform center vertices tmp = t_m.dot(np.vstack((self.center_vertices.transpose(), np.ones((1, self.center_vertices.shape[0]))))) tmp = tmp[0:2, :] self._center_vertices = tmp.transpose() # transform left vertices tmp = t_m.dot(np.vstack((self.left_vertices.transpose(), np.ones((1, self.left_vertices.shape[0]))))) tmp = tmp[0:2, :] self._left_vertices = tmp.transpose() # transform right vertices tmp = t_m.dot(np.vstack((self.right_vertices.transpose(), np.ones((1, self.right_vertices.shape[0]))))) tmp = tmp[0:2, :] self._right_vertices = tmp.transpose() # transform the stop line if self._stop_line is not None: self._stop_line.translate_rotate(translation, angle) # recreate polygon in case it existed self._polygon = Polygon(np.concatenate((self.right_vertices, np.flip(self.left_vertices, 0)))) def interpolate_position(self, distance: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray, int]: """ Computes the interpolated positions on the center/right/left polyline of the lanelet for a given distance along the lanelet :param distance: The distance for the interpolation :return: The interpolated positions on the center/right/left polyline and the segment id of the polyline where the interpolation takes place in the form ([x_c,y_c],[x_r,y_r],[x_l,y_l], segment_id) """ assert is_real_number(distance) and np.greater_equal(self.distance[-1], distance) and np.greater_equal(distance, 0), \ '<Lanelet/interpolate_position>: provided distance is not valid! 
distance = {}'.format( distance) idx = np.searchsorted(self.distance, distance) - 1 while not self.distance[idx] <= distance: idx += 1 r = (distance - self.distance[idx]) / (self.distance[idx + 1] - self.distance[idx]) return ((1 - r) * self._center_vertices[idx] + r * self._center_vertices[idx + 1], (1 - r) * self._right_vertices[idx] + r * self._right_vertices[idx + 1], (1 - r) * self._left_vertices[idx] + r * self._left_vertices[idx + 1], idx) def convert_to_polygon(self) -> Polygon: """ Converts the given lanelet to a polygon representation :return: The polygon of the lanelet """ warnings.warn("Use the lanelet property <polygon> instead", DeprecationWarning) return self._polygon def contains_points(self, point_list: np.ndarray) -> List[bool]: """ Checks if a list of points is enclosed in the lanelet :param point_list: The list of points in the form [[px1,py1],[px2,py2,],...] :return: List of Boolean values with True indicating point is enclosed and False otherwise """ assert isinstance(point_list, ValidTypes.ARRAY), '<Lanelet/contains_points>: provided list of points is not a list! type ' \ '= {}'.format(type(point_list)) assert is_valid_polyline( point_list), 'Lanelet/contains_points>: provided list of points is malformed! points = {}'.format( point_list) return [self._polygon.contains_point(p) for p in point_list] def get_obstacles(self, obstacles: List[Obstacle], time_step: int = 0) -> List[Obstacle]: """ Returns the subset of obstacles, which are located in the lanelet, of a given candidate set :param obstacles: The set of obstacle candidates :param time_step: The time step for the occupancy to check :return: """ assert isinstance(obstacles, list) and all( isinstance(o, Obstacle) for o in obstacles), '<Lanelet/get_obstacles>: Provided list of obstacles' \ ' is malformed! 
obstacles = {}'.format(obstacles) # output list res = list() lanelet_shapely_obj = self._polygon.shapely_object # look at each obstacle for o in obstacles: o_shape = o.occupancy_at_time(time_step).shape # vertices to check shape_shapely_objects = list() # distinguish between shape and shape group and extract vertices if isinstance(o_shape, ShapeGroup): shape_shapely_objects.extend([sh.shapely_object for sh in o_shape.shapes]) else: shape_shapely_objects.append(o_shape.shapely_object) # check if obstacle is in lane for shapely_obj in shape_shapely_objects: if lanelet_shapely_obj.intersects(shapely_obj): res.append(o) break return res def find_lanelet_successors_in_range(self, lanelet_network: "LaneletNetwork", max_length=50.0) -> List[List[int]]: """ Finds all possible successor paths (id sequences) within max_length. :param lanelet_network: lanelet network :param max_length: abort once length of path is reached :return: list of lanelet IDs """ paths = [[s] for s in self.successor] paths_final = [] lengths = [lanelet_network.find_lanelet_by_id(s).distance[-1] for s in self.successor] while paths: paths_next = [] lengths_next = [] for p, le in zip(paths, lengths): successors = lanelet_network.find_lanelet_by_id(p[-1]).successor if not successors: paths_final.append(p) else: for s in successors: if s in p or s == self.lanelet_id or le >= max_length: # prevent loops and consider length of first successor paths_final.append(p) continue l_next = le + lanelet_network.find_lanelet_by_id(s).distance[-1] if l_next < max_length: paths_next.append(p + [s]) lengths_next.append(l_next) else: paths_final.append(p + [s]) paths = paths_next lengths = lengths_next return paths_final def add_dynamic_obstacle_to_lanelet(self, obstacle_id: int, time_step: int): """ Adds a dynamic obstacle ID to lanelet :param obstacle_id: obstacle ID to add :param time_step: time step at which the obstacle should be added """ if self.dynamic_obstacles_on_lanelet.get(time_step) is None: 
self.dynamic_obstacles_on_lanelet[time_step] = set() self.dynamic_obstacles_on_lanelet[time_step].add(obstacle_id) def add_static_obstacle_to_lanelet(self, obstacle_id: int): """ Adds a static obstacle ID to lanelet :param obstacle_id: obstacle ID to add """ self.static_obstacles_on_lanelet.add(obstacle_id) def add_traffic_sign_to_lanelet(self, traffic_sign_id: int): """ Adds a traffic sign ID to lanelet :param traffic_sign_id: traffic sign ID to add """ self.traffic_signs.add(traffic_sign_id) def add_traffic_light_to_lanelet(self, traffic_light_id: int): """ Adds a traffic light ID to lanelet :param traffic_light_id: traffic light ID to add """ self.traffic_lights.add(traffic_light_id) def dynamic_obstacle_by_time_step(self, time_step) -> Set[int]: """ Returns all dynamic obstacles on lanelet at specific time step :param time_step: time step of interest :returns: list of obstacle IDs """ if self.dynamic_obstacles_on_lanelet.get(time_step) is not None: return self.dynamic_obstacles_on_lanelet.get(time_step) else: return set() class LaneletNetwork(IDrawable): """ Class which represents a network of connected lanelets """ def __init__(self): """ Constructor for LaneletNetwork """ self._lanelets: Dict[int, Lanelet] = {} # lanelet_id, shapely_polygon self._buffered_polygons: Dict[int, ShapelyPolygon] = {} self._strtee = None # id(shapely_polygon), lanelet_id self._lanelet_id_index_by_id: Dict[int, int] = {} self._intersections: Dict[int, Intersection] = {} self._traffic_signs: Dict[int, TrafficSign] = {} self._traffic_lights: Dict[int, TrafficLight] = {} # pickling of STRtree is not supported by shapely at the moment # use this workaround described in this issue: # https://github.com/Toblerity/Shapely/issues/1033 def _create_strtree(self): """ Creates spatial index for lanelets for faster querying the lanelets by position. Since it is an immutable object, it has to be recreated after every lanelet addition or it should be done once after all lanelets are added. 
""" # validate buffered polygons self._buffered_polygons = {lanelet_id: lanelet_shapely_polygon for lanelet_id, lanelet_shapely_polygon in self._buffered_polygons.items() if assert_shapely_polygon(lanelet_id, lanelet_shapely_polygon)} self._lanelet_id_index_by_id = {id(lanelet_shapely_polygon): lanelet_id for lanelet_id, lanelet_shapely_polygon in self._buffered_polygons.items()} self._strtee = STRtree(list(self._buffered_polygons.values())) def remove_lanelet(self, lanelet_id: int, rtree: bool = True): """ Removes a lanelet from a lanelet network and deletes all references. @param lanelet_id: ID of lanelet which should be removed. @param rtree: Boolean indicating whether rtree should be initialized """ if lanelet_id in self._lanelets.keys(): del self._lanelets[lanelet_id] del self._buffered_polygons[lanelet_id] self.cleanup_lanelet_references() if rtree: self._create_strtree() def cleanup_lanelet_references(self): """ Deletes lanelet IDs which do not exist in the lanelet network. Useful when cutting out lanelet networks. 
""" existing_ids = set(self._lanelets.keys()) for la in self.lanelets: la._predecessor = list(set(la.predecessor).intersection(existing_ids)) la._successor = list(set(la.successor).intersection(existing_ids)) la._adj_left = None if la.adj_left is None or la.adj_left not in existing_ids else la.adj_left la._adj_left_same_direction = None \ if la.adj_left_same_direction is None or la.adj_left not in existing_ids else la.adj_left_same_direction la._adj_right = None if la.adj_right is None or la.adj_right not in existing_ids else la.adj_right la._adj_right_same_direction = None \ if la.adj_right_same_direction is None or la.adj_right not in existing_ids else \ la.adj_right_same_direction for inter in self.intersections: for inc in inter.incomings: inc._incoming_lanelets = set(inc.incoming_lanelets).intersection(existing_ids) inc._successors_straight = set(inc.successors_straight).intersection(existing_ids) inc._successors_right = set(inc.successors_right).intersection(existing_ids) inc._successors_left = set(inc.successors_left).intersection(existing_ids) inter._crossings = set(inter.crossings).intersection(existing_ids) def remove_traffic_sign(self, traffic_sign_id: int): """ Removes a traffic sign from a lanelet network and deletes all references. @param traffic_sign_id: ID of traffic sign which should be removed. """ if traffic_sign_id in self._traffic_signs.keys(): del self._traffic_signs[traffic_sign_id] self.cleanup_traffic_sign_references() def cleanup_traffic_sign_references(self): """ Deletes traffic sign IDs which do not exist in the lanelet network. Useful when cutting out lanelet networks. 
""" existing_ids = set(self._traffic_signs.keys()) for la in self.lanelets: la._traffic_signs = la.traffic_signs.intersection(existing_ids) if la.stop_line is not None and la.stop_line.traffic_sign_ref is not None: la.stop_line._traffic_sign_ref = la.stop_line.traffic_sign_ref.intersection(existing_ids) def remove_traffic_light(self, traffic_light_id: int): """ Removes a traffic light from a lanelet network and deletes all references. @param traffic_light_id: ID of traffic sign which should be removed. """ if traffic_light_id in self._traffic_lights.keys(): del self._traffic_lights[traffic_light_id] self.cleanup_traffic_light_references() def cleanup_traffic_light_references(self): """ Deletes traffic light IDs which do not exist in the lanelet network. Useful when cutting out lanelet networks. """ existing_ids = set(self._traffic_lights.keys()) for la in self.lanelets: la._traffic_lights = la.traffic_lights.intersection(existing_ids) if la.stop_line is not None and la.stop_line.traffic_light_ref is not None: la.stop_line._traffic_light_ref = la.stop_line.traffic_light_ref.intersection(existing_ids) def remove_intersection(self, intersection_id: int): """ Removes a intersection from a lanelet network and deletes all references. @param intersection_id: ID of intersection which should be removed. """ if intersection_id in self._intersections.keys(): del self._intersections[intersection_id] def find_lanelet_by_id(self, lanelet_id: int) -> Lanelet: """ Finds a lanelet for a given lanelet_id :param lanelet_id: The id of the lanelet to find :return: The lanelet object if the id exists and None otherwise """ assert is_natural_number( lanelet_id), '<LaneletNetwork/find_lanelet_by_id>: provided id is not valid! 
id = {}'.format(lanelet_id) return self._lanelets[lanelet_id] if lanelet_id in self._lanelets else None def find_traffic_sign_by_id(self, traffic_sign_id: int) -> TrafficSign: """ Finds a traffic sign for a given traffic_sign_id :param traffic_sign_id: The id of the traffic sign to find :return: The traffic sign object if the id exists and None otherwise """ assert is_natural_number( traffic_sign_id), '<LaneletNetwork/find_traffic_sign_by_id>: provided id is not valid! ' \ 'id = {}'.format(traffic_sign_id) return self._traffic_signs[traffic_sign_id] if traffic_sign_id in self._traffic_signs else None def find_traffic_light_by_id(self, traffic_light_id: int) -> TrafficLight: """ Finds a traffic light for a given traffic_light_id :param traffic_light_id: The id of the traffic light to find :return: The traffic light object if the id exists and None otherwise """ assert is_natural_number( traffic_light_id), '<LaneletNetwork/find_traffic_light_by_id>: provided id is not valid! ' \ 'id = {}'.format(traffic_light_id) return self._traffic_lights[traffic_light_id] if traffic_light_id in self._traffic_lights else None def find_intersection_by_id(self, intersection_id: int) -> Intersection: """ Finds a intersection for a given intersection_id :param intersection_id: The id of the intersection to find :return: The intersection object if the id exists and None otherwise """ assert is_natural_number(intersection_id), '<LaneletNetwork/find_intersection_by_id>: ' \ 'provided id is not valid! 
id = {}'.format(intersection_id) return self._intersections[intersection_id] if intersection_id in self._intersections else None def add_lanelet(self, lanelet: Lanelet, rtree: bool = True): """ Adds a lanelet to the LaneletNetwork :param lanelet: The lanelet to add :param eps: The size increase of the buffered polygons :param rtree: Boolean indicating whether rtree should be initialized :return: True if the lanelet has successfully been added to the network, false otherwise """ assert isinstance(lanelet, Lanelet), '<LaneletNetwork/add_lanelet>: provided lanelet is not of ' \ 'type lanelet! type = {}'.format(type(lanelet)) # check if lanelet already exists in network and warn user if lanelet.lanelet_id in self._lanelets.keys(): warnings.warn('Lanelet already exists in network! No changes are made.') return False else: self._lanelets[lanelet.lanelet_id] = lanelet self._buffered_polygons[lanelet.lanelet_id] = lanelet.polygon.shapely_object if rtree: self._create_strtree() return True def add_traffic_sign(self, traffic_sign: TrafficSign, lanelet_ids: Set[int]): """ Adds a traffic sign to the LaneletNetwork :param traffic_sign: The traffic sign to add :param lanelet_ids: Lanelets the traffic sign should be referenced from :return: True if the traffic sign has successfully been added to the network, false otherwise """ assert isinstance(traffic_sign, TrafficSign), '<LaneletNetwork/add_traffic_sign>: provided traffic sign is ' \ 'not of type traffic_sign! type = {}'.format(type(traffic_sign)) # check if traffic already exists in network and warn user if traffic_sign.traffic_sign_id in self._traffic_signs.keys(): warnings.warn('Traffic sign with ID {} already exists in network! 
' 'No changes are made.'.format(traffic_sign.traffic_sign_id)) return False else: self._traffic_signs[traffic_sign.traffic_sign_id] = traffic_sign for lanelet_id in lanelet_ids: lanelet = self.find_lanelet_by_id(lanelet_id) if lanelet is not None: lanelet.add_traffic_sign_to_lanelet(traffic_sign.traffic_sign_id) else: warnings.warn('Traffic sign cannot be referenced to lanelet because the lanelet does not exist.') return True def add_traffic_light(self, traffic_light: TrafficLight, lanelet_ids: Set[int]): """ Adds a traffic light to the LaneletNetwork :param traffic_light: The traffic light to add :param lanelet_ids: Lanelets the traffic sign should be referenced from :return: True if the traffic light has successfully been added to the network, false otherwise """ assert isinstance(traffic_light, TrafficLight), '<LaneletNetwork/add_traffic_light>: provided traffic light ' \ 'is not of type traffic_light! ' \ 'type = {}'.format(type(traffic_light)) # check if traffic already exists in network and warn user if traffic_light.traffic_light_id in self._traffic_lights.keys(): warnings.warn('Traffic light already exists in network! No changes are made.') return False else: self._traffic_lights[traffic_light.traffic_light_id] = traffic_light for lanelet_id in lanelet_ids: lanelet = self.find_lanelet_by_id(lanelet_id) if lanelet is not None: lanelet.add_traffic_light_to_lanelet(traffic_light.traffic_light_id) else: warnings.warn('Traffic light cannot be referenced to lanelet because the lanelet does not exist.') return True def add_intersection(self, intersection: Intersection): """ Adds a intersection to the LaneletNetwork :param intersection: The intersection to add :return: True if the traffic light has successfully been added to the network, false otherwise """ assert isinstance(intersection, Intersection), '<LaneletNetwork/add_intersection>: provided intersection is ' \ 'not of type Intersection! 
type = {}'.format(type(intersection)) # check if traffic already exists in network and warn user if intersection.intersection_id in self._intersections.keys(): warnings.warn('Intersection already exists in network! No changes are made.') return False else: self._intersections[intersection.intersection_id] = intersection return True def add_lanelets_from_network(self, lanelet_network: 'LaneletNetwork'): """ Adds lanelets from a given network object to the current network :param lanelet_network: The lanelet network :return: True if all lanelets have been added to the network, false otherwise """ flag = True # add lanelets to the network for la in lanelet_network.lanelets: flag = flag and self.add_lanelet(la, rtree=False) self._create_strtree() return flag def translate_rotate(self, translation: np.ndarray, angle: float): """ Translates and rotates the complete lanelet network :param translation: The translation given as [x_off,y_off] for the x and y translation :param angle: The rotation angle in radian (counter-clockwise defined) """ assert is_real_number_vector(translation, 2), '<LaneletNetwork/translate_rotate>: provided translation is not valid! ' \ 'translation = {}'.format(translation) assert is_valid_orientation( angle), '<LaneletNetwork/translate_rotate>: provided angle is not valid! angle = {}'.format(angle) # rotate each lanelet for lanelet in self._lanelets.values(): lanelet.translate_rotate(translation, angle) for traffic_sign in self._traffic_signs.values(): traffic_sign.translate_rotate(translation, angle) for traffic_light in self._traffic_lights.values(): traffic_light.translate_rotate(translation, angle) def find_lanelet_by_position(self, point_list: List[np.ndarray]) -> List[List[int]]: """ Finds the lanelet id of a given position :param point_list: The list of positions to check :return: A list of lanelet ids. 
If the position could not be matched to a lanelet, an empty list is returned """ assert isinstance(point_list, ValidTypes.LISTS), '<Lanelet/contains_points>: provided list of points is not a list! type ' \ '= {}'.format( type(point_list)) return [[self._get_lanelet_id_by_shapely_polygon(lanelet_shapely_polygon) for lanelet_shapely_polygon in self._strtee.query(point) if lanelet_shapely_polygon.intersects(point) or lanelet_shapely_polygon.buffer(1e-15).intersects(point)] for point in [ShapelyPoint(point) for point in point_list]] def find_lanelet_by_shape(self, shape: Shape) -> List[int]: """ Finds the lanelet id of a given shape :param shape: The shape to check :return: A list of lanelet ids. If the position could not be matched to a lanelet, an empty list is returned """ assert isinstance(shape, (Circle, Polygon, Rectangle)), '<Lanelet/find_lanelet_by_shape>: ' \ 'provided shape is not a shape! ' \ 'type = {}'.format(type(shape)) return [self._get_lanelet_id_by_shapely_polygon(lanelet_shapely_polygon) for lanelet_shapely_polygon in self._strtee.query(shape.shapely_object) if lanelet_shapely_polygon.intersects(shape.shapely_object)] def filter_obstacles_in_network(self, obstacles: List[Obstacle]) -> List[Obstacle]: """ Returns the list of obstacles which are located in the lanelet network :param obstacles: The list of obstacles to check :return: The list of obstacles which are located in the lanelet network """ res = list() obstacle_to_lanelet_map = self.map_obstacles_to_lanelets(obstacles) for k in obstacle_to_lanelet_map.keys(): obs = obstacle_to_lanelet_map[k] for o in obs: if o not in res: res.append(o) return res def map_obstacles_to_lanelets(self, obstacles: List[Obstacle]) -> Dict[int, List[Obstacle]]: """ Maps a given list of obstacles to the lanelets of the lanelet network :param obstacles: The list of CR obstacles :return: A dictionary with the lanelet id as key and the list of obstacles on the lanelet as a List[Obstacles] """ mapping = {} for la in 
self.lanelets: # map obstacles to current lanelet mapped_objs = la.get_obstacles(obstacles) # check if mapping is not empty if len(mapped_objs) > 0: mapping[la.lanelet_id] = mapped_objs return mapping def lanelets_in_proximity(self, point: np.ndarray, radius: float) -> List[Lanelet]: """ Finds all lanelets which intersect a given circle, defined by the center point and radius :param point: The center of the circle :param radius: The radius of the circle :return: The list of lanelets which intersect the given circle """ assert is_real_number_vector(point, length=2), '<LaneletNetwork/lanelets_in_proximity>: provided point is ' \ 'not valid! point = {}'.format(point) assert is_positive( radius), '<LaneletNetwork/lanelets_in_proximity>: provided radius is not valid! radius = {}'.format( radius) # get list of lanelet ids ids = self._lanelets.keys() # output list lanes = dict() rad_sqr = radius ** 2 # distance dict for sorting distance_list = list() # go through list of lanelets for i in ids: # if current lanelet has not already been added to lanes list if i not in lanes: lanelet = self.find_lanelet_by_id(i) # compute distances (we are not using the sqrt for computational effort) distance = (lanelet.center_vertices - point) ** 2. 
distance = distance[:, 0] + distance[:, 1] # check if at least one distance is smaller than the radius if any(np.greater_equal(rad_sqr, distance)): lanes[i] = self.find_lanelet_by_id(i) distance_list.append(np.min(distance)) # check if adjacent lanelets can be added as well index_min_dist = np.argmin(distance - rad_sqr) # check right side of lanelet if lanelet.adj_right is not None: p = (lanelet.right_vertices[index_min_dist, :] - point) ** 2 p = p[0] + p[1] if np.greater(rad_sqr, p) and lanelet.adj_right not in lanes: lanes[lanelet.adj_right] = self.find_lanelet_by_id(lanelet.adj_right) distance_list.append(p) # check left side of lanelet if lanelet.adj_left is not None: p = (lanelet.left_vertices[index_min_dist, :] - point) ** 2 p = p[0] + p[1] if np.greater(rad_sqr, p) and lanelet.adj_left not in lanes: lanes[lanelet.adj_left] = self.find_lanelet_by_id(lanelet.adj_left) distance_list.append(p) # sort list according to distance indices = np.argsort(distance_list) lanelets = list(lanes.values()) # return sorted list return [lanelets[i] for i in indices]
[ 11748, 4866, 198, 11748, 33829, 198, 6738, 19720, 1330, 1635, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 5485, 306, 13, 469, 15748, 1330, 15237, 34220, 14520, 355, 25959, 306, 29800, 34220, 14520, 198, 6738, 5485, 306, 13, 469, ...
2.31865
16,300
from django.urls import path

from . import views

# URL routes for the actions app.
urlpatterns = [
    # Landing/test page.
    path("test/", views.index, name="index"),
    # List of completed actions.
    path('completed/', views.show_completed, name="completed"),
    # Detail view for a single action.
    path('<int:action_id>/', views.show_action, name='action'),
    # Status-update endpoint.
    path('update/', views.update_status, name="update_status"),
    # Creation form for a new action.
    path('new/', views.new_action, name="new_action"),
]
[ 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 6738, 764, 1330, 5009, 198, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 7203, 9288, 14, 1600, 5009, 13, 9630, 11, 1438, 796, 366, 9630, 12340, 198, 220, 220, 220,...
2.75
132
import ast from typing import List, Optional, cast import pytest from dataframe_expressions import ( Column, DataFrame, ast_Callable, ast_Column, ast_DataFrame, define_alias) from .utils_for_testing import reset_var_counter # NOQA # numpy math functions (??) # Advanced math operators # (https://docs.python.org/3/reference/datamodel.html?highlight=__add__#emulating-numeric-types) # the operator "in" (contains)? to see if one jet is in another collection? # the operator len # Make sure if d1 and d2 are two different sized,sourced DataFrames, then d1[d2.x] fails # Filter functions - so pass a filter that gets called with whatever you are filtering on, and # returns. # https://stackoverflow.com/questions/847936/how-can-i-find-the-number-of-arguments-of-a-python-function # Aliases allow some recursion, but with total flexability. If there is a circle and you want # things done a second time, they # won't be. Perhaps when we have an actual problem we can resolve this.
[ 11748, 6468, 198, 6738, 19720, 1330, 7343, 11, 32233, 11, 3350, 198, 198, 11748, 12972, 9288, 198, 198, 6738, 1366, 14535, 62, 42712, 507, 1330, 357, 198, 220, 220, 220, 29201, 11, 6060, 19778, 11, 6468, 62, 14134, 540, 11, 6468, 62, ...
2.966759
361
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import redis

# Client for the local Redis server (default database 0).
rc = redis.StrictRedis(host='localhost', port=6379, db=0)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 2266, 271, 198, 198, 6015, 796, 2266, 271, 13, 1273, 2012, 7738, 271, 7, 4774, 11639, 36750, 3256, 2493, ...
2.232143
56
# import encode
import eel
import cv2
import io
import numpy as np
import base64
import os
import time
import face_recognition
import pickle
import imutils
import datetime
from multiprocessing.pool import ThreadPool
import random
import shutil
from database import *
from camera import VideoCamera
from SceneChangeDetect import sceneChangeDetect
import login
import encode_student_data
import warnings

warnings.filterwarnings('ignore')

eel.init('web')

# ------ Global Variable ----
camera_status = 1        # 1 while the streaming loop should keep running
capture_status = False   # set True to save/forward the next frame
student_id = ''
fullnamee = ''


def gen(url):
    """Yield JPEG-encoded frames from the camera at ``url``.

    Streams until ``camera_status`` is changed from 1 or a frame read
    fails.  While ``capture_status`` is True, the current frame is also
    written to dataset/<student_id>_<fullnamee>/ and forwarded through
    send_capture_photo() before the flag is cleared.
    """
    video = cv2.VideoCapture(url)
    global camera_status
    global capture_status
    camera_status = 1
    while camera_status == 1:
        success, img = video.read()
        if not success:
            print("cam nt cnt")
            break
        if capture_status:
            save_path = 'dataset/' + student_id + '_' + fullnamee
            filename = save_path + "/photo" + \
                str(random.randint(0, 999)) + ".jpg"
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            cv2.imwrite(filename, img)
            send_capture_photo(img)
            capture_status = False
        ret, jpeg = cv2.imencode('.jpg', img)
        img = jpeg.tobytes()
        yield img


# adding new student data
def delete_student_data_file(student_id):
    """Remove every stored face encoding for ``student_id``.

    encodings.pickle holds {'encodings': [...], 'names': [...]} with the
    two lists kept in lockstep.  Entries whose name contains
    ``student_id`` are dropped from both lists and the file rewritten.
    """
    # load the face data
    with open('encodings.pickle', 'rb') as f:
        face_data = pickle.load(f)

    encodings = face_data['encodings']
    names = face_data['names']

    # indices of entries belonging to this student (substring match)
    index = [i for i, item in enumerate(names) if student_id in item]

    # Delete from the back so earlier indices stay valid.  The previous
    # version removed names *by value* (ValueError on substring matches)
    # and always deleted encodings[index[0]], which corrupted the
    # name<->encoding pairing for non-contiguous matches.
    for i in sorted(index, reverse=True):
        del names[i]
        del encodings[i]

    # save modified face data
    face_data['names'] = names
    face_data['encodings'] = encodings
    with open("encodings.pickle", "wb") as f:
        f.write(pickle.dumps(face_data))


eel.start('template/pages/samples/login.html', size=(1307, 713))
#eel.start('template/index.html', size=(1307, 713))
# eel.start('dashboard.html', size=(1307, 713))
[ 2, 1330, 37773, 201, 198, 11748, 304, 417, 201, 198, 11748, 269, 85, 17, 201, 198, 11748, 33245, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748, 2779, 2414, 201, 198, 11748, 28686, 201, 198, 11748, 640, 201, 198, 11748, 1986...
2.233742
1,061
# -*- coding: utf-8 -*- __author__ = 'Huang, Hua' from models.object import JsonSerializableObj
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 834, 9800, 834, 796, 705, 38202, 648, 11, 43709, 6, 198, 198, 6738, 4981, 13, 15252, 1330, 449, 1559, 32634, 13821, 49201, 628, 628 ]
2.631579
38
#!/usr/bin/python
# -*- coding: utf-8 -*-

import logging
import sys

from django.contrib.auth.decorators import login_required
from django.shortcuts import render

from walletgui.controller.global_constants import *
from walletgui.controller.crypto_utils import CryptoUtility
from walletgui.controller.walletmanager import WalletManager
from walletgui.controller.paymentmethodmanager import PaymentMethodManager
from walletgui.views import errorpageview
from walletgui.views.models.useraccountinfo import *

# Module-level logger shared by the dashboard views.
logger = logging.getLogger("site.dashboard")
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 18931, 11, 25064, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 12501, 273, 2024, 1330, 17594, 62, 35827,...
3.671141
149
import numpy as np
import networkx as nx


# For illustration purpose only [easy to understand the process]
# -----------------------------
def pure_cascade_virality(G):
    '''G is a directed graph(tree)'''
    # Virality is undefined for a disconnected cascade.
    if not nx.is_weakly_connected(G):
        # return None
        return

    # Every internal (non-leaf) node acts as a source.
    internal = [node for node, out_deg in G.out_degree() if out_deg > 0]

    total = 0
    for src in internal:
        # Shortest-path lengths from src; drop the zero-length entry
        # for src itself before averaging.
        lengths = nx.single_source_shortest_path_length(G, src)
        positive = {node: dist for node, dist in lengths.items() if dist > 0}
        total += np.array(list(positive.values())).mean()
    return total


# Works in a recursive manner [more efficient]
# -----------------------------
def recursive_path_length(G, V, seed):
    '''G is a directed graph(tree)'''
    # V[seed] collects the path lengths from seed to every descendant.
    V[seed] = []
    for child in G.successors(seed):
        V[seed].append(1)
        V[seed] += [depth + 1 for depth in recursive_path_length(G, V, child)]
    return V[seed]


def recursive_cascade_virality(G, source=None):
    '''G is a directed graph(tree)'''
    if not nx.is_weakly_connected(G):
        # return None
        return

    if not source:
        # if root is not given, find it by yourself
        source = [node for node, in_deg in G.in_degree() if in_deg == 0][0]

    V_dic = {}
    recursive_path_length(G, V_dic, source)
    # return V_dic  # return original paths

    total = 0
    for _, depths in V_dic.items():
        if len(depths) > 0:
            total += np.mean(depths)
    return total  # return cascade virality
[ 11748, 299, 32152, 355, 45941, 198, 11748, 3127, 87, 355, 299, 87, 628, 628, 198, 2, 1114, 20936, 4007, 691, 685, 38171, 284, 1833, 262, 1429, 60, 198, 2, 34400, 32501, 198, 4299, 5899, 62, 66, 28966, 62, 37040, 1483, 7, 38, 2599, ...
2.304348
713
import pyjira.api as _api
import json as _json


def get_issues(id, limit=50):
    """Return 50 issues for a project.

    Parameters:
    - id: id of a project.
    - limit: max number of results to be returned.
    """
    query = "/search?jql=project=" + str(id) + "&maxResults=" + str(limit)
    return _api.rest(query)


def get_issue(id):
    """Get issue and its details.

    Parameters:
    - id: id of an issue.
    """
    return _api.rest("/issue/" + str(id))


def get_all_fields():
    """Get all existing fields."""
    return _api.rest("/field")


def get_field(id):
    """Get field and its details.

    Parameters:
    - id: id of a field.
    """
    target = str(id)
    for field in _json.loads(get_all_fields()):
        # Match either the raw id or the id with the "customfield_"
        # prefix stripped.
        if field["id"] == target or field["id"].replace("customfield_", "") == target:
            return _json.dumps(field)


def get_issue_fields(id, field_names_enabled=True):
    """Get all fields listed for an issue.

    Parameters:
    - id: id of an issue.
    - field_names_enabled: if False, returns result with
      "customfield_" names. True by default.
    """
    issue = _json.loads(get_issue(id))
    result = {}
    for key, value in issue["fields"].items():
        is_custom = "customfield_" in key
        if is_custom and value and field_names_enabled:
            # Resolve the opaque customfield_* key to its display name.
            field = _json.loads(get_field(key))
            result[field["name"]] = value
        elif value:
            result[key] = value
    return _json.dumps(result)
[ 11748, 12972, 73, 8704, 13, 15042, 355, 4808, 15042, 198, 11748, 33918, 355, 4808, 17752, 628, 198, 4299, 651, 62, 37165, 7, 312, 11, 4179, 28, 1120, 2599, 198, 220, 220, 220, 37227, 13615, 2026, 2428, 329, 257, 1628, 13, 198, 220, ...
2.368254
630
import importlib
from argparse import ArgumentParser
from wsgiref.simple_server import make_server

# Command-line interface for serving a WSGI application.
p = ArgumentParser(prog="gongish serve", description="Serve a WSGI application")

# Optional positional: "module:application" path to serve.
p.add_argument(
    "module",
    type=str,
    nargs="?",
    help="Module and application name (e.g: myapp:app)",
)

# Address to listen on.
p.add_argument(
    "-b",
    "--bind",
    default="localhost:8080",
    type=str,
    help="Bind address (default: localhost:8080)",
)
[ 11748, 1330, 8019, 198, 6738, 1822, 29572, 1330, 45751, 46677, 198, 6738, 266, 45213, 557, 69, 13, 36439, 62, 15388, 1330, 787, 62, 15388, 628, 198, 79, 796, 45751, 46677, 7, 198, 220, 220, 220, 1172, 2625, 70, 506, 680, 4691, 1600, ...
2.674699
166
import unittest import redis import random
[ 11748, 555, 715, 395, 198, 11748, 2266, 271, 198, 11748, 4738, 628 ]
3.666667
12
# Secret Messages: report a character's alphabet position and its
# Caesar-shifted position.

alphabet = 'abcdefghijklmnopqrstuvwxyz'
key = 3

# Read one character and locate it in the alphabet
# (str.find returns -1 when the character is absent).
character = input('Please enter a character ')
position = alphabet.find(character)
print('Position of a character ', character, ' is ', position)

# Shift by the key; no wrap-around past 'z' at this stage.
newPosition = position + key
print('New position of a character ', character, ' is ', newPosition)
[ 2, 23725, 43534, 13, 968, 2292, 198, 198, 17307, 8380, 796, 705, 39305, 4299, 456, 2926, 41582, 10295, 404, 80, 81, 301, 14795, 86, 5431, 89, 6, 198, 2539, 796, 513, 198, 22769, 796, 5128, 10786, 5492, 3802, 257, 2095, 705, 8, 198, ...
3.571429
91
from examples.example_imports import *
from manim_express.eager import PlotObj

# Eager-mode scene: animations play as soon as they are issued.
scene = EagerModeScene(screen_size=Size.bigger)
graph = Line().scale(0.2)

# t0 = time.time()
#
# delta_t = 0.5
# for a in np.linspace(3, 12, 3):
#     graph2 = ParametricCurve(lambda t: [t,
#                                         0.8 * np.abs(t) ** (6 / 7) + 0.9 * np.sqrt(abs(a - t ** 2)) * np.sin(
#                                             a * t + 0.2),
#                                         0],
#                              t_range=(-math.sqrt(a), math.sqrt(a))).scale(0.5)
#     scene.play(Transform(graph, graph2), run_time=3)

# Random sample just to sanity-check array shapes.
ps = np.random.rand(10, 3)
print(ps.shape)
print(ps[:, 0].max())

# Unit circle sampled at 100 points.
theta = np.linspace(0, 2 * PI, 100)
x = np.cos(theta)
y = np.sin(theta)

p = PlotObj(x, y)
scene.play(ShowCreation(p))

s = PlotObj(theta, x).set_color(RED)
scene.play(ShowCreation(s))

# Tile the circle plot into a 3x3 grid and animate transforms on it.
grid = p.get_grid(3, 3)
scene.add(grid)
scene.play(grid.animate.shift(LEFT))
scene.play(grid.animate.set_submobject_colors_by_gradient(BLUE, GREEN, RED))
scene.play(grid.animate.set_height(TAU - MED_SMALL_BUFF))
# scene.play(grid.animate.apply_complex_function(np.exp), run_time=5)

# Sinusoidal warp applied point-wise to the grid.
scene.play(
    grid.animate.apply_function(
        lambda pt: [
            pt[0] + 0.5 * math.sin(pt[1]),
            pt[1] + 0.5 * math.sin(pt[0]),
            pt[2]
        ]
    ),
    run_time=5,
)
scene.hold_on()
[ 6738, 6096, 13, 20688, 62, 320, 3742, 1330, 1635, 198, 6738, 582, 320, 62, 42712, 13, 68, 3536, 1330, 28114, 49201, 198, 198, 29734, 796, 412, 3536, 19076, 36542, 7, 9612, 62, 7857, 28, 10699, 13, 14261, 1362, 8, 198, 198, 34960, 79...
1.864499
738
#!/usr/bin/env python
# Note that this is python3 only
import argparse

import requests

# CLI: fetch a single FHIR Observation with a bearer token.
parser = argparse.ArgumentParser(
    "Get an observation from a FHIR server with authentication")
parser.add_argument(
    "id",
    help="The observation id to retrieve")
parser.add_argument(
    "auth",
    default="Admin",
    help="The authorization string to use. \"Bearer \" will be added to "
         "the front.")
parser.add_argument(
    "--url",
    default="http://35.245.174.218:8080/hapi-fhir-jpaserver/fhir/",
    help="The base url of the server")
args = parser.parse_args()

headers = {
    'Content-Type': "application/fhir+json; charset=utf-8",
    'Authorization': "Bearer " + args.auth,
}

# Fetch the observation and print its JSON body.
response = requests.get(args.url + "/Observation/" + args.id, headers=headers)
print(response.json())
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 5740, 326, 428, 318, 21015, 18, 691, 198, 11748, 1822, 29572, 198, 11748, 7007, 198, 198, 48610, 796, 1822, 29572, 13, 28100, 1713, 46677, 7, 198, 220, 220, 220, 366, 3855, 281, 1...
2.907407
270
# # MythBox for XBMC - http://mythbox.googlecode.com # Copyright (C) 2010 analogue@yahoo.com # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # from odict import odict # -------------------------------------------------------------------------------------------------------------- # Duplicate Check Method Check for Duplicates in Episode Filter dupin dupmethod Makes sense # -------------------------------------------------------------------------------------------------------------- # None All Recordings None 15 1 Y # Subtitle All Recordings None 15 2 Y # Description All Recordings None 15 4 Y # Subtitle & Desc All Recordings None 15 6 Y # Subtitle then Desc All Recordings None 15 8 Y # # None Current Recordings None 1 1 Y # Subtitle Current Recordings None 1 2 Y # # None Current Recordings New Epi Only 17 (16+1) 1 Y # None All Recordings New Epi Only 31 (16+15) 1 Y # None All Recordings Exclude Generics 79 (64+15 1 Y # None Previous Recordings Exclude Rep&Gen 98 (64+32+2) 1 Y #
[ 2, 198, 2, 220, 18900, 14253, 329, 1395, 33, 9655, 532, 2638, 1378, 1820, 400, 3524, 13, 13297, 8189, 13, 785, 198, 2, 220, 15069, 357, 34, 8, 3050, 45304, 31, 40774, 13, 785, 198, 2, 220, 198, 2, 220, 770, 1430, 318, 1479, 3788...
2.112199
1,123
"""
script to post-process training images by using OpenCV face detection 
and normalization

MIT License

Copyright (c) 2019 JinJie Chen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import cv2
import numpy as np
import os

# Processes every image in the user_id subdirectory and saves the
# processed images back into the user_id folder.  Normalization:
# truncate the face out of the image using the detector's bounding box,
# then resize with interpolation via OpenCV.

parser = argparse.ArgumentParser()
parser.add_argument('--user', help='user id, -1 for all')
args = parser.parse_args()
print(args)

# Haar-cascade face detector shipped with the project.
classifier = cv2.CascadeClassifier("../src/models/haarcascade_frontalface_default.xml")

# NOTE(review): process_images is not defined in this file — presumably
# provided by an import elsewhere; verify before running.
images, labels, labels_dic = process_images(args.user)
print("num images: ", len(images))
print("labels_dic: ", labels_dic)
[ 37811, 198, 12048, 284, 1281, 12, 14681, 3047, 4263, 416, 1262, 4946, 33538, 1986, 13326, 220, 198, 392, 3487, 1634, 198, 198, 36393, 13789, 198, 198, 15269, 357, 66, 8, 13130, 17297, 41, 494, 12555, 198, 198, 5990, 3411, 318, 29376, ...
3.54845
516
import flask
from flask import render_template
from flask import request
from flask import url_for

import json
import logging

###
# Globals
###
app = flask.Flask(__name__)
import CONFIG

###
# Pages
###

#############
#
# Set up to run from cgi-bin script, from
# gunicorn, or stand-alone.
#
# Application configuration comes from the project-local CONFIG module.
app.secret_key = CONFIG.secret_key
app.debug = CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)

if __name__ == "__main__":
    print("Opening for global access on port {}".format(CONFIG.PORT))
    # Bind to all interfaces so the app is reachable from outside.
    app.run(port=CONFIG.PORT, host="0.0.0.0")
[ 11748, 42903, 198, 6738, 42903, 1330, 8543, 62, 28243, 198, 6738, 42903, 1330, 2581, 198, 6738, 42903, 1330, 19016, 62, 1640, 198, 198, 11748, 33918, 198, 11748, 18931, 198, 198, 21017, 198, 2, 40713, 874, 198, 21017, 198, 1324, 796, 42...
2.802083
192
import xarray as xr import numpy as np from pathlib import Path from tcvx21.grillix_post.components import FieldlineTracer from tcvx21.grillix_post.lineouts import Lineout xr.set_options(keep_attrs=True) def initialise_lineout_for_parallel_gradient( lineout, grid, equi, norm, npol, stored_trace: Path = None ): """ Traces to find the forward and reverse lineouts for a given lineout Expensive! Needs to be done once per lineout that you want to take gradients with """ fieldline_tracer = FieldlineTracer(equi) try: print(f"Attempting to read stored trace from {stored_trace}") ds = xr.open_dataset(stored_trace) assert np.allclose(ds["lineout_x"], lineout.r_points) assert np.allclose(ds["lineout_y"], lineout.z_points) except (FileNotFoundError, ValueError): forward_trace, reverse_trace = fieldline_tracer.find_neighbouring_points( lineout.r_points, lineout.z_points, n_toroidal_planes=int(npol) ) ds = xr.Dataset( data_vars=dict( forward_x=("points", forward_trace[:, 0]), forward_y=("points", forward_trace[:, 1]), forward_l=("points", forward_trace[:, 2]), reverse_x=("points", reverse_trace[:, 0]), reverse_y=("points", reverse_trace[:, 1]), reverse_l=("points", reverse_trace[:, 2]), lineout_x=("points", lineout.r_points), lineout_y=("points", lineout.z_points), ) ) if stored_trace is not None: if stored_trace.exists(): stored_trace.unlink() ds.to_netcdf(stored_trace) lineout.forward_lineout = Lineout(ds["forward_x"], ds["forward_y"]) lineout.forward_lineout.setup_interpolation_matrix(grid, use_source_points=True) lineout.reverse_lineout = Lineout(ds["reverse_x"], ds["reverse_y"]) lineout.reverse_lineout.setup_interpolation_matrix(grid, use_source_points=True) lineout.forward_distance = xr.DataArray( ds["forward_l"], dims="interp_points" ).assign_attrs(norm=norm.R0) lineout.reverse_distance = xr.DataArray( ds["reverse_l"], dims="interp_points" ).assign_attrs(norm=norm.R0) def compute_parallel_gradient(lineout, field): """ Computes the parallel gradient via centred differences 
Note that you should multiply this by the penalisation direction function to get the direction 'towards the wall'. This isn't quite the same as projecting onto the wall normal, but for computing the parallel heat flux this is actually more helpful """ assert hasattr(lineout, "forward_lineout") and hasattr( lineout, "reverse_lineout" ), f"Have to call initialise_lineout_for_parallel_gradient on lineout before trying to compute_parallel_gradient" parallel_gradients = [ compute_gradient_on_plane(lineout, field, plane) for plane in range(field.sizes["phi"]) ] return xr.concat(parallel_gradients, dim="phi") def compute_gradient_on_plane(lineout, field, plane): """Computes the parallel gradient on a single plane""" forward_value = lineout.forward_lineout.interpolate( field.isel(phi=np.mod(plane + 1, field.sizes["phi"])) ) reverse_value = lineout.forward_lineout.interpolate( field.isel(phi=np.mod(plane - 1, field.sizes["phi"])) ) two_plane_distance = lineout.forward_distance - lineout.reverse_distance centred_difference = forward_value - reverse_value return ( (centred_difference / two_plane_distance) .assign_coords(phi=plane) .assign_attrs(norm=field.norm / two_plane_distance.norm) )
[ 11748, 2124, 18747, 355, 2124, 81, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 256, 33967, 87, 2481, 13, 2164, 359, 844, 62, 7353, 13, 5589, 3906, 1330, 7663, 1370, 2898, 11736, 198, 6738, 256, 3...
2.43578
1,526
# Test cases for HT operations with hostapd # Copyright (c) 2013-2014, Jouni Malinen <j@w1.fi> # # This software may be distributed under the terms of the BSD license. # See README for more details. import time import logging logger = logging.getLogger() import struct import subprocess import hostapd def test_ap_ht40_scan(dev, apdev): """HT40 co-ex scan""" clear_scan_cache(apdev[0]['ifname']) params = { "ssid": "test-ht40", "channel": "5", "ht_capab": "[HT40-]"} hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False) state = hapd.get_status_field("state") if state != "HT_SCAN": time.sleep(0.1) state = hapd.get_status_field("state") if state != "HT_SCAN": raise Exception("Unexpected interface state - expected HT_SCAN") ev = hapd.wait_event(["AP-ENABLED"], timeout=10) if not ev: raise Exception("AP setup timed out") state = hapd.get_status_field("state") if state != "ENABLED": raise Exception("Unexpected interface state - expected ENABLED") freq = hapd.get_status_field("freq") if freq != "2432": raise Exception("Unexpected frequency") pri = hapd.get_status_field("channel") if pri != "5": raise Exception("Unexpected primary channel") sec = hapd.get_status_field("secondary_channel") if sec != "-1": raise Exception("Unexpected secondary channel") dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq) def test_ap_ht40_scan_conflict(dev, apdev): """HT40 co-ex scan conflict""" clear_scan_cache(apdev[0]['ifname']) params = { "ssid": "test-ht40", "channel": "6", "ht_capab": "[HT40+]"} hostapd.add_ap(apdev[1]['ifname'], params) params = { "ssid": "test-ht40", "channel": "5", "ht_capab": "[HT40-]"} hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False) state = hapd.get_status_field("state") if state != "HT_SCAN": time.sleep(0.1) state = hapd.get_status_field("state") if state != "HT_SCAN": raise Exception("Unexpected interface state - expected HT_SCAN") ev = hapd.wait_event(["AP-ENABLED"], timeout=10) if not ev: raise Exception("AP setup 
timed out") state = hapd.get_status_field("state") if state != "ENABLED": raise Exception("Unexpected interface state - expected ENABLED") freq = hapd.get_status_field("freq") if freq != "2432": raise Exception("Unexpected frequency") pri = hapd.get_status_field("channel") if pri != "5": raise Exception("Unexpected primary channel") sec = hapd.get_status_field("secondary_channel") if sec != "0": raise Exception("Unexpected secondary channel: " + sec) dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq) def test_ap_ht40_scan_legacy_conflict(dev, apdev): """HT40 co-ex scan conflict with legacy 20 MHz AP""" clear_scan_cache(apdev[0]['ifname']) params = { "ssid": "legacy-20", "channel": "7", "ieee80211n": "0" } hostapd.add_ap(apdev[1]['ifname'], params) params = { "ssid": "test-ht40", "channel": "5", "ht_capab": "[HT40-]"} hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False) state = hapd.get_status_field("state") if state != "HT_SCAN": time.sleep(0.1) state = hapd.get_status_field("state") if state != "HT_SCAN": raise Exception("Unexpected interface state - expected HT_SCAN") ev = hapd.wait_event(["AP-ENABLED"], timeout=10) if not ev: raise Exception("AP setup timed out") state = hapd.get_status_field("state") if state != "ENABLED": raise Exception("Unexpected interface state - expected ENABLED") freq = hapd.get_status_field("freq") if freq != "2432": raise Exception("Unexpected frequency: " + freq) pri = hapd.get_status_field("channel") if pri != "5": raise Exception("Unexpected primary channel: " + pri) sec = hapd.get_status_field("secondary_channel") if sec != "0": raise Exception("Unexpected secondary channel: " + sec) dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq) def test_ap_ht40_scan_match(dev, apdev): """HT40 co-ex scan matching configuration""" clear_scan_cache(apdev[0]['ifname']) params = { "ssid": "test-ht40", "channel": "5", "ht_capab": "[HT40-]"} hostapd.add_ap(apdev[1]['ifname'], params) params = { "ssid": 
"test-ht40", "channel": "5", "ht_capab": "[HT40-]"} hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False) state = hapd.get_status_field("state") if state != "HT_SCAN": time.sleep(0.1) state = hapd.get_status_field("state") if state != "HT_SCAN": raise Exception("Unexpected interface state - expected HT_SCAN") ev = hapd.wait_event(["AP-ENABLED"], timeout=10) if not ev: raise Exception("AP setup timed out") state = hapd.get_status_field("state") if state != "ENABLED": raise Exception("Unexpected interface state - expected ENABLED") freq = hapd.get_status_field("freq") if freq != "2432": raise Exception("Unexpected frequency") pri = hapd.get_status_field("channel") if pri != "5": raise Exception("Unexpected primary channel") sec = hapd.get_status_field("secondary_channel") if sec != "-1": raise Exception("Unexpected secondary channel: " + sec) dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq) def test_ap_ht40_5ghz_match(dev, apdev): """HT40 co-ex scan on 5 GHz with matching pri/sec channel""" clear_scan_cache(apdev[0]['ifname']) try: params = { "ssid": "test-ht40", "hw_mode": "a", "channel": "36", "country_code": "US", "ht_capab": "[HT40+]"} hostapd.add_ap(apdev[1]['ifname'], params) params = { "ssid": "test-ht40", "hw_mode": "a", "channel": "36", "ht_capab": "[HT40+]"} hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False) state = hapd.get_status_field("state") if state != "HT_SCAN": time.sleep(0.1) state = hapd.get_status_field("state") if state != "HT_SCAN": raise Exception("Unexpected interface state - expected HT_SCAN") ev = hapd.wait_event(["AP-ENABLED"], timeout=10) if not ev: raise Exception("AP setup timed out") state = hapd.get_status_field("state") if state != "ENABLED": raise Exception("Unexpected interface state - expected ENABLED") freq = hapd.get_status_field("freq") if freq != "5180": raise Exception("Unexpected frequency") pri = hapd.get_status_field("channel") if pri != "36": raise Exception("Unexpected 
primary channel") sec = hapd.get_status_field("secondary_channel") if sec != "1": raise Exception("Unexpected secondary channel: " + sec) dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq) finally: subprocess.call(['sudo', 'iw', 'reg', 'set', '00']) def test_ap_ht40_5ghz_switch(dev, apdev): """HT40 co-ex scan on 5 GHz switching pri/sec channel""" clear_scan_cache(apdev[0]['ifname']) try: params = { "ssid": "test-ht40", "hw_mode": "a", "channel": "36", "country_code": "US", "ht_capab": "[HT40+]"} hostapd.add_ap(apdev[1]['ifname'], params) params = { "ssid": "test-ht40", "hw_mode": "a", "channel": "40", "ht_capab": "[HT40-]"} hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False) state = hapd.get_status_field("state") if state != "HT_SCAN": time.sleep(0.1) state = hapd.get_status_field("state") if state != "HT_SCAN": raise Exception("Unexpected interface state - expected HT_SCAN") ev = hapd.wait_event(["AP-ENABLED"], timeout=10) if not ev: raise Exception("AP setup timed out") state = hapd.get_status_field("state") if state != "ENABLED": raise Exception("Unexpected interface state - expected ENABLED") freq = hapd.get_status_field("freq") if freq != "5180": raise Exception("Unexpected frequency: " + freq) pri = hapd.get_status_field("channel") if pri != "36": raise Exception("Unexpected primary channel: " + pri) sec = hapd.get_status_field("secondary_channel") if sec != "1": raise Exception("Unexpected secondary channel: " + sec) dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq) finally: subprocess.call(['sudo', 'iw', 'reg', 'set', '00']) def test_ap_ht40_5ghz_switch2(dev, apdev): """HT40 co-ex scan on 5 GHz switching pri/sec channel (2)""" clear_scan_cache(apdev[0]['ifname']) try: params = { "ssid": "test-ht40", "hw_mode": "a", "channel": "36", "country_code": "US", "ht_capab": "[HT40+]"} hostapd.add_ap(apdev[1]['ifname'], params) id = dev[0].add_network() dev[0].set_network(id, "mode", "2") dev[0].set_network_quoted(id, "ssid", 
"wpas-ap-open") dev[0].set_network(id, "key_mgmt", "NONE") dev[0].set_network(id, "frequency", "5200") dev[0].set_network(id, "scan_freq", "5200") dev[0].select_network(id) time.sleep(1) params = { "ssid": "test-ht40", "hw_mode": "a", "channel": "40", "ht_capab": "[HT40-]"} hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False) state = hapd.get_status_field("state") if state != "HT_SCAN": time.sleep(0.1) state = hapd.get_status_field("state") if state != "HT_SCAN": raise Exception("Unexpected interface state - expected HT_SCAN") ev = hapd.wait_event(["AP-ENABLED"], timeout=10) if not ev: raise Exception("AP setup timed out") state = hapd.get_status_field("state") if state != "ENABLED": raise Exception("Unexpected interface state - expected ENABLED") freq = hapd.get_status_field("freq") if freq != "5180": raise Exception("Unexpected frequency: " + freq) pri = hapd.get_status_field("channel") if pri != "36": raise Exception("Unexpected primary channel: " + pri) sec = hapd.get_status_field("secondary_channel") if sec != "1": raise Exception("Unexpected secondary channel: " + sec) dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq) finally: subprocess.call(['sudo', 'iw', 'reg', 'set', '00']) def test_obss_scan(dev, apdev): """Overlapping BSS scan request""" params = { "ssid": "obss-scan", "channel": "6", "ht_capab": "[HT40-]", "obss_interval": "10" } hapd = hostapd.add_ap(apdev[0]['ifname'], params) params = { "ssid": "another-bss", "channel": "9", "ieee80211n": "0" } hostapd.add_ap(apdev[1]['ifname'], params) dev[0].connect("obss-scan", key_mgmt="NONE", scan_freq="2437") hapd.set("ext_mgmt_frame_handling", "1") logger.info("Waiting for OBSS scan to occur") ev = dev[0].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=15) if ev is None: raise Exception("Timed out while waiting for OBSS scan to start") ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], timeout=10) if ev is None: raise Exception("Timed out while waiting for OBSS scan results") 
received = False for i in range(0, 4): frame = hapd.mgmt_rx(timeout=5) if frame is None: raise Exception("MGMT RX wait timed out") if frame['subtype'] != 13: continue payload = frame['payload'] if len(payload) < 3: continue (category, action, ie) = struct.unpack('BBB', payload[0:3]) if category != 4: continue if action != 0: continue if ie == 72: logger.info("20/40 BSS Coexistence report received") received = True break if not received: raise Exception("20/40 BSS Coexistence report not seen") def test_obss_scan_40_intolerant(dev, apdev): """Overlapping BSS scan request with 40 MHz intolerant AP""" params = { "ssid": "obss-scan", "channel": "6", "ht_capab": "[HT40-]", "obss_interval": "10" } hapd = hostapd.add_ap(apdev[0]['ifname'], params) params = { "ssid": "another-bss", "channel": "7", "ht_capab": "[40-INTOLERANT]" } hostapd.add_ap(apdev[1]['ifname'], params) dev[0].connect("obss-scan", key_mgmt="NONE", scan_freq="2437") hapd.set("ext_mgmt_frame_handling", "1") logger.info("Waiting for OBSS scan to occur") ev = dev[0].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=15) if ev is None: raise Exception("Timed out while waiting for OBSS scan to start") ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], timeout=10) if ev is None: raise Exception("Timed out while waiting for OBSS scan results") received = False for i in range(0, 4): frame = hapd.mgmt_rx(timeout=5) if frame is None: raise Exception("MGMT RX wait timed out") if frame['subtype'] != 13: continue payload = frame['payload'] if len(payload) < 3: continue (category, action, ie) = struct.unpack('BBB', payload[0:3]) if category != 4: continue if action != 0: continue if ie == 72: logger.info("20/40 BSS Coexistence report received") received = True break if not received: raise Exception("20/40 BSS Coexistence report not seen") def test_olbc(dev, apdev): """OLBC detection""" params = { "ssid": "test-olbc", "channel": "6", "ht_capab": "[HT40-]", "ap_table_expiration_time": "2" } hapd = 
hostapd.add_ap(apdev[0]['ifname'], params) status = hapd.get_status() if status['olbc'] != '0' or status['olbc_ht'] != '0': raise Exception("Unexpected OLBC information") params = { "ssid": "olbc-ap", "hw_mode": "b", "channel": "6", "wmm_enabled": "0" } hostapd.add_ap(apdev[1]['ifname'], params) time.sleep(0.5) status = hapd.get_status() if status['olbc'] != '1' or status['olbc_ht'] != '1': raise Exception("Missing OLBC information") hapd_global = hostapd.HostapdGlobal() hapd_global.remove(apdev[1]['ifname']) logger.info("Waiting for OLBC state to time out") cleared = False for i in range(0, 15): time.sleep(1) status = hapd.get_status() if status['olbc'] == '0' and status['olbc_ht'] == '0': cleared = True break if not cleared: raise Exception("OLBC state did nto time out") def test_olbc_5ghz(dev, apdev): """OLBC detection on 5 GHz""" try: params = { "ssid": "test-olbc", "country_code": "FI", "hw_mode": "a", "channel": "36", "ht_capab": "[HT40+]" } hapd = hostapd.add_ap(apdev[0]['ifname'], params) status = hapd.get_status() if status['olbc'] != '0' or status['olbc_ht'] != '0': raise Exception("Unexpected OLBC information") params = { "ssid": "olbc-ap", "country_code": "FI", "hw_mode": "a", "channel": "36", "ieee80211n": "0", "wmm_enabled": "0" } hostapd.add_ap(apdev[1]['ifname'], params) time.sleep(0.5) status = hapd.get_status() if status['olbc_ht'] != '1': raise Exception("Missing OLBC information") finally: subprocess.call(['sudo', 'iw', 'reg', 'set', '00']) def test_ap_require_ht(dev, apdev): """Require HT""" params = { "ssid": "require-ht", "require_ht": "1" } hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False) dev[1].connect("require-ht", key_mgmt="NONE", scan_freq="2412", disable_ht="1", wait_connect=False) dev[0].connect("require-ht", key_mgmt="NONE", scan_freq="2412") ev = dev[1].wait_event(["CTRL-EVENT-ASSOC-REJECT"]) if ev is None: raise Exception("Association rejection timed out") if "status_code=27" not in ev: raise 
Exception("Unexpected rejection status code") dev[2].connect("require-ht", key_mgmt="NONE", scan_freq="2412", ht_mcs="0x01 00 00 00 00 00 00 00 00 00", disable_max_amsdu="1", ampdu_factor="2", ampdu_density="1", disable_ht40="1", disable_sgi="1", disable_ldpc="1") def test_ap_require_ht_limited_rates(dev, apdev): """Require HT with limited supported rates""" params = { "ssid": "require-ht", "supported_rates": "60 120 240 360 480 540", "require_ht": "1" } hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False) dev[1].connect("require-ht", key_mgmt="NONE", scan_freq="2412", disable_ht="1", wait_connect=False) dev[0].connect("require-ht", key_mgmt="NONE", scan_freq="2412") ev = dev[1].wait_event(["CTRL-EVENT-ASSOC-REJECT"]) if ev is None: raise Exception("Association rejection timed out") if "status_code=27" not in ev: raise Exception("Unexpected rejection status code") def test_ap_ht_capab_not_supported(dev, apdev): """HT configuration with driver not supporting all ht_capab entries""" params = { "ssid": "test-ht40", "channel": "5", "ht_capab": "[HT40-][LDPC][SMPS-STATIC][SMPS-DYNAMIC][GF][SHORT-GI-20][SHORT-GI-40][TX-STBC][RX-STBC1][RX-STBC12][RX-STBC123][DELAYED-BA][MAX-AMSDU-7935][DSSS_CCK-40][LSIG-TXOP-PROT]"} hapd = hostapd.add_ap(apdev[0]['ifname'], params, no_enable=True) if "FAIL" not in hapd.request("ENABLE"): raise Exception("Unexpected ENABLE success") def test_ap_ht_40mhz_intolerant_sta(dev, apdev): """Associated STA indicating 40 MHz intolerant""" clear_scan_cache(apdev[0]['ifname']) params = { "ssid": "intolerant", "channel": "6", "ht_capab": "[HT40-]" } hapd = hostapd.add_ap(apdev[0]['ifname'], params) if hapd.get_status_field("num_sta_ht40_intolerant") != "0": raise Exception("Unexpected num_sta_ht40_intolerant value") if hapd.get_status_field("secondary_channel") != "-1": raise Exception("Unexpected secondary_channel") dev[0].connect("intolerant", key_mgmt="NONE", scan_freq="2437") if hapd.get_status_field("num_sta_ht40_intolerant") != 
"0": raise Exception("Unexpected num_sta_ht40_intolerant value") if hapd.get_status_field("secondary_channel") != "-1": raise Exception("Unexpected secondary_channel") dev[2].connect("intolerant", key_mgmt="NONE", scan_freq="2437", ht40_intolerant="1") time.sleep(1) if hapd.get_status_field("num_sta_ht40_intolerant") != "1": raise Exception("Unexpected num_sta_ht40_intolerant value (expected 1)") if hapd.get_status_field("secondary_channel") != "0": raise Exception("Unexpected secondary_channel (did not disable 40 MHz)") dev[2].request("DISCONNECT") time.sleep(1) if hapd.get_status_field("num_sta_ht40_intolerant") != "0": raise Exception("Unexpected num_sta_ht40_intolerant value (expected 0)") if hapd.get_status_field("secondary_channel") != "-1": raise Exception("Unexpected secondary_channel (did not re-enable 40 MHz)") def test_ap_ht_40mhz_intolerant_ap(dev, apdev): """Associated STA reports 40 MHz intolerant AP after association""" clear_scan_cache(apdev[0]['ifname']) params = { "ssid": "ht", "channel": "6", "ht_capab": "[HT40-]", "obss_interval": "1" } hapd = hostapd.add_ap(apdev[0]['ifname'], params) dev[0].connect("ht", key_mgmt="NONE", scan_freq="2437") if hapd.get_status_field("secondary_channel") != "-1": raise Exception("Unexpected secondary channel information") logger.info("Start 40 MHz intolerant AP") params = { "ssid": "intolerant", "channel": "5", "ht_capab": "[40-INTOLERANT]" } hapd2 = hostapd.add_ap(apdev[1]['ifname'], params) logger.info("Waiting for co-ex report from STA") ok = False for i in range(0, 20): time.sleep(1) if hapd.get_status_field("secondary_channel") == "0": logger.info("AP moved to 20 MHz channel") ok = True break if not ok: raise Exception("AP did not move to 20 MHz channel") if "OK" not in hapd2.request("DISABLE"): raise Exception("Failed to disable 40 MHz intolerant AP") # make sure the intolerant AP disappears from scan results more quickly dev[0].scan(only_new=True) dev[0].scan(freq="2432", only_new=True) logger.info("Waiting 
for AP to move back to 40 MHz channel") ok = False for i in range(0, 30): time.sleep(1) if hapd.get_status_field("secondary_channel") == "-1": ok = True if not ok: raise Exception("AP did not move to 40 MHz channel")
[ 2, 6208, 2663, 329, 7154, 4560, 351, 2583, 499, 67, 198, 2, 15069, 357, 66, 8, 2211, 12, 4967, 11, 449, 977, 72, 4434, 42326, 1279, 73, 31, 86, 16, 13, 12463, 29, 198, 2, 198, 2, 770, 3788, 743, 307, 9387, 739, 262, 2846, 286,...
2.142485
10,373
# coding: utf-8 # author: Haydara https://www.youtube.com/haydara import pickle with open('vocabs.pkl', 'rb') as pickle_load: voc_list = pickle.load(pickle_load) allowed_chars = ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ' '] max_word_length = 9
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 2, 1772, 25, 9075, 67, 3301, 220, 3740, 1378, 2503, 13, 11604, 13, 785, 14, 71, 323, 67, 3301, 198, 198, 11748, 2298, 293, 198, 198, 4480, 1280, 10786, 18893, 8937, 13, 79, 41582, 3256, 705, ...
1.965
200
import facade import pytest import schemas
[ 11748, 43562, 198, 11748, 12972, 9288, 198, 11748, 3897, 5356, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 198 ]
2.7
30
import pandas import numpy as np from .reference_histogram_outlier import HistOutlier from typing import Union, Tuple
[ 11748, 19798, 292, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 764, 35790, 62, 10034, 21857, 62, 448, 2505, 1330, 5590, 7975, 2505, 198, 6738, 19720, 1330, 4479, 11, 309, 29291, 628 ]
3.71875
32
import numpy as np import fast_functions as ff import time print('python') G = np.zeros((512,512,240),dtype=np.uint8) G[128:384,128:384,60:180]=1 volume_data = np.load('1.npz') F = volume_data['volume'].astype(np.uint8) start_time = time.time() F = post_processing(F, F, 1.0, False) print(time.time() - start_time) start_time = time.time() for l in range(10): DSC = DSC_computation(F,G) print(DSC) print(time.time() - start_time) print('SWIG') volume_data = np.load('1.npz') G = np.zeros((512,512,240),dtype=np.uint8) G[128:384,128:384,60:180]=1 F = volume_data['volume'].astype(np.uint8) start_time = time.time() ff.post_processing(F, F, 1.0, False) print(time.time() - start_time) start_time = time.time() for l in range(10): P = np.zeros(3, dtype = np.uint32) ff.DSC_computation(F,G,P) print(P, float(P[2]) * 2 / (P[0] + P[1])) print(time.time() - start_time)
[ 11748, 299, 32152, 355, 45941, 198, 11748, 3049, 62, 12543, 2733, 355, 31246, 198, 11748, 640, 628, 198, 198, 4798, 10786, 29412, 11537, 198, 38, 796, 45941, 13, 9107, 418, 19510, 25836, 11, 25836, 11, 16102, 828, 67, 4906, 28, 37659, ...
2.247423
388
# -*- coding: utf-8 -*- # @Time : 2019/5/25 16:09 # @Author : Alan # @Email : xiezhengwen2013@163.com # @File : data_preprocess2.py # @Software: PyCharm #
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2488, 7575, 220, 220, 220, 1058, 13130, 14, 20, 14, 1495, 1467, 25, 2931, 198, 2, 2488, 13838, 220, 1058, 12246, 198, 2, 2488, 15333, 220, 220, 1058, 2124, 494, 8...
2.075
80
#!/usr/bin/env python # coding:utf-8 import pymysql default_config = { 'host': '139.196.96.149', 'port': 13306, 'user': 'dataway-rw', 'password': 'QqHVMhmN*8', 'db': 'jumei', 'charset': 'utf8mb4' } apollo_config = { 'host': '127.0.0.1', 'port': 11306, 'user': 'apollo-rw', 'password': 'QBT094bt', 'db': 'apollo', 'charset': 'utf8mb4', 'autocommit': True } allsite_config = { 'host': '127.0.0.1', 'port': 15306, 'user': 'apollo-rw', 'password': 'QBT094bt', 'db': 'all_site', 'charset': 'utf8mb4' } dataway_config = { 'host': '139.196.96.149', 'port': 13306, 'user': 'dataway-rw', 'password': 'QqHVMhmN*8', 'db': 'jumei', 'charset': 'utf8mb4' } dw_entity_config = { 'host': '127.0.0.1', 'port': 18306, 'user': 'qbt', 'password': 'QBT094bt', 'db': 'dw_entity', 'charset': 'utf8mb4', 'autocommit': True } channel_config = { 'host': 'channel.ecdataway.com', 'port': 3306, 'user': 'comment_catcher', 'password': 'cc33770880', 'db': 'monitor', 'charset': 'utf8mb4', 'cursorclass': pymysql.cursors.DictCursor }
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 19617, 25, 40477, 12, 23, 198, 11748, 279, 4948, 893, 13976, 198, 198, 12286, 62, 11250, 796, 1391, 198, 220, 220, 220, 705, 4774, 10354, 705, 20219, 13, 25272, 13, 4846, 13, 1944...
1.92691
602
import pyximport pyximport.install() from aoc24 import do_it do_it()
[ 11748, 12972, 87, 11748, 198, 9078, 87, 11748, 13, 17350, 3419, 198, 198, 6738, 257, 420, 1731, 1330, 466, 62, 270, 198, 4598, 62, 270, 3419, 198 ]
2.592593
27
from opensimplex import OpenSimplex import torch, time print(opensimplex_test('cuda')) print('') print(opensimplex_test('cpu'))
[ 6738, 9808, 320, 11141, 1330, 4946, 8890, 11141, 201, 198, 11748, 28034, 11, 640, 201, 198, 201, 198, 4798, 7, 44813, 320, 11141, 62, 9288, 10786, 66, 15339, 6, 4008, 201, 198, 4798, 7, 7061, 8, 201, 198, 4798, 7, 44813, 320, 11141,...
2.714286
49
from .levenshtein_distance import levenshtein_distance from .bk_tree import BKTree
[ 6738, 764, 293, 574, 1477, 22006, 62, 30246, 1330, 443, 574, 1477, 22006, 62, 30246, 198, 6738, 764, 65, 74, 62, 21048, 1330, 347, 42, 27660, 198 ]
3.074074
27
import torch import torch.nn as nn from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence from collections import namedtuple from ELMo.modules.char_embedding import CharEmbedding
[ 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 6738, 28034, 13, 20471, 13, 26791, 13, 81, 20471, 1330, 2353, 62, 79, 29373, 62, 43167, 11, 14841, 62, 34860, 62, 43167, 198, 6738, 17268, 1330, 3706, 83, 29291, 198, 6738...
3.45614
57
import datetime as dt import cx_Oracle from src.app.externalOutages.getReasonId import getReasonId def createRealTimeOutage(pwcDbConnStr: str, elemTypeId: int, elementId: int, outageDt: dt.datetime, outageTypeId: int, reason: str, elementName: str, sdReqId: int, outageTagId: int) -> int: """create a new row in real time outages pwc table and return the id of newly created row Args: pwcDbConnStr (str): [description] elemTypeId (int): [description] elementId (int): [description] outageDt (dt.datetime): [description] outageTypeId (int): [description] reason (str): [description] elementName (str): [description] sdReqId (int): [description] outageTagId (int): [description] Returns: int: id of newly created row """ newRtoId = -1 if outageDt == None: return -1 if reason == None or reason == "": reason = "NA" reasId = getReasonId(pwcDbConnStr, reason, outageTypeId) if reasId == -1: return -1 outageDate: dt.datetime = dt.datetime( outageDt.year, outageDt.month, outageDt.day) outageTime: str = dt.datetime.strftime(outageDt, "%H:%M") newRtoIdFetchSql = """ SELECT MAX(rto.ID)+1 FROM REPORTING_WEB_UI_UAT.real_time_outage rto """ rtoInsertSql = """ insert into reporting_web_ui_uat.real_time_outage rto(ID, ENTITY_ID, ELEMENT_ID, OUTAGE_DATE, OUTAGE_TIME, RELAY_INDICATION_SENDING_ID, RELAY_INDICATION_RECIEVING_ID, CREATED_DATE, SHUT_DOWN_TYPE, REASON_ID, CREATED_BY, MODIFIED_BY, REGION_ID, ELEMENTNAME, SHUTDOWNREQUEST_ID, LOAD_AFFECTED, IS_LOAD_OR_GEN_AFFECTED, SHUTDOWN_TAG_ID, IS_DELETED) values (:id, :elemTypeId, :elementId, :outageDate, :outageTime, 0, 0, CURRENT_TIMESTAMP, :outageTypeId, :reasonId, 123, 123, 4, :elementName, :sdReqId, 0, 0, :outageTagId, NULL) """ dbConn = None dbCur = None try: # get connection with raw data table dbConn = cx_Oracle.connect(pwcDbConnStr) # get cursor for raw data table dbCur = dbConn.cursor() # execute the new rto id fetch sql dbCur.execute(newRtoIdFetchSql) dbRows = dbCur.fetchall() newRtoId = dbRows[0][0] sqlData = {"id": newRtoId, 
"elemTypeId": elemTypeId, "elementId": elementId, "outageDate": outageDate, "outageTime": outageTime, "outageTypeId": outageTypeId, "reasonId": reasId, "elementName": elementName, "sdReqId": sdReqId, "outageTagId": outageTagId} # execute the new row insertion sql dbCur.execute(rtoInsertSql, sqlData) # commit the changes dbConn.commit() except Exception as e: newRtoId = -1 print('Error while creating new real time outage entry in pwc table') print(e) finally: # closing database cursor and connection if dbCur is not None: dbCur.close() if dbConn is not None: dbConn.close() return newRtoId
[ 11748, 4818, 8079, 355, 288, 83, 198, 11748, 43213, 62, 48625, 198, 6738, 12351, 13, 1324, 13, 22615, 7975, 1095, 13, 1136, 45008, 7390, 1330, 651, 45008, 7390, 628, 198, 4299, 2251, 15633, 7575, 7975, 496, 7, 79, 86, 66, 43832, 37321...
2.2044
1,409