id
stringlengths
1
265
text
stringlengths
6
5.19M
dataset_id
stringclasses
7 values
108099
"""Handler for ECN v1 stationary accelerometer messages.

Decodes JSON payloads from v1 stations and stores per-second Z/N/E
acceleration sample arrays into hour-bucketed MongoDB documents.
"""
import json
import logging
from datetime import datetime, timedelta

import pymongo

from ecn import StationKind, PPTIK_GRAVITY, StationState


class StationaryV1Handler:
    logger = logging.getLogger(__name__)
    # Expected samples per second from a v1 station.
    SAMPLE_RATE = 40

    def __init__(self, db: pymongo.database.Database):
        self.db: pymongo.database.Database = db

    def receive(self, body: bytearray):
        """Process one raw message body received from a v1 station.

        Looks up the station by its ``clientID``, appends the message's
        acceleration samples into the per-hour accel document, and marks
        the station as high-rate. Silently ignores broken JSON and
        unknown stations (logged as errors).
        """
        body = body.replace(b'nan,', b'null,')  # Workaround for ECNv1 bug
        # logger.debug('Decoding %s', body)
        try:
            msg = json.loads(body)
        except Exception as e:
            self.logger.error('Ignoring broken JSON: %s', str(body), exc_info = e)
            return
        client_id = msg['clientID']
        station_coll: pymongo.collection.Collection = self.db.station
        station = station_coll.find_one({'k': StationKind.V1, 'i': client_id}, projection={'_id': 1})
        if not station:
            self.logger.error('Unknown v1 station: %s', client_id)
            return
        station_id = station['_id']
        # self.logger.debug('Station: %s', station_id)
        accel_coll: pymongo.collection.Collection = self.db.accel
        # Bucket one second in the past: the message covers the previous second.
        ts = datetime.utcnow() - timedelta(seconds=1)
        tstr = ts.strftime('%Y%m%d%H')
        # One accel document per station per hour: '<YYYYMMDDHH>:<station_id>'.
        accel_id = '%s:%s' % (tstr, station_id)
        second_of_hour = (60 * ts.minute) + ts.second
        self.logger.debug('Accel ID: %s at %d (%d:%d)', accel_id, second_of_hour, ts.minute, ts.second)
        existing_accel_doc = accel_coll.find_one({'_id': accel_id}, projection={'_id': 1})
        if not existing_accel_doc:
            # "Preallocate" arrays except innermost: one None slot per second
            # of the hour so positional $set updates below always resolve.
            accels = [None for sec in range(60 * 60)]
            self.logger.debug('Inserting stationary_v1 accel %s sample_rate=%d', accel_id, self.SAMPLE_RATE)
            accel_coll.insert_one({'_id': accel_id, 'r': self.SAMPLE_RATE, 'z': accels, 'n': accels, 'e': accels})
        if client_id == 'ECN-4':
            # ECN-4 appears to have a different mounting: its 'y' axis is
            # vertical and is shifted by a fixed offset before storage.
            # NOTE(review): `if orig['y']` also maps a legitimate 0.0 reading
            # to None (0.0 is falsy) — confirm this is intended.
            z_values = [(orig['y'] + 6.598601 if orig['y'] else None) for orig in msg['accelerations']]
            n_values = [(orig['x'] if orig['x'] else None) for orig in msg['accelerations']]
            e_values = [(orig['z'] if orig['z'] else None) for orig in msg['accelerations']]
        else:
            # Default orientation: vertical axis is inverted 'z' plus gravity constant.
            z_values = [(-orig['z'] + PPTIK_GRAVITY if orig['z'] else None) for orig in msg['accelerations']]
            n_values = [(orig['x'] if orig['x'] else None) for orig in msg['accelerations']]
            e_values = [(orig['y'] if orig['y'] else None) for orig in msg['accelerations']]
        # Update accel Z/N/E at this second's slot in the hour bucket.
        # logger.debug('%s a.%d.%d Z = %s', accel_id, ts.minute, ts.second, z_values)
        accel_coll.update_one({'_id': accel_id}, {'$set': {
            'z.%d' % (second_of_hour): z_values,
            'n.%d' % (second_of_hour): n_values,
            'e.%d' % (second_of_hour): e_values,
        }})
        # Legacy per-axis collections, kept for reference:
        # logger.debug('%s a.%d.%d Z = %s', accel_id, ts.minute, ts.second, z_values)
        # accel_z_coll.update_one({'_id': accel_id}, {'$set': {'a.%d.%d' % (ts.minute, ts.second): z_values}})
        # logger.debug('%s a.%d.%d NS = %s', accel_id, ts.minute, ts.second, n_values)
        # accel_n_coll.update_one({'_id': accel_id}, {'$set': {'a.%d.%d' % (ts.minute, ts.second): n_values}})
        # logger.debug('%s a.%d.%d EW = %s', accel_id, ts.minute, ts.second, e_values)
        # accel_e_coll.update_one({'_id': accel_id}, {'$set': {'a.%d.%d' % (ts.minute, ts.second): e_values}})
        # mark as 'H'igh rate
        station_coll.update_one({'_id': station_id}, {'$set': {'s': StationState.HIGH_RATE, 't': datetime.utcnow()}})
StarcoderdataPython
1640217
"""
Django settings for tango_with_django project.

Generated by 'django-admin startproject' using Django 1.10.1.

For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""

import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# (Value redacted in this copy — supply the real key, ideally from the environment.)
SECRET_KEY = '<KEY>'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'grappelli',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rango',
    'registration',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'tango_with_django.urls'

TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATES_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.media',
            ],
        },
    },
]

WSGI_APPLICATION = 'tango_with_django.wsgi.application'

# Database credentials come from the environment so secrets stay out of VCS.
DB_NAME = os.environ.get("DB_NAME")
DB_USER = os.environ.get("DB_USER")
DB_PASSWORD = os.environ.get("DB_PASSWORD")
DB_HOST = os.environ.get("DB_HOST")
DB_PORT = os.environ.get("DB_PORT")

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': DB_NAME,
        'USER': DB_USER,
        # Fixed: the checked-in copy contained an invalid '<PASSWORD>'
        # placeholder; the password is read from the environment above.
        'PASSWORD': DB_PASSWORD,
        'HOST': DB_HOST,
        'PORT': DB_PORT
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Password hashing
# https://docs.djangoproject.com/en/1.10/topics/auth/passwords/

PASSWORD_HASHERS = (
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
)

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'
STATIC_DIR = os.path.join(BASE_DIR, 'static')
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
STATICFILES_DIRS = [STATIC_DIR, ]

MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media_root')

# Registration related settings
REGISTRATION_OPEN = True
ACCOUNT_ACTIVATION_DAYS = 1
REGISTRATION_AUTO_LOGIN = True
LOGIN_REDIRECT_URL = '/rango/'
# Single definition — the original assigned LOGIN_URL twice with the same value.
LOGIN_URL = '/accounts/login/'
StarcoderdataPython
3391184
import colored import functools blue = functools.partial(colored.stylize, styles=colored.fore.BLUE) green = functools.partial(colored.stylize, styles=colored.fore.GREEN) red = functools.partial(colored.stylize, styles=colored.fore.RED) print(blue("This is blue")) print(green("This is green")) print(red("This is red"))
StarcoderdataPython
3261235
<filename>chess_1.py ''' 8 ♜ ♞ ♝ ♛ ♚ ♝ ♞ ♜ 7 ♟ ♟ ♟ ♟ ♟ ♟ ♟ ♟ 6 5 4 3 2 ♙ ♙ ♙ ♙ ♙ ♙ ♙ ♙ 1 ♖ ♘ ♗ ♕ ♔ ♗ ♘ ♖ a b c d e f g h ''' board = [ '♜', '♞', '♝', '♛', '♚', '♝', '♞', '♜', '♟', '♟', '♟', '♟', '♟', '♟', '♟', '♟', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '♙', '♙', '♙', '♙', '♙', '♙', '♙', '♙', '♖', '♘', '♗', '♕', '♔', '♗', '♘', '♖', ] hor_line = '-----------------' ver_line_char = '|' while True: # Чистим экран for y in range(30): print("") # Рисуем доску cur_x = 0 cur_y = 0 print(hor_line) for c in board: if cur_x == 0: print(ver_line_char, sep="", end="") print(c, sep="", end=ver_line_char) cur_x += 1 if cur_x > 7: cur_x = 0 cur_y += 1 print("") print(hor_line) x1 = int(input("Введите х1:")) y1 = int(input("Введите y1:")) x2 = int(input("Введите х2:")) y2 = int(input("Введите y2:")) # Делаем ход char1 = board[y1*8 + x1] char2 = board[y2*8 + x2] if char1 == ' ' or not char2 == ' ': print("Ход невозможен") else: board[y2*8 + x2] = char1 board[y1*8 + x1] = char2
StarcoderdataPython
66198
"""Utility helpers for closedverse: time formatting, Mii/NNID lookup,
reCAPTCHA verification, image upload/removal, Gravatar lookup, text
filtering and proxy/NNID blacklist checks."""
from lxml import html
# Todo: move all requests to using requests instead of urllib3
import urllib.request, urllib.error
import requests
from lxml import etree
from random import choice
import json
import time
import os.path
from PIL import Image, ExifTags, ImageFile
from datetime import datetime
from binascii import crc32
from math import floor
from hashlib import md5, sha1
# lol bye Cloudinary, see you another day
#import cloudinary
#import cloudinary.uploader
#import cloudinary.api
import io
from uuid import uuid4
import imghdr
import base64
from closedverse import settings
import re
from os import remove, rename


def HumanTime(date, full=False):
    """Format a Unix timestamp as a relative "N units ago" string.

    Falls back to an absolute 'MM/DD/YYYY HH:MM AM/PM' string when the
    timestamp is older than 4 days (345600 s) or when full=True.
    """
    now = time.time()
    if ((now - date) >= 345600) or full:
        return datetime.fromtimestamp(date).strftime('%m/%d/%Y %I:%M %p')
    interval = (now - date) or 1
    if interval <= 59:
        return 'Less than a minute ago'
    intvals = [86400, 3600, 60, ]
    # First interval that fits determines the unit (day/hour/minute).
    for i in intvals:
        if interval < i:
            continue
        nounits = floor(interval / i)
        text = {86400: 'day', 3600: 'hour', 60: 'minute', }.get(i)
        if nounits > 1:
            text += 's'
        return str(nounits) + ' ' + text + ' ago';


def get_mii(id):
    """Resolve an NNID to [mii_hash, screen_name, nnid] via Nintendo AccountWS.

    Returns False on any lookup failure.
    """
    # Using Miiverse off-device server, doesn't work after Miiverse shutdown
    """
    try:
        page = urllib.request.urlopen('https://miiverse.nintendo.net/users/{0}/favorites'.format(id)).read()
    except urllib.error.HTTPError:
        return False
    ftree = html.fromstring(page)
    miihash = ftree.xpath('//*[@id="sidebar-profile-body"]/div/a/img/@src')[0].split('.net/')[1].split('_n')[0]
    screenname = ftree.xpath('//*[@id="sidebar-profile-body"]/a/text()')[0]
    ou_check = ftree.xpath('//*[@id="sidebar-profile-body"]/div/@class')
    if ou_check and 'official-user' in ou_check[0]:
        return False
    if "img/anonymous-mii.png" in miihash:
        miihash = ''
    """
    # Using AccountWS
    dmca = {
        'X-Nintendo-Client-ID': 'a2efa818a34fa16b8afbc8a74eba3eda',
        'X-Nintendo-Client-Secret': 'c91cdb5658bd4954ade78533a339cf9a',
    }
    # TODO: Make this, the gravatar request, and reCAPTCHA request escape (or plainly use) URL params
    nnid = requests.get('https://accountws.nintendo.net/v1/api/admin/mapped_ids?input_type=user_id&output_type=pid&input=' + id, headers=dmca)
    nnid_dec = etree.fromstring(nnid.content)
    del(nnid)
    pid = nnid_dec[0][1].text
    if not pid:
        return False
    del(nnid_dec)
    mii = requests.get('https://accountws.nintendo.net/v1/api/miis?pids=' + pid, headers=dmca)
    try:
        mii_dec = etree.fromstring(mii.content)
    # Can't be fucked to put individual exceptions to catch here
    except:
        return False
    del(mii)
    try:
        miihash = mii_dec[0][2][0][0].text.split('.net/')[1].split('_')[0]
    except IndexError:
        miihash = None
    screenname = mii_dec[0][3].text
    nnid = mii_dec[0][6].text
    del(mii_dec)
    # Also todo: Return the NNID based on what accountws returns, not the user's input!!!
    return [miihash, screenname, nnid]


def recaptcha_verify(request, key):
    """Verify a reCAPTCHA response from the POSTed form; True on success."""
    if not request.POST.get('g-recaptcha-response'):
        return False
    re_request = urllib.request.urlopen('https://www.google.com/recaptcha/api/siteverify?secret={0}&response={1}'.format(key, request.POST['g-recaptcha-response']))
    jsond = json.loads(re_request.read().decode())
    if not jsond['success']:
        return False
    return True


# Allow PIL to load images whose data is cut short.
ImageFile.LOAD_TRUNCATED_IMAGES = True


def image_upload(img, stream=False, drawing=False):
    """Validate, normalize and store an uploaded image.

    img is a file-like object when stream=True, otherwise a base64 string
    (optionally prefixed 'checksum----/' when drawing=True).
    Returns the public media URL on success, or the int 1 on any failure.
    """
    if stream:
        decodedimg = img.read()
    else:
        # Brand New drawing checksum
        # Never mind
        if drawing:
            if not '----/' in img:
                return 1
            hasha = img.split('----/')
            # Appears to be broken; works some of the time, other times
            #if not 0 > int(hasha[0]) and crc32(bytes(hasha[1], 'utf-8')) != int(hasha[0]):
            #    return 1
            img = hasha[1]
        try:
            decodedimg = base64.b64decode(img)
        except ValueError:
            return 1
    if stream:
        if not 'image' in img.content_type:
            return 1
        if 'audio' in img.content_type or 'video' in img.content_type:
            return 1
    # upload svg?
    #if 'svg' in mime:
    try:
        im = Image.open(io.BytesIO(decodedimg))
    # OSError is probably from invalid images, SyntaxError probably from unsupported images
    except (OSError, SyntaxError):
        return 1
    # Taken from https://coderwall.com/p/nax6gg/fix-jpeg-s-unexpectedly-rotating-when-saved-with-pil
    # Undo camera EXIF orientation so the saved thumbnail is upright.
    if hasattr(im, '_getexif'):
        orientation = 0x0112
        exif = im._getexif()
        if exif is not None:
            orientation = exif.get(orientation)
            rotations = {
                3: Image.ROTATE_180,
                6: Image.ROTATE_270,
                8: Image.ROTATE_90
            }
            if orientation in rotations:
                im = im.transpose(rotations[orientation])
    im.thumbnail((1280, 1280), Image.ANTIALIAS)
    # Let's check the aspect ratio and see if it's crazy
    # IF this is a drawing
    if drawing and ((im.size[0] / im.size[1]) < 0.30):
        return 1
    # I know some people have aneurysms when they see people actually using SHA1 in the real world, for anything in general.
    # Yes, we are really using it. Sorry if that offends you. It's just fast and I don't feel I need anything more random, since we are talking about IMAGES.
    imhash = sha1(im.tobytes()).hexdigest()
    # File saving target
    target = 'png'
    if stream:
        # If we have a stream and either a JPEG or a WEBP, save them as those since those are a bit better than plain PNG
        if 'jpeg' in img.content_type:
            target = 'jpeg'
            im = im.convert('RGB')
        elif 'webp' in img.content_type:
            target = 'webp'
    floc = imhash + '.' + target
    # If the file exists, just use it, that's what hashes are for.
    if not os.path.exists(settings.MEDIA_ROOT + floc):
        im.save(settings.MEDIA_ROOT + floc, target, optimize=True)
    return settings.MEDIA_URL + floc


# Todo: Put this into post/comment delete thingy method
def image_rm(image_url):
    """Delete or archive a stored media file, per settings.image_delete_opt.

    Opt > 1 removes the file; opt == 1 moves it into an 'rm/' subdirectory.
    Returns True on success, False otherwise.
    """
    if settings.image_delete_opt:
        if settings.MEDIA_URL in image_url:
            sysfile = image_url.split(settings.MEDIA_URL)[1]
            sysloc = settings.MEDIA_ROOT + sysfile
            if settings.image_delete_opt > 1:
                try:
                    remove(sysloc)
                except:
                    return False
                else:
                    return True
            # The RM'd directory to move it to
            rmloc = sysloc.replace(settings.MEDIA_ROOT, settings.MEDIA_ROOT + 'rm/')
            try:
                rename(sysloc, rmloc)
            except:
                return False
            else:
                return True
    else:
        return False


def get_gravatar(email):
    """Return the (possibly redirected) Gravatar URL for email, or False if none exists."""
    try:
        page = urllib.request.urlopen('https://gravatar.com/avatar/'+ md5(email.encode('utf-8').lower()).hexdigest() +'?d=404&s=128')
    except:
        return False
    return page.geturl()


def filterchars(str=""):
    """Strip forbidden (layout-breaking) Unicode characters from str.

    If the result is all whitespace, return a random replacement string
    from girls.json instead.
    """
    # If string is blank, None, any other object, etc, make it whitespace so it's detected by isspace.
    if not str:
        str = " "
    # Forbid chars in this list, currently: Right-left override, largest Unicode character.
    # Now restricting everything in https://www.reddit.com/r/Unicode/comments/5qa7e7/widestlongest_unicode_characters_list/
    forbid = ["\u202e", "\ufdfd", "\u01c4", "\u0601", "\u2031", "\u0bb9", "\u0bf8", "\u0bf5", "\ua9c4", "\u102a", "\ua9c5", "\u2e3b", "\ud808", "\ude19", "\ud809", "\udc2b", "\ud808", "\udf04", "\ud808", "\ude1f", "\ud808", "\udf7c", "\ud808", "\udc4e", "\ud808", "\udc31", "\ud808", "\udf27", "\ud808", "\udd43", "\ud808", "\ude13", "\ud808", "\udf59", "\ud808", "\ude8e", "\ud808", "\udd21", "\ud808", "\udd4c", "\ud808", "\udc4f", "\ud808", "\udc30", "\ud809", "\udc2a", "\ud809", "\udc29", "\ud808", "\ude19", "\ud809", "\udc2b"]
    for char in forbid:
        if char in str:
            str = str.replace(char, " ")
    if str.isspace():
        try:
            girls = json.load(open(settings.BASE_DIR + '/girls.json'))
        except:
            girls = ['None']
        return choice(girls)
    return str


"""
Not using getipintel anymore
def getipintel(addr):
    # My router's IP prefix is 192.168.1.*, so this works in debug
    if settings.ipintel_email and not '192.168' in addr:
        try:
            site = urllib.request.urlopen('https://check.getipintel.net/check.php?ip={0}&contact={1}&flags=f'
            .format(addr, settings.ipintel_email))
        except:
            return 0
        return float(site.read().decode())
    else:
        return 0
"""
# Now using iphub
def iphub(addr):
    """Return True if iphub.info flags addr as a proxy/VPN.

    NOTE(review): returns None (falsy) when no key is set or addr looks
    local — callers appear to treat that the same as False.
    """
    if settings.iphub_key and not '192.168' in addr:
        get = requests.get('http://v2.api.iphub.info/ip/' + addr, headers={'X-Key': settings.iphub_key})
        if get.json()['block'] == 1:
            return True
        else:
            return False


# NNID blacklist check
def nnid_blacked(nnid):
    """Return True if the normalized NNID is on the local blacklist."""
    blacklist = json.load(open(settings.nnid_forbiddens))
    # The NNID server omits dashes and dots from NNIDs, gotta make sure nobody gets through this
    nnid = nnid.lower().replace('-', '').replace('.', '')
    if nnid in blacklist:
        return True
    return False
StarcoderdataPython
3312246
<reponame>swasthikshetty10/EPAX-AI from django.urls import path from . import views urlpatterns = [ path('', views.home, name='home'), path('response/<str:content>', views.response, name='response'), path('userdata/', views.userdata, name='userdata'), path('sendnotes/<str:title>/<str:notes>/', views.sendnotes, name='sendnotes'), path('deletenotes/<str:title>', views.deletenotes, name='deletenotes'), path('shownotes/', views.shownotes, name='shownotes'), path('readnotes/<str:title>', views.readnotes, name="readnotes"), path('getjoke/', views.getjoke, name="getjoke"), path('getmeme/', views.getmeme, name="getmeme"), path('test/', views.test, name="test"), path('music/', views.music, name="Music"), path('username/', views.username, name="username"), path('sendfeedback/<str:value>', views.feedback, name="sendfeedback"), path('feedback/', views.feedbackpage, name="feedback"), path('epaxsearch/<str:query>', views.searchengine, name="epaxsearch"), ]
StarcoderdataPython
3302820
<gh_stars>0 # -*- coding: utf-8 -*- """ Bencode encoding code by <NAME>, slightly simplified by uriel, additionally modified by <NAME> """ from itertools import chain def bencode(x): r = [] if isinstance(x, (int, bool)): r.extend(('i', str(x), 'e')) elif isinstance(x, str): r.extend((str(len(x)), ':', x)) elif isinstance(x, (list, tuple)): # FIXME do interface checking rather than type checking r.append('l') r.extend(bencode(i) for i in x) r.append('e') elif isinstance(x, dict): for key in x.keys(): if isinstance(key, int): raise TypeError r.append('d') encoded_list = [(bencode(k), bencode(v)) for k, v in sorted(x.items())] r.extend(tuple(chain(*encoded_list))) r.append('e') return ''.join(r)
StarcoderdataPython
1719041
"""Snooze/unsnooze emails in macOS Mail.app.

Drives the companion mail.js AppleScript via osascript and records
wake-up times in a local snooze database (snoozeDb).
"""
import sys
sys.path.insert(0, '/Users/swehr/devel/libPyshell/src/')
from shell import *
import json
import pathlib
import snoozeDb
import time
import timeSuggest
import inputDateTime

# mail.js lives next to this script.
thisDir = pathlib.Path(__file__).parent.absolute()
scriptPath = os.path.join(thisDir, 'mail.js')


def abort(msg):
    # Print to stderr and terminate with a failure exit code.
    sys.stderr.write(msg + '\n')
    sys.exit(1)


def runScript(cmd, args=[], noResult=False):
    """Run a mail.js command via osascript and return its JSON payload.

    mail.js reports results on lines prefixed 'JSON: '. Exactly one such
    line is expected unless noResult=True (then None is returned and
    output is not captured). Aborts the process on protocol violations.

    NOTE(review): mutable default `args=[]` — harmless here since args is
    only rebound, never mutated, but worth cleaning up.
    """
    if type(args) == str:
        args = [args]
    if noResult:
        captureStdout=False
        stderrToStdout=False
    else:
        # splitLines comes from `from shell import *`.
        captureStdout=splitLines
        stderrToStdout=True
    lines = run(
        ['osascript', scriptPath, cmd] + args,
        captureStdout=captureStdout,
        stderrToStdout=stderrToStdout
    ).stdout
    jsons = []
    for l in lines:
        if l.startswith('JSON: '):
            l = l[len('JSON: '):]
            jsons.append(json.loads(l))
    if len(jsons) == 1:
        return jsons[0]
    elif len(jsons) == 0:
        if noResult:
            return None
        else:
            abort("No JSON output found for mail.js command " + cmd + ": " + str(lines))
    else:
        abort("Too many JSON outputs found for mail.js command " + cmd + ": " + str(lines))


def getSelectedMsgs():
    # Messages currently selected in Mail.app, as dicts.
    return runScript('selected-messages-json')


def moveMailsToSnoozed(msgIds):
    # Move the given messages into the Snoozed mailbox.
    runScript('snooze-messages', msgIds, noResult=True)


def getSnoozedMsgIdsFromMail():
    # IDs of all messages currently in the Snoozed mailbox.
    return runScript('snoozed-ids-json')


def unsnoozeEmail(msgId):
    # Move one message back out of the Snoozed mailbox.
    runScript('unsnooze-message', msgId, noResult=True)


def askForSnoozeDate(infoLines):
    # Interactively ask the user when the mails should reappear,
    # offering suggestions relative to the current time.
    now = time.time()
    suggestions = timeSuggest.suggestTime(now)
    return inputDateTime.enterDateTime(infoLines, suggestions)


def snoozeMails(db):
    """Snooze the messages currently selected in Mail.app.

    Shows up to 2-3 subjects as a preview, asks for a wake-up time,
    records each message in the DB, then moves them in Mail.app.
    """
    msgs = getSelectedMsgs()
    if len(msgs) == 0:
        abort("No messages selected in Mail.app")
    infoLines = []
    n = 2
    # Avoid a silly "(1 more messages)" line when exactly 3 are selected.
    if len(msgs) == n + 1:
        n += 1
    for i in range(n):
        if i < len(msgs):
            m = msgs[i]
            infoLines.append(m['subject'] + ' (' + m['from'] + ')')
    delta = len(msgs) - len(infoLines)
    if delta > 0:
        infoLines.append(f'({delta} more messages)')
    wakeUp = askForSnoozeDate(infoLines)
    msgIds = []
    # We first add the mails to our DB. If the move in Mail.app then fails for some reasons,
    # we only have some superfluous mails in the DB. If we did it the other way round, then
    # we would have snoozed emails in Mail.app without an record in the DB.
    for m in msgs:
        entry = snoozeDb.SnoozeDbEntry.fromJsonString(m, wakeUp)
        print(f"Snoozing email {repr(entry.subject)} ({entry.sender}) until {formatTime(wakeUp)}")
        db.addOrUpdateEntry(entry)
        msgIds.append(entry.msgId)
    moveMailsToSnoozed(msgIds)


def tryUnsnooze(db, force):
    """Unsnooze every DB entry whose wake-up time has passed (all, if force)."""
    t = time.time()
    if force:
        t = sys.float_info.max
    entries = db.getEntries(t)
    if len(entries) == 0:
        print("No emails found that should be unsnoozed.")
    # We first unsnooze the mail in Mail.app, then delete it from the DB. This way, an error
    # would leave at most superfluous entries in the DB. We do not want emails in the Snooze
    # folder without reference in the DB.
    for e in entries:
        print(f"Unsnoozing {repr(e.subject)} ({e.sender})")
        unsnoozeEmail(e.msgId)
        db.deleteEntry(e.msgId)


def formatTime(f):
    # Render a Unix timestamp in local time as 'YYYY-MM-DD HH:MM'.
    local = time.localtime(f)
    return time.strftime("%Y-%m-%d %H:%M", local)


def displaySnoozeDb(db):
    """Print all snoozed entries, most recent wake-up last."""
    allEntries = db.getEntries()
    allEntries.reverse()  # most recent last
    if len(allEntries) == 0:
        print('No snoozed emails')
    for e in allEntries:
        timeStr = formatTime(e.wakeUp)
        print(f"[{timeStr}] {e.sender}: {e.subject}")
StarcoderdataPython
1735027
#{{{ Marathon from default import * #}}} Marathon def test(): set_java_recorded_version("1.8.0_271") if window('My Java Http Server-[Stopped]'): select('Server listening on port', '8082') click('...') if window('Select root directory'): select('JFileChooser_0', '#C/jruby-parser') close() click('button') if window('Select maintenance page'): select('JFileChooser_0', '#C/README.txt') close() assert_p('Maintenance page', 'Text', 'C:\\Users\\User\\Downloads\\marathon\\README.txt') assert_p('Web root directory', 'Text', 'C:\\Users\\User\\Downloads\\marathon\\jruby-parser') assert_p('Server listening on port', 'Text', '8082') window_closed('My Java Http Server-[Stopped]') close() set_java_recorded_version("1.8.0_271") if window('My Java Http Server-[Stopped]'): rightclick('Server listening on port') assert_p('Web root directory', 'Text', 'C:\\Users\\User\\Downloads\\marathon\\jruby-parser') assert_p('Maintenance page', 'Text', 'C:\\Users\\User\\Downloads\\marathon\\README.txt') close() pass
StarcoderdataPython
30196
"""bitFlyer Lightning web-session API: place/cancel orders through the
website endpoints (instead of the public REST API) using a logged-in
browser-style session."""
import json
from urllib.parse import urlencode

import requests
from bs4 import BeautifulSoup

from .api import BitflyerApi


class BitflyerApiWithWebOrder(BitflyerApi):
    """BitflyerApi variant that routes order creation through the web API."""

    def __init__(self, ccxt, login_id, password, account_id,
                 device_id=None, device_token=None):
        super().__init__(ccxt)
        self.api = BitFlyerWebAPI(
            login_id, password, account_id, device_id, device_token)
        self.api.login()

    def create_order(self, symbol, type_, side, size, price=0,
                     minute_to_expire=43200, time_in_force='GTC'):
        """Create an order via the web endpoint; returns {'id': order_ref_id}.

        Re-logs-in on status -501 (session expired) before raising.
        """
        self.api.set_timeout(20)
        res = self._exec(self.api.send_order, symbol, type_.upper(),
                         side.upper(), size, price, minute_to_expire,
                         time_in_force)
        # Example response:
        # {'status': 0,
        #  'error_message': None,
        #  'data': {'order_ref_id': 'JRF20180509-220225-476540'}}
        #
        # Known status codes:
        #     0: success
        #  -501: session expired
        #  -153: minimum size >= 0.01
        #  ...
        st, err = res['status'], res['error_message']
        if st != 0:
            if res['status'] == -501:
                self.api.login()
            raise Exception(f'create_order: {st}, {err}')
        return {'id': res['data']['order_ref_id']}


class BitFlyerWebAPI:
    """Thin wrapper over lightning.bitflyer.jp's in-browser trade API."""

    def __init__(self, login_id, password, account_id,
                 device_id=None, device_token=None):
        self.login_id = login_id
        self.password = password
        self.account_id = account_id
        # Optional pre-trusted device cookies to skip device confirmation.
        self.device_id = device_id
        self.device_token = device_token
        self.domain = 'lightning.bitflyer.jp'
        self.url = 'https://lightning.bitflyer.jp/api/trade'
        self.headers = {
            'User-agent': 'Mozilla/5.0 (X11; Linux x86_64) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/65.0.3325.181 Safari/537.36',
            'Content-Type': 'application/json; charset=utf-8',
            'X-Requested-With': 'XMLHttpRequest',
        }
        self.timeout = (10, 10)
        self.session = None

    def set_timeout(self, sec):
        # (connect, read) timeout for subsequent requests.
        self.timeout = (sec, sec)

    def login(self):
        """Open a session, scrape the CSRF token from the login page, and sign in."""
        s = requests.Session()
        if self.device_id and self.device_token:
            s.cookies.set(
                'device_id', self.device_id, domain=self.domain)
            s.cookies.set(
                'device_token', self.device_token, domain=self.domain)
        r = s.get('https://' + self.domain)
        params = {
            'LoginId': self.login_id,
            # Fixed: the checked-in copy contained an invalid '<PASSWORD>'
            # placeholder; the credential stored in __init__ is used.
            'password': self.password,
            '__RequestVerificationToken':
                BeautifulSoup(r.text, 'html.parser').find(
                    attrs={'name': '__RequestVerificationToken'}).get('value'),
        }
        s.post('https://' + self.domain, data=params)
        self.session = s

    def post(self, path, param):
        """POST a JSON body to the trade API; account_id/lang are added automatically."""
        url = self.url + path
        param['account_id'] = self.account_id
        param['lang'] = 'ja'
        data = json.dumps(param).encode('utf-8')
        res = self.session.post(url, data=data, headers=self.headers,
                                timeout=self.timeout)
        return json.loads(res.text)

    def get(self, path, param=None):
        """GET from the trade API with account_id/lang/v query params added."""
        if not param:
            param = {}
        param['account_id'] = self.account_id
        param['lang'] = 'ja'
        param['v'] = '1'
        url = self.url + path + '?' + urlencode(param)
        res = self.session.get(url, headers=self.headers,
                               timeout=self.timeout)
        return json.loads(res.text)

    def param(self, symbol, ord_type, side, price=0, size=0,
              minute_to_expire=43200, trigger=0, offset=0):
        # Build a generic order parameter dict.
        return {
            'product_code': symbol,
            'ord_type': ord_type,
            'side': side,
            'price': price,
            'size': size,
            'minuteToExpire': minute_to_expire,
            'trigger': trigger,
            'offset': offset,
        }

    def health(self, symbol):
        param = {'product_code': symbol}
        return self.get('/gethealth', param)

    def ticker(self, symbol):
        param = {
            'product_code': symbol,
            'offset_seconds': 300,
            'v': 1,
        }
        return self.get('/ticker', param)

    def all_tickers(self):
        param = {'v': 1}
        return self.get('/ticker/all', param)

    def ticker_data(self, symbol):
        param = {'product_code': symbol}
        return self.get('/tickerdata', param)

    def send_order(self, symbol, type_, side, size, price=0,
                   minute_to_expire=43200, time_in_force='GTC'):
        param = {
            'product_code': symbol,
            'ord_type': type_,
            'side': side,
            'price': price,
            'size': size,
            'minuteToExpire': minute_to_expire,
            'time_in_force': time_in_force,
            'is_check': False,
        }
        return self.post('/sendorder', param)

    def cancel_order(self, symbol, order_id, parent_order_id):
        param = {
            'product_code': symbol,
            'order_id': order_id,
            'parent_order_id': parent_order_id,
        }
        return self.post('/cancelorder', param)

    def cancel_all_order(self, symbol):
        param = {
            'product_code': symbol,
        }
        return self.post('/cancelallorder', param)

    def get_collateral(self, symbol):
        param = {
            'product_code': symbol,
        }
        return self.post('/getmyCollateral', param)

    def my_board_orders(self, symbol):
        param = {
            'product_code': symbol,
        }
        return self.post('/getMyBoardOrders', param)

    def my_child_order(self, symbol, order_id):
        param = {
            'product_code': symbol,
            'order_id': order_id,
        }
        return self.post('/getMyChildOrder', param)

    def my_executions(self, symbol, count):
        param = {
            'product_code': symbol,
            'number_of_executions': count,
        }
        return self.post('/getmyexecutionhistory', param)

    def send_chat(self, message):
        param = {
            'channel': 'MAIN_JP',
            'nickname': '',
            'message': message,
        }
        return self.post('/sendchat', param)
StarcoderdataPython
3389203
from pathlib import Path from scipy.interpolate import interp1d import numpy as np from msdsl.rf import s4p_to_step THIS_DIR = Path(__file__).resolve().parent TOP_DIR = THIS_DIR.parent.parent COMPARISON_FILE = 'peters_01_0605_B1_thru.s4p' COMPARISON_TOVER = 0.1e-12 COMPARISON_TDUR = 10e-9 COMPARISON_XDATA = [ 2.00e-09, 2.01e-09, 2.02e-09, 2.03e-09, 2.04e-09, 2.05e-09, 2.06e-09, 2.07e-09, 2.08e-09, 2.09e-09, 2.10e-09, 2.11e-09, 2.12e-09, 2.13e-09, 2.14e-09, 2.15e-09, 2.16e-09, 2.17e-09, 2.18e-09, 2.19e-09, 2.20e-09, 2.21e-09, 2.22e-09, 2.23e-09, 2.24e-09, 2.25e-09, 2.26e-09, 2.27e-09, 2.28e-09, 2.29e-09, 2.30e-09, 2.31e-09, 2.32e-09, 2.33e-09, 2.34e-09, 2.35e-09, 2.36e-09, 2.37e-09, 2.38e-09, 2.39e-09, 2.40e-09, 2.41e-09, 2.42e-09, 2.43e-09, 2.44e-09, 2.45e-09, 2.46e-09, 2.47e-09, 2.48e-09, 2.49e-09, 2.50e-09, 2.51e-09, 2.52e-09, 2.53e-09, 2.54e-09, 2.55e-09, 2.56e-09, 2.57e-09, 2.58e-09, 2.59e-09, 2.60e-09, 2.61e-09, 2.62e-09, 2.63e-09, 2.64e-09, 2.65e-09, 2.66e-09, 2.67e-09, 2.68e-09, 2.69e-09, 2.70e-09, 2.71e-09, 2.72e-09, 2.73e-09, 2.74e-09, 2.75e-09, 2.76e-09, 2.77e-09, 2.78e-09, 2.79e-09, 2.80e-09, 2.81e-09, 2.82e-09, 2.83e-09, 2.84e-09, 2.85e-09, 2.86e-09, 2.87e-09, 2.88e-09, 2.89e-09, 2.90e-09, 2.91e-09, 2.92e-09, 2.93e-09, 2.94e-09, 2.95e-09, 2.96e-09, 2.97e-09, 2.98e-09, 2.99e-09, 3.00e-09, 3.01e-09, 3.02e-09, 3.03e-09, 3.04e-09, 3.05e-09, 3.06e-09, 3.07e-09, 3.08e-09, 3.09e-09, 3.10e-09, 3.11e-09, 3.12e-09, 3.13e-09, 3.14e-09, 3.15e-09, 3.16e-09, 3.17e-09, 3.18e-09, 3.19e-09, 3.20e-09, 3.21e-09, 3.22e-09, 3.23e-09, 3.24e-09, 3.25e-09, 3.26e-09, 3.27e-09, 3.28e-09, 3.29e-09, 3.30e-09, 3.31e-09, 3.32e-09, 3.33e-09, 3.34e-09, 3.35e-09, 3.36e-09, 3.37e-09, 3.38e-09, 3.39e-09, 3.40e-09, 3.41e-09, 3.42e-09, 3.43e-09, 3.44e-09, 3.45e-09, 3.46e-09, 3.47e-09, 3.48e-09, 3.49e-09, 3.50e-09, 3.51e-09, 3.52e-09, 3.53e-09, 3.54e-09, 3.55e-09, 3.56e-09, 3.57e-09, 3.58e-09, 3.59e-09, 3.60e-09, 3.61e-09, 3.62e-09, 3.63e-09, 3.64e-09, 3.65e-09, 3.66e-09, 3.67e-09, 
3.68e-09, 3.69e-09, 3.70e-09, 3.71e-09, 3.72e-09, 3.73e-09, 3.74e-09, 3.75e-09, 3.76e-09, 3.77e-09, 3.78e-09, 3.79e-09, 3.80e-09, 3.81e-09, 3.82e-09, 3.83e-09, 3.84e-09, 3.85e-09, 3.86e-09, 3.87e-09, 3.88e-09, 3.89e-09, 3.90e-09, 3.91e-09, 3.92e-09, 3.93e-09, 3.94e-09, 3.95e-09, 3.96e-09, 3.97e-09, 3.98e-09, 3.99e-09, 4.00e-09 ] COMPARISON_YDATA = [ -0.0024284965177232163, -0.0025258034975130426, -0.00262530131708786, -0.0026377144850399338, -0.0025113087817993512, -0.0023049105775929364, -0.002164866258496961, -0.002210337554403723, -0.002421010996463318, -0.002634038764233514, -0.002670978485361768, -0.002497132700280082, -0.0022702188097917537, -0.002216171296070835, -0.002422403117026467, -0.002732291147131853, -0.0028572345283530013, -0.002619087923836228, -0.0020517976540216786, -0.0011053688328478736, 0.0010393122569787507, 0.006666469052205385, 0.019651566915632072, 0.04461128413586859, 0.08519545808812988, 0.14222653035323018, 0.21265755815633008, 0.29003580043764277, 0.3664290829035581, 0.43499658536662783, 0.49202318740506507, 0.5375144455519761, 0.5742125740195771, 0.6056969954601125, 0.6346179954354738, 0.6618897886250497, 0.6870233290361907, 0.7091291639713331, 0.7278457216269115, 0.7436566787936149, 0.7575507823740196, 0.770400317394081, 0.7825355319183765, 0.7937626317092986, 0.8037213619113166, 0.8122623712073024, 0.8195702485755081, 0.8259914763112522, 0.8317527556239167, 0.8368116325078789, 0.8409492100667857, 0.8440146710110406, 0.8461198220457283, 0.8476332429139357, 0.8489845571157791, 0.8504309888236287, 0.8519580683324245, 0.8533798771743847, 0.8545591063258953, 0.855590112028882, 0.8568261379700702, 0.8587452727607315, 0.8617507852015089, 0.8660231765076581, 0.8714825598303119, 0.8778372763290722, 0.8846523649549297, 0.8913928914722556, 0.8974523137928876, 0.9022116485294115, 0.9051588763182451, 0.9060426405810592, 0.9049853968733684, 0.9024804143371944, 0.8992506999177994, 0.8960230495363251, 0.8933189748234018, 0.8913558604677805, 
0.8900921682240042, 0.889375500772014, 0.8891037630852593, 0.8893114503012348, 0.8901405253948554, 0.8917209423725383, 0.8940354140617599, 0.8968522037541476, 0.8997744087027381, 0.902391732455248, 0.9044623727846384, 0.9060294734279501, 0.907403727094814, 0.9090103460426376, 0.9111708241770301, 0.913927669158789, 0.9170005727934443, 0.919894623635226, 0.922103053503182, 0.9233028774958857, 0.9234549041770428, 0.9227781304900433, 0.9216344387551078, 0.9203942887453878, 0.9193434171427084, 0.9186506568322375, 0.9183795287500415, 0.918514154181342, 0.9189827495905671, 0.9196803734979541, 0.9204976918755897, 0.9213517081340168, 0.9222019946952664, 0.9230385010506021, 0.9238467048251988, 0.9245771077275047, 0.925148508986991, 0.925491663394712, 0.9256071081432646, 0.9255943284056096, 0.9256247956687176, 0.9258697110636565, 0.9264260589025685, 0.9272859452280833, 0.9283621447318978, 0.9295420024680494, 0.9307247937165422, 0.9318182868042983, 0.9327116856775411, 0.9332690907797115, 0.9333748498374097, 0.933016297491681, 0.9323460258425642, 0.9316627874974778, 0.9312967791313135, 0.9314521490238252, 0.9320987592293082, 0.9329845856668684, 0.9337691700325887, 0.9342032836934432, 0.9342510250905791, 0.9340873400178287, 0.9339812077295578, 0.9341410755490528, 0.9346131881286116, 0.9352813268636024, 0.9359512304431018, 0.9364586079323102, 0.9367400718923778, 0.9368419841899855, 0.9368822378112068, 0.9369977085139112, 0.9373013315400159, 0.9378540105016578, 0.9386472966933299, 0.9395981218863195, 0.9405652938209238, 0.9413939556140499, 0.9419762266374518, 0.9422977585560056, 0.9424396665646563, 0.9425292187230404, 0.9426663925527071, 0.94287183678349, 0.9430889830338611, 0.9432362067343246, 0.943270295907327, 0.943215808583266, 0.9431414161663697, 0.9431042218262451, 0.9431063311939107, 0.9430973876756895, 0.9430209355252819, 0.9428688902216569, 0.9427023228140816, 0.9426220546923895, 0.9427094130112266, 0.9429782868925911, 0.9433700220537617, 0.9437916797437347, 
0.9441698038076589, 0.9444859966714728, 0.9447784343708449, 0.945118288332281, 0.9455821797022868, 0.946234467003473, 0.9471166281040084, 0.9482323576767672, 0.9495248007161452, 0.9508590771309924, 0.9520320127834603, 0.9528204486751757, 0.9530545227208639, 0.9526811613934134, 0.9517830884035554, 0.9505423128982529, 0.9491689600508987, 0.9478346040889727, 0.9466419073614913, 0.9456358413896552, 0.9448356790246695, 0.9442590560825345, 0.9439228089907333 ] def test_s4p(): # read S4P file t_step, v_step = s4p_to_step(TOP_DIR / COMPARISON_FILE, COMPARISON_TOVER, COMPARISON_TDUR) # build function f_step = interp1d(t_step, v_step) # calculate step response at known points v_meas = f_step(COMPARISON_XDATA) # perform the comparison assert np.all(np.isclose(v_meas, COMPARISON_YDATA))
StarcoderdataPython
95313
import os

# Count the images per synset in an ImageNet checkout.  Expected layout:
#
#   ILSVRC/
#     ILSVRC2012_train/data/<wnid>/<wnid>_ID.JPEG ...
#     ILSVRC2012_validation/data/<wnid>/...
#
# One line per class is printed: "<wnid> <train count> <val count>".

# change path to imagenet folder here
path = "/export/compvis-nfs/group/datasets/ILSVRC"
train_path = os.path.join(path, "ILSVRC2012_train", "data")
val_path = os.path.join(path, "ILSVRC2012_validation", "data")


def _image_count(base, wnid):
    # Number of directory entries for one synset under `base`.
    return len(os.listdir(os.path.join(base, wnid)))


classes = os.listdir(train_path)
for wnid in classes:
    print(wnid, _image_count(train_path, wnid), _image_count(val_path, wnid))
StarcoderdataPython
1791595
# I. S<NAME>  (author name redacted in corpus)
# ID of the accepted submission: 65303331
import math  # retained from the original file; no longer used by the fix


def is_power_of_four(number: int) -> bool:
    """Return True iff ``number`` is an exact power of four (4**k, k >= 0).

    Bug fix: the original compared ``math.log(number, 4)`` against an
    integer with a two-decimal tolerance and returned the *strings*
    'True'/'False' despite the ``-> bool`` annotation.  The tolerance
    misclassified near-powers (e.g. 257: log4(257) ~= 4.0028 passed the
    check) and large exact powers could fail by float rounding; 0 raised
    ``ValueError``.  Exact integer division has none of these problems.
    """
    if number < 1:
        # 0 and negatives are never powers of four (the old code raised on 0).
        return False
    while number % 4 == 0:
        number //= 4
    return number == 1


if __name__ == "__main__":
    # print(True)/print(False) emits the same text the old string return
    # values produced, so the script's visible output is unchanged.
    print(is_power_of_four(int(input())))
StarcoderdataPython
68996
# NOTE: Python 2 source -- ``print`` is used as a statement below.
import math

# Load previously collected point-pair records; each record must unpack
# into (x1, y1, x2, y2).
# WARNING(review): ``eval`` on file contents executes arbitrary code --
# safe only if "error_collection" is a trusted, locally produced file.
with open("error_collection") as f:
    data = eval(f.read())


def find_erdst(x1, y1, x2, y2):
    # Euclidean distance between the points (x1, y1) and (x2, y2).
    return math.sqrt( (x1-x2)**2 + (y1 - y2) ** 2)


# Sum the per-record distances, then report the mean.
error_distance = 0
for i in data:
    error_distance += find_erdst(*i)

# float() forces true division under Python 2's integer ``/``.
print float(error_distance) / len(data)
StarcoderdataPython
3328259
#!/usr/share/env python
# NOTE(review): the shebang path is /usr/share/env -- the conventional
# location is /usr/bin/env; confirm before relying on direct execution.
# Python 2 script (``print`` statements below).
from ciscoconfparse import CiscoConfParse

# Parse the IOS config file and collect every parent line matching
# "crypto map CRYPTO...".
conf = CiscoConfParse("cisco_ipsec.txt")
crypto = conf.find_objects(r"crypto map CRYPTO")
#print crypto
print "\nCRYPTO MAPS:"
for c in crypto:
    # Parent line prefixed with "!", then each indented child line.
    print "!\n" + c.text
    for chil in c.children:
        print chil.text
    print "\n"
StarcoderdataPython
3286604
# coding=utf-8
"""Small search-support containers and helpers.

``LifoList``/``FifoList`` are deque-backed fringes (LIFO for depth-first,
FIFO for breadth-first search), ``BoundedPriorityQueue`` is a min-heap with
an optional size cap, and ``InverseTransformSampler`` draws weighted random
choices.  ``argmin``/``argmax`` break ties uniformly at random.
"""
import heapq
from collections import deque
import random

try:
    from itertools import izip  # Python 2
except ImportError:
    # Compatibility fix: on Python 3 the built-in zip is already lazy and
    # itertools.izip no longer exists; the hard import used to crash here.
    izip = zip


class LifoList(deque):
    '''List that pops from the end.'''

    def sorted(self):
        # Pop order is right-to-left, so the pop-order view is the reverse.
        return list(self)[::-1]


class FifoList(deque):
    '''List that pops from the beginning.'''

    def pop(self):
        return super(FifoList, self).popleft()

    def sorted(self):
        return list(self)


class BoundedPriorityQueue(object):
    """Min-heap keeping at most ``limit`` items; the largest is evicted.

    ``limit=None`` (or 0) means unbounded.
    """

    def __init__(self, limit=None, *args):
        self.limit = limit
        self.queue = list()

    def __getitem__(self, val):
        return self.queue[val]

    def __len__(self):
        return len(self.queue)

    def append(self, x):
        heapq.heappush(self.queue, x)
        if self.limit and len(self.queue) > self.limit:
            # Over capacity: drop the single worst (largest) element.
            self.queue.remove(heapq.nlargest(1, self.queue)[0])

    def pop(self):
        return heapq.heappop(self.queue)

    def extend(self, iterable):
        for x in iterable:
            self.append(x)

    def clear(self):
        # BUG FIX: the old implementation removed elements while iterating
        # the queue via __getitem__, which skips items and leaves the queue
        # non-empty.  Deleting the slice empties it in one step.
        del self.queue[:]

    def remove(self, x):
        self.queue.remove(x)

    def sorted(self):
        return heapq.nsmallest(len(self.queue), self.queue)


class InverseTransformSampler(object):
    """Sample from ``objects`` with probability proportional to ``weights``."""

    def __init__(self, weights, objects):
        assert weights and objects and len(weights) == len(objects)
        self.objects = objects
        tot = float(sum(weights))
        if tot == 0:
            # Degenerate all-zero weights: fall back to a uniform draw.
            tot = len(weights)
            weights = [1 for x in weights]
        accumulated = 0
        self.probs = []  # running CDF, parallel to self.objects
        for w, x in izip(weights, objects):
            p = w / tot
            accumulated += p
            self.probs.append(accumulated)

    def sample(self):
        # Inverse-transform sampling: first CDF entry >= a uniform target.
        target = random.random()
        i = 0
        while i + 1 != len(self.probs) and target > self.probs[i]:
            i += 1
        return self.objects[i]


def _generic_arg(iterable, function, better_function):
    # Shared core of argmin/argmax; ties broken uniformly at random.
    values = [function(x) for x in iterable]
    better_value = better_function(values)
    candidates = [x for x, value in zip(iterable, values)
                  if value == better_value]
    return random.choice(candidates)


def argmin(iterable, function):
    """Element of ``iterable`` minimizing ``function`` (random tie-break)."""
    return _generic_arg(iterable, function, min)


def argmax(iterable, function):
    """Element of ``iterable`` maximizing ``function`` (random tie-break)."""
    return _generic_arg(iterable, function, max)
StarcoderdataPython
3215990
# -*- coding: utf-8 -*-
# NOTE: Python 2 module -- comma-style ``except X, err`` clauses below.
from __future__ import division, unicode_literals
import os
from . import updateBackRefs
from . import updateCrossRefs
from . import updateBiblio
from . import updateCanIUse
from . import updateLinkDefaults
from . import updateTestSuites
from . import updateLanguages
from . import updateWpt
from . import manifest
from .. import config
from ..messages import *


def update(anchors=False, backrefs=False, biblio=False, caniuse=False, linkDefaults=False, testSuites=False, languages=False, wpt=False, path=None, dryRun=False, force=False):
    # Refresh the spec-data caches.  Each boolean flag selects one data
    # source; with all flags False every source is updated.  ``dryRun``
    # is threaded through to the individual updaters, which are expected
    # to skip writes when it is set.
    if path is None:
        path = config.scriptPath("spec-data")
    # Update via manifest by default, falling back to a full update only if failed or forced.
    if not force:
        success = manifest.updateByManifest(path=path, dryRun=dryRun)
        if not success:
            say("Falling back to a manual update...")
            force = True
    if force:
        # If all are False, update everything
        updateAnyway = not (anchors or backrefs or biblio or caniuse or linkDefaults or testSuites or languages or wpt)
        if anchors or updateAnyway:
            updateCrossRefs.update(path=path, dryRun=dryRun)
        if backrefs or updateAnyway:
            updateBackRefs.update(path=path, dryRun=dryRun)
        if biblio or updateAnyway:
            updateBiblio.update(path=path, dryRun=dryRun)
        if caniuse or updateAnyway:
            updateCanIUse.update(path=path, dryRun=dryRun)
        if linkDefaults or updateAnyway:
            updateLinkDefaults.update(path=path, dryRun=dryRun)
        if testSuites or updateAnyway:
            updateTestSuites.update(path=path, dryRun=dryRun)
        if languages or updateAnyway:
            updateLanguages.update(path=path, dryRun=dryRun)
        if wpt or updateAnyway:
            updateWpt.update(path=path, dryRun=dryRun)
        # Rebuild the manifest so the fast path works on the next run.
        manifest.createManifest(path=path, dryRun=dryRun)


def fixupDataFiles():
    '''
    Checks the readonly/ version is more recent than your current mutable data files.
    This happens if I changed the datafile format and shipped updated files as a result;
    using the legacy files with the new code is quite bad!
    '''
    # "local" = the mutable spec-data dir; "remote" = the shipped readonly/
    # copy (see localPath/remotePath below -- both are on disk).
    try:
        localVersion = int(open(localPath("version.txt"), 'r').read())
    except IOError:
        localVersion = None
    try:
        remoteVersion = int(open(remotePath("version.txt"), 'r').read())
    except IOError, err:
        warn("Couldn't check the datafile version. Bikeshed may be unstable.\n{0}", err)
        return
    if localVersion == remoteVersion:
        # Cool
        return
    # If versions don't match, either the remote versions have been updated
    # (and we should switch you to them, because formats may have changed),
    # or you're using a historical version of Bikeshed (ditto).
    try:
        for filename in os.listdir(remotePath()):
            copyanything(remotePath(filename), localPath(filename))
    except Exception, err:
        warn("Couldn't update datafiles from cache. Bikeshed may be unstable.\n{0}", err)
        return


def updateReadonlyDataFiles():
    '''
    Like fixupDataFiles(), but in the opposite direction -- copies all
    my current mutable data files into the readonly directory.
    This is a debugging tool to help me quickly update the built-in data files,
    and will not be called as part of normal operation.
    '''
    try:
        for filename in os.listdir(localPath()):
            # Skip the readonly/ mirror itself to avoid recursive copies.
            if filename.startswith("readonly"):
                continue
            copyanything(localPath(filename), remotePath(filename))
    except Exception, err:
        warn("Error copying over the datafiles:\n{0}", err)
        return


def copyanything(src, dst):
    # Copy ``src`` over ``dst`` whether it is a directory or a single file:
    # try a tree copy first, and on ENOTDIR/EINVAL fall back to copying one
    # file.  Any pre-existing ``dst`` tree is removed first.
    import shutil
    import errno
    try:
        shutil.rmtree(dst, ignore_errors=True)
        shutil.copytree(src, dst)
    except OSError as exc:
        if exc.errno in [errno.ENOTDIR, errno.EINVAL]:
            shutil.copy(src, dst)
        else:
            raise


def localPath(*segs):
    # Mutable spec-data directory next to the script.
    return config.scriptPath("spec-data", *segs)


def remotePath(*segs):
    # Shipped readonly mirror of the spec-data directory.
    return config.scriptPath("spec-data", "readonly", *segs)
StarcoderdataPython
1628190
<filename>camera_images_cleaning/plot_cleaned_images.py import photon_stream as ps from fact.plotting import camera, mark_pixel import numpy as np from fact.instrument.camera import get_neighbor_matrix, get_border_pixel_mask import matplotlib.pyplot as plt from matplotlib.patches import Ellipse from matplotlib.backends.backend_pdf import PdfPages from feature_stream import cleaning, calc_hillas_features_image, phs2image, calc_hillas_features_phs, facttools_cleaning, is_simulation_event from tqdm import tqdm import click from astropy.table import Table import warnings from fact.io import read_data # from IPython import embed picture_thresh = 5 boundary_thresh = 2 @click.command() @click.argument('method', required=True) @click.argument('path', required=True) @click.argument('file', required=True) @click.argument('feat', required=True) @click.option('-n', '--number', default=100, type=int, help='Number of events to plot') def main(method, path, file, feat, number): border_pix = get_border_pixel_mask() if method == "thresholds": reader = ps.EventListReader('/net/big-tank/POOL/projects/fact/photon-stream/stream_data/{}/{}.phs.jsonl.gz'.format(path, file)) with PdfPages('cleaning_thresh_{}_{}.pdf'.format(feat, file)) as pdf: for i in tqdm(range(number)): fig = plt.figure() ax = fig.add_axes([0.05, 0.05, 0.9, 0.9]) #ax.set_axis_off() event = next(reader) lol = event.photon_stream.list_of_lists lol = [[t for t in l if ((35 <= t) & (t < 75))] for l in lol] image = phs2image(lol)#, lower=30, upper=70) cleaned_pix = facttools_cleaning(image, lol, 35, 75, picture_thresh, boundary_thresh) with warnings.catch_warnings(): warnings.simplefilter("ignore") arrival_times = np.array([np.nanmedian(l) for l in lol]) # cleaned_pix = cleaning(image, lol, picture_thresh, boundary_thresh) if len(cleaned_pix[cleaned_pix != 0]) > 1: # border_ph = [(border_pix[i] and cleaned_pix[i]) for i in range(1440)] # leakage = image[border_ph].sum()/image[cleaned_pix].sum() df = 
calc_hillas_features_image(image, cleaned_pix) # ell = Ellipse( # [df['cog_x'], df['cog_y']], # df['length']*2, # df['width']*2, # angle=np.rad2deg(df['delta']), # fill=False, linewidth=2, color='b' # ) # ax.add_patch(ell) ell = Ellipse( [df['cog_x'], df['cog_y']], df['length']*4, df['width']*4, angle=np.rad2deg(df['delta']), fill=False, linewidth=1.5, color='b' ) # ax.add_patch(ell) if is_simulation_event(event): fig.suptitle('run {} event {} reuse {}'.format(event.simulation_truth.run, event.simulation_truth.event, event.simulation_truth.reuse)) else: fig.suptitle('{} event {} delta {}'.format(file, event.observation_info.event, df['delta'])) if feat == 'arrival_times': with warnings.catch_warnings(): warnings.simplefilter("ignore") x = arrival_times-np.nanmean(arrival_times) x[np.isnan(x)] = 0 c = camera(x, cmap='Spectral', ax=ax) mark_pixel(cleaned_pix, color='k', linewidth=2.5) else: c = camera(image, cmap='viridis', ax=ax) mark_pixel(cleaned_pix, color=(128/255, 186/255, 38/255), linewidth=2.5) ax.axis('off') fig.colorbar(c) pdf.savefig(fig) ax.cla() plt.close(fig) if method == "DBSCAN": reader = ps.EventListReader('/net/big-tank/POOL/projects/fact/photon-stream/stream_data/{}/{}.phs.jsonl.gz'.format(path, file)) with PdfPages('cleaning_DBSCAN_biggest_{}_{}.pdf'.format(feat, file)) as pdf: for i in tqdm(range(number)): fig = plt.figure() ax = fig.add_axes([0.05, 0.05, 0.9, 0.9]) event = next(reader) # clustering of events clustering = ps.photon_cluster.PhotonStreamCluster(event.photon_stream) if clustering.number > 0: lol = event.photon_stream.list_of_lists image = phs2image(lol) with warnings.catch_warnings(): warnings.simplefilter("ignore") arrival_times = np.array([np.nanmedian(l) for l in lol]) # biggest cluster: biggest_cluster = np.argmax(np.bincount(clustering.labels[clustering.labels != -1])) mask = clustering.labels == biggest_cluster # mask = clustering.labels != -1 xyt = event.photon_stream.point_cloud x, y, t = xyt.T cleaned_pix = 
np.zeros(len(image), dtype=bool) k = 0 cleaned_img = np.zeros(len(image)) for i in range(len(lol)): for j in range(len(lol[i])): k += 1 if mask[k-1]: cleaned_pix[i] = True cleaned_img[i] += 1 cleaned_pix_perc = np.zeros(1440, dtype=bool) for i in range(1440): if cleaned_pix[i] and (cleaned_img[i] > mask.sum() / 200): cleaned_pix_perc[i] = True df = calc_hillas_features_phs(event.photon_stream, clustering) # ell = Ellipse( # [df['cog_x'], df['cog_y']], # df['length']*2, # df['width']*2, # angle=np.rad2deg(df['delta']), # fill=False, linewidth=2, color='b' # ) # ax.add_patch(ell) ell = Ellipse( [df['cog_x'], df['cog_y']], df['length']*4, df['width']*4, angle=np.rad2deg(df['delta']), fill=False, linewidth=1.5, color='b' ) # ax.add_patch(ell) if is_simulation_event(event): fig.suptitle('run {} event {} reuse {}'.format(event.simulation_truth.run, event.simulation_truth.event, event.simulation_truth.reuse)) else: fig.suptitle('{} event {} delta {:.2f}'.format(file, event.observation_info.event, np.rad2deg(df['delta']))) if feat == 'arrival_times': with warnings.catch_warnings(): warnings.simplefilter("ignore") x = arrival_times-np.nanmean(arrival_times) c = camera(x, cmap='viridis', ax=ax) mark_pixel(cleaned_pix, color=(128/255, 186/255, 38/255), linewidth=2.5) else: c = camera(image, cmap='viridis', ax=ax) mark_pixel(cleaned_pix, color=(128/255, 186/255, 38/255), linewidth=2.5) mark_pixel(cleaned_pix_perc, color='red', linewidth=1.5) ax.axis('off') fig.colorbar(c) pdf.savefig(fig) ax.cla() plt.close(fig) if method == "facttools": print('facttools') with PdfPages('cleaning_facttools_{}_{}.pdf'.format(feat, file)) as pdf: t = Table.read('/net/big-tank/POOL/projects/fact/photon-stream/facttools/{}/{}_dl1.fits'.format(path, file)) dl2 = read_data('/home/ksedlaczek/Packages/open_crab_sample_analysis/dl2/crab.hdf5', key='events') for i in tqdm(range(number)): fig = plt.figure() ax = fig.add_axes([0.05, 0.05, 0.9, 0.9]) # if path != 'crab': # fig.suptitle('run {} event {} 
reuse {}'.format(file, t[i]['MCorsikaEvtHeader.fEvtNumber'], t[i]['MCorsikaEvtHeader.fNumReuse'])) # else: # # fig.suptitle('{} event {} delta {:.4f}'.format(file, t[i]['EventNum'], dl2.query('night == 20131104 & run_id == 162 & event_num == {}'.format(t[i]['EventNum']))['delta'].values[0])) t[i]['photoncharge'][t[i]['photoncharge'] < 0] = 0.0 if feat == 'arrival_times': c = camera(t[i]['arrivalTime']-t[i]['arrivalTime'].mean(), cmap='Spectral', ax=ax) # mark_pixel(t[i]['shower'], color='k', linewidth=2.5) ax.axis('off') cb = fig.colorbar(c) cb.set_label(label=r'$t-\bar{t}$ / ns', fontsize=16) else: c = camera(t[i]['photoncharge'], cmap='viridis', ax=ax) ax.axis('off') cb = fig.colorbar(c) cb.set_label(label=r'Number of Photons', fontsize=16) #mark_pixel(t[i]['shower'], color=(128/255, 186/255, 38/255), linewidth=2.5) # mark_pixel(t[i]['shower'], color=(128/255, 186/255, 38/255), linewidth=2.5) pdf.savefig(fig) ax.cla() plt.close(fig) if __name__ == '__main__': main()
StarcoderdataPython
1682030
<reponame>raphaelavalos/ray import pytest import time import yaml import tempfile import shutil import unittest import ray from ray.tests.test_autoscaler import SMALL_CLUSTER, MockProvider, \ MockProcessRunner from ray.autoscaler.autoscaler import StandardAutoscaler from ray.autoscaler.load_metrics import LoadMetrics from ray.autoscaler.node_provider import NODE_PROVIDERS from ray.autoscaler.resource_demand_scheduler import _utilization_score, \ get_bin_pack_residual, get_instances_for from time import sleep TYPES_A = { "m4.large": { "resources": { "CPU": 2 }, "max_workers": 10, }, "m4.4xlarge": { "resources": { "CPU": 16 }, "max_workers": 8, }, "m4.16xlarge": { "resources": { "CPU": 64 }, "max_workers": 4, }, "p2.xlarge": { "resources": { "CPU": 16, "GPU": 1 }, "max_workers": 10, }, "p2.8xlarge": { "resources": { "CPU": 32, "GPU": 8 }, "max_workers": 4, }, } MULTI_WORKER_CLUSTER = dict(SMALL_CLUSTER, **{ "available_instance_types": TYPES_A, }) def test_util_score(): assert _utilization_score({"CPU": 64}, [{"TPU": 16}]) is None assert _utilization_score({"GPU": 4}, [{"GPU": 2}]) == (0.5, 0.5) assert _utilization_score({"GPU": 4}, [{"GPU": 1}, {"GPU": 1}]) == \ (0.5, 0.5) assert _utilization_score({"GPU": 2}, [{"GPU": 2}]) == (2, 2) assert _utilization_score({"GPU": 2}, [{"GPU": 1}, {"GPU": 1}]) == (2, 2) assert _utilization_score({"GPU": 2, "TPU": 1}, [{"GPU": 2}]) == (0, 1) assert _utilization_score({"CPU": 64}, [{"CPU": 64}]) == (64, 64) assert _utilization_score({"CPU": 64}, [{"CPU": 32}]) == (8, 8) assert _utilization_score({"CPU": 64}, [{"CPU": 16}, {"CPU": 16}]) == \ (8, 8) def test_bin_pack(): assert get_bin_pack_residual([], [{"GPU": 2}, {"GPU": 2}]) == \ [{"GPU": 2}, {"GPU": 2}] assert get_bin_pack_residual([{"GPU": 2}], [{"GPU": 2}, {"GPU": 2}]) == \ [{"GPU": 2}] assert get_bin_pack_residual([{"GPU": 4}], [{"GPU": 2}, {"GPU": 2}]) == [] arg = [{"GPU": 2}, {"GPU": 2, "CPU": 2}] assert get_bin_pack_residual(arg, [{"GPU": 2}, {"GPU": 2}]) == [] arg = 
[{"CPU": 2}, {"GPU": 2}] assert get_bin_pack_residual(arg, [{"GPU": 2}, {"GPU": 2}]) == [{"GPU": 2}] def test_get_instances_packing_heuristic(): assert get_instances_for(TYPES_A, {}, 9999, [{"GPU": 8}]) == \ [("p2.8xlarge", 1)] assert get_instances_for(TYPES_A, {}, 9999, [{"GPU": 1}] * 6) == \ [("p2.8xlarge", 1)] assert get_instances_for(TYPES_A, {}, 9999, [{"GPU": 1}] * 4) == \ [("p2.xlarge", 4)] assert get_instances_for(TYPES_A, {}, 9999, [{"CPU": 32, "GPU": 1}] * 3) \ == [("p2.8xlarge", 3)] assert get_instances_for(TYPES_A, {}, 9999, [{"CPU": 64, "GPU": 1}] * 3) \ == [] assert get_instances_for(TYPES_A, {}, 9999, [{"CPU": 64}] * 3) == \ [("m4.16xlarge", 3)] assert get_instances_for(TYPES_A, {}, 9999, [{"CPU": 64}, {"CPU": 1}]) \ == [("m4.16xlarge", 1), ("m4.large", 1)] assert get_instances_for( TYPES_A, {}, 9999, [{"CPU": 64}, {"CPU": 9}, {"CPU": 9}]) == \ [("m4.16xlarge", 1), ("m4.4xlarge", 2)] assert get_instances_for(TYPES_A, {}, 9999, [{"CPU": 16}] * 5) == \ [("m4.16xlarge", 1), ("m4.4xlarge", 1)] assert get_instances_for(TYPES_A, {}, 9999, [{"CPU": 8}] * 10) == \ [("m4.16xlarge", 1), ("m4.4xlarge", 1)] assert get_instances_for(TYPES_A, {}, 9999, [{"CPU": 1}] * 100) == \ [("m4.16xlarge", 1), ("m4.4xlarge", 2), ("m4.large", 2)] assert get_instances_for( TYPES_A, {}, 9999, [{"GPU": 1}] + ([{"CPU": 1}] * 64)) == \ [("m4.16xlarge", 1), ("p2.xlarge", 1)] assert get_instances_for( TYPES_A, {}, 9999, ([{"GPU": 1}] * 8) + ([{"CPU": 1}] * 64)) == \ [("m4.16xlarge", 1), ("p2.8xlarge", 1)] def test_get_instances_respects_max_limit(): types = { "m4.large": { "resources": { "CPU": 2 }, "max_workers": 10, }, "gpu": { "resources": { "GPU": 1 }, "max_workers": 99999, }, } assert get_instances_for(types, {}, 2, [{"CPU": 1}] * 10) == \ [("m4.large", 2)] assert get_instances_for(types, {"m4.large": 9999}, 9999, [{ "CPU": 1 }] * 10) == [] assert get_instances_for(types, {"m4.large": 0}, 9999, [{ "CPU": 1 }] * 10) == [("m4.large", 5)] assert get_instances_for(types, {"m4.large": 
7}, 4, [{ "CPU": 1 }] * 10) == [("m4.large", 3)] assert get_instances_for(types, {"m4.large": 7}, 2, [{ "CPU": 1 }] * 10) == [("m4.large", 2)] class AutoscalingTest(unittest.TestCase): def setUp(self): NODE_PROVIDERS["mock"] = \ lambda config: self.create_provider self.provider = None self.tmpdir = tempfile.mkdtemp() def tearDown(self): self.provider = None del NODE_PROVIDERS["mock"] shutil.rmtree(self.tmpdir) ray.shutdown() def waitForNodes(self, expected, comparison=None, tag_filters={}): MAX_ITER = 50 for i in range(MAX_ITER): n = len(self.provider.non_terminated_nodes(tag_filters)) if comparison is None: comparison = self.assertEqual try: comparison(n, expected) return except Exception: if i == MAX_ITER - 1: raise time.sleep(.1) def create_provider(self, config, cluster_name): assert self.provider return self.provider def write_config(self, config): path = self.tmpdir + "/simple.yaml" with open(path, "w") as f: f.write(yaml.dump(config)) return path def testScaleUpMinSanity(self): config_path = self.write_config(MULTI_WORKER_CLUSTER) self.provider = MockProvider(default_instance_type="m4.large") runner = MockProcessRunner() autoscaler = StandardAutoscaler( config_path, LoadMetrics(), max_failures=0, process_runner=runner, update_interval_s=0) assert len(self.provider.non_terminated_nodes({})) == 0 autoscaler.update() self.waitForNodes(2) autoscaler.update() self.waitForNodes(2) def testRequestBundles(self): config = MULTI_WORKER_CLUSTER.copy() config["min_workers"] = 0 config["max_workers"] = 50 config_path = self.write_config(config) self.provider = MockProvider(default_instance_type="m4.large") runner = MockProcessRunner() autoscaler = StandardAutoscaler( config_path, LoadMetrics(), max_failures=0, process_runner=runner, update_interval_s=0) assert len(self.provider.non_terminated_nodes({})) == 0 autoscaler.update() self.waitForNodes(0) autoscaler.request_resources([{"CPU": 1}]) autoscaler.update() self.waitForNodes(1) assert 
self.provider.mock_nodes[0].instance_type == "m4.large" autoscaler.request_resources([{"GPU": 8}]) autoscaler.update() self.waitForNodes(2) assert self.provider.mock_nodes[1].instance_type == "p2.8xlarge" autoscaler.request_resources([{"CPU": 32}] * 4) autoscaler.update() self.waitForNodes(4) assert self.provider.mock_nodes[2].instance_type == "m4.16xlarge" assert self.provider.mock_nodes[3].instance_type == "m4.16xlarge" def testResourcePassing(self): config = MULTI_WORKER_CLUSTER.copy() config["min_workers"] = 0 config["max_workers"] = 50 config_path = self.write_config(config) self.provider = MockProvider(default_instance_type="m4.large") runner = MockProcessRunner() autoscaler = StandardAutoscaler( config_path, LoadMetrics(), max_failures=0, process_runner=runner, update_interval_s=0) assert len(self.provider.non_terminated_nodes({})) == 0 autoscaler.update() self.waitForNodes(0) autoscaler.request_resources([{"CPU": 1}]) autoscaler.update() self.waitForNodes(1) assert self.provider.mock_nodes[0].instance_type == "m4.large" autoscaler.request_resources([{"GPU": 8}]) autoscaler.update() self.waitForNodes(2) assert self.provider.mock_nodes[1].instance_type == "p2.8xlarge" # TODO (Alex): Autoscaler creates the node during one update then # starts the updater in the enxt update. The sleep is largely # unavoidable because the updater runs in its own thread and we have no # good way of ensuring that the commands are sent in time. autoscaler.update() sleep(0.1) # These checks are done separately because we have no guarantees on the # order the dict is serialized in. runner.assert_has_call("192.168.3.11", "RAY_OVERRIDE_RESOURCES=") runner.assert_has_call("192.168.3.11", "CPU: 2") runner.assert_has_call("172.16.31.10", "RAY_OVERRIDE_RESOURCES=") runner.assert_has_call("172.16.31.10", "CPU: 32") runner.assert_has_call("172.16.31.10", "GPU: 8") if __name__ == "__main__": import sys sys.exit(pytest.main(["-v", __file__]))
StarcoderdataPython
3252894
<gh_stars>1000+
#!/usr/bin/python
# Author : n0fate
# E-Mail <EMAIL>, <EMAIL>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Parser/decryptor for the legacy Apple "kych" keychain database format.
# NOTE(review): this is Python 2 code (xrange, unicode, implicit str/bytes
# mixing); it will not run unmodified on Python 3.

import struct
import datetime
from binascii import unhexlify
from ctypes import *
from .pbkdf2 import pbkdf2
from .Schema import *
from lazagne.config.write_output import print_debug
from lazagne.config.crypto.pyDes import *

ATOM_SIZE = 4                  # table/record offsets are 4-byte big-endian atoms
SIZEOFKEYCHAINTIME = 16        # keychain timestamps are 16-char ASCII blobs
KEYCHAIN_SIGNATURE = "kych"    # magic expected at offset 0 of a valid keychain file
BLOCKSIZE = 8                  # 3DES block size in bytes
KEYLEN = 24                    # 3DES key length in bytes


class _APPL_DB_HEADER(BigEndianStructure):
    # File header: magic, version, and offsets of the schema / auth sections.
    _fields_ = [
        ("Signature", c_char * 4),
        ("Version", c_int),
        ("HeaderSize", c_int),
        ("SchemaOffset", c_int),
        ("AuthOffset", c_int)
    ]


class _APPL_DB_SCHEMA(BigEndianStructure):
    # Schema section header: total size and number of tables that follow.
    _fields_ = [
        ("SchemaSize", c_int),
        ("TableCount", c_int)
    ]


class _KEY_BLOB_REC_HEADER(BigEndianStructure):
    # Header preceding a key-blob record; Dummy pads to the record payload.
    _fields_ = [
        ("RecordSize", c_uint),
        ("RecordCount", c_uint),
        ("Dummy", c_char * 0x7C),
    ]


class _GENERIC_PW_HEADER(BigEndianStructure):
    # Generic-password record: fixed header of column offsets into the record.
    _fields_ = [
        ("RecordSize", c_uint),
        ("RecordNumber", c_uint),
        ("Unknown2", c_uint),
        ("Unknown3", c_uint),
        ("SSGPArea", c_uint),
        ("Unknown5", c_uint),
        ("CreationDate", c_uint),
        ("ModDate", c_uint),
        ("Description", c_uint),
        ("Comment", c_uint),
        ("Creator", c_uint),
        ("Type", c_uint),
        ("ScriptCode", c_uint),
        ("PrintName", c_uint),
        ("Alias", c_uint),
        ("Invisible", c_uint),
        ("Negative", c_uint),
        ("CustomIcon", c_uint),
        ("Protected", c_uint),
        ("Account", c_uint),
        ("Service", c_uint),
        ("Generic", c_uint)
    ]


class _APPLE_SHARE_HEADER(BigEndianStructure):
    # AppleShare password record: column-offset header.
    _fields_ = [
        ("RecordSize", c_uint),
        ("RecordNumber", c_uint),
        ("Unknown2", c_uint),
        ("Unknown3", c_uint),
        ("SSGPArea", c_uint),
        ("Unknown5", c_uint),
        ("CreationDate", c_uint),
        ("ModDate", c_uint),
        ("Description", c_uint),
        ("Comment", c_uint),
        ("Creator", c_uint),
        ("Type", c_uint),
        ("ScriptCode", c_uint),
        ("PrintName", c_uint),
        ("Alias", c_uint),
        ("Invisible", c_uint),
        ("Negative", c_uint),
        ("CustomIcon", c_uint),
        ("Protected", c_uint),
        ("Account", c_uint),
        ("Volume", c_uint),
        ("Server", c_uint),
        ("Protocol", c_uint),
        ("AuthType", c_uint),
        ("Address", c_uint),
        ("Signature", c_uint)
    ]


class _INTERNET_PW_HEADER(BigEndianStructure):
    # Internet-password record: column-offset header.
    _fields_ = [
        ("RecordSize", c_uint),
        ("RecordNumber", c_uint),
        ("Unknown2", c_uint),
        ("Unknown3", c_uint),
        ("SSGPArea", c_uint),
        ("Unknown5", c_uint),
        ("CreationDate", c_uint),
        ("ModDate", c_uint),
        ("Description", c_uint),
        ("Comment", c_uint),
        ("Creator", c_uint),
        ("Type", c_uint),
        ("ScriptCode", c_uint),
        ("PrintName", c_uint),
        ("Alias", c_uint),
        ("Invisible", c_uint),
        ("Negative", c_uint),
        ("CustomIcon", c_uint),
        ("Protected", c_uint),
        ("Account", c_uint),
        ("SecurityDomain", c_uint),
        ("Server", c_uint),
        ("Protocol", c_uint),
        ("AuthType", c_uint),
        ("Port", c_uint),
        ("Path", c_uint)
    ]


class _X509_CERT_HEADER(BigEndianStructure):
    # X.509 certificate record: sizes, type/encoding, and attribute offsets.
    _fields_ = [
        ("RecordSize", c_uint),
        ("RecordNumber", c_uint),
        ("Unknown1", c_uint),
        ("Unknown2", c_uint),
        ("CertSize", c_uint),
        ("Unknown3", c_uint),
        ("CertType", c_uint),
        ("CertEncoding", c_uint),
        ("PrintName", c_uint),
        ("Alias", c_uint),
        ("Subject", c_uint),
        ("Issuer", c_uint),
        ("SerialNumber", c_uint),
        ("SubjectKeyIdentifier", c_uint),
        ("PublicKeyHash", c_uint)
    ]


# http://www.opensource.apple.com/source/Security/Security-55179.1/include/security_cdsa_utilities/KeySchema.h
# http://www.opensource.apple.com/source/libsecurity_keychain/libsecurity_keychain-36940/lib/SecKey.h
class _SECKEY_HEADER(BigEndianStructure):
    # Public/private/symmetric key record header (see Apple KeySchema.h above).
    _fields_ = [
        ("RecordSize", c_uint32),
        ("RecordNumber", c_uint32),
        ("Unknown1", c_uint32),
        ("Unknown2", c_uint32),
        ("BlobSize", c_uint32),
        ("Unknown3", c_uint32),
        ("KeyClass", c_uint32),
        ("PrintName", c_uint32),
        ("Alias", c_uint32),
        ("Permanent", c_uint32),
        ("Private", c_uint32),
        ("Modifiable", c_uint32),
        ("Label", c_uint32),
        ("ApplicationTag", c_uint32),
        ("KeyCreator", c_uint32),
        ("KeyType", c_uint32),
        ("KeySizeInBits", c_uint32),
        ("EffectiveKeySize", c_uint32),
        ("StartDate", c_uint32),
        ("EndDate", c_uint32),
        ("Sensitive", c_uint32),
        ("AlwaysSensitive", c_uint32),
        ("Extractable", c_uint32),
        ("NeverExtractable", c_uint32),
        ("Encrypt", c_uint32),
        ("Decrypt", c_uint32),
        ("Derive", c_uint32),
        ("Sign", c_uint32),
        ("Verify", c_uint32),
        ("SignRecover", c_uint32),
        ("VerifyRecover", c_uint32),
        ("Wrap", c_uint32),
        ("UnWrap", c_uint32)
    ]


class _TABLE_HEADER(BigEndianStructure):
    # Per-table header: record count and offsets bookkeeping.
    _fields_ = [
        ("TableSize", c_uint),
        ("TableId", c_uint),
        ("RecordCount", c_uint),
        ("Records", c_uint),
        ("IndexesOffset", c_uint),
        ("FreeListHead", c_uint),
        ("RecordNumbersCount", c_uint),
    ]


class _SCHEMA_INFO_RECORD(BigEndianStructure):
    # Schema-info record describing one table's type and payload.
    _fields_ = [
        ("RecordSize", c_uint),
        ("RecordNumber", c_uint),
        ("Unknown2", c_uint),
        ("Unknown3", c_uint),
        ("Unknown4", c_uint),
        ("Unknown5", c_uint),
        ("Unknown6", c_uint),
        ("RecordType", c_uint),
        ("DataSize", c_uint),
        ("Data", c_uint)
    ]


class _COMMON_BLOB(BigEndianStructure):
    # Common header shared by all securityd blobs: magic + version.
    _fields_ = [
        ("magic", c_uint32),
        ("blobVersion", c_uint32)
    ]


# _ENCRYPTED_BLOB_METADATA
class _KEY_BLOB(BigEndianStructure):
    # Encrypted key blob: ciphertext bounds and 3DES IV.
    _fields_ = [
        ("CommonBlob", _COMMON_BLOB),
        ("startCryptoBlob", c_uint32),
        ("totalLength", c_uint32),
        ("iv", c_ubyte * 8)
    ]


class _DB_PARAMETERS(BigEndianStructure):
    # Database lock parameters embedded in the DbBlob.
    _fields_ = [
        ("idleTimeout", c_uint32),  # uint32
        ("lockOnSleep", c_uint32)  # uint8
    ]


class _DB_BLOB(BigEndianStructure):
    # DbBlob: holds the PBKDF2 salt, IV and the encrypted database key.
    _fields_ = [
        ("CommonBlob", _COMMON_BLOB),
        ("startCryptoBlob", c_uint32),
        ("totalLength", c_uint32),
        ("randomSignature", c_ubyte * 16),
        ("sequence", c_uint32),
        ("params", _DB_PARAMETERS),
        ("salt", c_ubyte * 20),
        ("iv", c_ubyte * 8),
        ("blobSignature", c_ubyte * 20)
    ]


class _SSGP(BigEndianStructure):
    # Secure Storage Group header preceding an encrypted password payload.
    _fields_ = [
        ("magic", c_char * 4),
        ("label", c_ubyte * 16),
        ("iv", c_ubyte * 8)
    ]


def _memcpy(buf, fmt):
    # Reinterpret a raw byte buffer as the given ctypes structure (no copy of fields).
    return cast(c_char_p(buf), POINTER(fmt)).contents


class KeyChain():
    """Reader for an on-disk keychain file: table walking, record extraction
    and blob decryption. All offsets below are relative to self.fbuf."""

    def __init__(self, filepath):
        self.filepath = filepath
        self.fbuf = ''  # whole keychain file contents, loaded by open()

    def open(self):
        """Read the keychain file into memory; return True on success."""
        try:
            fhandle = open(self.filepath, 'rb')
        except Exception:
            return False
        self.fbuf = fhandle.read()
        if len(self.fbuf):
            fhandle.close()
            return True
        return False

    def checkValidKeychain(self):
        """Return True if the buffer starts with the 'kych' magic."""
        if self.fbuf[0:4] != KEYCHAIN_SIGNATURE:
            return False
        return True

    # get apple DB Header
    def getHeader(self):
        header = _memcpy(self.fbuf[:sizeof(_APPL_DB_HEADER)], _APPL_DB_HEADER)
        return header

    def getSchemaInfo(self, offset):
        """Return (_APPL_DB_SCHEMA, list of table offsets) at the given offset."""
        table_list = []
        # schema_info = struct.unpack(APPL_DB_SCHEMA, self.fbuf[offset:offset + APPL_DB_SCHEMA_SIZE])
        _schemainfo = _memcpy(self.fbuf[offset:offset + sizeof(_APPL_DB_SCHEMA)], _APPL_DB_SCHEMA)
        for i in xrange(_schemainfo.TableCount):
            BASE_ADDR = sizeof(_APPL_DB_HEADER) + sizeof(_APPL_DB_SCHEMA)
            table_list.append(
                struct.unpack('>I', self.fbuf[BASE_ADDR + (ATOM_SIZE * i):BASE_ADDR + (ATOM_SIZE * i) + ATOM_SIZE])[0])
        return _schemainfo, table_list

    def getTable(self, offset):
        """Return (_TABLE_HEADER, list of valid record offsets) for the table
        starting at `offset` (relative to end of the DB header)."""
        record_list = []
        BASE_ADDR = sizeof(_APPL_DB_HEADER) + offset
        TableMetaData = _memcpy(self.fbuf[BASE_ADDR:BASE_ADDR + sizeof(_TABLE_HEADER)], _TABLE_HEADER)
        RECORD_OFFSET_BASE = BASE_ADDR + sizeof(_TABLE_HEADER)
        record_count = 0
        offset = 0
        # Scan offset slots until RecordCount valid (non-zero, 4-aligned) offsets are found.
        while TableMetaData.RecordCount != record_count:
            RecordOffset = struct.unpack('>I', self.fbuf[
                RECORD_OFFSET_BASE + (ATOM_SIZE * offset):RECORD_OFFSET_BASE + (
                    ATOM_SIZE * offset) + ATOM_SIZE])[0]
            # if len(record_list) >= 1:
            #     if record_list[len(record_list)-1] >= RecordOffset:
            #         continue
            if (RecordOffset != 0x00) and (RecordOffset % 4 == 0):
                record_list.append(RecordOffset)
                # print ' [-] Record Offset: 0x%.8x'%RecordOffset
                record_count += 1
            offset += 1
        return TableMetaData, record_list

    def getTablenametoList(self, recordList, tableList):
        """Map each table's TableId to its index in tableList."""
        TableDic = {}
        for count in xrange(len(recordList)):
            tableMeta, GenericList = self.getTable(tableList[count])
            TableDic[tableMeta.TableId] = count  # extract valid table list
        return len(recordList), TableDic

    def getKeyblobRecord(self, base_addr, offset):
        """Extract one symmetric-key blob record.

        Returns (label_match_data, ciphertext, iv, status) where status 0
        means success and 1 means the record is not an SSGP blob / is invalid.
        """
        BASE_ADDR = sizeof(_APPL_DB_HEADER) + base_addr + offset
        KeyBlobRecHeader = _memcpy(self.fbuf[BASE_ADDR:BASE_ADDR + sizeof(_KEY_BLOB_REC_HEADER)], _KEY_BLOB_REC_HEADER)
        record = self.fbuf[
            BASE_ADDR + sizeof(_KEY_BLOB_REC_HEADER):BASE_ADDR + KeyBlobRecHeader.RecordSize]  # password data area
        KeyBlobRecord = _memcpy(record[:+sizeof(_KEY_BLOB)], _KEY_BLOB)
        # The 4 bytes after the crypto blob must carry the SSGP group magic.
        if SECURE_STORAGE_GROUP != str(record[KeyBlobRecord.totalLength + 8:KeyBlobRecord.totalLength + 8 + 4]):
            return '', '', '', 1
        CipherLen = KeyBlobRecord.totalLength - KeyBlobRecord.startCryptoBlob
        if CipherLen % BLOCKSIZE != 0:
            print_debug('ERROR', "Bad ciphertext len")
            return '', '', '', 1
        ciphertext = record[KeyBlobRecord.startCryptoBlob:KeyBlobRecord.totalLength]
        # match data, keyblob_ciphertext, Initial Vector, success
        return record[KeyBlobRecord.totalLength + 8:KeyBlobRecord.totalLength + 8 + 20], ciphertext, KeyBlobRecord.iv, 0

    def getGenericPWRecord(self, base_addr, offset):
        """Extract a generic-password record as a list:
        [ssgp, ctime, mtime, description, creator, type, printname, alias, account, service]."""
        record = []
        BASE_ADDR = sizeof(_APPL_DB_HEADER) + base_addr + offset
        RecordMeta = _memcpy(self.fbuf[BASE_ADDR:BASE_ADDR + sizeof(_GENERIC_PW_HEADER)], _GENERIC_PW_HEADER)
        Buffer = self.fbuf[BASE_ADDR + sizeof(
            _GENERIC_PW_HEADER):BASE_ADDR + RecordMeta.RecordSize]  # record_meta[0] => record size
        if RecordMeta.SSGPArea != 0:
            record.append(Buffer[:RecordMeta.SSGPArea])
        else:
            record.append('')
        # Column offsets have their low bit masked off before use.
        record.append(self.getKeychainTime(BASE_ADDR, RecordMeta.CreationDate & 0xFFFFFFFE))
        record.append(self.getKeychainTime(BASE_ADDR, RecordMeta.ModDate & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Description & 0xFFFFFFFE))
        record.append(self.getFourCharCode(BASE_ADDR, RecordMeta.Creator & 0xFFFFFFFE))
        record.append(self.getFourCharCode(BASE_ADDR, RecordMeta.Type & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.PrintName & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Alias & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Account & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Service & 0xFFFFFFFE))
        return record

    def getInternetPWRecord(self, base_addr, offset):
        """Extract an internet-password record (ssgp first, then metadata columns)."""
        record = []
        BASE_ADDR = sizeof(_APPL_DB_HEADER) + base_addr + offset
        RecordMeta = _memcpy(self.fbuf[BASE_ADDR:BASE_ADDR + sizeof(_INTERNET_PW_HEADER)], _INTERNET_PW_HEADER)
        Buffer = self.fbuf[BASE_ADDR + sizeof(_INTERNET_PW_HEADER):BASE_ADDR + RecordMeta.RecordSize]
        if RecordMeta.SSGPArea != 0:
            record.append(Buffer[:RecordMeta.SSGPArea])
        else:
            record.append('')
        record.append(self.getKeychainTime(BASE_ADDR, RecordMeta.CreationDate & 0xFFFFFFFE))
        record.append(self.getKeychainTime(BASE_ADDR, RecordMeta.ModDate & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Description & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Comment & 0xFFFFFFFE))
        record.append(self.getFourCharCode(BASE_ADDR, RecordMeta.Creator & 0xFFFFFFFE))
        record.append(self.getFourCharCode(BASE_ADDR, RecordMeta.Type & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.PrintName & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Alias & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Protected & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Account & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.SecurityDomain & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Server & 0xFFFFFFFE))
        record.append(self.getFourCharCode(BASE_ADDR, RecordMeta.Protocol & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.AuthType & 0xFFFFFFFE))
        record.append(self.getInt(BASE_ADDR, RecordMeta.Port & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Path & 0xFFFFFFFE))
        return record

    def getx509Record(self, base_addr, offset):
        """Extract an X.509 certificate record; the raw DER blob is appended last."""
        record = []
        BASE_ADDR = sizeof(_APPL_DB_HEADER) + base_addr + offset
        RecordMeta = _memcpy(self.fbuf[BASE_ADDR:BASE_ADDR + sizeof(_X509_CERT_HEADER)], _X509_CERT_HEADER)
        x509Certificate = self.fbuf[BASE_ADDR + sizeof(_X509_CERT_HEADER):BASE_ADDR + sizeof(
            _X509_CERT_HEADER) + RecordMeta.CertSize]
        record.append(self.getInt(BASE_ADDR, RecordMeta.CertType & 0xFFFFFFFE))  # Cert Type
        record.append(self.getInt(BASE_ADDR, RecordMeta.CertEncoding & 0xFFFFFFFE))  # Cert Encoding
        record.append(self.getLV(BASE_ADDR, RecordMeta.PrintName & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Alias & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Subject & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Issuer & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.SerialNumber & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.SubjectKeyIdentifier & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.PublicKeyHash & 0xFFFFFFFE))
        record.append(x509Certificate)
        return record

    def getKeyRecord(self, base_addr, offset):  ## PUBLIC and PRIVATE KEY
        """Extract a key record's attributes plus (IV, encrypted key blob)."""
        record = []
        BASE_ADDR = sizeof(_APPL_DB_HEADER) + base_addr + offset
        RecordMeta = _memcpy(self.fbuf[BASE_ADDR:BASE_ADDR + sizeof(_SECKEY_HEADER)], _SECKEY_HEADER)
        KeyBlob = self.fbuf[BASE_ADDR + sizeof(_SECKEY_HEADER):BASE_ADDR + sizeof(_SECKEY_HEADER) + RecordMeta.BlobSize]
        record.append(self.getLV(BASE_ADDR, RecordMeta.PrintName & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Label & 0xFFFFFFFE))
        record.append(self.getInt(BASE_ADDR, RecordMeta.KeyClass & 0xFFFFFFFE))
        record.append(self.getInt(BASE_ADDR, RecordMeta.Private & 0xFFFFFFFE))
        record.append(self.getInt(BASE_ADDR, RecordMeta.KeyType & 0xFFFFFFFE))
        record.append(self.getInt(BASE_ADDR, RecordMeta.KeySizeInBits & 0xFFFFFFFE))
        record.append(self.getInt(BASE_ADDR, RecordMeta.EffectiveKeySize & 0xFFFFFFFE))
        record.append(self.getInt(BASE_ADDR, RecordMeta.Extractable & 0xFFFFFFFE))
        # KeyCreator is NUL-terminated; keep only the leading portion.
        record.append(str(self.getLV(BASE_ADDR, RecordMeta.KeyCreator & 0xFFFFFFFE)).split('\x00')[0])
        IV, Key = self.getEncryptedDatainBlob(KeyBlob)
        record.append(IV)
        record.append(Key)
        return record

    def getEncryptedDatainBlob(self, BlobBuf):
        """Return (IV, ciphertext) from a key blob, or ('', '') on bad magic."""
        KeyBlob = _memcpy(BlobBuf[:sizeof(_KEY_BLOB)], _KEY_BLOB)
        if KeyBlob.CommonBlob.magic != 0xFADE0711:
            return '', ''
        KeyData = BlobBuf[KeyBlob.startCryptoBlob:KeyBlob.totalLength]
        return KeyBlob.iv, KeyData  # IV, Encrypted Data

    def getKeychainTime(self, BASE_ADDR, pCol):
        """Decode a 16-byte 'YYYYmmddHHMMSSZ' timestamp column to a string."""
        if pCol <= 0:
            return ''
        else:
            data = str(struct.unpack('>16s', self.fbuf[BASE_ADDR + pCol:BASE_ADDR + pCol + struct.calcsize('>16s')])[0])
            return str(datetime.datetime.strptime(data.strip('\x00'), '%Y%m%d%H%M%SZ'))

    def getInt(self, BASE_ADDR, pCol):
        """Read a big-endian uint32 column; 0 when the column is absent."""
        if pCol <= 0:
            return 0
        else:
            return struct.unpack('>I', self.fbuf[BASE_ADDR + pCol:BASE_ADDR + pCol + 4])[0]

    def getFourCharCode(self, BASE_ADDR, pCol):
        """Read a 4-character code column (e.g. creator/type)."""
        if pCol <= 0:
            return ''
        else:
            return struct.unpack('>4s', self.fbuf[BASE_ADDR + pCol:BASE_ADDR + pCol + 4])[0]

    def getLV(self, BASE_ADDR, pCol):
        """Read a length-prefixed value column; length is rounded up to 4 bytes."""
        if pCol <= 0:
            return ''
        str_length = struct.unpack('>I', self.fbuf[BASE_ADDR + pCol:BASE_ADDR + pCol + 4])[0]
        # 4byte arrangement
        if (str_length % 4) == 0:
            real_str_len = (str_length / 4) * 4
        else:
            real_str_len = ((str_length / 4) + 1) * 4
        unpack_value = '>' + str(real_str_len) + 's'
        try:
            data = struct.unpack(unpack_value, self.fbuf[BASE_ADDR + pCol + 4:BASE_ADDR + pCol + 4 + real_str_len])[0]
        except struct.error:
            # print 'Length is too long : %d'%real_str_len
            return ''
        return data

    def getAppleshareRecord(self, base_addr, offset):
        """Extract an AppleShare password record (ssgp first, then metadata)."""
        record = []
        BASE_ADDR = sizeof(_APPL_DB_HEADER) + base_addr + offset
        RecordMeta = _memcpy(self.fbuf[BASE_ADDR:BASE_ADDR + sizeof(_APPLE_SHARE_HEADER)], _APPLE_SHARE_HEADER)
        Buffer = self.fbuf[BASE_ADDR + sizeof(_APPLE_SHARE_HEADER):BASE_ADDR + RecordMeta.RecordSize]
        if RecordMeta.SSGPArea != 0:
            record.append(Buffer[:RecordMeta.SSGPArea])
        else:
            record.append('')
        record.append(self.getKeychainTime(BASE_ADDR, RecordMeta.CreationDate & 0xFFFFFFFE))
        record.append(self.getKeychainTime(BASE_ADDR, RecordMeta.ModDate & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Description & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Comment & 0xFFFFFFFE))
        record.append(self.getFourCharCode(BASE_ADDR, RecordMeta.Creator & 0xFFFFFFFE))
        record.append(self.getFourCharCode(BASE_ADDR, RecordMeta.Type & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.PrintName & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Alias & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Protected & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Account & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Volume & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Server & 0xFFFFFFFE))
        record.append(self.getFourCharCode(BASE_ADDR, RecordMeta.Protocol & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Address & 0xFFFFFFFE))
        record.append(self.getLV(BASE_ADDR, RecordMeta.Signature & 0xFFFFFFFE))
        return record

    ## decrypted dbblob area
    ## Documents : http://www.opensource.apple.com/source/securityd/securityd-55137.1/doc/BLOBFORMAT
    ## http://www.opensource.apple.com/source/libsecurity_keychain/libsecurity_keychain-36620/lib/StorageManager.cpp
    def SSGPDecryption(self, ssgp, dbkey):
        """Decrypt an SSGP password area with the record's key; return plaintext."""
        SSGP = _memcpy(ssgp, _SSGP)
        plain = kcdecrypt(dbkey, SSGP.iv, ssgp[sizeof(_SSGP):])
        return plain

    # Documents : http://www.opensource.apple.com/source/securityd/securityd-55137.1/doc/BLOBFORMAT
    # source : http://www.opensource.apple.com/source/libsecurity_cdsa_client/libsecurity_cdsa_client-36213/lib/securestorage.cpp
    # magicCmsIV : http://www.opensource.apple.com/source/Security/Security-28/AppleCSP/AppleCSP/wrapKeyCms.cpp
    def KeyblobDecryption(self, encryptedblob, iv, dbkey):
        """Unwrap a CMS-wrapped symmetric key blob; return the 24-byte key or ''."""
        magicCmsIV = unhexlify('4adda22c79e82105')
        plain = kcdecrypt(dbkey, magicCmsIV, encryptedblob)
        if plain.__len__() == 0:
            return ''
        # now we handle the unwrapping. we need to take the first 32 bytes,
        # and reverse them.
        revplain = ''
        for i in range(32):
            revplain += plain[31 - i]
        # now the real key gets found. */
        plain = kcdecrypt(dbkey, iv, revplain)
        keyblob = plain[4:]
        if len(keyblob) != KEYLEN:
            # raise "Bad decrypted keylen!"
            return ''
        return keyblob

    # test code
    # http://opensource.apple.com/source/libsecurity_keychain/libsecurity_keychain-55044/lib/KeyItem.cpp
    def PrivateKeyDecryption(self, encryptedblob, iv, dbkey):
        """Unwrap a private key blob; return (key name, key material) or ('', '')."""
        magicCmsIV = unhexlify('4adda22c79e82105')
        plain = kcdecrypt(dbkey, magicCmsIV, encryptedblob)
        if plain.__len__() == 0:
            return '', ''
        # now we handle the unwrapping. we need to take the first 32 bytes,
        # and reverse them.
        revplain = ''
        for i in range(len(plain)):
            revplain += plain[len(plain) - 1 - i]
        # now the real key gets found. */
        plain = kcdecrypt(dbkey, iv, revplain)
        Keyname = plain[:12]  # Copied Buffer when user click on right and copy a key on Keychain Access
        keyblob = plain[12:]
        return Keyname, keyblob

    # Documents : http://www.opensource.apple.com/source/securityd/securityd-55137.1/doc/BLOBFORMAT
    def generateMasterKey(self, pw, symmetrickey_offset):
        """Derive the master key from the user password via PBKDF2 (1000 rounds)."""
        base_addr = sizeof(_APPL_DB_HEADER) + symmetrickey_offset + 0x38  # header
        dbblob = _memcpy(self.fbuf[base_addr:base_addr + sizeof(_DB_BLOB)], _DB_BLOB)
        masterkey = pbkdf2(pw, str(bytearray(dbblob.salt)), 1000, KEYLEN)
        return masterkey

    # find DBBlob and extract Wrapping key
    def findWrappingKey(self, master, symmetrickey_offset):
        """Decrypt the DbBlob with the master key; return the database key or ''."""
        base_addr = sizeof(_APPL_DB_HEADER) + symmetrickey_offset + 0x38
        dbblob = _memcpy(self.fbuf[base_addr:base_addr + sizeof(_DB_BLOB)], _DB_BLOB)
        # get cipher text area
        ciphertext = self.fbuf[base_addr + dbblob.startCryptoBlob:base_addr + dbblob.totalLength]
        # decrypt the key
        plain = kcdecrypt(master, dbblob.iv, ciphertext)
        if plain.__len__() < KEYLEN:
            return ''
        dbkey = plain[:KEYLEN]
        # return encrypted wrapping key
        return dbkey


# SOURCE : extractkeychain.py
def kcdecrypt(key, iv, data):
    """3DES-CBC decrypt `data` and strip PKCS-style padding; '' on any failure."""
    if len(data) == 0:
        # print>>stderr, "FileSize is 0"
        return ''
    if len(data) % BLOCKSIZE != 0:
        return ''
    cipher = triple_des(key, CBC, str(bytearray(iv)))
    # the line below is for pycrypto instead
    # cipher = DES3.new( key, DES3.MODE_CBC, iv )
    plain = cipher.decrypt(data)
    # now check padding
    pad = ord(plain[-1])
    if pad > 8:
        # print>> stderr, "Bad padding byte. You probably have a wrong password"
        return ''
    for z in plain[-pad:]:
        if ord(z) != pad:
            # print>> stderr, "Bad padding. You probably have a wrong password"
            return ''
    plain = plain[:-pad]
    return plain


def dump_creds(keychain_file, password=None, key=None):
    """Open a keychain, derive/unwrap the DB key (from `password` or hex `key`),
    then decrypt and return generic + internet password entries as dicts.

    Returns a list of {legend: value} dicts, or False on open/format failure.
    """
    keychain = KeyChain(keychain_file)
    if keychain.open() is False:
        print_debug('ERROR', '%s Open Failed' % keychain_file)
        return False
    KeychainHeader = keychain.getHeader()
    if KeychainHeader.Signature != KEYCHAIN_SIGNATURE:
        print_debug('ERROR', 'Invalid Keychain Format')
        return False
    SchemaInfo, TableList = keychain.getSchemaInfo(KeychainHeader.SchemaOffset)
    TableMetadata, RecordList = keychain.getTable(TableList[0])
    tableCount, tableEnum = keychain.getTablenametoList(RecordList, TableList)

    # generate database key
    if password:
        masterkey = keychain.generateMasterKey(password, TableList[tableEnum[CSSM_DL_DB_RECORD_METADATA]])
        dbkey = keychain.findWrappingKey(masterkey, TableList[tableEnum[CSSM_DL_DB_RECORD_METADATA]])
    else:
        dbkey = keychain.findWrappingKey(unhexlify(key), TableList[tableEnum[CSSM_DL_DB_RECORD_METADATA]])

    # DEBUG
    print_debug('DEBUG', 'DB Key: %s' % str(repr(dbkey)))

    key_list = {}  # keyblob list

    # get symmetric key blob
    print_debug('DEBUG', 'Symmetric Key Table: 0x%.8x' % (
        sizeof(_APPL_DB_HEADER) + TableList[tableEnum[CSSM_DL_DB_RECORD_SYMMETRIC_KEY]]))
    TableMetadata, symmetrickey_list = keychain.getTable(TableList[tableEnum[CSSM_DL_DB_RECORD_SYMMETRIC_KEY]])

    for symmetrickey_record in symmetrickey_list:
        keyblob, ciphertext, iv, return_value = keychain.getKeyblobRecord(
            TableList[tableEnum[CSSM_DL_DB_RECORD_SYMMETRIC_KEY]], symmetrickey_record)
        if return_value == 0:
            passwd = keychain.KeyblobDecryption(ciphertext, iv, dbkey)
            if passwd != '':
                key_list[keyblob] = passwd

    pwdFound = []
    legend = ['', 'Create DateTime', 'Last Modified DateTime', 'Description', 'Creator', 'Type',
              'PrintName', 'Alias', 'Account', 'Service']
    try:
        TableMetadata, genericpw_list = keychain.getTable(TableList[tableEnum[CSSM_DL_DB_RECORD_GENERIC_PASSWORD]])
        for genericpw in genericpw_list:
            record = keychain.getGenericPWRecord(TableList[tableEnum[CSSM_DL_DB_RECORD_GENERIC_PASSWORD]], genericpw)
            # print '[+] Generic Password Record'
            try:
                # record[0] is the SSGP area; its label (first 20 bytes) selects the key.
                real_key = key_list[record[0][0:20]]
                passwd = keychain.SSGPDecryption(record[0], real_key)
            except KeyError:
                passwd = ''
            if passwd:
                values = {}
                for cpt in range(1, len(record)):
                    if record[cpt]:
                        values[legend[cpt]] = unicode(record[cpt])
                try:
                    values['Password'] = unicode(passwd)
                except:
                    values['Password'] = unicode(repr(passwd))
                pwdFound.append(values)
    except KeyError:
        print_debug('INFO', 'Generic Password Table is not available')
        pass

    legend = ['', 'Create DateTime', 'Last Modified DateTime', 'Description', 'Comment', 'Creator',
              'Type', 'PrintName', 'Alias', 'Protected', 'Account', 'SecurityDomain', 'Server',
              'Protocol Type', 'Auth Type', 'Port', 'Path']
    try:
        TableMetadata, internetpw_list = keychain.getTable(TableList[tableEnum[CSSM_DL_DB_RECORD_INTERNET_PASSWORD]])
        for internetpw in internetpw_list:
            record = keychain.getInternetPWRecord(TableList[tableEnum[CSSM_DL_DB_RECORD_INTERNET_PASSWORD]], internetpw)
            try:
                real_key = key_list[record[0][0:20]]
                passwd = keychain.SSGPDecryption(record[0], real_key)
            except KeyError:
                passwd = ''
            if passwd:
                values = {}
                for cpt in range(1, len(record)):
                    if record[cpt]:
                        values[legend[cpt]] = record[cpt]
                try:
                    values['Password'] = unicode(passwd)
                except Exception:
                    values['Password'] = unicode(repr(passwd))
                pwdFound.append(values)
    except KeyError:
        print_debug('INFO', 'Internet Password Table is not available')
        pass

    return pwdFound
StarcoderdataPython
97226
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 23 12:31:50 2018

@author: <NAME>

Compute site energies, transition dipoles and inter-defect couplings for
defects embedded in a fluorographene sheet, using QChemTool's polarizable
environment model. Two parameter modes: automatic (ESP-fit charges) and a
fully manual charge/polarizability definition.
"""
from QChemTool import Structure
from QChemTool.Development.polarizablesytem_periodic import PolarizableSystem
from QChemTool import energy_units
from QChemTool.QuantumChem.Fluorographene.fluorographene import orientFG
import numpy as np

# Switch between manual and automatic parameter definition below.
parameters_type_manual = False
system = "2perylene" # "anthanthrene", "perylene", "2perylene"

if not parameters_type_manual:
    # Automatic definition of parameters
    # Set parameters of the system
    FG_charges = "ESPfit"
    params_polar={"VinterFG": True,"coarse_grain": "plane", "charge_type": FG_charges,"approximation": 1.1, "symm": True}

    # Load FG structure
    struc = Structure()
    if system == "perylene":
        struc.load_xyz("FGrph_1perylene_2dist_ser_TDDFT-wB97XD_geom_BLYP-landl2dz_symm.xyz")
        # For practical calculation also reorient sheet in propper direction (plane) and carbons has to be before fluorines
        #struc.center(72,73,86)
        struc = orientFG(struc)
    elif system == "anthanthrene":
        struc.load_xyz("FGrph_1anthranthrene_1dist_par_TDDFT-wB97XD_geom_BLYP-landl2dz_symm_7x11.xyz")
        # For practical calculation also reorient sheet in propper direction (plane) and carbons has to be before fluorines
        # struc.center(41,43,133)
        struc = orientFG(struc)
    elif system == "2perylene":
        struc.load_xyz("FGrph_2perylene_1dist_par_TDDFT-wB97XD_geom_BLYP-landl2dz_symm_9x12.xyz")
        # For practical calculation also reorient sheet in propper direction (plane) and carbons has to be before fluorines
        # struc.center(58,57,83)
        struc = orientFG(struc)
        struc.output_to_xyz("FGrph_2perylene_1dist_par_reorient.xyz")

    # Initialize the system
    # NOTE: `system` is deliberately re-bound here from the selector string to
    # the PolarizableSystem object; the string is no longer needed past this point.
    elstat = {"structure": struc,"charge": FG_charges}
    diel = {"structure": struc,"polar": params_polar}
    params = {"energy_type": "QC","permivity": 1.0,"order": 2}
    system = PolarizableSystem(diel = diel, elstat = elstat, params = params)

    # identify defects - separated because now changes can be made to the database
    system.identify_defects()

    # Calculate energies in the system
    # HH collects site energies (diagonal) and couplings (off-diagonal).
    Ndef = len(system.defects)
    HH = np.zeros((Ndef,Ndef),dtype='f8')
    for ii in range(Ndef):
        dAVA = system.get_elstat_energy(ii,"excited-ground")
        Eshift, res_Energy, TrDip = system.get_SingleDefectProperties(ii)
        E01_vacuum = system.defects[ii].get_transition_energy()
        HH[ii,ii] = E01_vacuum._value + Eshift._value
        with energy_units("1/cm"):
            # print(system.defects[0].name,dAVA.value)
            print(system.defects[ii].name,ii+1,"energy shift:",Eshift.value)
            print(system.defects[ii].name,ii+1,"transition dipole:",TrDip)
    for ii in range(Ndef):
        for jj in range(ii+1,Ndef):
            J_inter, res = system.get_HeterodimerProperties(ii, jj, EngA = HH[ii,ii], EngB = HH[jj,jj], approx=1.1)
            #J_inter, res = system.get_HeterodimerProperties(ii, jj, approx=1.1)
            HH[ii,jj] = J_inter._value
            HH[jj,ii] = HH[ii,jj]
            with energy_units("1/cm"):
                print(system.defects[ii].name,ii+1,"-",system.defects[jj].name,jj+1,"interaction E:",J_inter.value)

else:
    # Set fluorographene charges
    # manual definition
    CF_charge = -0.0522
    CF2_charge = 2*CF_charge
    FG_charges={'CF': CF_charge,'CF2': CF2_charge,'CD': 0.0,'C': 0.0}
    FG_charges['FC'] = -FG_charges['CF']
    FG_charges['F2C'] = -FG_charges["CF2"]/2.0

    # set fluorographene atomic polarizabilities
    # manual definition
    #------------------------------------------------------------------------------
    #                  polxy           polz      amp           per  phase
    # CF_AE_params  = [7.53538330517,  0.0000,   1.0326577124, 2,   0.0]
    # CF_A_E_params = [0.505521019116, 0.000000, 0.4981493,    2,   np.pi/2]
    # CF_BE_params  = [0.129161747387, 0.0000,   0.05876077,   2,   0.0]
    # CF_Ast_params = [2.30828107,     0.0000000, 0.08196599,  2,   0.0]  #[2.30828107, 0.00000, 0.081966, 2]
    CF_AE_params = [8.94690348, 4.50738195, 1.65097606, 3, 0.0]
    CF_A_E_params = [0.39013017, 2.09784509, 0.59003868, 3, 0.0]
    CF_BE_params = [0.57543444, 3.98822098, 0.63754235, 3, 0.0]
    CF_Ast_params = [5.17064221/2, 4.99791421/2, 0.25093473/2, 3, 0.0]
    VinterFG = 0.0
    # Carbon and "reversed" (F-on-C) sites carry zero polarizability here.
    C_params = [0.00000000, 0.0000000, 0.0, 0, 0.0]
    FC_AE_params = [0.00000000, 0.0000000, 0.0, 0, 0.0]
    FC_A_E_params = [0.0000000, 0.0000000, 0.0, 0, 0.0]
    FC_BE_params = [0.00000000, 0.0000000, 0.0, 0, 0.0]
    FC_Ast_params = [0.0000000, 0.0000000, 0.0, 0, 0.0]
    polar = {'AlphaE': {"CF": CF_AE_params, "FC": FC_AE_params, "C": C_params}}
    polar['Alpha_E'] = {"CF": CF_A_E_params, "FC": FC_A_E_params, "C": C_params}
    polar['BetaEE'] = {"CF": CF_BE_params, "FC": FC_BE_params, "C": C_params}
    polar['Alpha_st'] = {"CF": CF_Ast_params, "FC": FC_Ast_params, "C": C_params}
    params_polar={"VinterFG": 0.0,"coarse_grain": "C", "polarizability": polar,"approximation": 1.1}

    # Load FG structure
    # NOTE(review): the whole load/initialize/compute section below duplicates
    # the automatic branch almost verbatim — a candidate for extraction into a
    # shared function once behavior is confirmed identical.
    struc = Structure()
    if system == "perylene":
        struc.load_xyz("FGrph_1perylene_2dist_ser_TDDFT-wB97XD_geom_BLYP-landl2dz_symm.xyz")
        # For practical calculation also reorient sheet in propper direction (plane) and carbons has to be before fluorines
        #struc.center(72,73,86)
        struc = orientFG(struc)
    elif system == "anthanthrene":
        struc.load_xyz("FGrph_1anthranthrene_1dist_par_TDDFT-wB97XD_geom_BLYP-landl2dz_symm_7x11.xyz")
        # For practical calculation also reorient sheet in propper direction (plane) and carbons has to be before fluorines
        # struc.center(41,43,133)
        struc = orientFG(struc)
    elif system == "2perylene":
        struc.load_xyz("FGrph_2perylene_1dist_par_TDDFT-wB97XD_geom_BLYP-landl2dz_symm_9x12.xyz")
        # For practical calculation also reorient sheet in propper direction (plane) and carbons has to be before fluorines
        # struc.center(58,57,83)
        struc = orientFG(struc)
        struc.output_to_xyz("FGrph_2perylene_1dist_par_reorient.xyz")

    # Initialize the system
    elstat = {"structure": struc,"charge": FG_charges}
    diel = {"structure": struc,"polar": params_polar}
    params = {"energy_type": "QC","permivity": 1.0,"order": 2}
    system = PolarizableSystem(diel = diel, elstat = elstat, params = params)

    # identify defects - separated because now changes can be made to the database
    system.identify_defects()

    # Calculate energies in the system
    Ndef = len(system.defects)
    HH = np.zeros((Ndef,Ndef),dtype='f8')
    for ii in range(Ndef):
        dAVA = system.get_elstat_energy(ii,"excited-ground")
        Eshift, res_Energy, TrDip = system.get_SingleDefectProperties(ii)
        E01_vacuum = system.defects[ii].get_transition_energy()
        HH[ii,ii] = E01_vacuum._value + Eshift._value
        with energy_units("1/cm"):
            # print(system.defects[0].name,dAVA.value)
            print(system.defects[ii].name,ii+1,"energy shift:",Eshift.value)
            print(system.defects[ii].name,ii+1,"transition dipole:",TrDip)
    for ii in range(Ndef):
        for jj in range(ii+1,Ndef):
            J_inter, res = system.get_HeterodimerProperties(ii, jj, EngA = HH[ii,ii], EngB = HH[jj,jj], approx=1.1)
            #J_inter, res = system.get_HeterodimerProperties(ii, jj, approx=1.1)
            HH[ii,jj] = J_inter._value
            HH[jj,ii] = HH[ii,jj]
            with energy_units("1/cm"):
                print(system.defects[ii].name,ii+1,"-",system.defects[jj].name,jj+1,"interaction E:",J_inter.value)
StarcoderdataPython
137341
<gh_stars>0 import pygsl import pygsl._numobj as numx import pygsl.rng import pygsl.multifit def calculate(x, y, sigma): n = len(x) X = numx.ones((n,3),)*1. X[:,0] = 1.0 X[:,1] = x X[:,2] = x ** 2 w = 1.0 / sigma ** 2 work = pygsl.multifit.linear_workspace(n,3) c, cov, chisq = pygsl.multifit.wlinear(X, w, y, work) c, cov, chisq = pygsl.multifit.linear(X, y, work) print "# best fit: Y = %g + %g * X + %g * X ** 2" % tuple(c) print "# covariance matrix #" print "[[ %+.5e, %+.5e, %+.5e ] " % tuple(cov[0,:]) print " [ %+.5e, %+.5e, %+.5e ] " % tuple(cov[1,:]) print " [ %+.5e, %+.5e, %+.5e ]]" % tuple(cov[2,:]) print "# chisq = %g " % chisq return c, cov, chisq def generate_data(): r = pygsl.rng.mt19937() a = numx.arange(20) / 10.# + .1 y0 = numx.exp(a) sigma = 0.1 * y0 dy = numx.array(map(r.gaussian, sigma)) return a, y0+dy, sigma if __name__ == '__main__': x, y, sigma = generate_data() c, cov , chisq = calculate(x, y, sigma) #import Gnuplot #g = Gnuplot.Gnuplot() #xref = numx.arange(100) / 50. #yref = c[0] + c[1] * xref + c[2] * xref **2 #t1 = Gnuplot.Data(x,y, with='points') #t2 = Gnuplot.Data(xref, yref, with='line') #g.plot(t1,t2) #print "Press return !" #raw_input()
StarcoderdataPython
3203338
<filename>_netcat.py """ """ import socket import sys import getopt import threading import subprocess import getpass from textwrap import dedent from typing import Tuple, Union, List class Helpers: """Static functions, to use as helpers""" @staticmethod def send_data(to_socket: socket.socket, data_stream: bytes, send_timeout=2) -> None: """ Centralised function to handle sending data stream to receive data. Sends data in consistent buffer sizes Args: to_socket: Socket to send stream to data_stream: Data stream to send send_timeout: Set timeout for to_socket """ to_socket.settimeout(send_timeout) try: data_fragments = [] for i in range(0, len(data_stream), 4096): # Break data stream into byte sized bites data_fragments.append(data_stream[i:i + 4096]) if data_fragments[-1] == 4096: # Make sure last fragment isn't BUFFER bytes long data_fragments.append(b'\n') for frag in data_fragments: to_socket.send(frag) except TimeoutError: pass @staticmethod def receive_data(from_socket: socket.socket, from_timeout=2) -> bytes: """ Centralised fuction to handle receiving one or more packet buffers from TCP socket Args: from_socket: Socket sending stream to this instance. from_timeout: Set timeout for from_socket Returns: Complete binary stream from socket """ from_socket.settimeout(from_timeout) fragments: List[bytes] = [] try: stream = from_socket.recv(4096) fragments.append(stream) while True: if len(stream) < 4096: break else: stream = from_socket.recv(4096) fragments.append(stream) except TimeoutError: pass return b''.join(fragments) @staticmethod def bin_join(*to_join: Union[str, bytes]) -> bytes: """ Funnel function to reliably concatenate binary and strings into binaries. Can also be used to ensure a single item is bytes string Args: to_join: Item/s to join together. 
Either bytes or regular strings Return: Properly concatenated bytes string """ binary_bytes = [] for item in to_join: if not item: pass elif isinstance(item, int): binary_bytes.append(str(item).encode()) elif isinstance(item, str): binary_bytes.append(item.encode()) else: binary_bytes.append(item) return b''.join(binary_bytes) @staticmethod def bin_print(*to_display, end='\n'): """ Funnel function to reliably print binary or regular strings. Args: to_display: Item/s to join together. Either bytes or regular strings end: default print end arg """ for item in to_display: try: print(item.decode(), end=end) except AttributeError: print(item, end=end) class SshcAttributes: """Dataclass-like, used to host running SSHCustom's running attributes""" # Carries defaults @staticmethod def usage(): """Module docstring doubles as --help""" print(__doc__) exit() def __init__(self): if __name__ == '__main__' and len(sys.argv) == 1: self.usage() try: opts, args = getopt.getopt(sys.argv[1:], "ht:p:k:bci:u:lw:e:sv", ['help', 'target=', 'port=', 'user=', 'pass=', 'banner' 'connect', 'initial=', 'upload=', 'listen', 'write=', 'execute=', 'shell', 'verbose']) for opt, arg in opts: if opt in ('-h', '--help'): self.usage() elif opt in ('-t', '--target'): # self.target = arg self.__setattr__('target', arg) elif opt in ('-p', '--port'): # self.port = arg self.__setattr__('port', int(arg)) elif opt in ('-c', '--connecting'): # self.connecting = True self.__setattr__('connecting', True) elif opt == 'k': # self.known_hosts = arg self.__setattr__('known_hosts', arg) elif opt == 'user': # self.user = arg self.__setattr__('user', arg) elif opt in ('b', '--banner'): # self.banner = True self.__setattr__('banner', True) elif opt == 'pass': # self.password = arg self.__setattr__('password', arg) elif opt in ('-u', '--upload'): # self.upload = arg self.__setattr__('upload', arg) elif opt in ('-l', '--listen'): # self.listening = True self.__setattr__('upload', True) elif opt in ('-w', '--write'): # 
self.write_to = arg self.__setattr__('write_to', arg) elif opt in ('-e', '--execute'): # self.execute = arg self.__setattr__('execute', arg) elif opt in ('-s', '--shell'): # self.shell = True self.__setattr__('shell', True) elif opt in ('-v', '--verbose'): # self.verbose = True self.__setattr__('verbose', True) elif not self.target or not self.port: raise SyntaxError("Must explicitly state target IP and Port!") elif True not in [not self.connecting or not self.listening]: input((not self.connecting or not self.listening)) raise SyntaxError("Must explicitly state connecting or listening function!") else: raise SyntaxError(f"Unhandled option: {opt}") except (getopt.GetoptError, SyntaxError) as err: print(err) self.usage() target: str = '127.0.0.1' """Target IP""" port: int = 9999 """Target port""" known_hosts = '' """Optional key support, using absolute path to .ssh/known_hosts""" user: str = getpass.getuser() """Username to pass to custom server""" password: str = None """password to sign in with""" # Connecting functions connecting: bool = False """Bool to connect to listening server on [host]:[port]""" # Listening functions listening: bool = False """Bool to listen on [host]:[port] for incoming connections""" shell: bool = False """Initialize a shell loop, to run one-off commands by connecting clients""" close_connection: str = 'bhpquit' """Specific command to disconnect connected client""" shutdown_listening: str = 'bhpshutdown' """Specific command to shutdown listening script""" listening_active: bool = False """Boolean used to keep server alive""" timeout: int = 60 """Listening server's Timeout value""" verbose = True """ """ class ShutdownServer(socket.error): """Custom error used to shutdown listening server""" class ShutdownClient(socket.error): """Custom error used to safely disconnect connecting client""" class SSHCustom: """ Dedicated SSH client and server, designed specifically for windows implementations (Note that it's usefullness is arguably lessened 
by the lastest Win10's built in SSH port) See --help for more information """ def __init__(self): """ Custom SSH Client/Server built on Paramiko API. Can be imported or run from command line. See Readme or --help for more information """ self.atts: SshcAttributes = SshcAttributes() """Attributes module""" self.help = Helpers() """Helper static functions""" def verprint(self, *to_print) -> None: """ Default check against verbosity attribute, to see if allowed to print Args: *to_print: emulation of print *args. pass as normal """ if self.atts.verbose: for item in to_print: self.help.bin_print(item, end=' ') print() def main(self): """ Primary logic loop. After init, builds listening post or starts connecting client """ if self.atts.listening: # Time to listen, potentially upload items, execute commands, and drop a shell back child = SSHServer() child.server() else: # Try connecting to target, send a potential starting command child = SSHClient() child.client() class SSHServer(SSHCustom, paramiko.ServerInterface): """Custom SSH client, using Paramiko API wrapper""" def __init__(self): super(SSHServer, self).__init__() # Extension to super init, name spacing an Event self.event = threading.Event() def server(self): """ Start a TCP server socket, spool threads to handle incoming clients """ self.verprint(f"[*] Listening on {self.atts.target}:{self.atts.port}") try: # Spool main SSH server server = socket.socket() # Bind socket settings server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server.bind((self.atts.target, self.atts.port)) server.listen(5) while self.atts.listening_active: server_acceptance = server.accept() # Tuple containing client_socket and addr if self.atts.listening_active: client_thread = threading.Thread(target=self.handle_connects, args=(server_acceptance,)) client_thread.start() except ShutdownServer: print("") except Exception as err: closing = dedent(f""" --[*] Unexpected error: {err} ----- Closing server""") self.verprint(closing) def 
handle_connects(self, connected_client: Tuple[socket.socket, any]): """ Called by server socket for each connection Args: connected_client: Returns: """ # Identify target TCP connection client_socket, addr = connected_client client_socket.settimeout(self.atts.timeout) self.verprint(f'--[*] Accepted connection, handler spooled for {addr[0]}:{addr[1]}') closing = '' try: # Create SSH transport object over client_socket ssh_session = paramiko.Transport(client_socket) ssh_session.add_server_key(self.rsa_key) ssh_session.start_server() ssh_channel = ssh_session.accept(20) buffer_stream = self.help.receive_data(ssh_channel) """Received buffer stream from connecting client""" response = b'' """First response to send to connecting client""" # Determine if server set to init shell or not. Respond either way if not self.atts.shell: response = self.help.bin_join( response, f"\nClosing connection to {self.atts.target}:{self.atts.port}") self.help.send_data(to_socket=ssh_channel, data_stream=response) else: self.shell_loop(ssh_channel, response) # # # Exception Handling except ShutdownClient: closing = dedent(f""" --[*] Client requested connection close ----- Closing handler {addr[0]}:{addr[1]} """) except ShutdownServer: closing = dedent(f""" --[*] Client {addr[0]}:{addr[1]} requested shutdown listening post ----- Shutting down """) # self.atts.listening_active = False raise ShutdownServer except Exception as err: closing = dedent(f""" --[*] Unexpected error: {err} ----- Closing handler {addr[0]}:{addr[1]} """) finally: self.verprint(closing) # Low effort try to send to connected client try: self.help.send_data(to_socket=ssh_channel, data_stream=self.help.bin_join(closing)) # client_socket.shutdown(socket.SHUT_RDWR) # client_socket.close() ssh_channel.close() except Exception as err: self.verprint(f"Unexpected error while closing handler {addr[0]}:{addr[1]} : ") self.verprint(err) def check_for_commands(self, stream: bytes): """ Given a datastream, check if a closing command 
is in it. Raise appropriate handling error Args: stream: bytes stream sent from connecting client, to check for bhp commands """ # Catch bhp specific commands in stream if self.atts.close_connection in str(stream): raise ShutdownClient if self.atts.shutdown_listening in str(stream): raise ShutdownServer def write_file(self, data_buffer) -> bytes: """ If allowed, Extension to write a caught data_buffer to local file (self.write_to) Return feedback to calling functions Args: data_buffer: handle_connects's received data stream from it's client_socket. Returns: File write feedback, either successful or failure with error if write_to is None (i.e. not set) return empty bytes string """ send_feedback = '' if self.atts.write_to: try: with open(self.atts.write_to, "wb") as file: file.write(data_buffer) send_feedback = f"Successfully saved file to {self.atts.write_to}\r\n" except Exception as err: send_feedback = f"""Failed to save file to {self.atts.write_to}\r\n{err}\r\n""" return self.help.bin_join(send_feedback) def run_command(self, command: Union[str, bytes, None]) -> bytes: """ Locally run given command using subprocess, and return results as bytes string Args: command: given command to run """ if not command: command_run = '' elif isinstance(command, bytes): command_run = command.decode() else: command_run = command try: output = subprocess.check_output(command_run, stderr=subprocess.STDOUT, shell=True) except Exception as err: output = dedent(f""" Failed to execute command Command : {command_run} Error : {err}\r\n""") return self.help.bin_join(output) def shell_loop(self, client_socket: socket.socket, initial_response: bytes): """ Function to handle one off commands from connecting client. Loops until connection broken. Args: client_socket: Answered socket to accept shell commands from initial_response: Initial response from handle_connects' steps, if any. 
Passed here so shell loop can return, with prompt characters """ response = initial_response prompt = f'\n<BHP@{self.atts.target}:{self.atts.port}>#' while True: # Loop is broken by explicit errors or commands self.help.send_data(to_socket=client_socket, data_stream=self.help.bin_join(response, prompt)) try: cmd_buffer = self.help.receive_data(from_socket=client_socket) self.check_for_commands(cmd_buffer) response = self.run_command(cmd_buffer) except TimeoutError: raise TimeoutError("Listening server timeout reached") except Exception as err: raise err class SSHClient(SSHCustom): """Custom SSH Client, , using paramiko API wrapper""" def client(self): """ Spool up TCP socket, catch return data, prompt for new to_send. Rinse and repeat """ self.verprint(f"Connecting to {self.atts.target}:{self.atts.port}...") # Bind new SSH client client = paramiko.SSHClient() try: # Optional key support if self.atts.known_hosts: client.load_host_keys(self.atts.known_hosts) # Auto add missing keys client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # Connect client.connect(self.atts.target, port=self.atts.port, username=self.atts.user, password=<PASSWORD>) # request session channel to server ssh_session = client.get_transport().open_session() # Catch banner if self.atts.banner: banner = self.help.receive_data(ssh_session) self.help.bin_print(banner) # Build initial data to send if self.atts.upload: to_send = self.file_stream() else: to_send = self.help.bin_join(self.atts.initial_cmd, '\n') # Primary running loop while True: self.help.send_data(ssh_session, to_send) server_response = self.help.receive_data(ssh_session) self.help.bin_print('\n', server_response, end=' ') to_send = input() + '\n' # # # Exception Handling except KeyboardInterrupt: self.verprint("Disconnecting") pass except ConnectionRefusedError: self.verprint('Cannot connect, is listening active?') except ConnectionAbortedError: # Socket closed by listener self.verprint("Closing connection...") except 
ConnectionResetError: self.verprint("Connection prematurely closed. Did server shutdown?") except Exception as err: self.verprint("Unknown error!\n", err, "\nDisconnecting") finally: try: # client.shutdown(socket.SHUT_RDWR) # ssh_session.close() client.close() except Exception as err: self.verprint( f"Unexpected error when disconnecting from {self.atts.target}:{self.atts.port}") self.verprint(err) def file_stream(self): """ Targets file at upload and converts to binary stream, to send to listening server Returns: Single binary stream of indicated file """ file_stream = b'' with open(self.atts.upload, 'rb') as file: for ndx, line in enumerate(file): file_stream = self.help.bin_join(file_stream, line) return file_stream + b'\r\n' if __name__ == '__main__': nc = SSHCustom() nc.main()
StarcoderdataPython
1729827
<reponame>nishp77/thenewboston-node
import logging
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Optional, Type, TypeVar

from thenewboston_node.business_logic.models import AccountState
from thenewboston_node.business_logic.models.base import BaseDataclass
from thenewboston_node.business_logic.models.mixins.message import MessageMixin
from thenewboston_node.business_logic.validators import (
    validate_gte_value, validate_is_none, validate_not_none, validate_type
)
from thenewboston_node.core.logging import validates
from thenewboston_node.core.utils.dataclass import cover_docstring, revert_docstring
from thenewboston_node.core.utils.types import hexstr

logger = logging.getLogger(__name__)

# Bound type variable so the classmethod deserialize_from_dict() is typed as
# returning the concrete subclass it was invoked on.
T = TypeVar('T', bound='BlockchainStateMessage')


# Message payload describing a blockchain state snapshot: the full account map
# plus metadata about the last block folded into the state and the identifier
# expected for the next block.
# NOTE(review): deliberately no class docstring here — revert_docstring /
# cover_docstring manipulate __doc__, so adding one could change their output;
# confirm before introducing a docstring.
@revert_docstring
@dataclass
@cover_docstring
class BlockchainStateMessage(MessageMixin, BaseDataclass):
    account_states: dict[hexstr, AccountState] = field(
        metadata={'example_value': {
            '00f3d2477317d53bcc2a410decb68c769eea2f0d74b679369b7417e198bd97b6': {}
        }}
    )
    """Account number to account state map"""

    last_block_number: Optional[int] = field(default=None, metadata={'example_value': 5})
    """Number of the last block included into the blockchain state (optional for blockchain genesis state)"""

    # TODO(dmu) MEDIUM: Do we really need last_block_identifier?
    last_block_identifier: Optional[hexstr] = field(
        default=None,
        metadata={'example_value': 'b0dabd367eb1ed670ab9ce4cef9d45106332f211c7b50ddd60dec4ae62711fb7'}
    )
    """Identifier of the last block included into the blockchain state (optional for blockchain genesis state)"""

    # TODO(dmu) HIGH: Do we really need `last_block_timestamp`?
    last_block_timestamp: Optional[datetime] = field(
        default=None, metadata={'example_value': datetime(2021, 5, 19, 10, 34, 5, 54106)}
    )
    """Timestamp of the last block included into the blockchain state (optional for blockchain genesis state)"""

    next_block_identifier: Optional[hexstr] = field(
        default=None,
        metadata={'example_value': 'dc6671e1132cbb7ecbc190bf145b5a5cfb139ca502b5d66aafef4d096f4d2709'}
    )
    """Identifier of the next block to be added on top of the blockchain state (optional for blockchain genesis
    state, blockchain state hash is used as next block identifier in this case)"""

    @classmethod
    def deserialize_from_dict(
        cls: Type[T], dict_, complain_excessive_keys=True, override: Optional[dict[str, Any]] = None
    ) -> T:
        """Deserialize a blockchain state message from a plain dict.

        Account states are pre-deserialized here so that a node whose
        `identifier` was stripped during serialization (see
        serialize_to_dict) can be backfilled with its account number.

        NOTE(review): `complain_excessive_keys` is accepted but not forwarded
        to the super() call below — confirm that is intentional.
        """
        override = override or {}
        if 'account_states' in dict_ and 'account_states' not in override:
            # Replace null value of node.identifier with account number
            account_states = dict_.pop('account_states')
            account_state_objects = {}
            for account_number, account_state in account_states.items():
                account_state_object = AccountState.deserialize_from_dict(account_state)
                if (node := account_state_object.node) and node.identifier is None:
                    node.identifier = account_number
                account_state_objects[account_number] = account_state_object

            override['account_states'] = account_state_objects

        return super().deserialize_from_dict(dict_, override=override)

    def serialize_to_dict(self, skip_none_values=True, coerce_to_json_types=True, exclude=()):
        """Serialize to a plain dict, dropping values recoverable from the account number.

        Two redundancies are stripped per account state:
        - `balance_lock` equal to the account number (the default lock);
        - the node `identifier` (always equal to the account number; it is
          restored by deserialize_from_dict).
        """
        serialized = super().serialize_to_dict(
            skip_none_values=skip_none_values, coerce_to_json_types=coerce_to_json_types, exclude=exclude
        )

        for account_number, account_state in serialized['account_states'].items():
            if account_state.get('balance_lock') == account_number:
                del account_state['balance_lock']

            if node := account_state.get('node'):
                node.pop('identifier', None)

        return serialized

    @validates('blockchain state')
    def validate(self, is_initial=False):
        """Validate attributes and all contained account states."""
        self.validate_attributes(is_initial=is_initial)
        self.validate_accounts()

    @validates('blockchain state attributes', is_plural_target=True)
    def validate_attributes(self, is_initial=False):
        """Validate the block-metadata attributes; `is_initial` marks a genesis state."""
        self.validate_last_block_number(is_initial)
        self.validate_last_block_identifier(is_initial)
        self.validate_last_block_timestamp(is_initial)
        self.validate_next_block_identifier(is_initial)

    @validates('blockchain state last_block_number')
    def validate_last_block_number(self, is_initial):
        # Genesis state must have no last block; otherwise a non-negative int is required.
        if is_initial:
            validate_is_none(f'Initial {self.humanized_class_name} last_block_number', self.last_block_number)
        else:
            validate_type(f'{self.humanized_class_name} last_block_number', self.last_block_number, int)
            validate_gte_value(f'{self.humanized_class_name} last_block_number', self.last_block_number, 0)

    @validates('blockchain state last_block_identifier')
    def validate_last_block_identifier(self, is_initial):
        if is_initial:
            validate_is_none(f'Initial {self.humanized_class_name} last_block_identifier', self.last_block_identifier)
        else:
            validate_not_none(f'{self.humanized_class_name} last_block_identifier', self.last_block_identifier)
            validate_type(f'{self.humanized_class_name} last_block_identifier', self.last_block_identifier, str)

    @validates('blockchain state last_block_timestamp')
    def validate_last_block_timestamp(self, is_initial):
        timestamp = self.last_block_timestamp
        if is_initial:
            validate_is_none(f'Initial {self.humanized_class_name} last_block_timestamp', timestamp)
        else:
            validate_not_none(f'{self.humanized_class_name} last_block_timestamp', timestamp)
            validate_type(f'{self.humanized_class_name} last_block_timestamp', timestamp, datetime)
            # Timestamps are stored naive (no tzinfo) by convention.
            validate_is_none(f'{self.humanized_class_name} last_block_timestamp timezone', timestamp.tzinfo)

    @validates('blockchain state next_block_identifier')
    def validate_next_block_identifier(self, is_initial):
        # NOTE(review): unlike last_block_identifier there is no explicit
        # validate_not_none here for the non-initial case — confirm whether
        # validate_type alone rejects None.
        if is_initial:
            validate_is_none(f'Initial {self.humanized_class_name} next_block_identifier', self.next_block_identifier)
        else:
            validate_type(f'{self.humanized_class_name} next_block_identifier', self.next_block_identifier, str)

    @validates('blockchain state accounts', is_plural_target=True)
    def validate_accounts(self):
        # Each account state performs its own validation within a nested context.
        for account, account_state in self.account_states.items():
            with validates(f'blockchain state account {account}'):
                validate_type(f'{self.humanized_class_name} account', account, str)
                account_state.validate()
StarcoderdataPython
1699763
from .fixtures import *
from tenable.errors import *


@pytest.fixture
def targetgroup(request, api):
    """Create a scratch target group and guarantee its removal on teardown."""
    group = api.target_groups.create(str(uuid.uuid4()), ['192.168.0.1'])

    def teardown():
        try:
            api.target_groups.delete(group['id'])
        except NotFoundError:
            # Already deleted by the test itself; nothing to clean up.
            pass

    request.addfinalizer(teardown)
    return group


def check_target_group(t):
    """Validate the schema of a target-group document returned by the API.

    Extracted helper: this block was previously copy-pasted verbatim into
    test_create, test_details and test_edit.
    """
    check(t, 'acls', list)
    for a in t['acls']:
        check(a, 'display_name', str, allow_none=True)
        check(a, 'id', int, allow_none=True)
        check(a, 'name', str, allow_none=True)
        check(a, 'owner', int, allow_none=True)
        check(a, 'permissions', int)
        check(a, 'type', str)
    check(t, 'creation_date', int)
    check(t, 'default_group', int)
    check(t, 'id', int)
    check(t, 'last_modification_date', int)
    check(t, 'members', str)
    check(t, 'name', str)
    check(t, 'owner', str)
    check(t, 'owner_id', int)
    check(t, 'shared', int)
    check(t, 'user_permissions', int)


def test_create_name_typeerror(api):
    with pytest.raises(TypeError):
        api.target_groups.create(False, [])


def test_create_type_typeerror(api):
    with pytest.raises(TypeError):
        api.target_groups.create('nope', [], type=1)


def test_create_type_unexpectedvalue(api):
    with pytest.raises(UnexpectedValueError):
        api.target_groups.create('nope', [], type='nope')


def test_create_acls_typeerror(api):
    with pytest.raises(TypeError):
        api.target_groups.create('nope', [], acls='nope')


def test_create_members_unexpectedvalue(api):
    # An empty member list is rejected by the API wrapper.
    with pytest.raises(UnexpectedValueError):
        api.target_groups.create('nope', [])


def test_create(api, targetgroup):
    assert isinstance(targetgroup, dict)
    check_target_group(targetgroup)


def test_delete_id_typeerror(api):
    with pytest.raises(TypeError):
        api.target_groups.delete('nope')


def test_delete(api, targetgroup):
    # Deletion is exercised implicitly by the fixture teardown.
    pass


def test_details_id_typeerror(api):
    with pytest.raises(TypeError):
        api.target_groups.details('nope')


def test_details(api, targetgroup):
    group = api.target_groups.details(targetgroup['id'])
    assert isinstance(group, dict)
    assert group['id'] == targetgroup['id']
    check_target_group(group)


def test_edit_id_typeerror(api):
    # BUG FIX: this previously called api.target_groups.delete('nope'),
    # duplicating test_delete_id_typeerror and leaving edit()'s id
    # validation completely untested.
    with pytest.raises(TypeError):
        api.target_groups.edit('nope')


def test_edit_name_typeerror(api):
    with pytest.raises(TypeError):
        api.target_groups.edit(1, 1)


def test_edit_acls_typeerror(api):
    with pytest.raises(TypeError):
        api.target_groups.edit(1, acls=False)


def test_edit_type_typeerror(api):
    with pytest.raises(TypeError):
        api.target_groups.edit(1, type=False)


def test_edit_type_unexpectedvalue(api):
    with pytest.raises(UnexpectedValueError):
        api.target_groups.edit(1, type='nope')


def test_edit(api, targetgroup):
    members = targetgroup['members'].split(',')
    members.append('192.168.0.2')
    mod = api.target_groups.edit(targetgroup['id'], members=members)
    assert isinstance(mod, dict)
    check_target_group(mod)
    # The API normalizes the member list to a comma-space separated string.
    assert mod['members'] == ', '.join(members)


def test_list(api):
    assert isinstance(api.target_groups.list(), list)
StarcoderdataPython
1694621
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# **  PTS -- Python Toolkit for working with SKIRT               **
# **  © Astronomical Observatory, Ghent University               **
# *****************************************************************

## \package pts.core.plot.timeline Contains the TimeLinePlotter class, which is used to create timeline diagrams
#  of the different phases of a SKIRT simulation.

# -----------------------------------------------------------------

# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function

# Import standard modules
import numpy as np
import matplotlib.pyplot as plt

# Import the relevant PTS classes and modules
from .plotter import Plotter
from ..tools.logging import log
from ..tools import filesystem as fs

# -----------------------------------------------------------------

# Define the colors for the different simulation phases in the plot
colors = {"setup": 'r',         # setup -> red
          "stellar": 'g',       # stellar emission -> green
          "comm": '#FF7626',    # communication -> orange
          "spectra": 'm',       # spectra calculation -> magenta
          "dust": 'c',          # dust emission -> cyan
          "write": 'y',         # writing -> yellow
          "wait": 'b',          # waiting -> blue
          "other": 'k'}         # other -> black

# Define the names identifying the different phases in the plot
phase_label_names = {"setup": "setup",
                     "stellar": "stellar",
                     "comm": "communication",
                     "spectra": "spectra",
                     "dust": "dust",
                     "write": "write",
                     "wait": "waiting",
                     "other": "other"}

# -----------------------------------------------------------------

class TimeLinePlotter(Plotter):

    """
    An instance of the TimeLinePlotter class is used to create timeline diagrams for the different simulation phases
    """

    def __init__(self):

        """
        The constructor: initializes base-class state and the rank list.
        """

        # Call the constructor of the base class
        super(TimeLinePlotter, self).__init__()

        # -- Attributes --

        # A list of the process ranks (filled in by prepare_data from self.table)
        self.ranks = None

    # -----------------------------------------------------------------

    @staticmethod
    def default_input():

        """
        Return the default input filename for timeline data.
        """

        return "timeline.dat"

    # -----------------------------------------------------------------

    def prepare_data(self):

        """
        Transform self.table (columns: "Process rank", "Simulation phase",
        "Start time", "End time") into self.data, a list of
        [phase, start_times_per_rank, end_times_per_rank] entries.
        """

        # Get a list of the different process ranks
        self.ranks = np.unique(self.table["Process rank"])

        # Initialize the data structure to contain the start times and endtimes for the different processes,
        # indexed on the phase
        self.data = []

        # Iterate over the different entries in the timeline table
        for i in range(len(self.table)):

            # Rank-0 rows define the phase sequence; each one opens a new entry.
            if self.table["Process rank"][i] == 0:

                phase = self.table["Simulation phase"][i]

                # Few special cases where we want the phase indicator to just say 'other'
                if phase is None or phase == "start" or isinstance(phase, np.ma.core.MaskedConstant): phase = "other"

                # Add the data
                self.data.append([phase, [], []])
                self.data[len(self.data) - 1][1].append(self.table["Start time"][i])
                self.data[len(self.data) - 1][2].append(self.table["End time"][i])

            else:

                # NOTE(review): rows of nonzero ranks are matched to phases by
                # i % nphases, which assumes every rank contributes the same
                # phases in the same order — confirm against the table writer.
                nphases = len(self.data)
                self.data[i % nphases][1].append(self.table["Start time"][i])
                self.data[i % nphases][2].append(self.table["End time"][i])

    # -----------------------------------------------------------------

    def plot(self):

        """
        Create the timeline plot and write it to <output_path>/timeline.pdf.
        """

        # Inform the user
        log.info("Making the plots...")

        # Create the plot
        plot_path = fs.join(self.output_path, "timeline.pdf")
        create_timeline_plot(self.data, plot_path, self.ranks)

# -----------------------------------------------------------------

def create_timeline_plot(data, path, procranks, figsize=(12, 8), percentages=False, totals=False, unordered=False, numberofproc=False, cpu=False, title=None):

    """
    This function actually plots the timeline based on a data structure containing the starttimes and endtimes
    for the different simulation phases.

    :param data: list of [phase, start_times_per_rank, end_times_per_rank] entries (see prepare_data)
    :param path: output file path for the figure
    :param procranks: list of process ranks (one bar row per rank)
    :param figsize: matplotlib figure size in inches
    :param percentages: annotate each bar segment with its share of the row total
    :param totals: annotate each row with its total duration
    :param unordered: treat procranks as unordered labels and derive y positions from their sort order
    :param numberofproc: label the y axis "Number of processes" instead of "Process rank"
    :param cpu: label the x axis as CPU time instead of wall time
    :param title: optional plot title (a default is used when None)
    :return: None (the figure is saved to `path` and closed)
    """

    # Initialize figure
    plt.figure(figsize=figsize)
    plt.clf()
    ax = plt.gca()

    legend_entries = []
    legend_names = []
    unique_phases = []  # A LIST OF THE UNIQUE PHASE NAMES

    # Determine the number of processes
    nprocs = len(procranks)

    # Get the ordering
    if unordered: yticks = np.array(procranks).argsort().argsort()
    else: yticks = procranks

    durations_list = []
    totaldurations = np.zeros(nprocs)
    patch_handles = []

    # Make the timeline plot, consisting of a set of bars of the same color for each simulation phase
    for phase, starttimes, endtimes in data:

        durations = np.array(endtimes) - np.array(starttimes)
        durations_list.append(durations)

        totaldurations += durations

        patch_handle = ax.barh(yticks, durations, color=colors[phase], align='center', left=starttimes, alpha=0.8, lw=0)
        patch_handles.append(patch_handle)

        # Register each phase once for the legend; a communication bar is
        # skipped for single-process runs (no communication happens).
        if phase not in unique_phases and not (phase == "comm" and nprocs == 1):

            unique_phases.append(phase)
            legend_entries.append(patch_handle)
            legend_names.append(phase_label_names[phase])

    if percentages:

        # For the different phases (phase is the index into durations_list here)
        for phase, patch_handle in enumerate(patch_handles):

            durations = durations_list[phase]

            for sorting_number, rectangle in enumerate(patch_handle.get_children()):

                duration = durations[sorting_number]
                percentage = float(duration) / float(totaldurations[sorting_number]) * 100.0

                x = 0.5 * rectangle.get_width() + rectangle.get_x()
                y = 0.5 * rectangle.get_height() + rectangle.get_y()

                # Only label segments wide enough to hold the text
                if rectangle.get_width() > 2000:
                    plt.text(x, y, "%d%%" % percentage, ha='center', va='center', fontsize=10)

    if totals:

        # Place the per-row total just right of the last phase's bar
        for sorting_number, rectangle in enumerate(patch_handles[-1].get_children()):

            width = rectangle.get_width()
            label_text = str(int(totaldurations[sorting_number]))
            plt.text(rectangle.get_x() + width + 0.02*rectangle.get_x(), rectangle.get_y() + rectangle.get_height() / 2., label_text, ha="left", va="center", fontsize=10)

    if unordered: plt.yticks(yticks, procranks)
    else:
        ax.set_yticks(procranks)
        ax.set_yticklabels(procranks)

    # Format the axis ticks and labels
    if cpu: ax.set_xlabel('CPU time (s)', fontsize='large')
    else: ax.set_xlabel('Time (s)', fontsize='large')

    if numberofproc: ax.set_ylabel('Number of processes', fontsize='large')
    else: ax.set_ylabel('Process rank', fontsize='large')

    # For a serial run, draw a compact single-row strip without a y axis
    if nprocs == 1:

        ax.set_frame_on(False)
        fig = plt.gcf()
        fig.set_size_inches(10,2)
        ax.xaxis.tick_bottom()
        ax.yaxis.set_visible(False)

    # Shrink current axis's height by 20% on the bottom
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8])

    # Set the plot title
    if title is None: plt.title("Timeline of the different simulation phases")
    else: plt.title(title)

    # Put a legend below current axis
    ax.legend(legend_entries, legend_names, loc='upper center', bbox_to_anchor=(0.5, -0.10), fancybox=True, shadow=False, ncol=4, prop={'size': 12})

    # Save the figure
    plt.savefig(path, bbox_inches="tight", pad_inches=0.40)
    plt.close()

# -----------------------------------------------------------------
StarcoderdataPython
4821514
import math


def compute_trig(angle_degrees):
    """Return (sine, cosine, tangent) of an angle given in degrees.

    :param angle_degrees: angle in degrees
    :return: tuple (sin, cos, tan)
    """
    radians = math.radians(angle_degrees)
    return math.sin(radians), math.cos(radians), math.tan(radians)


if __name__ == '__main__':
    # Prompt (Portuguese): "Enter the angle".
    a1 = float(input('Digite o Ângulo: '))
    s, c, t = compute_trig(a1)
    # Fixed: the original format string had a stray backtick before the
    # tangent placeholder ("... A tangente é `{:.2f}").
    print('\n O seno é {:.2f} \n O cosseno é {:.2f} \n A tangente é {:.2f}'.format(s, c, t))
StarcoderdataPython
3382563
#!/usr/bin/env python3.7 import uuid from ton import generate_addr import sys print(generate_addr(str(uuid.uuid4().hex), sys.argv[1]))
StarcoderdataPython
3203688
#!/usr/bin/python3 import pytest @pytest.fixture(scope="function", autouse=True) def isolate(fn_isolation): # perform a chain rewind after completing each test, to ensure proper isolation # https://eth-brownie.readthedocs.io/en/v1.10.3/tests-pytest-intro.html#isolation-fixtures pass @pytest.fixture(scope="session") def owner(accounts): return accounts[0] @pytest.fixture(scope="session") def minter(accounts): return accounts[1] @pytest.fixture(scope="module") def token(Token, owner, minter): return Token.deploy("Test Token", "<PASSWORD>", 18, 1e21, owner, minter, {'from': owner})
StarcoderdataPython
3262028
<reponame>nl2go/hetzner-invoice
# SQL statements for the `invoices` table.  The `%s` markers are driver-side
# query parameters ('format' paramstyle, e.g. psycopg2 / MySQLdb); values are
# supplied separately by the caller and never interpolated into the string.

# Find rows matching the composite identity (type, description, id, invoice_nr).
search_duplicate = (
    "SELECT * FROM invoices WHERE type = %s AND description = %s AND id "
    "= %s AND invoice_nr = %s "
)

# Update the mutable fields of an existing invoice row, matched on the same
# composite identity used by search_duplicate.
update_record = (
    "UPDATE invoices SET start_date = %s, end_date = %s, quantity = %s, price= %s, last_updated "
    "= %s WHERE type = %s AND description = %s AND id "
    "= %s AND invoice_nr = %s "
)

# Insert a complete invoice row (16 columns, 16 placeholders).
insert_record = (
    "INSERT INTO invoices "
    "(type, description, start_date, end_date, quantity, unit_price, price, cores, memory, disk, "
    "price_monthly, role, id, environment, invoice_nr, last_updated) "
    "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
)
StarcoderdataPython
1630771
<filename>src/bin/shipyard_airflow/tests/unit/plugins/test_get_k8s_logs.py<gh_stars>10-100
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for get_k8s_logs functions"""
from unittest import mock

from shipyard_airflow.plugins.get_k8s_logs import get_pod_logs


class Metadata:
    # Minimal stand-in for the kubernetes client's pod metadata object:
    # only the `name` attribute is read by the code under test.
    def __init__(self, _name):
        self.name = _name


class Pods:
    # Minimal stand-in for a kubernetes pod object: only `metadata.name`
    # is read by the code under test.
    def __init__(self, _name):
        self.metadata = Metadata(_name)


class TestGetK8slogs:
    # mock.patch decorators apply bottom-up: the bottom patch
    # (config.load_incluster_config) maps to the first mock argument
    # (mock_config), the top patch (client.CoreV1Api) to mock_client.
    @mock.patch("shipyard_airflow.plugins"
                ".get_k8s_logs.client.CoreV1Api", autospec=True)
    @mock.patch("shipyard_airflow.plugins"
                ".get_k8s_logs.config.load_incluster_config")
    def test_get_pod_logs(self, mock_config, mock_client):
        """Assert that get_pod_logs picks up accurate pods

        get_pod_logs('armada-api', 'ucp', 'armada-api', 3600)
        First case - old logic to find pods with "in"
        Second case - new logic with "starts"
        """
        test_pods = [
            Pods('armada-api-66d5f59856-h9c27'),
            Pods('armada-api-66d5f59856-42zvp'),
            # this is the offender if we use "in" instead of "startwith"
            Pods('clcp-ucp-armada-armada-api-test'),
            Pods('armada-ks-endpoints-6ztcg')
        ]
        mock_client.return_value \
            .list_namespaced_pod.return_value \
            .items = test_pods

        get_pod_logs('armada-api', 'ucp', 'armada-api', 3600)

        # The two genuine armada-api pods must each have had their logs read.
        # NOTE(review): assert_any_call only proves these calls happened; it
        # does not prove the "offender" pod was excluded — consider also
        # asserting call_count == 2.
        mock_client.return_value \
            .read_namespaced_pod_log \
            .assert_any_call(container='armada-api',
                             name='armada-api-66d5f59856-h9c27',
                             namespace='ucp',
                             pretty='true',
                             since_seconds=3600)

        mock_client.return_value \
            .read_namespaced_pod_log \
            .assert_any_call(container='armada-api',
                             name='armada-api-66d5f59856-42zvp',
                             namespace='ucp',
                             pretty='true',
                             since_seconds=3600)
StarcoderdataPython
1754610
import fileinput


def linear_fuel(crabs, pos):
    """Total fuel to align all crabs at `pos` at 1 fuel per step (part 1)."""
    return sum(abs(pos - c) for c in crabs)


def triangular_fuel(crabs, pos):
    """Total fuel where the n-th step costs n fuel, i.e. triangular numbers (part 2)."""
    total = 0
    for c in crabs:
        delta = abs(pos - c)
        # 1 + 2 + ... + delta == delta * (delta + 1) / 2
        total += delta * (delta + 1) // 2
    return total


def solve(crabs):
    """Return (part_1, part_2): minimum total fuel over every candidate position 0..max.

    :param crabs: non-empty list of crab positions (ints)
    """
    positions = range(max(crabs) + 1)
    part_1 = min(linear_fuel(crabs, p) for p in positions)
    part_2 = min(triangular_fuel(crabs, p) for p in positions)
    return part_1, part_2


if __name__ == '__main__':
    # Fixed for Python 3: the original used Python 2 `print` statements and
    # indexed fileinput.input()[0], which is not supported; read the single
    # comma-separated line with next() instead.
    crabs = [int(x) for x in next(fileinput.input()).split(',')]
    part_1, part_2 = solve(crabs)
    print("Part 1:", part_1)
    print("Part 2:", part_2)
StarcoderdataPython
4824057
<gh_stars>0
# Train a two-layer LSTM to forecast electrical load from a sliding window of
# past samples, then plot predictions against the held-out tail of the series.
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers.recurrent import LSTM
from keras.layers import Dropout

# NOTE(review): error_bad_lines is deprecated in newer pandas
# (use on_bad_lines='skip') — confirm the pinned pandas version.
df = pd.read_csv('data/elec_load.csv', error_bad_lines=False)

plt.subplot()
plot_test, = plt.plot(df.values[:1500], label='Load')
plt.legend(handles=[plot_test])
plt.show()

print(df.describe())

# Hard-coded normalization constants: 145.33 and 338.21 are presumably the
# series mean and max taken from df.describe() — TODO confirm and derive
# them from the data instead of hard-coding.
array = (df.values - 145.33) / 338.21  # minus the mean and divided between the max value

plt.subplot()
plot_test, = plt.plot(array[:1500], label='Normalized Load')
plt.legend(handles=[plot_test])
plt.show()

# Build (window, target) pairs: inputs are the 5 samples i..i+4, shaped
# (5, 1) for the LSTM; the target is sample i+6.
# NOTE(review): sample i+5 is skipped between window and target — confirm
# this one-step gap is intentional and not an off-by-one.
listX = []
listy = []
X = {}
y = {}

for i in range(0, len(array) - 6):
    listX.append(array[i:i + 5].reshape([5, 1]))
    listy.append(array[i + 6])

arrayX = np.array(listX)
arrayY = np.array(listy)

# Fixed chronological split: first 13000 windows for training, next 1000 for test.
X['train'] = arrayX[0:13000]
X['test'] = arrayX[13000:14000]

y['train'] = arrayY[0:13000]
y['test'] = arrayY[13000:14000]

# Build the model
model = Sequential()
model.add(LSTM(units=50, input_shape=(None, 1), return_sequences=True))
model.add(Dropout(0.2))
# NOTE(review): input_shape on a non-first layer is ignored by Keras; the
# (None, 100) here does not match the previous layer's 50 units but has no
# effect — safe to drop.
model.add(LSTM(units=200, input_shape=(None, 100), return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.add(Activation("linear"))
model.compile(loss="mse", optimizer="rmsprop")

# Fit the model to the data
model.fit(X['train'], y['train'], batch_size=512, epochs=10, validation_split=0.08)

# Rescale the test dataset and predicted data (inverse of the normalization above)
test_results = model.predict(X['test'])
test_results = test_results * 338.21 + 145.33
y['test'] = y['test'] * 338.21 + 145.33

plt.figure(figsize=(40, 30))
plot_predicted, = plt.plot(test_results, label='predicted')
plot_test, = plt.plot(y['test'], label='test')
plt.legend(handles=[plot_predicted, plot_test])
plt.show()
StarcoderdataPython
60915
def minesweeper(matrix):
    """Return a grid where each cell holds the sum of the eight squares
    surrounding that cell of *matrix* (i.e. its neighbouring mine count)."""
    n_rows = len(matrix)
    n_cols = len(matrix[0])

    counts = []
    for i in range(n_rows):
        row_counts = []
        for j in range(n_cols):
            total = 0
            # Scan the 3x3 neighbourhood, clipped to the grid, skipping (i, j).
            for di in (-1, 0, 1):
                for dj in (-1, 0, 1):
                    if di == 0 and dj == 0:
                        continue
                    x, y = i + di, j + dj
                    if 0 <= x < n_rows and 0 <= y < n_cols:
                        total += matrix[x][y]
            row_counts.append(total)
        counts.append(row_counts)
    return counts
StarcoderdataPython
3311541
# -*- coding: utf-8 -*- """ # Idle/Jupyter Startup File Defines functions for use in `IDLE`, but so far they seem to work in `python3`, `iPython`, `IDLE`, and `jupyter`. They WON'T work in Python 2! Hopefully this won't cause problems when PYTHONSTARTUP is set to this file's path, but it may very well happen. Many of the objects imported by this module are useful inside an interpreter environment like Jupyter, so it's recommended to import this file by executing it: `exec(Path("startup.py").read_text())` """ from __future__ import annotations from cmd import Cmd from collections import ChainMap, namedtuple from configparser import ConfigParser import cProfile import csv from enum import Enum import fnmatch from functools import singledispatch, singledispatchmethod import glob from importlib import import_module, reload import inspect from inspect import currentframe, getmembers from math import floor, log10 import os from os import chdir, curdir, getenv, listdir, mkdir from pprint import pprint pp = pprint from pathlib import Path import re import shlex from shutil import copy2 as copy, copytree, ignore_patterns, move, rmtree import string from subprocess import check_output import sys from xml.dom.minidom import Document import webbrowser from bash import bash from bs4 import BeautifulSoup #import file as magic from IPython.display import display, HTML import pandas as pd import requests EMPTY = "" PERIOD = '.' NEWLINE = '\n' UNDERSCORE = '_' HYPHEN = '-' COMMA = ',' SPACE = ' ' COLON = ':' SEMICOLON = ';' ELLIPSIS = '.' 
* 3 SLASH = '/' TILDE = '~' ASTERISK = '*' LPAREN = '(' RPAREN = ')' DOLLAR = '$' UTF_8 = 'utf-8' TREE = 'tree' IGNORE_FILES = ['*.pyc', 'setup.py'] IGNORE_DIRS = ["*__pycache__", ".git", "build", "dist", "*.egg-info", ".ipynb_*", ".*", "_*"] IGNORE_PATS = list(set(IGNORE_FILES).union(IGNORE_DIRS)) try: src = requests.get("https://docs.python.org/3/py-modindex.html").text except: src = Path("/usr/share/doc/python3.7-doc/html/py-modindex.html").read_text() class BS(BeautifulSoup): def __init__(self, s): super().__init__(s, features="html.parser") PYTHON_MODULES = None try: PYTHON_MODULES = {a.text.split('.')[0] for a in BS(src).findAll("a") if len(a.text) > 1 and not a.text[0].isupper()} except: path = "usr/lib/python3.8" PYTHON_MODULES = list(sorted([n for n in listdir(path) if not n.startswith(PERIOD)], key=str.lower)) RUNNING_WINDOWS = os.name == 'nt' SITE_PKG_DIR = sys.prefix + os.sep + ('Lib' if RUNNING_WINDOWS else 'lib/python3.8') + os.sep + 'site-packages' PACKAGE_MODULES = sorted([re.sub(".py$", "", n) for n in listdir(SITE_PKG_DIR) if not re.search(".*-info", n)]) BACKUP_SUFFIX = '+' if RUNNING_WINDOWS else "~" class FileType(Enum): UNKNOWN = 0 PYTHON = 1 BASH = 2 FILE_TYPES = { ".py": FileType.PYTHON, ".sh": FileType.BASH } PYTHON_VERSION = '.'.join(sys.version.split()[0].split('.')[:2]) HILITE_ME = "http://hilite.me/api" PYTHON_MOD_INDEX = 0 PACKAGE_MOD_INDEX = 1 LOCAL_MOD_INDEX = 2 FILES = [ "__init__.py", "testing.py", "constants.py", "percentage.py", "startup.py" ] class BS(BeautifulSoup): def __init__(self, s): super().__init__(s, features="html.parser") def get_interpreter(): """ Return the currently active interpreter as a string Enum. 
""" f = currentframe().f_back.f_code.co_filename # print("File:", f) if re.match('<pyshell#\d*>', f): return("idle") elif re.match("<ipython-", f): f2 = currentframe().f_back.f_back if f2 and f2.f_code.co_filename.endswith('IPython/core/interactiveshell.py'): if sys.stdin.isatty(): return "ipython" else: return "jupyter" return "script" if f else "stdin" @singledispatch def num_digits(x: object): print(f"bad argument: num_digits: {x}") @num_digits.register def _(i:int) -> int: """ Assumes that i is an integer and not a float. """ return int(floor(log10(i)) + 1) def get_all(path, ignore=set()): """ Get the names of all the variables, classes and functions defined within a module. """ result = set() if type(path) is str: path = Path(path) lines = path.read_text().split('\n') regex = re.compile("def (\w*)|class (\w*)|(\w*)\s+=") for s in lines: m = regex.match(s) if m: i = 1 while not m.group(i): i += 1 assert i < 4 word = m.group(i) if word and not word.startswith('_'): result.add(word) return sorted(list(result.difference(ignore))) def ignore(filename): """ Return `True` if `filename` is included in the `list` `constants.IGNORE_PATS` and `False` otherwise. >>> ignore('*.pyc') True >>> ignore('*.js') False """ for pat in IGNORE_PATS: if fnmatch.fnmatch(filename, pat): return True return False def run(s: str)->str: """ Run `s` as a command line. `s` is a `str`, not a `list`. """ return check_output(shlex.split(s), encoding=UTF_8) or None def runfile(): """ TODO: write `startup.runfile` """ pass def columnize(s): """ Print the contents of the list `s` in neat-looking columns. 
""" if s: Cmd().columnize(list(s)) else: print("None") def ls(*paths, all=False, recursive=False, out=True): paths = list(paths) if not paths: paths.append(PERIOD) for path in paths: if recursive: value = run(TREE + (" -a" if all else EMPTY) + SPACE + (str(path) if path else EMPTY)) print(value) else: columnize(sorted([n for n in listdir(path if path else Path.cwd()) if ((not n.startswith(PERIOD)) if not all else True) ], key=str.lower)) cd = chdir def tree(path=None, all=False): ls(path, all=all, recursive=True) def pwd(): print(Path.cwd()) def touch(s: str): Path(s).touch() def public(obj): columnize([member for member in dir(obj) if not re.match('_', member)]) def cat(*paths): for path in paths: if type(path) is str: path = Path(path) if path.exists() and path.is_file(): print(path.read_text()) def find(pattern="*", topdir=Path.cwd(), recursive=True): """ Print the path to any files whose names match `pattern` in any subdirectory of `topdir` if `recursive` is `True` and only `topdir` if it's `False`. """ paths = set() if type(topdir) is str: topdir=Path(topdir) if not recursive: paths = {name for name in listdir() if fnmatch.fnmatch(name, pattern)} # pp([name for name in listdir() if fnmatch.fnmatch(name, pattern)]) else: for path in topdir.iterdir(): if path.is_dir(): paths = paths.union(find(pattern, path, recursive)) elif path.is_file() and fnmatch.fnmatch(path.name, pattern): # print(path) paths.add(path) return paths @singledispatch def grep(PATTERN, # `str`, a compiled pattern or a list of either *GLOBS, # `str` representing a `glob` pattern or a list of them **options): """ Search for `PATTERN: re.Pattern` in each file whose name matches a pattern in `FILES`. 
""" try: return grep(re.compile(str(PATTERN)), *GLOBS, **options) except: print(f"ERROR: grep: bad argument type: {type(PATTERN)}") grep.DEFAULTS = { "fixed": False, # fixed list of strings "basic": True, # PATTERN is a basic regular expression "perl": False, # PATTERN is a Perl regexp "pattern": None, # use this as the pattern "file": None, # get a list of patterns from the file "case": True, # default search displays case-sensitivity "invert": False, # return non-matching lines "word": False, # select only lines that match whole words "line": False, # select only lines that match entirely "count": False, # print only a count of matching lines for each file "color": False, # print colored output if possible "match": False, # output filenames with matches "matchless": False, # output filenames without matches "max": None, # stop reading a file after N matches "only": False, # print only the matched (non-empty) parts of a matching line, each such part on a separate output line "quiet": False, # don't output anything "errors": True, # output error messages for non-existent or unreadable files "bytes": False, # output the byte offset with respect to the file before each line "fname": True, # print the file name for each match # default to True if searching more than one file # default to False if there is only one file or only stdin to search "label": None, # display input from stdin as coming from the given file "number": True, # display the line number of each match "tab": None, # if True, prefix actual line output with a tab # if a `str`, use it instead of a tab "text": False, # process a binary file as if it were text "binary": None, # assume that binary files are of the given type "device": input, # action to process a device "directory": grep, # action to process a directory "exclude": list(), # list of filename patterns to be excluded or a Path to a file that lists them "include": list(), # list of filename patterns to be included in the search "recursive": 
False, # read all files under each directory recursively "follow": False, # follow all symbolic links } @grep.register def _(PATTERN: str, *args, **kwargs): return grep(re.compile(PATTERN), *args, **kwargs) @grep.register def _(PATTERN: list, *args, **kwargs): value = list() for p in PATTERN: value.extend(grep(p, *args, **kwargs)) return value @grep.register def _(PATTERN: re.Pattern, # `str`, a compiled pattern or a list of either *GLOBS, # `str` representing a `glob` pattern or a list of them **options # keyword arguments ): """ Search for `PATTERN: re.Pattern` in each file whose name matches a pattern in `FILES`. """ options = ChainMap(options, grep.DEFAULTS) results = dict() for g in GLOBS: # print(f"{g=}") for f in iglob(g, recursive=options["recursive"]): f = Path(f) # print(f"{f=}") try: value = list() if f.is_dir() and options["recursive"]: value.extend(grep(PATTERN, f.name, **options)) elif f.is_file() and not any([fm.fnmatch(f.name, p) for p in options["exclude"]]): for i, line in enumerate(f.read_text().split('\n')): if PATTERN.search(line): value.append(GrepMatch(i, line)) results[f] = value except UnicodeDecodeError: print(f"unicode error: file {f}") if not options["quiet"]: for k, v in results.items(): print(f"File: {k}\n") for i, line in enumerate(v): print(f"{v[i].lineno}: {v[i].line}") print() else: return results def cut(inList, delim=SPACE, indices=[]): if issubclass(type(inList), str): inList = Path(inList) if issubclass(type(inList), Path): inList = inList.read_text().split('\n') if not indices: indices = [i for i in range(len(inList))] result = None if inList: result = list() for l in inList: subString = str() for w in l.split(delim): for i in indices: if type(i) is int: subString += w else: for i in range(indices[i][0], indices[i][1]): subString += SPACE + w result.append(subString) return result SearchAndReplace = namedtuple("SearchAndReplace", ["pattern", "replace"]) class Replacement(SearchAndReplace): pass class ReplacementList(list): 
pass def sed(lines, *args, all=True, inplace=False): """ Substitute occurences of `pattern` with `replace` in the file `path`. """ if issubclass(type(lines), Path): text = lines.read_text().split('\n') else: text = copy(lines) if type(args[0]) is str: arguments = ReplacementList() for i in range(0, len(args), 2): repl = Replacement(args[i], args[i+1]) arguments.append(repl) args = arguments for i, sr in enumerate(args): print(f"Pattern: {sr.pattern}") print(f"Replacement: {sr.replace}") for j, line in enumerate(text): line2 = re.sub(sr.pattern, sr.replace, line) print(f"{line} ==>\n{line2}") text[j] = line2 if line2 != line and not all: break text = '\n'.join(text) print(text) if issubclass(type(lines), Path) and inplace: Path(lines.parent / (lines.name + inplace)).write_text(lines.read_text()) print("Writing sed output to file") lines.write_text(text) def sort(): pass def uniq(): pass def profile(s): cProfile.run(s) def backup(d:Path=Path.cwd())->Path: """ Copy the entire current directory to `../<DIR>~/`. """ BACKUP_DIR_NAME = str(d) + BACKUP_SUFFIX if Path(BACKUP_DIR_NAME).exists(): rmtree(BACKUP_DIR_NAME) copytree(str(d), BACKUP_DIR_NAME, ignore=ignore_patterns(*IGNORE_PATS)) print(f"{BACKUP_DIR_NAME=}") print(f"{d=}") return Path(BACKUP_DIR_NAME) def clean(s=None): """ Clean the current working directory by default or the one targeted by `s`. """ def config(path): """ Return a dictionary containing the keys and values of the given file. """ if type(path) is str: path = Path(path) if path.exists(): cp = ConfigParser() cp.read(str(path)) return dict(cp["DEFAULT"]) else: return None def docs(obj=None): """ Some automatic detective work could be done here to determine where to find the latest documentation. # TODO: * Determine which version of Python is actually running. * Search `/usr/share/doc` for `python-{version}-doc/html/index.html`. * If that file exists, open it in a Web browser. * If that file doesn't exist, try to open its remote counterpart online. 
* If that fails, work backwards through 3.7, 3.6, 3.5, etc. until a * suitable version can be found. * If all that fails, then the computer is not hooked up to the Internet and has no online documentation installed locally. Pydoc might have an answer for that. """ if not obj: webbrowser.open("file:///usr/share/doc/python3.7/html/index.html") else: #TODO: Fix this because the line below only works for modules or something like that. run("pydoc3 -w {} ".format(str(type(obj)))) def open_src_file(s): if issubclass(type(s), Path): s = str(s) run("atom" + SPACE + s) def slurp(s): if issubclass(type(s), str): s = Path(s) return s.read_text() return None def get_imports(path=Path.cwd(), recursive=True): """ Return a `set` containing the names of the modules imported by `path`. """ FILES = glob.iglob(str(path / "*.py"), recursive=recursive) LOCAL_MODULES = sorted([n.stem for n in FILES if not n.stem.startswith(PERIOD)], key=str.lower) # print(f'{LOCAL_MODULES=}') results = [set(), set(), set()] if type(path) is str: path = Path(path) if path.is_dir(): for f in find("*.py", path): result = get_imports(f) results = [r.union(result[i]) for i, r in enumerate(results)] else: result = set() lines = path.read_text().split('\n') regex = re.compile("\s*import (\w*)|\s*from (\w*)") for s in lines: m = regex.match(s) if m: i = 1 while not m.group(i): i += 1 assert i < 4 word = m.group(i) if word: m2 = re.search(r'(\w*)\.\w*', word) if m2: word = m2.group(1) result.add(word) for r in result: if r in PYTHON_MODULES: results[0].add(r) elif r in LOCAL_MODULES: results[2].add(r) else: # if not r == "py" results[1].add(r) results = [sorted(list(r)) for r in results] return results def print_imports(path=Path.cwd()): results = get_imports(path) if len(results[0]): print("Python Modules:") print() columnize(results[0]) print() if len(results[1]): print("Packages:") print() columnize(results[1]) print() if len(results[2]): print("Local Modules:") print() columnize(results[2]) print() def 
csv2html(path=None, code=False): """ Read a (specially designed) CSV file and return it as HTML. TODO: Handle the first row specially and optionally. """ if type(path) is str: path = Path(path) with path.open() as f: reader = csv.reader(f) output = '<table>' for i, row in enumerate(reader): if code: row[0] = "<code>" + row[0] + "</code>" output+=('<tr><td>{}</td></tr>\n' .format("</td><td>".join(row))) output+=("</table>\n") # print( output) return output def hilite_src_lines(obj): codeStr = inspect.getsource(obj) hilite_params = { "code": codeStr } return requests.post(HILITE_ME, hilite_params).text def get_desc(s): """ Return the first line of the docstring of the object named by `s`. """ print(s) print() print(f"{vars().keys()=}") return inspect.getdoc(vars()[s]).split('\n')[0] def describe(p: Path) -> str: """ Return a HTML table of function names and their descriptions. Get the description of each function from the first line of its docstring. """ rowData = list() desc = '' for s in get_all(Path("startup.py")): if not s[0].isupper(): desc = inspect.getdoc(globals()[s]) if desc: desc = desc.split('\n')[0] if desc else '' rowData.append((s, desc)) doc = Document() table = doc.createElement("table") # doc.appendChild(table) for d in rowData: row = doc.createElement("tr") cell = doc.createElement("td") tag = doc.createElement("code") link = doc.createElement("a") text = doc.createTextNode(d[0]) p2 = Path("d[0]") tag.appendChild(text) cell.appendChild(tag) row.appendChild(cell) cell = doc.createElement("td") tag = doc.createElement("p") text = doc.createTextNode(d[1] if d[1] else '') tag.appendChild(text) cell.appendChild(tag) row.appendChild(cell) table.appendChild(row) return table.toxml() def hilite_definition(obj): if not type(obj) is str: obj = obj.__name__ src = grep(obj + " = ", files='*.py', quiet=True) for k, v in src.items(): if k.name in FILES: line = v[1] hilite_params = {'code': line} LOCATION = f"<p><code>{obj}</code> is defined in 
<code>{k.name}</code>.</p>\n" return LOCATION + requests.post(HILITE_ME, hilite_params).text return None def get_class_names(p:Path)->list: return [s for s in get_all(p) if s[0].isupper() and not s[1].isupper()] def highlight(obj): display(HTML(hilite_definition(obj) if type(obj) is str else hilite_src_lines(obj))) if __name__ == '__main__': print("hello world")
StarcoderdataPython
1656973
# Join the word fragments in order, then shout the result.
sounds = ["super", "cali", "fragil", "istic", "expi", "ali", "docious"]
result = "".join(sounds).upper()
print(result)
StarcoderdataPython
1714965
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base template config for pre-training and fine-tuning.""" import enum import ml_collections class ModelArchitecture(enum.Enum): """Determines model architecture - in particular, the mixing layer.""" BERT = 'bert' F_NET = 'f_net' FF_ONLY = 'ff_only' # Feed forward sublayers only; no token mixing LINEAR = 'linear' # Matrix multiplications with learnable weights RANDOM = 'random' # Constant, random matrix multiplications class TrainingMode(enum.Enum): """Determines type of training.""" PRETRAINING = 'pretraining' CLASSIFICATION = 'classification' def get_config(): """Base config for training models.""" config = ml_collections.ConfigDict() # How often to save the model checkpoint. config.save_checkpoints_steps: int = 1000 # Frequency fo eval during training, e.g. every 1000 steps. config.eval_frequency: int = 1000 # Total batch size for training. config.train_batch_size: int = 32 # Total batch size for eval. config.eval_batch_size: int = 8 # The base learning rate for Adam. config.learning_rate: float = 1e-4 # Initial checkpoint directory (usually from a pre-trained model). config.init_checkpoint_dir: str = '' # Whether to lower case the input text. Should be True for uncased models and # False for cased models. config.do_lower_case: bool = True # Model parameters. 
# For pre-training, we only need 2 segment types (for NSP), but we allow up to # 4 for GLUE/SuperGLUE fine-tuning. config.type_vocab_size: int = 4 # Embedding dimension for each token. config.d_emb: int = 768 # Hidden dimension of model. config.d_model: int = 768 # Hidden dimension for feed-forward layer. config.d_ff: int = 3072 # The maximum total input sequence length after tokenization. Sequences longer # than this will be truncated, and sequences shorter than this will be padded. config.max_seq_length: int = 512 # Number of self-attention heads. Only used for BERT models. config.num_heads: int = 12 # Number of model blocks / layers. config.num_layers: int = 12 # Regular dropout rate, applied throughout model. config.dropout_rate: float = 0.1 # Dropout rate used in mixing module, e.g. self-attention sublayer. config.mixing_dropout_rate: float = 0.1 # Determines how discrete Fourier Transforms are computed. Only used for FNet # models. Set to true if running on TPU hardware, in which case matrix # multiplications will be favored for relatively shorter input sequences. Set # to false for GPU/CPU hardware, in which case FFTs are used for all input # sequence lengths. config.use_tpu_fourier_optimizations: bool = False # Dummy parameter for repeated runs. config.trial: int = 0 return config
StarcoderdataPython
155230
# Auto-generated Django migration (makemigrations output).  Once this has been
# applied to any shared database it must not be edited by hand -- create a new
# migration instead.
# Generated by Django 2.2.1 on 2019-10-21 03:24

from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):
    """Creates the `Imagen` model: three image upload fields plus flags."""

    dependencies = [
        # Must run after example/0003_example2.
        ('example', '0003_example2'),
    ]

    operations = [
        migrations.CreateModel(
            name='Imagen',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('model_pic1', models.ImageField(upload_to='pic_folder/')),
                ('model_pic2', models.ImageField(upload_to='pic_folder/')),
                ('model_pic3', models.ImageField(upload_to='pic_folder/')),
                # Presumably a soft-delete flag (rows marked, not removed) --
                # not confirmed from this migration alone.
                ('delete', models.BooleanField(default=False)),
                # Creation timestamp; timezone.now is passed as a callable, so
                # it is evaluated at insert time, not at migration time.
                ('create', models.DateTimeField(default=django.utils.timezone.now)),
            ],
            options={
                # Explicit table name instead of the default "example_imagen".
                'db_table': 'Imagen',
            },
        ),
    ]
StarcoderdataPython
3277955
# Read an integer and print it converted to a user-chosen base:
#   option 1 -> binary, 2 -> octal, 3 -> hexadecimal.

# Dispatch table mapping each menu option to (label, conversion function);
# replaces the original if/elif chain.
_CONVERSIONS = {
    1: ('Binário', bin),
    2: ('Octal', oct),
    3: ('Hexadecimal', hex),
}


def converte(numero, base):
    """Return the formatted conversion message for *numero*.

    *base* is the menu option (1=binary, 2=octal, 3=hexadecimal); any other
    value yields the invalid-option message, exactly as the original script.
    """
    if base in _CONVERSIONS:
        nome, funcao = _CONVERSIONS[base]
        return '{} de {} é {}'.format(nome, numero, funcao(numero))
    return 'Opção inválida'


def main():
    """Prompt for the number and target base, then print the conversion."""
    numero = int(input('\nDigite um número inteiro:'))
    base = int(input('Escolha a base para a conversão:\n1 - Binário\n2 - Octal\n3 - Hexadecimal\n'))
    print(converte(numero, base))


# BUG FIX: the original called input() at module scope, so merely importing
# the file blocked on stdin; guard the interactive part behind __main__.
if __name__ == '__main__':
    main()
StarcoderdataPython
3358920
from asyncio import CancelledError
from typing import Any, Coroutine, Union


class BaseHooks:
    """
    Interface for implementation of hooks.

    Subclass and override any subset of the ``on_*`` callbacks; the defaults
    do nothing.  Instances combine with the ``&`` operator into a
    :class:`MultipleHooks` that fans every callback out to each member in
    combination order.
    """

    def on_apply_for(self, coro: Coroutine[Any, Any, Any], ident: str) -> None:
        """ Calls when ``async_reduce`` apply to coroutine. """

    def on_executing_for(
            self, coro: Coroutine[Any, Any, Any], ident: str
    ) -> None:
        """ Calls when coroutine executing as aggregated coroutine. """

    def on_reducing_for(
            self, coro: Coroutine[Any, Any, Any], ident: str
    ) -> None:
        """ Calls when coroutine reduced to aggregated coroutine. """

    def on_result_for(
            self, coro: Coroutine[Any, Any, Any], ident: str, result: Any
    ) -> None:
        """ Calls when aggregated coroutine returns value. """

    def on_exception_for(
            self,
            coro: Coroutine[Any, Any, Any],
            ident: str,
            exception: Union[Exception, CancelledError]
    ) -> None:
        """ Calls when aggregated coroutine raises exception. """

    def __and__(self, other: 'BaseHooks') -> 'MultipleHooks':
        if isinstance(other, MultipleHooks):
            # BUG FIX: the original returned ``other & self``, which appended
            # ``self`` *after* the already-gathered hooks and thus broke the
            # documented "addition sequence" call order for
            # ``base & multiple`` (it also mutated the right operand).  Build
            # a fresh aggregate with ``self`` first instead.
            return MultipleHooks(self, *other.hooks_list)
        return MultipleHooks(self, other)


class MultipleHooks(BaseHooks):
    """
    Internal class to gather multiple hooks (via operator `&`).

    Each hook will be called in the addition sequence.
    """

    def __init__(self, *hooks: BaseHooks) -> None:
        self.hooks_list = [*hooks]

    def __and__(self, other: BaseHooks) -> 'MultipleHooks':
        # NOTE: combining extends this aggregate in place and returns it.
        if isinstance(other, MultipleHooks):
            self.hooks_list.extend(other.hooks_list)
            return self
        self.hooks_list.append(other)
        return self

    def __len__(self) -> int:
        """ Count of gathered hooks """
        return len(self.hooks_list)

    def on_apply_for(self, coro: Coroutine[Any, Any, Any], ident: str) -> None:
        for hooks in self.hooks_list:
            hooks.on_apply_for(coro, ident)

    def on_executing_for(
            self, coro: Coroutine[Any, Any, Any], ident: str
    ) -> None:
        for hooks in self.hooks_list:
            hooks.on_executing_for(coro, ident)

    def on_reducing_for(
            self, coro: Coroutine[Any, Any, Any], ident: str
    ) -> None:
        for hooks in self.hooks_list:
            hooks.on_reducing_for(coro, ident)

    def on_result_for(
            self, coro: Coroutine[Any, Any, Any], ident: str, result: Any
    ) -> None:
        for hooks in self.hooks_list:
            hooks.on_result_for(coro, ident, result)

    def on_exception_for(
            self,
            coro: Coroutine[Any, Any, Any],
            ident: str,
            exception: Union[Exception, CancelledError]
    ) -> None:
        for hooks in self.hooks_list:
            hooks.on_exception_for(coro, ident, exception)
StarcoderdataPython
3223691
# -*- coding: utf-8 -*-
"""Daily check-in automation for China Mobile's CaiYun (139 cloud) portal.

Signs in with cookies taken from the check-item config, scrapes the account
name from the sign-in page and posts an RSA-encrypted "receive" ticket to
collect the daily sign-in reward.
"""
import base64
import json
import os
import re
from urllib import parse

import requests
from requests import utils  # makes requests.utils available in main() below
import rsa


class CaiYunCheckIn:
    def __init__(self, check_item):
        # check_item: dict-like config entry; must provide "caiyun_cookie".
        self.check_item = check_item
        # RSA public key used to encrypt the sign-in ticket (value redacted
        # in this copy of the source).
        self.public_key = """-----<KEY>"""

    @staticmethod
    def get_encrypt_time(session):
        """Ask the portal for its current time in millis; return 0 on failure."""
        payload = parse.urlencode({"op": "currentTimeMillis"})
        resp = session.post(
            url="https://caiyun.feixin.10086.cn:7071/portal/ajax/tools/opRequest.action",
            data=payload
        ).json()
        if resp.get("code") != 10000:  # 10000 is the portal's success code
            print("获取时间戳失败: ", resp["msg"])
            return 0
        return resp.get("result", 0)

    def get_ticket(self, session):
        """Build the base64-encoded, RSA-encrypted sign-in ticket.

        The JSON payload is encrypted in 117-byte chunks -- presumably the
        PKCS#1 v1.5 limit of a 1024-bit key; confirm against the real key --
        and the concatenated ciphertext is base64-encoded.
        """
        payload = json.dumps({"sourceId": 1003, "type": 1, "encryptTime": self.get_encrypt_time(session=session)})
        pubkey = rsa.PublicKey.load_pkcs1_openssl_pem(self.public_key)
        crypto = b""
        # Number of whole 117-byte chunks; forced to at least 1 for payloads
        # shorter than one chunk.
        divide = int(len(payload) / 117)
        divide = divide if (divide > 0) else divide + 1
        # One extra chunk when the payload is not an exact multiple of 117.
        line = divide if (len(payload) % 117 == 0) else divide + 1
        for i in range(line):
            crypto += rsa.encrypt(payload[i * 117: (i + 1) * 117].encode(), pubkey)
        crypto1 = base64.b64encode(crypto)
        return crypto1.decode()

    @staticmethod
    def user_info(session):
        """Scrape the logged-in account name out of the sign-in page HTML."""
        resp = session.get(url="https://caiyun.feixin.10086.cn:7071/portal/newsignin/index.jsp").text
        account = re.findall(r"var loginAccount = \"(.*?)\";", resp)
        if account:
            account = account[0]
        else:
            account = "未获取到用户信息"
        return account

    def sign(self, session):
        """Submit the encrypted ticket; return a human-readable result string."""
        ticket = self.get_ticket(session=session)
        payload = parse.urlencode({"op": "receive", "data": ticket})
        resp = session.post(
            url="https://caiyun.feixin.10086.cn:7071/portal/ajax/common/caiYunSignIn.action",
            data=payload,
        ).json()
        if resp["code"] != 10000:
            msg = "签到失败:" + resp["msg"]
        else:
            msg = f'月签到天数: {resp["result"]["monthDays"]}\n当前总积分:{resp["result"]["totalPoints"]}'
        return msg

    def main(self):
        """Run one check-in: prepare the session, fetch user info, sign in."""
        # Parse the "k1=v1; k2=v2" cookie string from the config into a dict.
        caiyun_cookie = {
            item.split("=")[0]: item.split("=")[1]
            for item in self.check_item.get("caiyun_cookie").split("; ")
        }
        session = requests.session()
        requests.utils.add_dict_to_cookiejar(session.cookies, caiyun_cookie)
        # Mimic the official Android app so the portal accepts the requests.
        session.headers.update(
            {
                "User-Agent": "Mozilla/5.0 (Linux; Android 10; M2007J3SC Build/QKQ1.191222.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/83.0.4103.106 Mobile Safari/537.36 MCloudApp/7.6.0",
                "Content-Type": "application/x-www-form-urlencoded",
                "Origin": "https://caiyun.feixin.10086.cn:7071",
                "Referer": "https://caiyun.feixin.10086.cn:7071/portal/newsignin/index.jsp",
            }
        )
        username = self.user_info(session=session)
        sign_msg = self.sign(session=session)
        msg = f"用户信息: {username}\n{sign_msg}".strip()
        return msg


if __name__ == "__main__":
    # Manual run: read the first CAIYUN_COOKIE_LIST entry from
    # <repo root>/config/config.json (one directory above this file).
    with open(
            os.path.join(os.path.dirname(os.path.dirname(__file__)), "config/config.json"), "r", encoding="utf-8"
    ) as f:
        datas = json.loads(f.read())
    _check_item = datas.get("CAIYUN_COOKIE_LIST", [])[0]
    print(CaiYunCheckIn(check_item=_check_item).main())
StarcoderdataPython
36937
from __future__ import absolute_import
from google.appengine.ext import ndb
from ferris.core.controller import Controller, route, route_with
from oauth2client.client import OAuth2WebServerFlow
from ferris.core.oauth2.user_credentials import UserCredentials as OAuth2UserCredentials
from ferris.core import settings


class Oauth(Controller):
    """Ferris controller that drives the Google OAuth2 web-server flow.

    ``start`` builds the authorization URL from a stored session entity and
    redirects the user to it; ``callback`` exchanges the returned code for
    credentials, stores them, and cleans up the session.
    """

    @route
    def start(self, session):
        # `session` arrives as an urlsafe ndb key string; load the entity.
        config = settings.get('oauth2')
        session = ndb.Key(urlsafe=session).get()
        callback_uri = self.uri(action='callback', _full=True)

        flow = OAuth2WebServerFlow(
            client_id=config['client_id'],
            client_secret=config['client_secret'],
            scope=session.scopes,
            redirect_uri=callback_uri)

        # Round-trip the session key through OAuth's `state` parameter so the
        # callback can find the same session entity again.
        flow.params['state'] = session.key.urlsafe()

        if session.admin or session.force_prompt:
            # Force the consent prompt for admin or explicitly-flagged sessions.
            flow.params['approval_prompt'] = 'force'

        uri = flow.step1_get_authorize_url()

        # Persist the flow on the session so callback() can complete step 2.
        session.flow = flow
        session.put()

        return self.redirect(uri)

    @route_with(template='/oauth2callback')
    def callback(self):
        # Recover the session entity from the `state` parameter set in start().
        session = ndb.Key(urlsafe=self.request.params['state']).get()
        credentials = session.flow.step2_exchange(self.request.params['code'])

        OAuth2UserCredentials.create(
            user=self.user,
            scopes=session.scopes,
            credentials=credentials,
            admin=session.admin
        )
        session.key.delete()  # No need for the session any longer
        # NOTE(review): the in-memory entity is still readable after the
        # datastore delete, so the `session.redirect` access below works.
        return self.redirect(str(session.redirect))
StarcoderdataPython
1758974
from setuptools import setup, find_packages

# Read the long description from the README so PyPI renders the project page.
# BUG FIX: pin the encoding -- without it, open() uses the platform locale,
# and a UTF-8 README breaks installs on e.g. Windows/cp1252 systems.
with open("README.md", "r", encoding="utf-8") as fh:
    long_desc = fh.read()

setup(
    name='sshepherd',
    version="0.2",
    packages=["sshepherd"],
    package_dir={'': "src"},
    scripts=['scripts/sshepherd'],
    author="George",
    author_email="<EMAIL>",
    description="SSHepherd: Automated SSH User Management",
    long_description=long_desc,
    long_description_content_type="text/markdown",
    url="https://github.com/drpresq/sshepherd",
    # Runtime dependencies.
    install_requires=[
        "paramiko>=2.7.2",
        "scp==0.13.6"
    ],
    # Development-only extras: pip install sshepherd[dev]
    extras_require={
        'dev': [
            'pytest>=6.2.4'
        ]
    },
    keywords="",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Utilities'
    ],
)
StarcoderdataPython
3304766
import numpy as np
import time
from greensconvolution.greensconvolution_fast import greensconvolution_integrate
from greensconvolution.greensconvolution_fast import greensconvolution_greensfcn_curved
from greensconvolution.greensconvolution_calc import read_greensconvolution

# Kernel used for the timed GPU run. Rough laptop timings:
#   opencl_interpolator     ~18 s
#   openmp_interpolator     ~19 s
#   opencl_quadpack         ~1 min 10 s
#   opencl_simplegaussquad  very slow
gc_kernel = "opencl_interpolator"

# Material constants (composite laminate).
composite_k = .138      # W/m/deg K
composite_rho = 1.57e3  # density passed as rho to greensconvolution_integrate
composite_c = 730       # J/kg/deg K

# Evaluation grids, float32 to match the compute kernels.
zrange = np.arange(.1e-3, 5e-3, .002e-3, dtype='f')    # step was .02e-3
xrange = np.arange(-20e-3, 20e-3, .001e-3, dtype='f')  # step was .1e-3
zrange = zrange.reshape(zrange.shape[0], 1, 1)
xrange = xrange.reshape(1, xrange.shape[0], 1)
rrange = np.sqrt(xrange ** 2.0 + zrange ** 2.0)
trange = np.arange(10e-3, 10.0, 1.0, dtype='f')        # step was 10e-3
trange = trange.reshape(1, 1, trange.shape[0])

greensconvolution_params = read_greensconvolution()
greensconvolution_params.get_opencl_context("GPU")

# Reference run on the CPU interpolator kernel.
cpu_t0 = time.time()
cpu_result = greensconvolution_integrate(
    greensconvolution_params, zrange, rrange, trange, 0.0,
    composite_k, composite_rho, composite_c, 1.0, (),
    kernel="openmp_interpolator")
cpu_elapsed = time.time() - cpu_t0

# Timed run on the selected kernel.
gpu_t0 = time.time()
result = greensconvolution_integrate(
    greensconvolution_params, zrange, rrange, trange, 0.0,
    composite_k, composite_rho, composite_c, 1.0, (),
    kernel=gc_kernel)
elapsed = time.time() - gpu_t0

# Evaluations per second for each run.
GPUrate = rrange.shape[0] * rrange.shape[1] * trange.shape[2] / elapsed
CPUrate = np.prod(cpu_result.shape) / cpu_elapsed

# Extrapolation: 24x22 mm tiles at 0.5 mm point spacing -> 2112 surface points;
# assume 1000 frames -> 2,112,000 matrix rows by 1240 columns ~= 2.62e9 evals.
# A laptop with an Intel GPU (spec'd ~325 Gflops) needed ~9000 s.
computetime = (24.0 * 22.0 / 0.5 / 0.5) * 1000. * 1240. / GPUrate
print("Needed Compute time=%f s" % (computetime))
print("Rate: %f/usec" % (GPUrate / 1e6))
print("CPU Rate: %f/usec" % (CPUrate / 1e6))
StarcoderdataPython
142453
""" Multi-device matrix multiplication using parla with cupy as the kernel engine. """ import sys import time import numpy as np import cupy as cp from parla import Parla, get_all_devices from parla.array import copy, clone_here from parla.cpu import cpu from parla.cuda import gpu from parla.function_decorators import specialized from parla.ldevice import LDeviceSequenceBlocked from parla.tasks import spawn, TaskSpace, CompletedTaskSpace, reserve_persistent_memory from parla.parray import asarray_batch def main(): @spawn(placement=cpu) async def main_task(): ngpus = cp.cuda.runtime.getDeviceCount() repetitions = int(sys.argv[1]) # set up two n x n arrays to multiply together. # n is chosen so that all three can be # stored within the memory of a single GPU # so that strong scaling numbers make sense. n = 32000 blocks = ngpus block_size = n // ngpus h_ordr = 'C' d_ordr = 'F' print("BlockSize: ", block_size, "GPUS: ", ngpus) # Overdecomposing doesn't actually seem to help in this case # with the current parla runtime. This may be related to # some weirdness within the scheduler though, so # we can leave the code for blocks in-place for further # testing later. np.random.seed(0) a_cpu = np.random.rand(n, n).astype(dtype=np.float32, order=h_ordr) b_cpu = np.random.rand(n, n).astype(dtype=np.float32, order=h_ordr) print("Finished Data Allocation", flush=True) # Partition the two arrays and set up the # partitioned array where the result will be stored. # This could also be done using a parla mapper object. a_part = [] b_part = [] c_part = [] distribute=True reset=True fixed_placement=False verbose=False sync=False time_list = list() # Start all operans from CPU memory. 
for i in range(blocks): if distribute: with cp.cuda.Device(i): a_part.append(cp.asarray(a_cpu[i * block_size : (i + 1) * block_size], order=d_ordr)) b_part.append(cp.asarray(b_cpu[i * block_size : (i + 1) * block_size], order=d_ordr)) cp.cuda.stream.get_current_stream().synchronize() else: a_part.append(a_cpu[i * block_size : (i + 1) * block_size]) b_part.append(b_cpu[i * block_size : (i + 1) * block_size]) for i in range(blocks): c_part.append(list()) for j in range(blocks): c_part[i].append(np.empty((0, 0), dtype=np.float32, order=h_ordr)) #print(len(c_part), len(c_part[0]), c_part[0][0].shape) # 1. NEW: convert to parray in batch a_part, b_part = asarray_batch(a_part, b_part) c_part = asarray_batch(c_part) #print(len(c_part), len(c_part[0])) for repetition in range(repetitions): #reset cblocks to None for i in range(blocks): for j in range(blocks): c_part[i][j].update(np.empty((0, 0), dtype=np.float32, order=h_ordr)) if reset: #reset coherence to only be in starting locations rspace = TaskSpace("reset") for i in range(blocks): @spawn(rspace[i], placement=gpu(i%ngpus), memory=2*block_size*n, inout=[a_part[i], b_part[i]]) def reset_task(): a_part[i].update(a_part[i].array) b_part[i].update(b_part[i].array) await rspace matmul = TaskSpace("matmul") start = time.perf_counter() for i in range(blocks): for j in range(blocks): a_block = a_part[i] b_block = b_part[j] c_block = c_part[i][j] memsize = (block_size**2)*4 if fixed_placement: loc = gpu(i%ngpus) else: loc = gpu @spawn(matmul[i, j], placement = loc, memory=memsize, input=[a_block, b_block], output=[c_block]) def matmul_task(): a = a_block.array b = b_block.array c = c_block.array stream = cp.cuda.get_current_stream() stream.synchronize() assert(a.device.id == b.device.id) if verbose: print(f"+({i}, {j}): ", a.shape, b.shape, c.shape, " | On Device: ", cp.cuda.runtime.getDevice(), a.device.id, flush=True) local_start = time.perf_counter() c = a @ b.T if sync: stream.synchronize() local_end = time.perf_counter() 
c_block.update(c) c = c_block.array if verbose: print(f"-({i}, {j}): ", a.shape, b.shape, c.shape, " | Elapsed: ", local_end-local_start, flush=True) await matmul stop = time.perf_counter() print(f"Iteration {repetition} | Time elapsed: ", stop - start, flush=True) time_list.append(stop-start) mean = np.mean(np.array(time_list)) median = np.median(np.array(time_list)) print(f"Execution Time:: Average = {mean} | Median = {median}", flush=True) if __name__ == "__main__": with Parla(): main()
StarcoderdataPython
3260835
from typing import List, Union

import numpy as np
import rasterio
import rasterio.features
from affine import Affine
from shapely.geometry import shape, box
from skimage.graph import MCP_Geometric
from skimage.graph import _mcp
import geopandas as gpd
import pandas as pd
import matplotlib.pyplot as plt
import shutil
from IPython import embed as IP
import pickle


def convert_coordinates(start_points, end_points, dataset_avoid):
    """Attach raster pixel offsets ('xcoord'/'ycoord') to both point frames.

    Offsets are computed from the affine transform of ``dataset_avoid`` and
    rounded to whole pixels.
    """
    # Unpack the raster's affine transform.
    pixel_width, _, originX, _, pixel_height, originY, *args = dataset_avoid.transform

    print("**Converting the X coordinates of start and end points**")
    for pts in (start_points, end_points):
        pts["X"] = pts.geometry.x
        pts["xcoord"] = xcoord2pixelOffset(pts.X, originX, pixel_width).round(0)

    print("**Converting the Y coordinates of start and end points**")
    for pts in (start_points, end_points):
        pts["Y"] = pts.geometry.y
        pts["ycoord"] = ycoord2pixelOffset(pts.Y, originY, pixel_height).round(0)

    return start_points, end_points


def xcoord2pixelOffset(x, originX, pixelWidth):
    """Map an x map-coordinate to a (fractional) raster column offset."""
    return (x - originX) / pixelWidth


def ycoord2pixelOffset(y, originY, pixelHeight):
    """Map a y map-coordinate to a (fractional) raster row offset."""
    return (y - originY) / pixelHeight


def create_offsets():
    """Return the 8-connected move offsets and a per-move unit-distance table."""
    print("**Creating offsets**")
    # Tuples listing the 8 neighbor directions as -1/0/1 offsets.
    offsets = _mcp.make_offsets(2, True)
    # Destinations are marked with -1 in the traceback, so add (0, 0) at the end.
    offsets.append(np.array([0, 0]))
    offsets_arr = np.array(offsets)
    # Straight moves have unit distance 1; diagonal moves sqrt(2).
    tb_distance_dict = {
        idx: (1 if any(move == 0) else np.sqrt(2))
        for idx, move in enumerate(offsets_arr)
    }
    tb_distance_dict[-1] = 1  # include the -1 destination marker
    return offsets_arr, tb_distance_dict


def calc_path_cost(
    cost_surface_list: List[np.ndarray],
    traceback_arr: np.ndarray,
    tb_dist_dict: dict,
    route: List[tuple],
    pixel_size: Union[int, float] = 1,
):
    """Price a pre-computed path over one or more cost surfaces.

    Each point on ``route`` contributes half of its (summed) surface cost
    scaled by the unit distance of its traceback move; interior points also
    contribute a second half as the predecessor of the following point, which
    accounts for diagonal travel.  ``pixel_size`` (square pixels assumed)
    scales the per-pixel total to a real path cost; the default of 1 assumes
    pre-scaled costs.  Returns the total path cost as a float.
    """
    total = 0
    prev = None
    for point in route:
        total += sum(cs[point] for cs in cost_surface_list) * tb_dist_dict[traceback_arr[point]] / 2
        if prev:
            total += sum(cs[prev] for cs in cost_surface_list) * tb_dist_dict[traceback_arr[prev]] / 2
        prev = point
    return total * pixel_size


def cost_function(dataset_avoid, actual_cost_surface_list, start_points, end_points):
    """Route every start point to the nearest end point and price each route.

    Returns a dict keyed by start-point OBJECTID with the route's cost and its
    list of (row, col) pixel coordinates.
    """
    cost_surface_avoid = dataset_avoid.read(1)
    pixel_width, _, originX, _, pixel_height, originY, *args = dataset_avoid.transform

    offsets_arr, tb_distance_dict = create_offsets()

    print("**Initialize MCP_G**")
    # MCP_Geometric yields cumulative least costs and a traceback from every
    # raster pixel back to the nearest end point.
    mcp_g = MCP_Geometric(cost_surface_avoid)
    costs, traceback = mcp_g.find_costs(
        zip(end_points.ycoord.values, end_points.xcoord.values)
    )

    print("**Saving the list of routes, their actual cost, and **")
    cost_route = {}
    for row in start_points.itertuples():
        # traceback() returns the (row, col) cell locations along the path.
        path = mcp_g.traceback((row.ycoord, row.xcoord))
        route_cost = calc_path_cost(
            actual_cost_surface_list, traceback, tb_distance_dict, path, pixel_width
        )
        cost_route[row.OBJECTID] = {"cost": route_cost, "route": path}
    return cost_route
StarcoderdataPython
3398393
import torch.nn as nn
from typing import Sequence, MutableSequence
from __types import Module, Loader, CustomLayerTypes, CustomLayerSuperclasses, Tensor, ModuleType, Shape, Any
from model.execution import Trainer
from model.execution import dry_run


def infer_shapes(layers: MutableSequence[Module], loader: Loader) -> Sequence[Module]:
    """Instantiate every layer after the first by inferring its input shape.

    Each element of ``layers`` after index 0 must be a callable that, given
    the inferred output shape of the layers before it, returns the concrete
    module; it is replaced in-place by that module.

    Assumes any nested submodules have already had their shape inferred.
    TODO: in the future may want a recursive function for that.

    Returns the instantiated layers (everything except the leading input layer).
    """
    infer = ShapeInferer(loader)
    # Iterate indices 1..len-1 directly instead of range(len(layers[1:])),
    # which built a throwaway slice just to count it and then indexed l + 1.
    for idx in range(1, len(layers)):
        layers[idx] = layers[idx](infer(layers[:idx]))
    return layers[1:]


class Input(nn.Module):
    """Identity marker module that optionally records the expected input size."""

    def __init__(self, input_size: Shape = None):
        super(Input, self).__init__()
        # May be None, in which case ShapeInferer needs a data loader to
        # determine the input shape from an example batch.
        self.size = input_size

    def forward(self, input):
        return input

# ShapeInferer (below) performs the per-layer inference: with a loader all
# supported shapes can be inferred; without one, only layers whose shape is
# statically known. Custom inference logic can be supplied via
# custom_layer_types / custom_layer_superclasses (custom types take
# precedence, then superclasses, then the built-in implementation).
# TODO: abstract out base implementation into CustomLayerTypes and
# CustomLayerSuperclasses (and rename?)
class ShapeInferer:
    """Infers the output shape of the last layer in a partial layer stack.

    Can be passed a ``loader``, in which case all supported shapes can utilize
    shape inference; without one, only layers with statically known shapes
    can.  Custom inference logic may be supplied via ``custom_layer_types``
    (keyed by class *name*) and ``custom_layer_superclasses`` (keyed by
    *class*); custom types take precedence, then custom superclasses, then
    the built-in implementation.  All inference runs on CPU, since moving the
    incrementally created network per call would likely cost more.  When the
    loader is used, every example is assumed to have the same number of
    channels/features.
    """

    def __init__(
            self,
            loader: Loader = None,
            custom_layer_types: CustomLayerTypes = None,
            custom_layer_superclasses: CustomLayerSuperclasses = None
    ):
        self.shape: int
        self.loader = loader
        if custom_layer_types is None:
            self.custom_layer_types = {}
        else:
            self.custom_layer_types = custom_layer_types
        if custom_layer_superclasses is None:
            self.custom_layer_superclasses = {}
        else:
            self.custom_layer_superclasses = custom_layer_superclasses

    def __call__(self, prev_layers: Sequence[Module]) -> Shape:
        return self.infer(prev_layers)

    def infer(self, prev_layers: Sequence[Module]) -> Shape:
        """Infer and cache the output shape of the last layer in ``prev_layers``."""
        self.shape = self._infer(prev_layers[-1], prev_layers)
        return self.shape

    # helper function
    # feed an example through the data loader for manually capturing the
    # output shape; returns the output tensor
    def _run_prev_layers(self, prev_layers: Sequence[Module], layer_type: ModuleType) -> Tensor:
        model = nn.Sequential(*prev_layers)

        def _run(net, trainer, device):
            def _r(inputs, gtruth):
                return model(inputs.to('cpu'))  # all shape inference is done on cpu
            return _r

        if self.loader is not None:
            return dry_run(model, self.loader, None, _run)()
        else:
            raise TypeError("A data loader must be provided for shape inference with " + layer_type.__name__)

    def _infer(self, prev_layer: Module, prev_layers: Sequence[Module]) -> int:
        layer_type = type(prev_layer)
        if layer_type.__name__ in self.custom_layer_types:
            # the value returned from the custom layer inference
            return self.custom_layer_types[layer_type.__name__](prev_layer, prev_layers)
        # BUG FIX: this table is keyed by superclass *object*, not by name, so
        # look up the first matching superclass directly; the previous code
        # matched with issubclass() but then indexed by layer_type.__name__,
        # which raised KeyError whenever this branch was taken.
        for superclass, infer_fn in self.custom_layer_superclasses.items():
            if issubclass(layer_type, superclass):
                # the value returned from the custom layer superclass inference
                return infer_fn(prev_layer, prev_layers)
        # BUG FIX: the name comparisons below used `is`, which depends on
        # CPython string interning (and raises SyntaxWarning); use `==`.
        if layer_type.__name__ == 'Input':
            # the Input's size if it has one, else get an example from the loader
            if prev_layer.size is not None:
                return prev_layer.size
            else:
                if self.loader is None:
                    raise ValueError("must provide a loader to shape inference for layer type " + layer_type.__name__)
                inputs = next(iter(self.loader))['inputs']
                return inputs.size()[1]
        elif (issubclass(layer_type, nn.modules.conv._ConvNd)
              or layer_type.__name__ == 'Linear'):
            # the out_features dim
            return prev_layer.weight.size()[0]
        elif layer_type.__name__ == 'Flatten':
            # feed an example through the data loader and capture the output shape
            return self._run_prev_layers(prev_layers, layer_type).size()[-1]
        elif (layer_type.__name__ == 'ReLU'
              or layer_type.__name__ == 'LeakyReLU'
              or layer_type.__name__ == 'Residual'
              or issubclass(layer_type, nn.modules.batchnorm._BatchNorm)
              or issubclass(layer_type, nn.modules.pooling._MaxPoolNd)
              or issubclass(layer_type, nn.modules.padding._ReflectionPadNd)
              or issubclass(layer_type, nn.modules.dropout._DropoutNd)):
            # shape-preserving layers: reuse the previously inferred shape unchanged
            return self._infer(prev_layers[-2], prev_layers[:-1])
        else:
            raise NotImplementedError("No shape inference implementation for layer of type " + layer_type.__name__)
StarcoderdataPython
3294287
import sys
import unittest
import re
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..'))
from Exscript.protocols.osguesser import OsGuesser
from Exscript.protocols import drivers


class OsGuesserTest(unittest.TestCase):
    """Exercises OsGuesser's confidence-weighted attribute store and OS detection."""

    CORRELATE = OsGuesser

    def setUp(self):
        self.guesser = OsGuesser()

    def testConstructor(self):
        self.assertIsInstance(OsGuesser(), OsGuesser)

    def testReset(self):
        # set/get must behave exactly like a fresh instance after reset().
        self.testSet()
        self.guesser.reset()
        self.testSet()

    def testSet(self):
        # Unset key: None at every confidence level.
        self.assertEqual(self.guesser.get('test'), None)
        for threshold in (0, 50, 100):
            self.assertEqual(self.guesser.get('test', threshold), None)

        # Value stored at confidence 0 is visible only up to that confidence.
        self.guesser.set('test', 'foo', 0)
        self.assertEqual(self.guesser.get('test'), 'foo')
        self.assertEqual(self.guesser.get('test', 0), 'foo')
        self.assertEqual(self.guesser.get('test', 10), None)

        # Raising the confidence widens visibility.
        self.guesser.set('test', 'foo', 10)
        self.assertEqual(self.guesser.get('test'), 'foo')
        for threshold, expected in ((0, 'foo'), (10, 'foo'), (11, None)):
            self.assertEqual(self.guesser.get('test', threshold), expected)

        # A later, lower-confidence set must not downgrade the stored value.
        self.guesser.set('test', 'foo', 5)
        self.assertEqual(self.guesser.get('test'), 'foo')
        for threshold, expected in ((0, 'foo'), (10, 'foo'), (11, None)):
            self.assertEqual(self.guesser.get('test', threshold), expected)

    def testSetFromMatch(self):
        patterns = ((re.compile('on'), 'uno', 50),
                    (re.compile('two'), 'doe', 0),
                    (re.compile('one'), 'eins', 90))
        self.assertEqual(self.guesser.get('test'), None)
        self.guesser.set_from_match('test', patterns, '2two2')
        self.assertEqual(self.guesser.get('test'), 'doe')
        # The higher-confidence 'one' match wins over earlier matches.
        self.guesser.set_from_match('test', patterns, '2one2')
        self.assertEqual(self.guesser.get('test'), 'eins')

    def testGet(self):
        pass  # See testSet().

    def testDataReceived(self):
        # Every banner fixture is named <osname>.<ext>; feeding it byte by
        # byte must make the guesser identify that OS.
        banner_dir = os.path.join(os.path.dirname(__file__), 'banners')
        for entry in os.listdir(banner_dir):
            if entry.startswith('.'):
                continue
            osname = entry.split('.')[0]
            if not drivers.driver_map[osname].supports_os_guesser():
                continue
            with open(os.path.join(banner_dir, entry)) as fp:
                banner = fp.read().rstrip('\n')
            guesser = OsGuesser()
            for char in banner:
                guesser.data_received(char, False)
            self.assertEqual(guesser.get('os'), osname)

    def testProtocolInfo(self):
        guesser = OsGuesser()
        guesser.protocol_info('')
        self.assertEqual(guesser.get('os'), "unknown")
        guesser.protocol_info(None)
        self.assertEqual(guesser.get('os'), "unknown")
        guesser.protocol_info("SSH-2.0-Cisco-1.25")
        self.assertEqual(guesser.get('os'), "ios")


def suite():
    return unittest.TestLoader().loadTestsFromTestCase(OsGuesserTest)


if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())
StarcoderdataPython
3376464
#!/usr/bin/env python3
import argparse
import os
from yaml import safe_load, dump


def setupArgs():
    """Build the CLI: a directory to walk plus an action ('yaml' or 'fixlinks')."""
    parser = argparse.ArgumentParser(description='Fix permalinks to be hierarchical in directory')
    parser.add_argument('directory', type=str, help='Directory to recursively fix all .md files.')
    parser.add_argument('action', help='Action to take', type=str, default=None,
                        choices=['yaml', 'fixlinks'])
    return parser.parse_args()


# TODO: Convert to use yaml
def fixLink(old, path, name):
    """Return the front matter with its permalink rewritten to mirror the path.

    Blank lines and any existing 'permalink:' line are dropped; index.md maps
    to the directory URL itself, other files to a trailing sub-path.
    """
    kept = [line for line in old.split('\n')
            if line and not line.startswith('permalink:')]
    if name == 'index.md':
        url = f'/{path}/'
    else:
        url = f'/{path}/{name[:-3]}/'
    kept.append(f'permalink: {url}')
    return '\n'.join(kept) + '\n'


def getTitle(frontmatter):
    """Parse YAML front matter and return its 'title' value."""
    return safe_load(frontmatter)['title']


def getUrl(frontmatter):
    """Parse YAML front matter and return its 'permalink' value."""
    return safe_load(frontmatter)['permalink']


if __name__ == "__main__":
    args = setupArgs()
    pages = []
    for dirpath, dirnames, filenames in os.walk(args.directory):
        for filename in filter(lambda a: a.endswith('.md'), filenames):
            with open(os.path.join(dirpath, filename)) as f:
                text = f.read()
            # Front matter sits between the first two '---' markers.
            begin = text.find('---')
            end = text.find('---', begin + 1)
            frontmatter = text[begin:end]
            # URL path mirrors the directory, minus the top-level component.
            path = '/'.join(dirpath.split('/')[1:])
            updated = fixLink(frontmatter, path, filename)
            if args.action == 'fixlinks':
                with open(os.path.join(dirpath, filename), 'w') as f:
                    f.write(text[:begin] + updated + text[end:])
            elif updated != frontmatter:
                new_meta = safe_load(updated)
                old_meta = safe_load(frontmatter)
                if new_meta['permalink'] != old_meta['permalink']:
                    print(f"Warning. Need to update permalink for {dirpath}/(unknown).")
            if args.action == 'yaml':
                pages.append({'page': getTitle(frontmatter), 'url': getUrl(frontmatter)})
    if args.action == 'yaml':
        print(dump(pages))
StarcoderdataPython
169471
#%%
import os
import pandas as pd
import numpy as np
import copy
from tqdm import tqdm
from plot import plot
from utils.evaluator import evaluate, set_thresholds
from utils.evaluator_seg import compute_anomaly_scores, compute_metrics
# Univariate
from utils.data_loader import load_kpi, load_IoT_fridge
# Multivariate
from utils.data_loader import load_samsung, load_energy, load_unsw, load_IoT_modbus


def _elements(array):
    """Return a falsy value iff `array` is empty (0-d or zero-size)."""
    return array.ndim and array.size


def train(AE_model, Temporal_AE_model, model_name, window_size, stride, lamda_t, wavelet_num, seed, dataset, temporal=False, decomposition=False, segmentation=False):
    """Train/evaluate an autoencoder anomaly detector on every sub-dataset of `dataset`.

    `dataset` names a loader (`load_<dataset>` must exist in this module's
    globals).  Three booleans select one of eight cases:
    - temporal:      use Temporal_AE_model with auxiliary (wavelet) inputs
    - decomposition: train/evaluate on the residual component of the series
    - segmentation:  evaluate with segment-level metrics instead of the
                     traditional point-wise metrics
    Results are printed, written to ./results/<dataset>/{Ts,Seg}/ as CSV, and
    (for the non-segmentation path) plotted.
    NOTE(review): when temporal=False and segmentation=True, `ax_trains` /
    `ax_tests` are unbound; callers presumably never combine temporal=True
    paths without the auxiliary loader succeeding — confirm.
    """
    # Accumulators across sub-datasets for the two evaluation styles.
    ts_scores = {'dataset': [], 'f1': [], 'precision': [], 'recall': [], 'pr_auc': [], 'roc_auc': [], 'th_index': [], 'predicted_index': []}
    seg_scores = {'dataset': [], 'avg_f1': [], 'avg_p': [], 'avg_r': [], 'max_p': [], 'max_r': [], 'max_f1': [], 'correct_count': [], 'correct_ratio': []}
    if temporal == True:
        # Auxiliary (temporal) inputs for the Temporal AE variant.
        datasets_auxiliary = globals()[f'load_{dataset}'](window_size, stride, lamda_t, wavelet_num, temporal=temporal)
        ax_trains, ax_tests = datasets_auxiliary['x_train'], datasets_auxiliary['x_test']
    # There are eight cases #1-1~#1-4 & #2-1~#2-4
    # 1) decomposition==True: Decompose time series and evaluate through traditional metrics (Temporal)
    # 4) decomposition==False: Evaluate through traditional metrics with common methods
    if segmentation == False:
        datasets = globals()[f'load_{dataset}'](window_size, stride, lamda_t, wavelet_num, decomposition=decomposition, segmentation=segmentation)
        x_trains, x_tests, y_tests = datasets['x_train'], datasets['x_test'], datasets['y_test']
        test_seq, label_seq = datasets['test_seq'], datasets['label_seq']
        if decomposition == True:
            train_residual, test_residual = datasets['x_train_resid'], datasets['x_test_resid']
        per_window_idx = []
        for data_num in tqdm(range(len(x_trains))):
            # 1) if decomposition == True
            if decomposition == True:
                X_test = x_tests[data_num]
                residual_X_train = train_residual[data_num]
                residual_X_test = test_residual[data_num]
                # 1-1) temporal=True, decomposition=True, Segmentation=False
                if temporal == True:
                    X_train_ax = ax_trains[data_num]
                    X_test_ax = ax_tests[data_num]
                    model = Temporal_AE_model(X_train_ax, residual_X_train)
                    rec_x = model.predict([X_test_ax, residual_X_test])
                    thresholds = set_thresholds(residual_X_test, rec_x, is_reconstructed=True)
                    test_scores = evaluate(thresholds, residual_X_test, rec_x, y_tests[data_num], is_reconstructed=True)
                # 2-1) temporal=False, decomposition=True, Segmentation=False
                else:
                    if model_name == "MS-RNN":
                        # MS-RNN emits per-scale reconstructions in reversed
                        # time order, so flip along axis 1 before scoring.
                        model = AE_model(residual_X_train)
                        rec_x = [np.flip(rec, axis=1) for rec in model.predict(residual_X_test)]
                        thresholds = set_thresholds(residual_X_test, rec_x, is_reconstructed=True, scoring='square_median')
                        test_scores = evaluate(thresholds, residual_X_test, rec_x, y_tests[data_num], is_reconstructed=True, scoring='square_median')
                    else:
                        model = AE_model(residual_X_train)
                        rec_x = model.predict(residual_X_test)
                        thresholds = set_thresholds(residual_X_test, rec_x, is_reconstructed=True)
                        test_scores = evaluate(thresholds, residual_X_test, rec_x, y_tests[data_num], is_reconstructed=True)
            # 4) if decomposition == False
            else:
                X_train = x_trains[data_num]
                X_test = x_tests[data_num]
                # 1-4) temporal=True, decomposition=False, segmentation=False
                if temporal == True:
                    X_train_ax = ax_trains[data_num]
                    X_test_ax = ax_tests[data_num]
                    model = Temporal_AE_model(X_train_ax, X_train)
                    rec_x = model.predict([X_test_ax, X_test])
                    thresholds = set_thresholds(X_test, rec_x, is_reconstructed=True)
                    test_scores = evaluate(thresholds, X_test, rec_x, y_tests[data_num], is_reconstructed=True)
                # 2-4) temporal=False, decomposition=False, segmentation:False
                else:
                    if model_name == "MS-RNN":
                        model = AE_model(X_train)
                        rec_x = [np.flip(rec, axis=1) for rec in model.predict(X_test)]
                        thresholds = set_thresholds(X_test, rec_x, is_reconstructed=True, scoring='square_median')
                        test_scores = evaluate(thresholds, X_test, rec_x, y_tests[data_num], is_reconstructed=True, scoring='square_median')
                    else:
                        model = AE_model(X_train)
                        rec_x = model.predict(X_test)
                        thresholds = set_thresholds(X_test, rec_x, is_reconstructed=True)
                        test_scores = evaluate(thresholds, X_test, rec_x, y_tests[data_num], is_reconstructed=True)
            # Aggregate traditional metrics for this sub-dataset.
            ts_scores['dataset'].append(f'Data{data_num+1}')
            ts_scores['f1'].append(np.max(test_scores['f1']))
            ts_scores['precision'].append(np.mean(test_scores['precision']))
            ts_scores['recall'].append(np.mean(test_scores['recall']))
            ts_scores['pr_auc'].append(test_scores['pr_auc'])
            ts_scores['roc_auc'].append(test_scores['roc_auc'])
            # Pick the (median) threshold index that attains the best F1.
            th_index = int(np.median(np.where(test_scores['f1']==np.max(test_scores['f1']))[0]))
            ts_scores['th_index'].append(th_index)
            print(f'{seed}th {model_name} Data{data_num+1}', np.max(test_scores['f1']), np.mean(test_scores['precision']), np.mean(test_scores['recall']), test_scores['pr_auc'], test_scores['roc_auc'])
            # Windows whose first reconstruction error exceeds the chosen
            # threshold are recorded as predicted-anomalous.
            pred_anomal_idx = []
            for t in range(len(X_test)):
                pred_anomalies = np.where(test_scores['rec_errors'][t] > thresholds[th_index])[0]
                isEmpty = (_elements(pred_anomalies) == 0)
                if isEmpty:
                    pass
                else:
                    if pred_anomalies[0] == 0:
                        pred_anomal_idx.append(t)
            per_window_idx.append(pred_anomal_idx)
        ts_scores['predicted_index'].extend(per_window_idx)
        scores_all = copy.deepcopy(ts_scores)  # NOTE(review): unused below — kept for parity
        del ts_scores['th_index']
        results_df = pd.DataFrame(ts_scores)
        print("@"*5, f'{seed}th Seed {model_name} R{decomposition}_T{temporal}_Ts', "@"*5)
        print(results_df.groupby('dataset').mean())
        save_results_path = f'./results/{dataset}/Ts'
        try:
            if not(os.path.isdir(save_results_path)):
                os.makedirs(os.path.join(save_results_path), exist_ok=True)
        except OSError as e:
            print("Failed to create directory!!!!!")
        results_df.to_csv(f'{save_results_path}/{model_name}_R{decomposition}_T{temporal}_ts_seed{seed}.csv', index=False)
        plot(model_name, ts_scores, test_seq, label_seq, seed, save_results_path, decomposition, temporal)
    # 2) decomposition==True: Decompose time series and evalutate new metrics (Temporal+Seg_evaluation)
    # 3) decomposition==False: Evaluate through new metrics with common methods (Seg_evaluation)
    elif segmentation == True:
        datasets = globals()[f'load_{dataset}'](window_size, stride, lamda_t, wavelet_num, decomposition=decomposition, segmentation=segmentation)
        x_trains, x_tests = datasets['x_train'], datasets['x_test']
        y_tests, y_segment_tests = datasets['y_test'], datasets['y_segment_test']
        if decomposition == True:
            train_residual, test_residual = datasets['x_train_resid'], datasets['x_test_resid']
        per_window_idx = []
        for data_num in tqdm(range(len(x_trains))):
            # 2) if decomposition == True
            if decomposition == True:
                residual_X_train = train_residual[data_num]
                residual_X_test = test_residual[data_num]
                # 1-2) temporal=True, decomposition=True, segmentation=True
                if temporal == True:
                    X_train_ax = ax_trains[data_num]
                    X_test_ax = ax_tests[data_num]
                    model = Temporal_AE_model(X_train_ax, residual_X_train)
                    scores = compute_anomaly_scores(residual_X_test, model.predict([X_test_ax, residual_X_test]))
                    test_scores = compute_metrics(scores, y_tests[data_num], y_segment_tests[data_num])
                else:
                    # 2-2) temporal=False, decomposition=True, segmentation=True
                    if model_name == "MS-RNN":
                        model = AE_model(residual_X_train)
                        rec_x = np.mean([np.flip(rec, axis=1) for rec in model.predict(residual_X_test)], axis=0)
                        scores = compute_anomaly_scores(residual_X_test, rec_x, scoring='square_median')
                        test_scores = compute_metrics(scores, y_tests[data_num], y_segment_tests[data_num])
                    else:
                        model = AE_model(residual_X_train)
                        scores = compute_anomaly_scores(residual_X_test, model.predict(residual_X_test))
                        test_scores = compute_metrics(scores, y_tests[data_num], y_segment_tests[data_num])
            # 3) if decomposition == False
            else:
                X_train = x_trains[data_num]
                X_test = x_tests[data_num]
                # 1-3) temporal=True, decomposition=False, segmentation=True
                if temporal == True:
                    X_train_ax = ax_trains[data_num]
                    X_test_ax = ax_tests[data_num]
                    model = Temporal_AE_model(X_train_ax, X_train)
                    scores = compute_anomaly_scores(X_test, model.predict([X_test_ax, X_test]))
                    test_scores = compute_metrics(scores, y_tests[data_num], y_segment_tests[data_num])
                # 2-3) temporal=False, decomposition=False, segmentation=True
                else:
                    if model_name == "MS-RNN":
                        model = AE_model(X_train)
                        rec_x = np.mean([np.flip(rec, axis=1) for rec in model.predict(X_test)], axis=0)
                        scores = compute_anomaly_scores(X_test, rec_x, scoring='square_median')
                        test_scores = compute_metrics(scores, y_tests[data_num], y_segment_tests[data_num])
                    else:
                        model = AE_model(X_train)
                        scores = compute_anomaly_scores(X_test, model.predict(X_test))
                        test_scores = compute_metrics(scores, y_tests[data_num], y_segment_tests[data_num])
            # Aggregate segment-level metrics for this sub-dataset.
            seg_scores['dataset'].append(f'Data{data_num+1}')
            seg_scores['max_f1'].append(np.max(test_scores['f1']))
            seg_scores['max_p'].append(np.max(test_scores['precision']))
            seg_scores['max_r'].append(np.max(test_scores['recall']))
            seg_scores['avg_f1'].append(np.average(test_scores['f1']))
            seg_scores['avg_p'].append(np.average(test_scores['precision']))
            seg_scores['avg_r'].append(np.average(test_scores['recall']))
            seg_scores['correct_count'].append(np.average(test_scores['count']))
            seg_scores['correct_ratio'].append(np.average(test_scores['ratio']))
            print(f'{seed}th {model_name} Data{data_num+1}', np.max(test_scores['f1']), np.mean(test_scores['precision']), np.mean(test_scores['recall']), np.mean(test_scores['count']), np.mean(test_scores['ratio']))
        results_df = pd.DataFrame(seg_scores)
        print("@"*5, f'{seed}th Seed {model_name} R{decomposition}_T{temporal}_Seg', "@"*5)
        print(results_df.groupby('dataset').mean())
        save_results_path = f'./results/{dataset}/Seg'
        try:
            if not(os.path.isdir(save_results_path)):
                os.makedirs(os.path.join(save_results_path), exist_ok=True)
        except OSError as e:
            print("Failed to create directory!!!!!")
        results_df.to_csv(f'{save_results_path}/{model_name}_R{decomposition}_T{temporal}_seg_seed{seed}.csv', index=False)
# %%
StarcoderdataPython
3281687
from dataclasses import dataclass

from bindings.gmd.ellipsoid_property_type import EllipsoidPropertyType

# XML namespace this binding belongs to.
__NAMESPACE__ = "http://www.opengis.net/gml"


@dataclass
class UsesEllipsoid(EllipsoidPropertyType):
    """GML ``usesEllipsoid`` property — generated XML data-binding class."""

    class Meta:
        # Element name and namespace used by the XML (de)serializer.
        name = "usesEllipsoid"
        namespace = "http://www.opengis.net/gml"
StarcoderdataPython
1770250
def count_increases(depths):
    """Return how many elements are strictly greater than their predecessor.

    Advent of Code 2021 day 1: count depth-measurement increases.
    An empty or single-element sequence yields 0.
    """
    return sum(1 for prev, cur in zip(depths, depths[1:]) if cur > prev)


def main():
    """Read the puzzle input (one integer per line) and print the answer."""
    with open("Advent of Code/2021/Day 1/input.txt") as inp:
        content = [int(line) for line in inp.read().splitlines()]
    print(count_increases(content))


# Guarding the I/O lets the counting logic be imported and tested without
# the puzzle-input file being present.
if __name__ == "__main__":
    main()
StarcoderdataPython
1633079
from .alerts import Alerts


class ToolsNotifications:
    """Watches hotend (tool0) temperatures and fires IFTTT/APNS alerts.

    Two events are tracked across successive ``check_temps`` polls:
    * "tool0-cooled": after a print, tool0 dropped below a low threshold.
    * "tool0-warmed": tool0 reached (or passed within 5 degrees of) its target.
    """

    def __init__(self, logger, ifttt_alerts):
        self._logger = logger
        self._ifttt_alerts = ifttt_alerts
        self._alerts = Alerts(self._logger)
        self._printer_was_printing_above_tool0_low = False  # Variable used for tool0 cooling alerts
        self._printer_alerted_reached_tool0_target = False  # Variable used for tool0 warm alerts

    def check_temps(self, settings, printer):
        """Poll current temperatures and emit cool-down / warm-up alerts.

        Meant to be called periodically; state flags on ``self`` carry the
        printing/alerted status between calls.
        """
        temps = printer.get_current_temperatures()
        # self._logger.debug(u"CheckTemps(): %r" % (temps,))
        if not temps:
            # self._logger.debug(u"No Temperature Data")
            return
        for k in temps.keys():
            # example dictionary from octoprint
            # {
            #   'bed': {'actual': 0.9, 'target': 0.0, 'offset': 0},
            #   'tool0': {'actual': 0.0, 'target': 0.0, 'offset': 0},
            #   'tool1': {'actual': 0.0, 'target': 0.0, 'offset': 0}
            # }
            # Only tool0 is monitored; bed and other tools are skipped.
            if k == 'tool0':
                tool0_threshold_low = settings.get_int(['tool0_low'])
                target_temp = settings.get(['tool0_target_temp'])
            else:
                continue

            # Check if tool0 has cooled down to specified temperature once print is finished
            # Remember if we are printing and current tool0 temp is above the low tool0 threshold
            if not self._printer_was_printing_above_tool0_low and printer.is_printing() and tool0_threshold_low and \
                    temps[k]['actual'] > tool0_threshold_low:
                self._printer_was_printing_above_tool0_low = True

            # If we are not printing and we were printing before with tool0 temp above threshold and tool0 temp is now
            # below threshold
            if self._printer_was_printing_above_tool0_low and not printer.is_printing() and tool0_threshold_low \
                    and temps[k]['actual'] < tool0_threshold_low:
                self._logger.debug(
                    "Print done and tool0 temp is now below threshold {0}. Actual {1}.".format(tool0_threshold_low,
                                                                                               temps[k]['actual']))
                self._printer_was_printing_above_tool0_low = False
                self.__send__tool_notification(settings, "tool0-cooled", tool0_threshold_low)

            # Check if tool0 has reached target temp and user wants to receive alerts for this event
            if temps[k]['target'] > 0 and target_temp:
                diff = temps[k]['actual'] - temps[k]['target']
                # If we have not alerted user and printer reached target temp then alert user. Only alert
                # when actual is equal to target or passed target by 5. Useful if hotend is too hot after
                # print and you want to be alerted when it cooled down to a target temp
                if not self._printer_alerted_reached_tool0_target and 0 <= diff < 5:
                    self._printer_alerted_reached_tool0_target = True
                    self.__send__tool_notification(settings, "tool0-warmed", temps[k]['target'])
            elif temps[k]['target'] == 0:
                # There is no target temp so reset alert flag so we can alert again
                # once a target temp is set
                self._printer_alerted_reached_tool0_target = False

    ##~~ Private functions - Tool Notifications

    def __send__tool_notification(self, settings, event_code, temperature):
        """Fire the IFTTT event and push an APNS notification per device token.

        Returns -1 when no APNS server is configured, -2 when no devices are
        registered, otherwise the result of the last alert sent.
        """
        # Send IFTTT Notifications
        self._ifttt_alerts.fire_event(settings, event_code, temperature)

        server_url = settings.get(["server_url"])
        if not server_url or not server_url.strip():
            # No APNS server has been defined so do nothing
            return -1

        tokens = settings.get(["tokens"])
        if len(tokens) == 0:
            # No iOS devices were registered so skip notification
            return -2

        # For each registered token we will send a push notification
        # We do it individually since 'printerID' is included so that
        # iOS app can properly render local notification with
        # proper printer name
        used_tokens = []
        last_result = None
        for token in tokens:
            apns_token = token["apnsToken"]

            # Ignore tokens that already received the notification
            # This is the case when the same OctoPrint instance is added twice
            # on the iOS app. Usually one for local address and one for public address
            if apns_token in used_tokens:
                continue
            # Keep track of tokens that received a notification
            used_tokens.append(apns_token)

            if 'printerName' in token and token["printerName"] is not None:
                # We can send non-silent notifications (the new way) so notifications are rendered even if user
                # killed the app
                printer_name = token["printerName"]
                language_code = token["languageCode"]
                url = server_url + '/v1/push_printer'

                last_result = self._alerts.send_alert_code(settings, language_code, apns_token, url, printer_name,
                                                           event_code, None, None)

        return last_result
StarcoderdataPython
78899
from flask import current_app as app
from flask_migrate import Migrate, migrate, upgrade, stamp, current
from alembic.migration import MigrationContext
from sqlalchemy import create_engine
from sqlalchemy.engine.url import make_url
from sqlalchemy_utils import (
    database_exists as database_exists_util,
    create_database as create_database_util,
    drop_database as drop_database_util
)
from six import StringIO

# Flask-Migrate extension instance; bound to the app elsewhere via init_app.
migrations = Migrate()


def create_database():
    """Create the configured database if it does not already exist.

    Reads SQLALCHEMY_DATABASE_URI from the current app and returns the
    (driver-normalised) SQLAlchemy URL.
    """
    url = make_url(app.config['SQLALCHEMY_DATABASE_URI'])

    # SQLAlchemy no longer accepts the bare 'postgres' scheme; normalise it.
    if url.drivername == 'postgres':
        url.drivername = 'postgresql'

    # NOTE(review): mutating url.drivername / url.query assumes a mutable URL
    # object (SQLAlchemy < 1.4); confirm against the pinned SQLAlchemy version.
    if url.drivername.startswith('mysql'):
        # Force full-Unicode storage on MySQL connections.
        url.query['charset'] = 'utf8mb4'

    # Creates database if the database does not exist
    if not database_exists_util(url):
        if url.drivername.startswith('mysql'):
            create_database_util(url, encoding='utf8mb4')
        else:
            create_database_util(url)

    return url


def drop_database():
    """Drop the configured database ('postgres' scheme normalised first)."""
    url = make_url(app.config['SQLALCHEMY_DATABASE_URI'])
    if url.drivername == 'postgres':
        url.drivername = 'postgresql'
    drop_database_util(url)


def get_current_revision():
    """Return the current Alembic migration revision of the app database.

    Returns None when the database has never been stamped/migrated.
    """
    engine = create_engine(app.config.get('SQLALCHEMY_DATABASE_URI'))
    try:
        # Fix: the original leaked the connection (and the engine's pool) --
        # close both deterministically.
        with engine.connect() as conn:
            context = MigrationContext.configure(conn)
            return context.get_current_revision()
    finally:
        engine.dispose()
StarcoderdataPython
79886
from django.shortcuts import render, redirect
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.views import LoginView, LogoutView
from user.forms import UserRegisterForm
from django.views.generic import CreateView


class UserRegisterView(SuccessMessageMixin, CreateView):
    """Self-service registration; the form chooses employee vs employer role."""

    template_name = 'user/user-register.html'
    form_class = UserRegisterForm
    success_url = '/'
    success_message = 'Usuario criado com sucesso'

    def form_valid(self, form):
        """Persist the new user with the role flag selected on the form."""
        new_user = form.save(commit=False)
        chosen_role = form.cleaned_data['user_types']
        # Only the two known role values set a flag; any other value leaves
        # both flags untouched.
        if chosen_role == 'is_employee':
            new_user.is_employee = True
        elif chosen_role == 'is_employer':
            new_user.is_employer = True
        new_user.save()
        return redirect(self.success_url)


class UserLoginView(LoginView):
    """Standard login view with a custom template."""

    template_name = 'user/login.html'


class UserLogoutView(LogoutView):
    """Logout view reusing the login template."""

    template_name = 'user/login.html'
StarcoderdataPython
1606704
from operator import attrgetter
from typing import Hashable, Iterable, List, Sized


class classproperty:
    """Descriptor exposing a class-level callable as a read-only property.

    The wrapped callable always receives the owning class, whether the
    attribute is accessed from the class or from an instance.
    """

    def __init__(self, method=None):
        self.fget = method

    def __get__(self, instance, cls=None):
        # Dispatch on the owning class, never on the instance.
        return self.fget(cls)


def is_empty(val):
    """Check whether *val* is logically empty (carries no information).

    ``None`` and zero-length sized containers (including ``""``) count as
    empty; ``False`` and ``0`` do NOT.
    """
    return val is None or (isinstance(val, Sized) and len(val) == 0)


def unique_ordered(sequence: Iterable[Hashable]) -> List:
    """Deduplicate *sequence*, keeping first-occurrence order."""
    seen = {}
    for item in sequence:
        seen.setdefault(item, None)
    return list(seen)


def unique_objs(objs: List[object], unique_attrs: List[str]) -> List:
    """Return objects that are unique by the given attribute footprint.

    Order of first occurrence in *objs* is preserved; later objects with an
    already-seen footprint are dropped.
    """
    footprint_of = attrgetter(*unique_attrs)
    seen_footprints = set()
    result = []
    for candidate in objs:
        fp = footprint_of(candidate)
        if fp not in seen_footprints:
            seen_footprints.add(fp)
            result.append(candidate)
    return result
StarcoderdataPython
1707831
import torch

from .Flatten import Flatten  # NOTE(review): imported but unused in this module


class CNNAutoencoder(torch.nn.Module):
    """1-D convolutional denoising autoencoder.

    The encoder stacks Conv1d -> MaxPool1d -> ReLU -> Dropout per layer; the
    decoder mirrors it with ConvTranspose1d blocks in reverse order, using
    each layer's pool size as the transposed-conv stride to undo the pooling.
    """

    def __init__(self, input_size, hidden_size, conv_kernel, pool_kernel,
                 padding, stride=1, dilation=1,
                 dropout=0.0, input_noise=0.0):
        """
        :param input_size: number of input channels
        :param hidden_size: list of per-layer output channel counts
        :param conv_kernel: list of per-layer convolution kernel sizes
        :param pool_kernel: list of per-layer max-pool sizes
        :param padding: list of per-layer convolution paddings (encoder only)
        :param stride: NOTE(review): accepted but never used -- Conv1d is
            always built with stride=1; confirm before relying on it
        :param dilation: NOTE(review): accepted but never used (dilation=1
            is hard-coded in both stacks)
        :param dropout: dropout probability between layers
        :param input_noise: dropout probability applied to the raw input
            (denoising-autoencoder style corruption)
        """
        super(CNNAutoencoder, self).__init__()
        self.num_layers = len(hidden_size)
        # Input corruption: randomly zeroes input entries at train time.
        self.noise = torch.nn.Dropout(input_noise)

        # ---- encoder: Conv1d -> MaxPool1d -> ReLU -> Dropout per layer ----
        layers = []
        for i, (h, c, p, pad) in enumerate(list(zip(hidden_size, conv_kernel, pool_kernel, padding))):
            # First layer consumes the raw input channels, later ones chain.
            s = input_size if i == 0 else hidden_size[i-1]
            layers += [
                torch.nn.Conv1d(s, h, c, stride=1, padding=pad, dilation=1),
                torch.nn.MaxPool1d(p),
                torch.nn.ReLU(),
                torch.nn.Dropout(dropout)
            ]
        self.encoder = torch.nn.Sequential(*layers)

        # ---- decoder: mirror of the encoder, iterated in reverse ----
        layers = []
        for i, (h, c, p, pad) in enumerate(list(zip(hidden_size, conv_kernel, pool_kernel, padding))[::-1]):
            if i == self.num_layers-1:
                # Final block maps back to the original channel count and has
                # no trailing non-linearity or dropout.
                s = input_size
                layers += [
                    torch.nn.ConvTranspose1d(h, s, c, stride=p, padding=0, dilation=1)
                ]
            else:
                s = hidden_size[::-1][i+1]
                layers += [
                    # stride=p upsamples by the matching encoder pool factor.
                    # NOTE(review): padding=0 here (not `pad`), so the decoded
                    # length may not exactly match the encoder's input length
                    # when pad != 0 -- confirm with the shapes used in training.
                    torch.nn.ConvTranspose1d(h, s, c, stride=p, padding=0, dilation=1),
                    torch.nn.ReLU(),
                    torch.nn.Dropout(dropout)
                ]
        self.decoder = torch.nn.Sequential(*layers)

    def forward(self, subject):
        """Return ``(code, reconstruction)`` for a batch *subject*."""
        noisy = self.noise(subject)
        code = self.encoder(noisy)
        decode = self.decoder(code)
        return code, decode  # self.sigmoid(d_c1)
StarcoderdataPython
3270672
#!/usr/bin/python2.7
"""
Copyright (C) 2014 Reinventing Geospatial, Inc.

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>,
or write to the Free Software Foundation, Inc., 59 Temple Place -
Suite 330, Boston, MA 02111-1307, USA.

Authors:
    <NAME>, Reinventing Geospatial Inc (RGi)
    <NAME>, Reinventing Geospatial Inc (RGi)
Date: 2018-11-11
   Requires: sqlite3, argparse
   Optional: Python Imaging Library (PIL or Pillow)
Credits:
  MapProxy imaging functions: http://mapproxy.org
  gdal2mb on github: https://github.com/developmentseed/gdal2mb

Version:
"""


class SpatialReferenceSystemEntry(object):
    """An object representing an entry in the gpkg_spatial_ref_sys Table"""

    def __init__(self, srs_name, srs_id, organization, organization_coordsys_id, definition, description):
        """
        Constructor

        :param srs_name: Human readable name of this SRS
        :type srs_name: str
        :param srs_id: Unique identifier for each Spatial Reference System
            within a GeoPackage e.g. 4326
        :type srs_id: int
        :param organization: Case-insensitive name of the defining
            organization e.g. EPSG or epsg
        :type organization: str
        :param organization_coordsys_id: Numeric ID of the Spatial Reference
            System assigned by the organization
        :type organization_coordsys_id: int
        :param definition: Well-known Text Representation of the Spatial
            Reference System
        :type definition: str
        :param description: Human readable description of this SRS
        :type description: str
        """
        self.srs_name = srs_name
        self.organization = organization
        self.organization_coordsys_id = organization_coordsys_id
        self.srs_id = srs_id
        self.definition = definition
        self.description = description

    def __repr__(self):
        # Added for debuggability: a plain data holder with six attributes is
        # opaque in logs/debuggers without a repr. Attribute storage and the
        # constructor signature are unchanged.
        return ('SpatialReferenceSystemEntry(srs_name=%r, srs_id=%r, '
                'organization=%r, organization_coordsys_id=%r, '
                'definition=%r, description=%r)'
                % (self.srs_name, self.srs_id, self.organization,
                   self.organization_coordsys_id, self.definition,
                   self.description))
StarcoderdataPython
152091
def bestSum(t, arr, memo=None):
    """Return the shortest combination of values from *arr* (with unlimited
    reuse) summing exactly to *t*, or ``None`` when no combination exists.

    Memoized on the remaining target:
        time  O(n * m^2)   (m = t, n = len(arr); each subtarget builds lists up to length m)
        space O(m^2)       (memo holds up to m lists of length up to m)
    Without memoization the recursion would be O(n^m * m) time.
    """
    if memo is None:
        memo = {}
    if t in memo:
        return memo[t]
    if t == 0:
        return []
    if t < 0:
        return None

    best = None
    for value in arr:
        tail = bestSum(t - value, arr, memo)
        if tail is None:
            continue
        candidate = tail + [value]
        # Strict '<' keeps the earliest shortest combination found.
        if best is None or len(candidate) < len(best):
            best = candidate

    memo[t] = best
    return best


print(bestSum(7, [5,3,4,7]))
print(bestSum(8, [2,3,5]))
print(bestSum(8, [1,4,5]))
print(bestSum(100, [1,2,5,25]))
StarcoderdataPython
4824612
# Copyright(C) 1999-2020 National Technology & Engineering Solutions # of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with # NTESS, the U.S. Government retains certain rights in this software. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # * Neither the name of NTESS nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from phactori import *
from paraview.simple import *

#phactori_combine_to_single_python_file_subpiece_begin_1
class PhactoriSliceWithPlaneOperation(PhactoriPlaneOpBase):
    """slice with plane operation, adapter to the catalyst filter

    PhactoriSliceWithPlaneOperation is the phactori manager for working with
    the ParaView/Catalyst SliceWithPlane() filter and its parameters,
    providing access and pipeline/input/output managment via the json,
    lexx/yacc, or soon yaml interface. The user may specify a named input to
    the filter, with the unnamed default being the incoming data mesh.

    For information on the SliceWithPlane() filter from ParaView, see the
    ParaView Documentation. Our experience has been that as of ParaView 5.8
    there are some mesh geometries where the paraview Slice() filter with a
    plane slice type will crash while SliceWithPlane() behaves correctly.

    The user must define the plane with a point and normal or with three
    points. There are defaults which will be used if the user does not supply
    some or all of the definition.

    PhactoriSliceWithPlaneOperation is a child class of PhactoriPlaneOpBase,
    along with PhactoriSliceOperation and PhactoriClipPlaneOperation. Check
    the documentation for PhactoriPlaneOpBase for the many options for
    defining the plane point(s), including absolute or relative 3D points,
    dynamic data-driven point locations, or collocating with mesh nodes or
    elements (or offset therefrom).

    To add a PhactoriSliceWithPlaneOperation to the incoming script, you add
    a sub-block to the "operation blocks" section of the data with the "type"
    key given a value of "slicewithplane". One complete but simple example
    script:

    ::

      {
        "camera blocks":{"myslicecam1":{"type":"camera",
                                        "look direction":[1.0, 2.0, 3.0]}},
        "representation blocks":{"rep_tmprtr":{"color by scalar":"temperature"}},
        "imageset blocks":{
          "temperature_on_slice_1":{
            "operation":"myslicewithplane1",
            "camera":"myslicecam1",
            "representation":"rep_tmprtr",
            "image basedirectory":"CatalystOutput",
            "image basename":"slice1_temperature."
          }
        },
        "operation blocks":{
          "myslicewithplane1":{
            "type":"slicewithplane",
            "relative point on plane":[0.1, -0.2, 0.3],
            "plane normal":[1.0, 2.0, 3.0]
          }
        }
      }

    the plane normal does not need to be a unit vector: Phactori will
    normalize it for you (again, see PhactoriClipPlaneOperation)

    A minimalist example script using all default behavior will produce 8
    images with autogenerated names with 6 axis aligned views and 2 diagonal
    xyz views colored by block index number (cylically).

    ::

      {
        "camera blocks":{},
        "representation blocks":{},
        "imageset blocks":{"slice_1_imageset":{"operation":"myslicewithplane1"}},
        "operation blocks":{"myslicewithplane1":{"type":"slicewithplane"}}
      }
    """

    #don't need our own init code at this point, but this is how it would be
    #added
    #def __init__(self):
    #    MySuperClass.__init__(self)

    def CreateParaViewFilter(self, inInputFilter):
        """create the slice plane filter for ParaView"""
        if PhactoriDbg(100):
            myDebugPrint3('PhactoriSliceWithPlaneOperation.CreateParaViewFilter entered\n', 100)
        #info in block class should already be parsed and checked

        # Remember the active source so pipeline construction elsewhere is
        # unaffected once we are done.
        savedActiveSource = GetActiveSource()

        newParaViewFilter = SliceWithPlane(Input=inInputFilter)
        newParaViewFilter.Plane = 'Plane'
        # Push the (possibly data-driven) origin/normal onto the new filter.
        self.UpdateSlice(inInputFilter, newParaViewFilter)

        SetActiveSource(newParaViewFilter)
        SetActiveSource(savedActiveSource)

        if PhactoriDbg(100):
            myDebugPrint3('PhactoriSliceWithPlaneOperation.CreateParaViewFilter returning\n', 100)

        return newParaViewFilter

    def DoUpdateDueToChangeInData(self, inIncomingPvFilter, outOutgoingPvFilter):
        """the PhactoriSliceWithPlaneOperation may need to update if the
        point on the slice plane was tied to a node, element, or variable
        min/max location"""
        if PhactoriDbg():
            myDebugPrint3("PhactoriSliceWithPlaneOperation::"
                          "DoUpdateDueToChangeInData override executing\n")

        # A plane defined by absolute points never moves with the data;
        # nothing to recompute in that case.
        if self.MayChangeWithData() == False:
            if PhactoriDbg():
                myDebugPrint3("PhactoriSliceWithPlanePlaneOperation::"
                              "DoUpdateDueToChangeInData returning (absolute point or points)\n")
            return

        self.UpdateSlice(inIncomingPvFilter, outOutgoingPvFilter)

        if PhactoriDbg():
            myDebugPrint3("PhactoriSliceWithPlanePlaneOperation::"
                          "DoUpdateDueToChangeInData override returning\n")

    def UpdateSlice(self, inIncomingPvFilter, ioOutgoingPvFilter):
        """using the current info on the slice, get all the paraview stuff
        set up correctly"""
        if PhactoriDbg():
            myDebugPrint3("PhactoriSliceWithPlanePlaneOperation::UpdateSlice entered\n")

        # Defaults; CalculateUpdatedOriginAndNormal (from PhactoriPlaneOpBase)
        # overwrites these lists in place.
        originToUse = [0, 0, 0]
        normalToUse = [0, 1, 0]

        self.CalculateUpdatedOriginAndNormal(
            inIncomingPvFilter, originToUse, normalToUse)

        if PhactoriDbg():
            myDebugPrint3(' updateslice using normal: ' + \
                str(normalToUse) + '\n')
        ioOutgoingPvFilter.Plane.Normal = normalToUse

        if PhactoriDbg():
            myDebugPrint3(' updateslice using origin: ' + str(originToUse) + '\n')
        ioOutgoingPvFilter.Plane.Origin = originToUse

        if PhactoriDbg():
            myDebugPrint3("PhactoriSliceWithPlanePlaneOperation::UpdateSlice returning\n")
#phactori_combine_to_single_python_file_subpiece_end_1
StarcoderdataPython
1723309
from django.contrib.auth.decorators import login_required
from django.urls import path

from .views import PostsListView, PostCreateView, PostDetailView, PostEditView

# Routes for the posts app. Create/edit are wrapped in login_required,
# so only authenticated users can reach them; listing and details are public.
urlpatterns = [
    path('', PostsListView.as_view(), name='main'),
    path('create/', login_required(PostCreateView.as_view()), name='create'),
    path('edit/<int:pk>/', login_required(PostEditView.as_view()), name='edit'),
    path('details/<int:pk>/', PostDetailView.as_view(), name='details'),
]

# URL namespace used when reversing, e.g. 'posts:create'.
app_name = 'posts'
StarcoderdataPython
3319779
# -*- coding: utf-8 -*-
from enum import Enum

from fastapi import APIRouter
from fastapi import Path
from fastapi import Query
from starlette import status
from starlette.requests import Request
from starlette.responses import RedirectResponse
from starlette.responses import Response

router = APIRouter()


class RedirectTypes(str, Enum):
    """Route-name prefixes for the three redirect flavours; each value is
    the prefix of the endpoint function name it targets."""
    DEFAULT = 'redirect'
    ABSOLUTE = 'absolute_redirect'
    RELATIVE = 'relative_redirect'


@router.api_route(
    '/redirect-to',
    methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'TRACE'],
    description='302/3XX Redirects to the given URL.',
    response_description='A redirection.',
    response_class=Response
)
async def redirect_to(
    *,
    url: str = Query(..., title='Redirect to URL'),
    status_code: int = Query(
        status.HTTP_302_FOUND,
        title='status code',
        ge=status.HTTP_300_MULTIPLE_CHOICES,
        lt=status.HTTP_400_BAD_REQUEST,
    ),
):
    """Redirect to *url* with any caller-chosen 3XX status code."""
    # Build the response by hand so the validated status code is used as-is.
    resp = Response(status_code=status_code)
    resp.headers['Location'] = url
    return resp


@router.get(
    '/redirect/{n}',
    description='302 Redirects n times.',
    response_description='A redirection.'
)
async def redirect_n_times(
    *,
    n: int = Path(..., title='Redirects n times.', gt=0, le=10),
    is_absolute: bool = Query(
        False, alias='absolute', title='is an absolute redirection?'
    ),
    request: Request
):
    """Chain n redirects, delegating to the absolute or relative variant."""
    if n == 1:
        # Last hop of the chain lands on the 'get' endpoint.
        return RedirectResponse(request.url_for('get'))

    if is_absolute:
        return _redirect(request, type_=RedirectTypes.ABSOLUTE, n=n)
    else:
        return _redirect(request, type_=RedirectTypes.RELATIVE, n=n)


@router.get(
    '/absolute-redirect/{n}',
    description='Absolutely 302 Redirects n times.',
    response_description='A redirection.',
    response_class=RedirectResponse,
)
async def absolute_redirect_n_times(
    *,
    n: int = Path(..., title='Redirects n times.', gt=0, le=10),
    request: Request
):
    """Chain n absolute-URL redirects ending at the 'get' endpoint."""
    if n == 1:
        return RedirectResponse(request.url_for('get'))

    return _redirect(request, type_=RedirectTypes.ABSOLUTE, n=n)


@router.get(
    '/relative-redirect/{n}',
    description='Relatively 302 Redirects n times.',
    response_description='A redirection.'
)
async def relative_redirect_n_times(
    *,
    n: int = Path(..., title='Redirects n times.', gt=0, le=10),
    request: Request
):
    """Chain n redirects built with a hand-made 302 response."""
    resp = Response(status_code=status.HTTP_302_FOUND)

    if n == 1:
        resp.headers['Location'] = request.url_for('get')
        return resp

    redirect_name = relative_redirect_n_times.__name__
    resp.headers['Location'] = request.url_for(redirect_name, n=n - 1)
    return resp


def _redirect(request: Request, type_: RedirectTypes, n: int):
    """Build a RedirectResponse to step n-1 of the chosen redirect chain."""
    # TODO external
    # Fix: use .value explicitly. f-string formatting of a str/Enum member is
    # Python-version-sensitive (the Enum __str__/__format__ behaviour changed
    # in 3.11), which could silently yield a wrong route name; .value is
    # unambiguous on every version.
    func_prefix = f'{type_.value}_n_times'
    return RedirectResponse(request.url_for(func_prefix, n=n - 1))
StarcoderdataPython
1773976
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
'''
.. _module_mc_inssserv:

mc_inssserv / inssserv functions
==================================

'''
# Import python libs
import logging
import mc_states.api

__name = 'inssserv'
# Settings-tree prefix under which this registry's values live.
PREFIX = 'makina-states.localsettings.{0}'.format(__name)
log = logging.getLogger(__name__)


def settings():
    '''
    inssserv settings

    Builds and returns the lazily-registered settings mapping for this
    module; currently it only declares the dnsmasq insserv override file
    under ``configs``.
    '''
    # NOTE(review): __grains__ / __salt__ are globals injected by the Salt
    # loader at runtime; the two locals below are bound but unused here.
    _g = __grains__
    _s = __salt__

    @mc_states.api.lazy_subregistry_get(__salt__, __name)
    def _settings():
        # Config files this registry manages; an empty dict means default
        # handling for the entry.
        data = {
            'configs': {
                '/etc/insserv.conf.d/dnsmasq': {},
            }
        }
        return data
    return _settings()
# vim:set et sts=4 ts=4 tw=80:
1687674
import winsound

import cv2
import numpy as np

learning_parameter = 0.005


def main():
    """Watch the default webcam and beep with a pitch proportional to the
    fraction of pixels the MOG2 background subtractor marks as moving."""
    capture = cv2.VideoCapture(0)
    subtractor = cv2.createBackgroundSubtractorMOG2()
    while True:
        _ok, frame = capture.read()
        frame = cv2.flip(frame, 1)
        cv2.imshow('my webcam', frame)

        # Downscale the grayscale frame 4x before background subtraction.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.resize(gray, (0, 0), fx=0.25, fy=0.25)

        foreground = subtractor.apply(gray, learningRate=learning_parameter)
        moving = np.array(foreground >= 127, dtype='uint8')
        motion_ratio = np.sum(moving) / foreground.size

        # Map motion in [0, 1] onto an audible 100-2000 Hz beep.
        pitch = int(3800 * motion_ratio / 2 + 100)
        winsound.Beep(pitch, 100)

        if cv2.waitKey(1) == 27:  # esc to quit
            break
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
StarcoderdataPython
1743602
# coding: utf-8

# # Variable Types - Strings (2)
#
# In the last class we learnt how to create strings, how to convert between string, integers and floats using the <code>str()</code>, <code>int()</code> and <code>float()</code> functions respectively. We also learnt that we can add and multiply strings, but that we cannot divide or subtract them.
#
# In this lesson we're going to learn how to index and slice strings, as well as how to implement some common functions on them.
#
# ## Indexing and Slicing Strings
#
# Each character in a string has a numeric position, starting from 0. We can access the character at this position by using square brackets:

# In[1]:

testString = "Hi there, this is a string"
testString[0]


# In[2]:

testString[1]


# We can access the last character with <code>[-1]</code>, and the next-to-last with <code>[-2]</code> and so on:

# In[3]:

testString[-1]


# In[4]:

testString[-2]


# We can take slices of a string using <code>[0:5]</code>. A string slice includes the first number and stops just before the second:

# In[5]:

testString2 = '0123456789'
testString2[0:5]


# We can take a slice starting from the beginning of the string by leaving the first index blank:

# In[6]:

testString2[:5]


# We can slice to the end by leaving the last index blank:

# In[7]:

testString[5:]


# We can also use variables instead of the numbers in a string slice:

# In[8]:

x = 3
y = 6
testString2[x:y]


# ### String functions and methods
#
# There are many different functions which we can use with strings. We'll take a look at the most useful.
#
# #### Get the length of a string
#
# We can find the length of a string by using the <code>len()</code> function:

# In[9]:

testString = "Hi there, this is a string"
print(len(testString))


# #### Find out if a word or character is in a string
#
# We can see if a string contains another string:

# In[10]:

testString = "Hi there, this is a string"
print('string' in testString)


# In[11]:

print('badger' in testString)


# In[12]:

print('badger' not in testString)


# We can find the position of a string within another string using the <code>str.find()</code> function.
#
# This function returns the index of the first letter of the substring within the string, or -1 if the string is not found:

# In[13]:

testString.find('string')


# In[14]:

testString[20:26]


# In[15]:

testString.find('badger')


# ## What have we learnt in this lesson?

# In this lesson we've learnt more about strings in Python. We've seen how to index and slice strings, as well as how to find the length of a string and how to find the position of a substring within the main string.
#
# The next lesson is about Boolean, or True and False values. Thanks for watching.

# If you have any questions, please ask in the comments section or email <a href="mailto:<EMAIL>"><EMAIL></a>
StarcoderdataPython
3262038
import os
import glob
import shutil
# from creator import create
from flask import Flask, send_from_directory, flash, request, redirect, url_for, render_template
from werkzeug.utils import secure_filename
from parser import parse_arguments
from functions.transformer import get_transforms
from functions.makeDataset import make_dataset
from functions.loadNetwork import load_network
from functions.getInput import get_input
from functions.normalize import normalize_lab, normalize_rgb, normalize_seg, denormalize_lab, denormalize_rgb
from functions.vis import vis_image, vis_patch
from functions.getInputv import get_inputv
from classes.imageFolder import ImageFolder
from classes.textureGan import TextureGAN
from PIL import Image
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.utils.data import DataLoader

# Hard-coded dataset layout used by the TextureGAN inference pipeline.
# NOTE(review): absolute, machine-specific paths -- these should come from
# configuration/environment for any deployment beyond this workstation.
SKETCH_FOLDER = '/Users/spuliz/Desktop/schedio/img/val_skg/wendy'
VAL_SEG_FOLDER = '/Users/spuliz/Desktop/schedio/img/val_seg/wendy'
VAL_ERODED_FOLDER = '/Users/spuliz/Desktop/schedio/img/eroded_val_seg/wendy'
IMG_FOLDER = '/Users/spuliz/Desktop/schedio/img/val_img/wendy'
TEXTURE_FOLDER = '/Users/spuliz/Desktop/schedio/img/val_txt/wendy'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}

app = Flask(__name__, static_url_path='/Users/spuliz/Desktop/schedio/static')

# The absolute path of the directory containing images for users to upload
app.config['SKETCH_FOLDER'] = SKETCH_FOLDER
app.config['VAL_SEG_FOLDER'] = VAL_SEG_FOLDER
app.config['VAL_ERODED_FOLDER'] = VAL_ERODED_FOLDER
app.config['TEXTURE_FOLDER'] = TEXTURE_FOLDER

# The absolute path of the directory containing images for users to download
app.config["CLIENT_IMAGES"] = "/Users/spuliz/Desktop/schedio/img/output"


def allowed_file(filename):
    # Accept only filenames with an extension from the allow-list
    # (case-insensitive).
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


@app.route('/home', methods=['GET', 'POST'])
def home():
    # Static landing page.
    return render_template('home.html')


@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """Bag flow step 1: accept a sketch upload, fan it out to the dataset
    folders, then send the user to the texture-upload step."""
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if user does not select file, browser also
        # submit an empty part without filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # NOTE(review): `filename` is computed but unused -- the upload is
            # always saved as 'sketch.jpg', clobbering any previous sketch.
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['SKETCH_FOLDER'], 'sketch.jpg'))
            # Copy every sketch into the segmentation/eroded/image folders the
            # dataset loader expects.
            for jpgfile in glob.iglob(os.path.join(SKETCH_FOLDER, "*.jpg")):
                shutil.copy(jpgfile, VAL_SEG_FOLDER)
                shutil.copy(jpgfile, VAL_ERODED_FOLDER)
                shutil.copy(jpgfile, IMG_FOLDER)
            # file.save(os.path.join(app.config['VAL_SEG_FOLDER'], 'sketch.jpg'))
            # file.save(os.path.join(app.config['VAL_ERODED_FOLDER'], 'sketch.jpg'))
            # return redirect(url_for('uploaded_file',filename=filename))
            return redirect(url_for('upload_texture'))
    return render_template('main.html')


@app.route('/garment', methods=['GET', 'POST'])
def upload_garment():
    """Garment flow step 1: same upload handling as upload_file but routes to
    the cloth-texture step. NOTE(review): near-duplicate of upload_file --
    candidate for a shared helper."""
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if user does not select file, browser also
        # submit an empty part without filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['SKETCH_FOLDER'], 'sketch.jpg'))
            for jpgfile in glob.iglob(os.path.join(SKETCH_FOLDER, "*.jpg")):
                shutil.copy(jpgfile, VAL_SEG_FOLDER)
                shutil.copy(jpgfile, VAL_ERODED_FOLDER)
                shutil.copy(jpgfile, IMG_FOLDER)
            # file.save(os.path.join(app.config['VAL_SEG_FOLDER'], 'sketch.jpg'))
            # file.save(os.path.join(app.config['VAL_ERODED_FOLDER'], 'sketch.jpg'))
            # return redirect(url_for('uploaded_file',filename=filename))
            return redirect(url_for('upload_cloth_texture'))
    return render_template('garment.html')


@app.route('/upload_texture', methods=['GET', 'POST'])
def upload_texture():
    """Bag flow step 2: accept a texture image (always saved as texture.jpg)."""
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if user does not select file, browser also
        # submit an empty part without filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['TEXTURE_FOLDER'], 'texture.jpg'))
            # return redirect(url_for('uploaded_file',filename=filename))
            return redirect(url_for('profit'))
    return render_template('bag_texture.html')


@app.route('/upload_cloth_texture', methods=['GET', 'POST'])
def upload_cloth_texture():
    """Garment flow step 2: accept a texture image for the cloth model.
    NOTE(review): duplicate of upload_texture except for the redirect target."""
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if user does not select file, browser also
        # submit an empty part without filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['TEXTURE_FOLDER'], 'texture.jpg'))
            # return redirect(url_for('uploaded_file',filename=filename))
            return redirect(url_for('profit_cloth'))
    return render_template('cloth_texture.html')


@app.route('/profit', methods=['GET', 'POST'])
def profit():
    """Bag flow step 3: run TextureGAN (handbag weights) on the uploaded
    sketch+texture and return the rendered output image."""
    if request.method == 'POST':
        command = '--model texturegan --local_texture_size 50 --color_space lab'
        args = parse_arguments(command.split())
        args.batch_size = 1
        args.image_size = 152
        args.resize_max = 256
        args.resize_min = 256
        args.data_path = '/Users/spuliz/Desktop/schedio/img'  # change to your data path
        transform = get_transforms(args)
        # NOTE(review): `val` is built but never used below.
        val = make_dataset(args.data_path, 'val')
        valDset = ImageFolder('val', args.data_path, transform)
        val_display_size = 1
        valLoader = DataLoader(dataset=valDset, batch_size=val_display_size, shuffle=False)
        # pre-trained model for handbags
        model_location = '/Users/spuliz/Desktop/schedio/textureD_final_allloss_handbag_3300.pth'  # change to your location
        netG = TextureGAN(5, 3, 32)
        load_network(netG, model_location)
        netG.eval()
        data = valLoader.__iter__().__next__()
        color_space = 'lab'
        img, skg, seg, eroded_seg, txt = data
        img = normalize_lab(img)
        skg = normalize_lab(skg)
        txt = normalize_lab(txt)
        seg = normalize_seg(seg)
        eroded_seg = normalize_seg(eroded_seg)
        inp, texture_loc = get_input(data, -1, -1, 30, 1)
        seg = seg != 0
        model = netG
        device = torch.device("cpu")
        inpv = get_inputv(inp.to(device))
        output = model(inpv.to(device))
        out_img = vis_image(denormalize_lab(output.data.double().cpu()), color_space)
        # Render with matplotlib (Agg backend) and save to the download folder.
        plt.figure()
        plt.imshow(np.transpose(out_img[0], (1, 2, 0)))
        plt.axis('off')
        plt.savefig('/Users/spuliz/Desktop/schedio/img/output/output.png', dpi=1000)
        return send_from_directory(app.config["CLIENT_IMAGES"], filename='output.png', as_attachment=True)
        # NOTE(review): unreachable dead code after the return above; the URL
        # is also not a valid url_for endpoint name.
        return redirect(url_for('http://127.0.0.1:5000/profit/output.png'))
    return render_template('profit_bag.html')


@app.route('/profit_cloth', methods=['GET', 'POST'])
def profit_cloth():
    """Garment flow step 3: same pipeline as profit() but with the cloth
    fine-tuned weights. NOTE(review): near-duplicate of profit() -- only the
    checkpoint path and output filename differ; candidate for a shared helper."""
    if request.method == 'POST':
        command = '--model texturegan --local_texture_size 50 --color_space lab'
        args = parse_arguments(command.split())
        args.batch_size = 1
        args.image_size = 152
        args.resize_max = 256
        args.resize_min = 256
        args.data_path = '/Users/spuliz/Desktop/schedio/img'  # change to your data path
        transform = get_transforms(args)
        val = make_dataset(args.data_path, 'val')
        valDset = ImageFolder('val', args.data_path, transform)
        val_display_size = 1
        valLoader = DataLoader(dataset=valDset, batch_size=val_display_size, shuffle=False)
        # pre-trained model for handbags
        model_location = '/Users/spuliz/Desktop/schedio/final_cloth_finetune.pth'  # change to your location
        netG = TextureGAN(5, 3, 32)
        load_network(netG, model_location)
        netG.eval()
        data = valLoader.__iter__().__next__()
        color_space = 'lab'
        img, skg, seg, eroded_seg, txt = data
        img = normalize_lab(img)
        skg = normalize_lab(skg)
        txt = normalize_lab(txt)
        seg = normalize_seg(seg)
        eroded_seg = normalize_seg(eroded_seg)
        inp, texture_loc = get_input(data, -1, -1, 30, 1)
        seg = seg != 0
        model = netG
        device = torch.device("cpu")
        inpv = get_inputv(inp.to(device))
        output = model(inpv.to(device))
        out_img = vis_image(denormalize_lab(output.data.double().cpu()), color_space)
        plt.figure()
        plt.imshow(np.transpose(out_img[0], (1, 2, 0)))
        plt.axis('off')
        plt.savefig('/Users/spuliz/Desktop/schedio/img/output/output.jpg')
        return send_from_directory(app.config["CLIENT_IMAGES"], filename='output.jpg', as_attachment=True)
        # NOTE(review): unreachable dead code after the return above.
        return redirect(url_for('http://127.0.0.1:5000/profit/output.jpg'))
    return render_template('profit_cloth.html')
StarcoderdataPython
73243
<gh_stars>100-1000 #!/usr/bin/env python from __future__ import print_function from fileinput import input from sgp4.vallado_cpp import Satrec def main(): lines = iter(input()) for line in lines: name = line line1 = next(lines) line2 = next(lines) sat = Satrec.twoline2rv(line1, line2) for name in dir(sat): if name.startswith('_') or name in ('sgp4', 'twoline2rv'): continue value = getattr(sat, name) print(name, value) print() if __name__ == '__main__': try: main() except BrokenPipeError: pass
StarcoderdataPython
4813551
from django.core.validators import ValidationError
from django.test import TestCase

from ..validators import ExactLengthsValidator


class ExactLengthsValidatorTestCase(TestCase):
    def test_validator_message(self):
        """The raised ``ValidationError`` carries the expected message."""
        validator = ExactLengthsValidator([10, 14, 566, 1])
        with self.assertRaises(ValidationError) as err:
            validator("12")
        self.assertIn(
            "Ensure this value has length of any [10, 14, 566, 1] (it has 2).",
            err.exception,
        )

    def test_validator_values(self):
        """Values with an allowed length pass; any other length raises."""
        validator = ExactLengthsValidator([10, 14])

        # Allowed lengths: validator must return without raising.
        for valid_sample in ("1" * 10, "1" * 14):
            validator(valid_sample)

        # Disallowed lengths (below, between, above): validator must raise.
        for invalid_sample in ("1" * 9, "1" * 12, "1" * 15):
            with self.assertRaises(ValidationError):
                validator(invalid_sample)
StarcoderdataPython
1626216
# Generated by Django 3.2.7 on 2022-03-23 14:59 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('gui', '0024_autofees'), ] operations = [ migrations.DeleteModel( name='Resolutions', ), migrations.DeleteModel( name='Closures', ), migrations.CreateModel( name='Resolutions', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('chan_id', models.CharField(max_length=20)), ('resolution_type', models.IntegerField()), ('outcome', models.IntegerField()), ('outpoint_tx', models.CharField(max_length=64)), ('outpoint_index', models.IntegerField()), ('amount_sat', models.BigIntegerField()), ('sweep_txid', models.CharField(max_length=64)), ], ), migrations.CreateModel( name='Closures', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('chan_id', models.CharField(max_length=20)), ('funding_txid', models.CharField(max_length=64)), ('funding_index', models.IntegerField()), ('closing_tx', models.CharField(max_length=64)), ('remote_pubkey', models.CharField(max_length=66)), ('capacity', models.BigIntegerField()), ('close_height', models.IntegerField()), ('settled_balance', models.BigIntegerField()), ('time_locked_balance', models.BigIntegerField()), ('close_type', models.IntegerField()), ('open_initiator', models.IntegerField()), ('close_initiator', models.IntegerField()), ('resolution_count', models.IntegerField()), ], options={ 'unique_together': {('funding_txid', 'funding_index')}, }, ), ]
StarcoderdataPython
53327
import pyjion
import timeit
from statistics import fmean


def test_floats(n=10000):
    """Float-arithmetic loop used as a JIT micro-benchmark body."""
    for i in range(n):
        acc = 0.1
        tmp = i * i + acc - i
        acc *= tmp


def test_ints(n=10000):
    """Integer-arithmetic loop used as a JIT micro-benchmark body."""
    for i in range(n):
        acc = 2
        tmp = i * i + acc - i
        acc *= tmp


if __name__ == "__main__":
    # Time each benchmark body with and without the Pyjion JIT and report
    # min/max/mean of five repeats plus the relative speed difference.
    for bench in (test_floats, test_ints):
        baseline = timeit.repeat(bench, repeat=5, number=1000)
        print("{0} took {1} min, {2} max, {3} mean without Pyjion".format(
            str(bench), min(baseline), max(baseline), fmean(baseline)))

        pyjion.enable()
        pyjion.set_optimization_level(1)
        jitted = timeit.repeat(bench, repeat=5, number=1000)
        pyjion.disable()
        print("{0} took {1} min, {2} max, {3} mean with Pyjion".format(
            str(bench), min(jitted), max(jitted), fmean(jitted)))

        delta = (abs(fmean(jitted) - fmean(baseline)) / fmean(baseline)) * 100.0
        print(f"Pyjion is {delta:.2f}% faster")
StarcoderdataPython
1757118
# https://leetcode.com/problems/battleships-in-a-board/
from typing import List


class Solution:
    """Count battleships on a board where ships never touch each other.

    Because no two ships are adjacent, any two touching 'X' cells belong to
    the same ship. A cell therefore starts a new ship exactly when neither
    the cell above it nor the cell to its left is an 'X' — so one O(rows*cols)
    scan suffices, counting only each ship's top-left-most cell.
    """

    def valid_neighbors(self, row_index: int, col_index: int) -> List[List[int]]:
        """Return the in-bounds coordinates directly above and to the left."""
        return [
            [r, c]
            for r, c in ((row_index - 1, col_index), (row_index, col_index - 1))
            if r >= 0 and c >= 0
        ]

    def countBattleships(self, board: List[List[str]]) -> int:
        """Return the number of battleships ('X' runs) on *board*."""
        nships = 0
        for row_index, row in enumerate(board):
            for col_index, val in enumerate(row):
                if val != 'X':
                    continue
                # Count this cell only if it is not the continuation of a ship
                # already seen above or to the left.
                if not any(board[r][c] == 'X'
                           for r, c in self.valid_neighbors(row_index, col_index)):
                    nships += 1
        return nships
StarcoderdataPython
145387
from django.apps import AppConfig


class AuthManagerConfig(AppConfig):
    # Django application configuration for the auth_manager app.
    name = 'auth_manager'
    verbose_name = 'auth manager'

    def ready(self):
        # Import the signals module for its side effect of registering the
        # app's signal receivers once the app registry is fully loaded.
        from . import signals
StarcoderdataPython
4810091
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os

from setuptools import find_packages, setup


def read(fname):
    """Return the UTF-8 decoded contents of *fname*, relative to this file."""
    file_path = os.path.join(os.path.dirname(__file__), fname)
    return codecs.open(file_path, encoding="utf-8").read()


# The version lives on the first line of aws_sfn_builder/__init__.py as
#   __version__ = "x.y.z"
# Parse it out of the source instead of importing the package.
_first_line = read("aws_sfn_builder/__init__.py").split("\n")[0]
_version = _first_line.split("=", 1)[1].strip().strip('"')

setup(
    name="aws-sfn-builder",
    version=_version,
    author="<NAME>",
    author_email="<EMAIL>",
    maintainer="<NAME>",
    maintainer_email="<EMAIL>",
    license="MIT",
    url="https://github.com/jbasko/aws-sfn-builder",
    description="AWS Step Functions: state machine boilerplate generator",
    long_description=read("README.rst"),
    packages=find_packages(exclude=["integration_tests", "tests"]),
    python_requires=">=3.6.0",
    install_requires=[
        "bidict",
        "dataclasses",
        "jsonpath-ng",
    ],
    keywords=[
        "aws",
        "asl",
        "sfn",
        "step functions",
        "state machine",
        "boilerplate",
    ],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: MIT License",
    ],
)
StarcoderdataPython
3357610
from dataclasses import dataclass, field
from typing import Any, List


@dataclass
class TeamReport:
    """Release report collected for a single team.

    Attributes:
        team_id: Identifier of the team the report belongs to.
        release: Release label; empty until one is assigned.
        issues: Issues gathered for this report (starts empty; each
            instance gets its own list via ``default_factory``).
    """

    team_id: Any
    release: str = ""
    issues: List[Any] = field(default_factory=list)
StarcoderdataPython
186375
import sys

# Coordinate deltas for a pointy-top hex grid: diagonal moves change the
# column (x) by 1 and the row (y) by 1; pure n/s moves change y by 2.
# Every move keeps x + y even.
_MOVES = {
    'n': (0, 2), 's': (0, -2),
    'ne': (1, 1), 'nw': (-1, 1),
    'se': (1, -1), 'sw': (-1, -1),
}


def hex_distance(moves):
    """Return the minimum number of hex steps back to the origin.

    *moves* is an iterable of direction strings from
    {'n', 's', 'ne', 'nw', 'se', 'sw'}; an unknown direction raises
    ``KeyError`` instead of being silently ignored.
    """
    x = y = 0
    for move in moves:
        dx, dy = _MOVES[move]
        x += dx
        y += dy
    x, y = abs(x), abs(y)
    # At least x steps are needed (each step moves x by at most 1) and the
    # diagonal steps cover one unit of y each; the remaining y - x units
    # are covered two at a time by n/s steps.  (The previous formula
    # (x + y) // 2 undercounted whenever x > y, e.g. ['ne', 'se'] -> 1
    # instead of the correct 2.)
    return x + max(0, (y - x) // 2)


if __name__ == '__main__':
    with open(sys.argv[1]) as f:
        instruct = f.read().strip().split(",")
    print(hex_distance(instruct))
StarcoderdataPython
39464
#!/usr/bin/env python
# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import os
import shutil
import sys
import unittest
from subprocess import STDOUT, run

from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2

# Make the parent directory importable so the fatfsgen module can be loaded.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import fatfsgen  # noqa E402 # pylint: disable=C0413


class FatFSGen(unittest.TestCase):
    """Round-trip tests for fatfsgen/fatfsparse.

    Each test builds a FAT image (either via the fatfsgen Python API or by
    invoking fatfsgen.py as a subprocess), parses it back with fatfsparse.py
    (which extracts the image into an 'Espressif' directory), and asserts
    the extracted tree matches what was written.
    """

    def setUp(self) -> None:
        # Fresh input fixture for every test.
        os.makedirs('output_data')
        generate_test_dir_2()

    def tearDown(self) -> None:
        # Remove everything a test may have produced: fixtures, the
        # extracted 'Espressif' tree, locally generated trees, and the image.
        shutil.rmtree('output_data', ignore_errors=True)
        shutil.rmtree('Espressif', ignore_errors=True)
        shutil.rmtree('testf', ignore_errors=True)
        if os.path.exists('fatfs_image.img'):
            os.remove('fatfs_image.img')

    @staticmethod
    def test_gen_parse() -> None:
        # Generate an image from the fixture directory, then parse it back.
        # NOTE(review): fatfsgen.py is invoked twice (absolute and relative
        # path); the first run's image is overwritten by the second — the
        # first call looks redundant. Confirm before removing.
        run([
            'python', f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
            'output_data/tst_str'
        ], stderr=STDOUT)
        run(['python', '../fatfsgen.py', 'output_data/tst_str'], stderr=STDOUT)
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert set(os.listdir('Espressif')) == {'TEST', 'TESTFILE'}
        with open('Espressif/TESTFILE', 'rb') as in_:
            assert in_.read() == b'ahoj\n'
        assert set(os.listdir('Espressif/TEST')) == {'TEST', 'TESTFIL2'}
        with open('Espressif/TEST/TESTFIL2', 'rb') as in_:
            assert in_.read() == b'thisistest\n'
        assert set(os.listdir('Espressif/TEST/TEST')) == {'LASTFILE.TXT'}
        with open('Espressif/TEST/TEST/LASTFILE.TXT', 'rb') as in_:
            assert in_.read() == b'deeptest\n'

    @staticmethod
    def test_file_chaining() -> None:
        # A file one byte larger than a sector must chain into a second cluster.
        fatfs = fatfsgen.FATFS()
        fatfs.create_file('WRITEF', extension='TXT')
        fatfs.write_content(path_from_root=['WRITEF.TXT'], content=4096 * b'a' + b'a')
        fatfs.write_filesystem('fatfs_image.img')
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        with open('Espressif/WRITEF.TXT', 'rb') as in_:
            assert in_.read() == 4097 * b'a'

    @staticmethod
    def test_full_two_sectors_folder() -> None:
        # Fill a directory with exactly as many entries as fit in two
        # sectors (32 bytes per directory entry).
        fatfs = fatfsgen.FATFS(size=2 * 1024 * 1024)
        fatfs.create_directory('TESTFOLD')

        for i in range((2 * 4096) // 32):
            fatfs.create_file(f'A{str(i).upper()}', path_from_root=['TESTFOLD'])
        fatfs.write_content(path_from_root=['TESTFOLD', 'A253'], content=b'later')
        fatfs.write_content(path_from_root=['TESTFOLD', 'A255'], content=b'last')
        fatfs.write_filesystem('fatfs_image.img')
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)

        assert set(os.listdir('Espressif')) == {'TESTFOLD'}
        assert set(os.listdir('Espressif/TESTFOLD')) == {f'A{str(i).upper()}' for i in range(256)}
        with open('Espressif/TESTFOLD/A253', 'rb') as in_:
            assert in_.read() == b'later'
        with open('Espressif/TESTFOLD/A255', 'rb') as in_:
            assert in_.read() == b'last'

    @staticmethod
    def test_empty_fat16() -> None:
        # 17 MB image size forces the FAT16 variant (vs FAT12 above).
        fatfs = fatfsgen.FATFS(size=17 * 1024 * 1024)
        fatfs.write_filesystem('fatfs_image.img')
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)

    @staticmethod
    def test_chaining_fat16() -> None:
        # Same cluster-chaining check as test_file_chaining, on FAT16.
        fatfs = fatfsgen.FATFS(size=17 * 1024 * 1024)
        fatfs.create_file('WRITEF', extension='TXT')
        fatfs.write_content(path_from_root=['WRITEF.TXT'], content=4096 * b'a' + b'a')
        fatfs.write_filesystem('fatfs_image.img')
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        with open('Espressif/WRITEF.TXT', 'rb') as in_:
            assert in_.read() == 4097 * b'a'

    @staticmethod
    def test_full_sector_folder_fat16() -> None:
        # Fill one directory sector exactly (via fill_sector) on FAT16.
        fatfs = fatfsgen.FATFS(size=17 * 1024 * 1024)
        fatfs.create_directory('TESTFOLD')
        fill_sector(fatfs)
        fatfs.write_content(path_from_root=['TESTFOLD', 'A0'], content=b'first')
        fatfs.write_content(path_from_root=['TESTFOLD', 'A126'], content=b'later')
        fatfs.write_filesystem('fatfs_image.img')
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert set(os.listdir('Espressif')) == {'TESTFOLD'}
        assert set(os.listdir('Espressif/TESTFOLD')) == {f'A{str(i).upper()}' for i in range(128)}
        with open('Espressif/TESTFOLD/A0', 'rb') as in_:
            assert in_.read() == b'first'
        with open('Espressif/TESTFOLD/A126', 'rb') as in_:
            assert in_.read() == b'later'

    @staticmethod
    def file_(x: str, content_: str = 'hey this is a test') -> dict:
        # Helper: node description for generate_local_folder_structure.
        return {
            'type': 'file',
            'name': x,
            'content': content_
        }

    def test_e2e_file(self) -> None:
        # Single file in a folder: local tree -> image -> parsed tree.
        struct_: dict = {
            'type': 'folder',
            'name': 'testf',
            'content': [self.file_('NEWF')]
        }
        generate_local_folder_structure(struct_, path_='.')
        run([
            'python', f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
            'testf'
        ], stderr=STDOUT)
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert compare_folders('testf', 'Espressif')

    def test_e2e_deeper(self) -> None:
        # One nested folder with several small files.
        folder_ = {
            'type': 'folder',
            'name': 'XYZ',
            'content': [
                self.file_('NEWFLE'),
                self.file_('NEW.TXT'),
                self.file_('NEWE.TXT'),
                self.file_('NEW4.TXT'),
                self.file_('NEW5.TXT'),
            ]
        }
        struct_: dict = {
            'type': 'folder',
            'name': 'testf',
            'content': [
                self.file_('MY_NEW'),
                folder_
            ]
        }
        generate_local_folder_structure(struct_, path_='.')
        run([
            'python', f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
            'testf'
        ], stderr=STDOUT)
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert compare_folders('testf', 'Espressif')

    def test_e2e_deeper_large(self) -> None:
        # Several folders, including multi-cluster files (> 4096 bytes)
        # and a folder with 50 entries.
        folder_ = {
            'type': 'folder',
            'name': 'XYZ',
            'content': [
                self.file_('NEWFLE', content_=4097 * 'a'),
                self.file_('NEW.TXT', content_=2 * 4097 * 'a'),
                self.file_('NEWE.TXT'),
                self.file_('NEW4.TXT'),
                self.file_('NEW5.TXT'),
            ]
        }
        folder2_ = {
            'type': 'folder',
            'name': 'XYZ3',
            'content': [
                self.file_('NEWFLE', content_=4097 * 'a'),
                self.file_('NEW.TXT', content_=2 * 4097 * 'a'),
                self.file_('NEWE.TXT'),
                self.file_('NEW4.TXT'),
                self.file_('NEW5.TXT'),
            ]
        }
        folder3_ = {
            'type': 'folder',
            'name': 'XYZ2',
            'content': [self.file_(f'A{i}') for i in range(50)]
        }
        struct_: dict = {
            'type': 'folder',
            'name': 'testf',
            'content': [
                self.file_('MY_NEW'),
                folder_,
                folder2_,
                folder3_
            ]
        }
        generate_local_folder_structure(struct_, path_='.')
        run([
            'python',
            f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
            'testf'
        ], stderr=STDOUT)
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert compare_folders('testf', 'Espressif')

    def test_e2e_very_deep(self) -> None:
        # Folders nested inside folders (three levels), mixing large files.
        folder_ = {
            'type': 'folder',
            'name': 'XYZ',
            'content': [
                self.file_('NEWFLE', content_=4097 * 'a'),
                self.file_('NEW.TXT', content_=2 * 4097 * 'a'),
                self.file_('NEWE.TXT'),
                self.file_('NEW4.TXT'),
                self.file_('NEW5.TXT'),
            ]
        }
        folder2_ = {
            'type': 'folder',
            'name': 'XYZ3',
            'content': [
                self.file_('NEWFLE', content_=4097 * 'a'),
                self.file_('NEW.TXT', content_=2 * 4097 * 'a'),
                self.file_('NEWE.TXT'),
                self.file_('NEW4.TXT'),
                self.file_('NEW5.TXT'),
                folder_,
            ]
        }
        folder3_ = {
            'type': 'folder',
            'name': 'XYZ2',
            'content': [self.file_(f'A{i}') for i in range(50)] + [folder2_]
        }
        struct_: dict = {
            'type': 'folder',
            'name': 'testf',
            'content': [
                self.file_('MY_NEW'),
                folder_,
                folder2_,
                folder3_
            ]
        }
        generate_local_folder_structure(struct_, path_='.')
        run([
            'python',
            f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
            'testf'
        ], stderr=STDOUT)
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert compare_folders('testf', 'Espressif')

    def test_e2e_very_deep_long(self) -> None:
        # Long (VFAT) file names: both the generator and the parser are
        # invoked with their respective long-name flags.
        folder_ = {
            'type': 'folder',
            'name': 'veryveryverylong111',
            'content': [
                self.file_('myndewveryverylongfile1.txt', content_=4097 * 'a'),
                self.file_('mynewveryverylongfile22.txt', content_=2 * 4097 * 'a'),
                self.file_('mynewveryverylongfile333.txt' * 8),
                self.file_('mynewveryverylongfile4444.txt' * 8),
                self.file_('mynewveryverylongfile5555.txt'),
                self.file_('SHORT.TXT'),
            ]
        }
        struct_: dict = {
            'type': 'folder',
            'name': 'testf',
            'content': [
                self.file_('mynewveryverylongfile.txt' * 5),
                folder_,
            ]
        }
        generate_local_folder_structure(struct_, path_='.')
        run([
            'python',
            f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
            'testf',
            '--long_name_support'
        ], stderr=STDOUT)
        run(['python', '../fatfsparse.py', 'fatfs_image.img',
             '--long-name-support'], stderr=STDOUT)
        assert compare_folders('testf', 'Espressif')


if __name__ == '__main__':
    unittest.main()
StarcoderdataPython
1713193
# Odoo module manifest (__manifest__.py) for the ProPortal addon.
{
    'name': "ProPortal",
    'summary': """ Portal Upgrade Module that adds Advanced Features""",
    'description': """ Module that allows expands Customer Portal """,
    'author': "<NAME>",

    # Categories can be used to filter modules in modules listing
    # Check https://github.com/odoo/odoo/blob/14.0/odoo/addons/base/data/ir_module_category_data.xml
    # for the full list
    'category': 'Sales',
    'version': '0.4',

    # any module necessary for this one to work correctly
    'depends': ['base', 'stock_account', 'product', 'purchase', 'stock', 'portal'],

    # always loaded
    'data': [
        #'security/ir.model.access.csv',
        'views/partnerView.xml',
        'views/productView.xml',
        'views/stockView.xml',
        'views/customer_portal.xml'
    ],
    # only loaded in demonstration mode
    'demo': [
        'demo/demo.xml',
    ],
}
StarcoderdataPython
1683481
""" Shamefully copied from https://www.reddit.com/r/adventofcode/comments/7lte5z/2017_day_24_solutions/droyesm/ """ from collections import defaultdict def gen_bridges(library, bridge=None): l, s, components, a = bridge or (0, 0, set(), 0) for b in library[a]: next = (a, b) if a <= b else (b, a) if next not in components: new = l+1, s+a+b, (components | {next}), b yield new; yield from gen_bridges(library, new) def solve(input): library = defaultdict(set) for l in input.strip().splitlines(): a, b = [int(x) for x in l.split('/')] library[a].add(b); library[b].add(a) return [b[:2] for b in gen_bridges(library)] with open('components.txt') as f: input = f.read() bridges = solve(input) # A list of (length, strength) tuples part1 = sorted(bridges, key=lambda x: x[1])[-1][1] # Sort by strength only part2 = sorted(bridges)[-1][1] # Sort by length, then by strength print(part1) print(part2)
StarcoderdataPython
1640018
from setuptools import setup

# Minimal test distribution: declares a single optional dependency group
# ("extra") so that installing `requires_simple_extra[extra]` pulls in
# simple==1.0.
setup(name='requires_simple_extra',
      version='0.1',
      py_modules=['requires_simple_extra'],
      extras_require={
          'extra': ['simple==1.0']
      }
      )
StarcoderdataPython
142419
# Prefer setuptools; fall back to the stdlib distutils when it is missing.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='monotonic_cffi',
    version='0.1',
    license='Apache',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/rkyoto/monotonic_cffi',
    classifiers=(
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ),
    py_modules=['monotonic_cffi'],
    install_requires=['cffi'],
)
StarcoderdataPython
1657513
# KicadModTree is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KicadModTree is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
#
# (C) 2016 by <NAME>, <<EMAIL>>

from KicadModTree.Vector import *
from KicadModTree.PolygonPoints import *
from KicadModTree.nodes.Node import Node
from KicadModTree.nodes.base.Line import Line


class PolygoneLine(Node):
    r"""Add a Polygone Line to the render tree

    :param \**kwargs:
        See below

    :Keyword Arguments:
        * *polygone* (``list(Point)``) --
          edges of the polygone
        * *layer* (``str``) --
          layer on which the polygone is drawn (default: 'F.SilkS')
        * *width* (``float``) --
          width of the line (default: None, which means auto detection)

    :Example:

    >>> from KicadModTree import *
    >>> PolygoneLine(polygone=[[0, 0], [0, 1], [1, 1], [0, 0]], layer='F.SilkS')
    """

    def __init__(self, **kwargs):
        Node.__init__(self)
        self.layer = kwargs.get('layer', 'F.SilkS')
        self.width = kwargs.get('width')
        self._initPolyPoint(**kwargs)
        # Pre-built Line segments exposed through getVirtualChilds().
        self.virtual_childs = self._createChildNodes(self.nodes)

    def _initPolyPoint(self, **kwargs):
        # Parse the 'polygone' keyword into a PolygonPoints container.
        self.nodes = PolygonPoints(**kwargs)

    def _createChildNodes(self, polygone_line):
        # Turn each consecutive point pair into a Line node parented to self.
        nodes = []
        for line_start, line_end in zip(polygone_line, polygone_line[1:]):
            new_node = Line(start=line_start, end=line_end, layer=self.layer, width=self.width)
            new_node._parent = self
            nodes.append(new_node)
        return nodes

    def getVirtualChilds(self):
        # The polygone renders purely through its child Line nodes.
        return self.virtual_childs

    def _getRenderTreeText(self):
        render_text = Node._getRenderTreeText(self)

        render_text += " ["
        node_strings = []
        for node in self.nodes:
            node_position = Vector2D(node)
            node_strings.append("[x: {x}, y: {y}]".format(x=node_position.x,
                                                          y=node_position.y))

        if len(node_strings) <= 6:
            render_text += " ,".join(node_strings)
        else:
            # display only a few nodes of the beginning and the end of the polygone line
            render_text += " ,".join(node_strings[:3])
            render_text += " ,... ,"
            render_text += " ,".join(node_strings[-3:])

        render_text += "]"

        return render_text
StarcoderdataPython
1724555
"""Demonstration of Python ``slice`` objects and slice assignment."""

info = "纽约 美国"
# A reusable slice object covering indices [0, 2) — here, the first word.
CITY = slice(0, 2)
print(CITY)
print(dir(CITY))
print(info[CITY])

s = [1, 2, 3, 4, 5]
# Slice assignment: replace the one-element slice s[0:1] with two values,
# growing the list to six elements.
s[0:1] = [10, 10]
print(s)
StarcoderdataPython
85086
import pytest


@pytest.fixture
def checkup_html() -> dict:
    """Fixture bundling a sample HTML page with its expected parse results.

    Returns a dict with:
        html: the raw page, covering normal/relative/duplicate/anchor links,
            a subdomain, nofollow, a foreign domain, mailto, fragment,
            non-HTTP schemes, and empty/absent hrefs.
        title_nonempty_links: (page title, set of every non-empty href).
        filter_result_wo_subdomains: expected filtered URLs without subdomains.
        filter_result_subdomains: expected filtered URLs including subdomains.

    Note: the original return annotation said ``-> str`` but the function
    returns a dict — fixed here.
    """
    return {
        "html": """<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Test Page Title</title>
</head>
<body>
    <a href="https://google.com/">Root domain link</a>
    <a href="https://google.com/1.html">Normal link 1</a>
    <a href="https://google.com/2.html">Normal link 2</a>
    <a href="https://google.com/2.html">Duplicate link 2</a>
    <a href="https://google.com/3.html#test">Normal link 3 with anchor</a>
    <a href="https://google.com/picture.png">Link to an image.</a>
    <a href="https://mail.google.com/">Subdomain link</a>
    <a href="/4.html">Relative link</a>
    <a rel="nofollow" href="https://google.com/nofollow">Nofollow link</a>
    <a href="https://yandex.ru">Foreign link</a>
    <a href="mailto:<EMAIL>">Email link</a>
    <a href="#fragment">Fragment link</a>
    <a href="ftp://google.com">Non HTTP(S) link</a>
    <a href="">Link with empty href</a>
    <a>Link without a URL</a>
    <title>Incorrect Title</title>
    <p>Some stuff.</p>
</body>
</html>
""",
        "title_nonempty_links": (
            "Test Page Title",
            {
                "#fragment",
                "/4.html",
                "ftp://google.com",
                "https://google.com/",
                "https://google.com/1.html",
                "https://google.com/2.html",
                "https://google.com/3.html#test",
                "https://google.com/picture.png",
                "https://mail.google.com/",
                "https://yandex.ru",
                "mailto:<EMAIL>",
            },
        ),
        "filter_result_wo_subdomains": [
            "https://google.com/1.html",
            "https://google.com/2.html",
            "https://google.com/3.html",
            "https://google.com/4.html",
        ],
        "filter_result_subdomains": [
            "https://google.com/1.html",
            "https://google.com/2.html",
            "https://google.com/3.html",
            "https://google.com/4.html",
            "https://mail.google.com",
        ],
    }
StarcoderdataPython
151272
"""Evaluate a trained YOLOv2 checkpoint on a Pascal VOC test split."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import argparse
import torch
from torch.utils.data import DataLoader
from dataset.factory import get_imdb
from dataset.roidb import RoiDataset, detection_collate
from torch.autograd import Variable
from yolo.yolov2 import YOLOv2
from config.config import cfg
from eval import eval  # NOTE: shadows the builtin eval() in this module
import pickle
from PIL import Image
import matplotlib.pyplot as plt
from utils.network import WeightLoader
from utils.visualize import draw_detection_boxes


def parse_args():
    """Parse command-line options for the evaluation run."""
    parser = argparse.ArgumentParser(description='YOLOv2')
    parser.add_argument('--dataset', dest='dataset',
                        default='voc07test', type=str)
    parser.add_argument('--batch_size', dest='batch_size',
                        default=2, type=int)
    # NOTE(review): `type=bool` on argparse flags makes any non-empty string
    # truthy (e.g. --cuda False is True); the defaults still work as-is.
    parser.add_argument('--cuda', dest='use_cuda',
                        default=True, type=bool)
    parser.add_argument('--mGPUs', dest='mGPUs',
                        default=True, type=bool)
    parser.add_argument('--num_workers', dest='num_workers',
                        default=2, type=int)
    parser.add_argument('--vis', dest='vis',
                        default=False, type=bool)
    parser.add_argument('--output_dir', dest='output_dir',
                        default='output', type=str)
    parser.add_argument('--check_epoch', dest='check_epoch',
                        default=159, type=int)
    parser.add_argument('--conf_thresh', dest='conf_thresh',
                        default=0.005, type=float)
    parser.add_argument('--nms_thresh', dest='nms_thresh',
                        default=0.45, type=float)

    args = parser.parse_args()
    return args


def test():
    """Run the checkpointed model over the test set, collect per-class
    detections into `all_boxes`, dump them to detections.pkl, and hand the
    results to the dataset's VOC evaluation."""
    args = parse_args()
    # When visualizing, only keep confident detections.
    if args.vis:
        args.conf_thresh = 0.5

    # load test data
    if args.dataset == 'voc07test':
        dataset_name = 'voc_2007_test'
    elif args.dataset == 'voc12test':
        dataset_name = 'voc_2012_test'
    else:
        raise NotImplementedError

    test_imdb = get_imdb(dataset_name)
    test_dataset = RoiDataset(test_imdb, train=False)
    test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size,
                                 num_workers=args.num_workers, shuffle=False)

    # load model weights for the requested epoch
    model = YOLOv2()
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    weight_file_path = os.path.join(
        args.output_dir, 'yolov2_epoch_{}.pth'.format(args.check_epoch))
    # BUG FIX: the original tested `torch.cuda.is_available` (the function
    # object, always truthy) instead of calling it, so CPU-only machines
    # tried a CUDA deserialization. Call it.
    if torch.cuda.is_available():
        state_dict = torch.load(weight_file_path)
    else:
        state_dict = torch.load(weight_file_path, map_location='cpu')
    model.load_state_dict(state_dict['model'])

    if args.use_cuda:
        model = model.cuda()

    model.eval()

    # all_boxes[class][image] -> ndarray of (x1, y1, x2, y2, score) rows
    num_data = len(test_dataset)
    all_boxes = [[[] for _ in range(num_data)] for _ in range(test_imdb.num_classes)]

    img_id = -1
    det_file = os.path.join(args.output_dir, 'detections.pkl')

    with torch.no_grad():
        for im_data, im_infos in test_dataloader:
            if args.use_cuda:
                im_data = im_data.cuda()
                im_infos = im_infos.cuda()
            im_data_variable = Variable(im_data)
            outputs = model(im_data_variable)
            for i in range(im_data.size(0)):
                img_id += 1
                output = [item[i].data for item in outputs]
                im_info = im_infos[i]
                detections = eval(output, im_info, args.conf_thresh, args.nms_thresh)
                if len(detections) > 0:
                    # Split detections by predicted class (last column).
                    for cls_idx in range(cfg.CLASS_NUM):
                        idxs = torch.nonzero(detections[:, -1] == cls_idx).view(-1)
                        if idxs.numel() > 0:
                            cls_det = torch.zeros((idxs.numel(), 5))
                            cls_det[:, :4] = detections[idxs, :4]
                            # Final score = objectness * class probability.
                            cls_det[:, 4] = detections[idxs, 4] * detections[idxs, 5]
                            all_boxes[cls_idx][img_id] = cls_det.cpu().numpy()
                if args.vis:
                    img = Image.open(test_imdb.image_path_at(img_id))
                    if len(detections) == 0:
                        continue
                    det_boxes = detections[:, :5].cpu().numpy()
                    det_classes = detections[:, -1].long().cpu().numpy()
                    imshow = draw_detection_boxes(img, det_boxes, det_classes,
                                                  class_names=test_imdb.classes)
                    plt.figure()
                    plt.imshow(imshow)
                    plt.show()

    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    test_imdb.evaluate_detections(all_boxes, output_dir=args.output_dir)


if __name__ == '__main__':
    test()
StarcoderdataPython
3212419
import sqlalchemy.exc
import sqlalchemy.event
from sqlalchemy.orm.query import Query
from sqlalchemy.orm.session import Session
from sqlalchemy.orm import (
    joinedload
)

from .db import Database
from .schema import SchemaBase


class DatabaseApi:
    """Thin CRUD facade over a SQLAlchemy session.

    Wraps the session held by a :class:`Database`, offering filtered
    queries with optional eager loading, plus add/update/delete helpers
    that commit (and roll back on failure) automatically.
    """

    def __init__(self, database: Database):
        self._database = database

    @property
    def _session(self) -> Session:
        # Always read the session from the Database object so a refreshed
        # session is picked up automatically.
        return self._database.session

    def _query(self, cls, filters=(), preload=()) -> Query:
        """Build a query for *cls* rows.

        :param cls: mapped class to query.
        :param filters: SQLAlchemy filter criteria, applied with AND.
        :param preload: relationship attributes to eager-load via JOIN.
        """
        query = self._session.query(cls)
        # BUG FIX: Query.options() takes loader options as *positional*
        # arguments; the original passed a single generator expression
        # (``options(joinedload(rel) for rel in preload)``), which
        # SQLAlchemy rejects as an invalid option. Unpack the options, and
        # only call options() when there is something to preload so that
        # delete()/count() keep working on plain queries.
        if preload:
            query = query.options(*(joinedload(rel) for rel in preload))
        if filters:
            query = query.filter(*filters)
        return query

    def _commit(self):
        """Commit the session, rolling back and re-raising on DB errors."""
        try:
            self._session.commit()
        except sqlalchemy.exc.DatabaseError:
            self._session.rollback()
            raise

    def get(self, cls, filters: tuple=(), preload: tuple=()) -> list:
        """Return all rows of *cls* matching *filters*."""
        return self._query(cls, filters, preload).all()

    def get_one(self, cls, filters: tuple=(), preload: tuple=()):
        """Return exactly one matching row; raises if zero or many match."""
        return self._query(cls, filters, preload).one()

    def add(self, item: SchemaBase):
        """Insert a single item and commit."""
        self._session.add(item)
        self._commit()

    def add_all(self, items: list):
        """Insert several items in one transaction."""
        self._session.add_all(items)
        self._commit()

    # bulk updates: https://docs.sqlalchemy.org/en/latest/orm/query.html?highlight=update#sqlalchemy.orm.query.Query.update
    # items_updated = self._query.filter(selector.filter).update(...)
    def update(self):
        """Flush pending attribute changes on tracked objects."""
        self._commit()

    def delete(self, cls, filters: tuple=()) -> bool:
        """Bulk-delete matching rows; True when at least one was removed."""
        rows = self._query(cls, filters).delete()
        self._commit()
        return rows > 0

    def exists(self, cls, filters: tuple=()) -> bool:
        """Return True when at least one row matches *filters*."""
        rows = self._query(cls, filters).count()
        return rows > 0
StarcoderdataPython
87200
# Generated by Django 2.2.7 on 2019-11-22 21:27
# NOTE: auto-generated migration -- avoid hand-editing; later migrations
# depend on the exact schema state this file produces.

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        # Requires the (possibly swapped) user model and the previous
        # reputation migration to be applied first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('reputation', '0002_auto_20191017_2107'),
    ]

    operations = [
        # Re-declares the full choices list for distribution_type; the set
        # now includes the THREAD_* events alongside paper/comment/reply.
        migrations.AlterField(
            model_name='distribution',
            name='distribution_type',
            field=models.CharField(choices=[('CREATE_PAPER', 'CREATE_PAPER'), ('COMMENT_ENDORSED', 'COMMENT_ENDORSED'), ('COMMENT_FLAGGED', 'COMMENT_FLAGGED'), ('COMMENT_UPVOTED', 'COMMENT_UPVOTED'), ('COMMENT_DOWNVOTED', 'COMMENT_DOWNVOTED'), ('REPLY_ENDORSED', 'REPLY_ENDORSED'), ('REPLY_FLAGGED', 'REPLY_FLAGGED'), ('REPLY_UPVOTED', 'REPLY_UPVOTED'), ('REPLY_DOWNVOTED', 'REPLY_DOWNVOTED'), ('THREAD_ENDORSED', 'THREAD_ENDORSED'), ('THREAD_FLAGGED', 'THREAD_FLAGGED'), ('THREAD_UPVOTED', 'THREAD_UPVOTED'), ('THREAD_DOWNVOTED', 'THREAD_DOWNVOTED')], max_length=255),
        ),
        # New Withdrawal model. The amount is stored as two BigIntegers
        # (integer and decimal part) -- presumably to avoid float/decimal
        # precision issues with token amounts; TODO confirm in app code.
        migrations.CreateModel(
            name='Withdrawal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount_integer_part', models.BigIntegerField()),
                ('amount_decimal_part', models.BigIntegerField()),
                ('from_address', models.CharField(max_length=255)),
                ('to_address', models.CharField(max_length=255)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('updated_date', models.DateTimeField(auto_now=True)),
                # paid_date stays NULL until the payout actually happens.
                ('paid_date', models.DateTimeField(default=None, null=True)),
                ('is_paid', models.BooleanField(default=False)),
                ('transaction_hash', models.CharField(blank=True, default='', max_length=255)),
                # SET_NULL keeps withdrawal records when the user is deleted.
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='withdrawals', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
StarcoderdataPython
3302204
"""Fetch ENA sequencing runs listed in covid.tsv and launch pipeline jobs.

For every run not yet tracked in MongoDB the script registers it in the
``samples`` collection, downloads its files over FTP with wget, and then
submits a Kubernetes Job that runs the Jovian pipeline on the data,
appending a status/error trail to the sample document at every step.

Usage: download_raw_data.py <data_hub_user> <data_hub_password>
"""
import sys
import os
import subprocess
import datetime
import yaml
import json
from pymongo import MongoClient

# Known ENA FTP URL prefixes; everything after '/vol1/' identifies the file.
FIRST_PATTERN = 'ftp.sra.ebi.ac.uk/vol1/'
SECOND_PATTERN = 'ftp.dcc-private.ebi.ac.uk/vol1/'


def main():
    """
    Main function that will parse existing file, check for files existence
    and download all required files
    """
    # NOTE(review): the shell commands below interpolate values taken from
    # covid.tsv with shell=True; this is safe only while that file is
    # trusted -- a crafted accession could inject shell commands.
    files_to_download = parse_file()
    for file_name, file_urls in files_to_download.items():
        # Updating status of sample
        sample = DB.samples.find_one({'id': file_name})
        write_sample_status(sample, 'download started')
        # Starting download for every url
        data_download_errors = list()
        completed_process_mkdir = subprocess.run(
            f"mkdir /raw_data/{file_name}", shell=True, capture_output=True)
        if completed_process_mkdir.returncode != 0:
            data_download_errors.append(
                completed_process_mkdir.stderr.decode('utf-8'))
        # Download should be finished for all files in file_urls,
        # ex. 2 for pair-end and 1 for single-end reads
        download_finished_for_urls = 0
        for file_url in file_urls:
            # 10 attempts to do authentication with ftp server
            for _ in range(10):
                output_file = f"/raw_data/{file_name}/" \
                              f"{os.path.basename(file_url)}"
                # wget itself retries twice (-t 2) per attempt.
                completed_process_wget = subprocess.run(
                    f"wget -t 2 {file_url} -O {output_file}",
                    shell=True, capture_output=True)
                if completed_process_wget.returncode != 0:
                    data_download_errors.append(
                        completed_process_wget.stderr.decode('utf-8'))
                else:
                    download_finished_for_urls += 1
                    break
        write_sample_errors(sample, data_download_errors)
        # Failed only when errors occurred AND not every URL finished;
        # e.g. a failed mkdir on an existing dir does not block the run.
        if len(data_download_errors) > 0 and \
                download_finished_for_urls != len(file_urls):
            write_sample_status(sample, 'download failed')
        else:
            write_sample_status(sample, 'download finished')
            write_sample_status(sample, 'starting to submit pipeline job')
            # Converting yaml to json as required by k8s api server
            with open('/wms/pipeline/run_pipeline-job/'
                      'jovian-pipeline-run-job.yaml', 'r') as f:
                job_to_submit = yaml.load(f.read(), Loader=yaml.FullLoader)
            # Assigning unique id to job (id of run)
            job_to_submit['metadata']['name'] = f'jovian-pipeline-run-' \
                                                f'job-{file_name.lower()}'
            # Submitting run id as arg to job
            job_to_submit['spec']['template']['spec']['containers'][0][
                'args'] = [file_name]
            job_to_submit = json.dumps(job_to_submit)
            # Get token, required by k8s api server to submit jobs
            get_token_process = subprocess.run(
                "cat /var/run/secrets/kubernetes.io/serviceaccount/token",
                shell=True, capture_output=True)
            k8s_token = get_token_process.stdout.decode('utf-8')
            # POST the Job to the in-cluster API server (curl -k: the
            # service CA is not verified here).
            submit_job_process = subprocess.run(
                f'curl -X POST -sSk -H "Authorization: Bearer {k8s_token}" '
                f'-H "Content-Type: application/json" '
                f'https://$KUBERNETES_SERVICE_HOST:'
                f'$KUBERNETES_PORT_443_TCP_PORT/apis/batch/v1/namespaces/'
                f'default/jobs -d \'{job_to_submit}\'',
                shell=True, capture_output=True)
            submit_job_process_results = json.loads(
                submit_job_process.stdout.decode('utf-8'))
            # On rejection the API returns a Status object with
            # status == 'Failure'; on success it returns the created Job
            # (whose 'status' is a dict, so the comparison is False).
            if submit_job_process_results['status'] == 'Failure':
                write_sample_status(sample, 'submitting pipeline job failed')
                write_sample_errors(sample, [
                    submit_job_process_results['message']
                ])
            else:
                write_sample_status(sample,
                                    'submitting pipeline job succeed')
        # Persist the accumulated status/error trail in one update.
        DB.samples.update_one({'id': file_name}, {'$set': sample})


def write_sample_status(sample, status):
    """
    Append a timestamped status message to the sample's import log.

    :param sample: sample document (dict) to write log to
    :param status: status message
    :return: None (mutates *sample* in place)
    """
    sample['import_from_ena']['date'].append(
        datetime.datetime.now().strftime("%d %B, %Y - %H:%M:%S"))
    sample['import_from_ena']['status'].append(status)


def write_sample_errors(sample, errors):
    """
    Append error messages (if any) to the sample's import log.

    :param sample: sample document (dict) to write log to
    :param errors: errors list
    :return: None (mutates *sample* in place)
    """
    if len(errors) > 0:
        sample['import_from_ena']['errors'].extend(errors)


def parse_file():
    """
    Read covid.tsv and collect download links for runs not yet in the DB.

    :return: dict mapping run id -> list of FTP download links
    """
    files = dict()
    with open("covid.tsv", 'r') as f:
        # Skip the TSV header row.
        next(f)
        for line in f:
            line = line.rstrip()
            data = line.split("\t")
            # Column 7 is assumed to be the run accession, 2 the BioSample
            # id, 5 the study accession and 10 the semicolon-separated URL
            # list -- TODO confirm against the covid.tsv header.
            if not check_file_in_database(data[7], data[2], data[5]):
                files[data[7]] = generate_download_links(data[10])
    return files


def check_file_in_database(file_name, sample_id, study_id):
    """
    This function will check for current file in MongoDB and insert it in
    case of absence (i.e. a check with a registration side effect).

    :param file_name: name of the file to check
    :param sample_id: BioSample id
    :param study_id: Study accession
    :return: True if file is already in database and False otherwise
    """
    results = DB.samples.find_one({'id': file_name})
    if results is None:
        sample = get_sample_structure()
        sample['id'] = file_name
        sample['sample_id'] = sample_id
        sample['study_id'] = study_id
        sample['import_from_ena']['date'].append(
            datetime.datetime.now().strftime("%d %B, %Y - %H:%M:%S"))
        sample['import_from_ena']['status'].append('run added for download')
        DB.samples.insert_one(sample)
        return False
    else:
        return True


def get_sample_structure():
    """
    Return the empty sample document skeleton stored in MongoDB.

    :return: dict with one (date/status/errors) log section per stage
    """
    return {'id': None,
            'sample_id': None,
            'study_id': None,
            'import_from_ena': {'date': list(),
                                'status': list(),
                                'errors': list()},
            'pipeline_analysis': {'date': list(),
                                  'status': list(),
                                  'errors': list()},
            'export_to_ena': {'date': list(),
                              'status': list(),
                              'errors': list()}}


def generate_download_links(download_string):
    """
    Turn ENA's semicolon-separated URL field into authenticated FTP links.

    :param download_string: string to reformat
    :return: list of links to download
    """
    files_to_download = list()
    files = download_string.split(';')
    for file_name in files:
        if FIRST_PATTERN in file_name:
            name = file_name.split(FIRST_PATTERN)[-1]
        elif SECOND_PATTERN in file_name:
            name = file_name.split(SECOND_PATTERN)[-1]
        # NOTE(review): `name` stays unbound if neither pattern matches,
        # which would raise NameError here (or silently reuse the previous
        # value) -- confirm every URL in the TSV uses one of the two hosts.
        # NOTE(review): 'ftp.<EMAIL>' below looks like a hostname redacted
        # by data anonymisation (cf. FIRST_PATTERN/SECOND_PATTERN); restore
        # the real FTP host before running.
        files_to_download.append(f"ftp://{DATA_HUB}:{DATA_HUB_PASSWORD}"
                                 f"@ftp.<EMAIL>/vol1/{name}")
    return files_to_download


if __name__ == "__main__":
    # Getting credentials from command line
    DATA_HUB = sys.argv[1]
    DATA_HUB_PASSWORD = sys.argv[2]
    # Getting access to MongoDB
    CLIENT = MongoClient('mongodb://samples-logs-db-svc')
    DB = CLIENT.samples
    main()
StarcoderdataPython
5432
#!/usr/bin/env python # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # <NAME> # California Institute of Technology # (C) 2006-2010 All Rights Reserved # # {LicenseText} # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # standalone = True import unittestX as unittest import journal debug = journal.debug( "Broadened_E_Q_Kernel_TestCase" ) warning = journal.warning( "Broadened_E_Q_Kernel_TestCase" ) import mcni from mccomposite import mccompositebp from mccomponents import mccomponentsbp class TestCase(unittest.TestCase): def test(self): E_Q = "Q*Q/3." S_Q = "1" sigma_Q = "Q/2." Qmin = 0; Qmax = 10 absorption_coefficient = scattering_coefficient = 1. kernel = mccomponentsbp.create_Broadened_E_Q_Kernel( E_Q, S_Q, sigma_Q, Qmin, Qmax, absorption_coefficient, scattering_coefficient, ) ei = 500 # meV from mcni.utils import conversion vil = conversion.e2v(ei) vi = (0,0,vil) import numpy.linalg as nl import numpy as np for i in range(10): event = mcni.neutron( r = (0,0,0), v = vi, prob = 1, time = 0 ) kernel.scatter( event ); vf = np.array(event.state.velocity) diffv = vi - vf Q = conversion.v2k(nl.norm(diffv)) ef = conversion.v2e(nl.norm(vf)) E = ei - ef # print E, Q, event E1 = eval(E_Q) continue return pass # end of TestCase def main(): unittest.main() return if __name__ == "__main__": main() # version __id__ = "$Id: TestCase.py 696 2010-11-09 06:23:06Z linjiao $" # End of file
StarcoderdataPython
20131
"""Word and pron extraction for (Mandarin) Chinese."""

import itertools
import typing

import requests

from wikipron.extract.default import yield_pron, IPA_XPATH_SELECTOR


if typing.TYPE_CHECKING:
    from wikipron.config import Config
    from wikipron.typing import Iterator, Word, Pron, WordPronPair


# XPath picking the Mandarin <li> entries inside the collapsed pron table.
_PRON_XPATH_TEMPLATE = """
//div[@class="vsHide"]
//ul
//li[(a[@title="w:Mandarin Chinese"])]
"""


def yield_cmn_pron(
    request: requests.Response, config: "Config"
) -> "Iterator[Pron]":
    """Yield each Mandarin pronunciation found on the page."""
    containers = request.html.xpath(_PRON_XPATH_TEMPLATE)
    for container in containers:
        for pron in yield_pron(container, IPA_XPATH_SELECTOR, config):
            yield pron


def extract_word_pron_cmn(
    word: "Word", request: requests.Response, config: "Config"
) -> "Iterator[WordPronPair]":
    """Pair the headword with every extracted Mandarin pronunciation."""
    for pron in yield_cmn_pron(request, config):
        yield word, pron
StarcoderdataPython
124640
# PART 1
def game_of_cups(starting_sequence, num_of_moves, min_cup=None, max_cup=None):
    """Play the crab's cup game for num_of_moves moves.

    Returns the final arrangement as a successor map: succ[c] is the cup
    sitting immediately clockwise of cup c ("linked list" as a dict).
    min_cup/max_cup may be supplied to skip recomputing them for large
    sequences.
    """
    succ = dict(zip(starting_sequence, starting_sequence[1:]))
    succ[starting_sequence[-1]] = starting_sequence[0]

    if not max_cup:
        max_cup = max(starting_sequence)
    if not min_cup:
        min_cup = min(starting_sequence)

    current_cup = starting_sequence[0]
    for _ in range(num_of_moves):
        # Pick up the three cups clockwise of the current cup.
        first = succ[current_cup]
        second = succ[first]
        third = succ[second]
        picked_up = (first, second, third)

        # Destination: count down from current-1, skipping picked-up cups,
        # wrapping from min_cup back around to max_cup.
        destination = current_cup - 1
        while destination in picked_up or destination < min_cup:
            destination -= 1
            if destination < min_cup:
                destination = max_cup

        # Splice the picked-up run in right after the destination cup.
        after_destination = succ[destination]
        next_current = succ[third]
        succ[destination] = first
        succ[third] = after_destination
        succ[current_cup] = next_current

        current_cup = next_current

    return succ


def collect_result(cups_dict):
    """Read the cup labels clockwise from cup 1 (excluding cup 1 itself)."""
    labels = []
    cup = cups_dict[1]
    while cup != 1:
        labels.append(str(cup))
        cup = cups_dict[cup]
    return "".join(labels)


# PART 2
def hyper_game_of_cups(starting_sequence):
    """Pad the sequence up to one million cups, then play ten million moves."""
    lowest = min(starting_sequence)
    highest = max(starting_sequence)
    extended = starting_sequence + list(range(highest + 1, 1_000_000 + 1))
    return game_of_cups(extended, 10_000_000, lowest, 1_000_000)


def hyper_collect_result(cups_dict):
    """Multiply the labels of the two cups immediately clockwise of cup 1."""
    first = cups_dict[1]
    return first * cups_dict[first]
StarcoderdataPython
1763069
from flask import Flask, abort, jsonify, request, render_template
# NOTE(review): sklearn.externals.joblib was removed in modern
# scikit-learn; when upgrading, switch to `import joblib`.
from sklearn.externals import joblib
import numpy as np
import json

# load pickle below
lr = joblib.load('kiva_predictor.pkl')

app = Flask(__name__)


@app.route("/")
def home():
    """Serve the loan submission form."""
    return render_template("index.html")


"""
Index(['language_english', 'description_length', 'loan_amount',
       'loan_use_length', 'currency_usd', 'tags_exist',
       'num_borrowers_female_pct', 'sector_name_Agriculture', 'sector_name_Arts',
       'sector_name_Clothing', 'sector_name_Construction',
       'sector_name_Education', 'sector_name_Entertainment',
       'sector_name_Health', 'sector_name_Housing', 'sector_name_Manufacturing',
       'sector_name_Personal Use', 'sector_name_Retail', 'sector_name_Services',
       'sector_name_Transportation', 'sector_name_Wholesale',
       'distribution_model_field_partner', 'fol', 'repayment_interval_bullet',
       'repayment_interval_irregular', 'repayment_interval_weekly'],
      dtype='object')

repayment_interval_monthly
sector_name_Food
distribution_model_field_partner
"""

# One-hot feature slots 7-20: sector name -> index in the feature vector.
# 'Food' is absent on purpose (baseline category, all sector slots zero).
_SECTOR_INDEX = {
    'Agriculture': 7, 'Arts': 8, 'Clothing': 9, 'Construction': 10,
    'Education': 11, 'Entertainment': 12, 'Health': 13, 'Housing': 14,
    'Manufacturing': 15, 'Personal Use': 16, 'Retail': 17, 'Services': 18,
    'Transportation': 19, 'Wholesale': 20,
}
# One-hot slots 22-24: repayment interval -> index ('Monthly' is baseline).
_REPAYMENT_INDEX = {
    'One Time Payement': 22, 'Whenever you can': 23, 'Weekly': 24,
}


def process_input(data):
    """Encode the raw form fields into the 25-feature vector for the model.

    :param data: dict of raw form values (strings) keyed by field name.
    :return: numpy array of 25 floats in the training column order above.

    NOTE(review): the pasted training index lists 26 columns (including
    'fol') while this encoder emits 25 -- confirm which layout matches
    kiva_predictor.pkl.
    """
    enc_input = np.zeros(25)

    enc_input[0] = 1 if data['englishyn'] == 'Yes' else 0
    enc_input[1] = len(data['description'])
    enc_input[2] = float(data['loanamount'])
    enc_input[3] = len(data['intendeduse'])
    enc_input[4] = 1 if data['usd'] == 'Yes' else 0
    enc_input[5] = 1 if len(data['hashtags']) > 0 else 0

    # Share of female borrowers. BUG FIX: guard against zero borrowers,
    # which previously raised ZeroDivisionError.
    females = float(data['females'])
    males = float(data['males'])
    total = females + males
    enc_input[6] = females / total if total > 0 else 0.0

    sector_idx = _SECTOR_INDEX.get(data['sector'])
    if sector_idx is not None:
        enc_input[sector_idx] = 1

    if data['distribution_model'] == 'Field Partner':
        enc_input[21] = 1

    repayment_idx = _REPAYMENT_INDEX.get(data['repayment_interval'])
    if repayment_idx is not None:
        enc_input[repayment_idx] = 1

    return enc_input


@app.route('/api', methods=['POST'])
def get_delay():
    """Score the submitted loan form and return an HTML result snippet."""
    result = request.form
    data = {
        'loanamount': result['loanamount'],
        'description': result['description'],
        'intendeduse': result['intendeduse'],
        'hashtags': result['hashtags'],
        'females': result['females'],
        'males': result['males'],
        'usd': result['usd'],
        'englishyn': result['englishyn'],
        'sector': result['sector'],
        'repayment_interval': result['repayment_interval'],
        'distribution_model': result['distribution_model'],
    }

    features = process_input(data).reshape(1, -1)
    # Probability of the positive class ("funded in 5 days"), as a percent
    # rounded to whole points.
    pred = lr.predict_proba(features)
    pred = np.around((pred[0, 1]), 2) * 100
    print(pred)

    string = '<div style="text-align: center;"><img src="static/Kiva-loans.jpg" width=""></div><div style="font-size: 24px;text-align: center;">There is a %d%% chance you will raise the money in 5 days</div>"' % pred
    return string


if __name__ == "__main__":
    app.run(port=8080, debug=True, use_reloader=False)
StarcoderdataPython
3294815
from django.db import models

from country_utils.fields import CountryField


class Profile(models.Model):
    """Model exercising the three main CountryField configurations."""

    # Optional country; stored as the empty string when left blank.
    country1 = CountryField(blank=True)
    # Required, defaulting to 'SE' (ISO 3166-1 alpha-2 for Sweden).
    country2 = CountryField(blank=False, default='SE')
    # Required with no default: a value must always be supplied.
    country3 = CountryField(blank=False)
StarcoderdataPython
3396797
# external imports
import aiohttp_jinja2

# local imports
from nautilus.network.http import RequestHandler


class GraphiQLRequestHandler(RequestHandler):
    """HTTP handler that serves the GraphiQL in-browser IDE page."""

    @aiohttp_jinja2.template('graphiql.html')
    async def get(self):
        """Render graphiql.html on GET.

        The returned dict is the template context; the page needs no
        server-side data, so it is empty.
        """
        # write the template to the client
        return {}
StarcoderdataPython
3269998
# NOTE(review): star import is the established trabant demo style; it pulls
# in create_box/create_sphere/the joint constants/loop used below.
from trabant import *

# bus with wheels: a box body with four small spheres as wheels, each
# attached with a suspended hinge joint around the Y axis.
bus = create_box(side=(6,1.5,2))
rear_left = create_sphere(pos=(-3,+1,-1), radius=0.25)
rear_right = create_sphere(pos=(-3,-1,-1), radius=0.25)
front_left = create_sphere(pos=(+3,-1,-1), radius=0.25)
front_right = create_sphere(pos=(+3,+1,-1), radius=0.25)
bus.joint(suspend_hinge_joint, rear_left, (0,+1,0))
bus.joint(suspend_hinge_joint, rear_right, (0,+1,0))
bus.joint(suspend_hinge_joint, front_left, (0,+1,0))
bus.joint(suspend_hinge_joint, front_right, (0,+1,0))
# Give the bus an initial velocity along +X.
bus.vel((5,0,0))

# floor: a large static green box well below the scene.
create_box(pos=(8,4,-23), side=40, static=True, col='#0f0', mat='flat')

# A tower of seven boxes, plus a fast sphere fired at the scene.
for z in range(0,7):
    create_box(pos=(5,4,z))
create_sphere(pos=(5,0,3), vel=(0,99,2))

# Keep the simulation running until the engine's main loop ends.
while loop():
    pass
StarcoderdataPython
1673313
import requests
import json
import os
from itertools import count

# Contract addresses to exclude from the listing.
blocklist = [
    '0x06012c8cf97bead5deae237070f9587f8e7a266d'  # cryptokitties
]


def valid_hash(hash):
    """Return True if *hash* looks like an Ethereum contract address
    ('0x' followed by 40 hex characters) and is not blocklisted.
    """
    if hash in blocklist:
        return False
    return hash.startswith('0x') and len(hash) == 42


def list_nifty_gateway(update=True, verbose=False):
    """Collect Nifty Gateway contract addresses from the drops and
    exhibition APIs.

    Args:
        update: when False and a cache file exists, return the cached
            mapping instead of hitting the network.
        verbose: print progress while downloading.

    Returns:
        dict mapping 'Nifty Gateway/<index>' -> contract address.
    """
    cache_fn = 'data/nifty-gateway-contracts.json'
    if not update and os.path.exists(cache_fn):
        if verbose:
            print('Loading Nifty Gateway contracts from cache...')
        with open(cache_fn) as f:
            return json.load(f)

    drop_contracts = []
    if verbose:
        print('Downloading from drops...')
    # The drops endpoint is paginated; an empty page marks the end.
    for current_page in count(1):
        url = f'https://api.niftygateway.com/drops/open/?size=100&current={current_page}'
        res = requests.get(url)
        results = res.json()['listOfDrops']
        if len(results) == 0:
            break
        contracts = [item['contractAddress']
                     for drop in results
                     for item in drop['Exhibitions']]
        drop_contracts.extend(contracts)
        if verbose:
            print('Page', current_page, 'total', len(drop_contracts))
    if verbose:
        print('Done.')

    exhibition_contracts = []
    if verbose:
        print('Downloading from exhibition...')
    url = 'https://api.niftygateway.com/exhibition/open/'
    res = requests.get(url)
    results = res.json()
    exhibition_contracts.extend(e['contractAddress'] for e in results)
    if verbose:
        print('total', len(exhibition_contracts))
        print('Done.')

    filtered = {}
    combined = set(exhibition_contracts + drop_contracts)
    if verbose:
        print(f'Combined: total {len(combined)}')
    # Sort before enumerating: set iteration order is not deterministic,
    # so unsorted enumeration would assign different 'Nifty Gateway/<i>'
    # keys to the same contract on different runs.
    for i, hash in enumerate(sorted(combined)):
        if not valid_hash(hash):
            continue
        filtered[f'Nifty Gateway/{i}'] = hash

    # Ensure the cache directory exists before writing the cache file.
    os.makedirs(os.path.dirname(cache_fn), exist_ok=True)
    with open(cache_fn, 'w') as f:
        json.dump(filtered, f, indent=2)
    return filtered


if __name__ == '__main__':
    list_nifty_gateway(update=True, verbose=True)
StarcoderdataPython
1798364
#!/usr/bin/python # -*- coding: utf-8 -*- # We need this for gui controls import gui3d import humanmodifier print 'Face imported' class GroupBoxRadioButton(gui3d.RadioButton): def __init__(self, group, label, groupBox, selected=False): gui3d.RadioButton.__init__(self, group, label, selected, style=gui3d.ButtonStyle) self.groupBox = groupBox def onClicked(self, event): gui3d.RadioButton.onClicked(self, event) self.parent.parent.hideAllBoxes() self.groupBox.show() class FaceSlider(humanmodifier.ModifierSlider): def __init__(self, modifier, image, view): humanmodifier.ModifierSlider.__init__(self, min=-1.0, max=1.0, modifier=modifier, style=gui3d.SliderStyle._replace(height=56, normal=image), thumbStyle=gui3d.SliderThumbStyle._replace(width = 32, height = 32, normal="slider2.png", focused="slider2_focused.png")) self.view = getattr(gui3d.app, view) def onFocus(self, event): humanmodifier.ModifierSlider.onFocus(self, event) self.view() def setPosition(self, position): humanmodifier.ModifierSlider.setPosition(self, position) self.thumb.setPosition([position[0], position[1] + self.style.height / 2 - self.thumbStyle.height / 2, position[2] + 0.01]) self.setValue(self.getValue()) class FaceSlider2(humanmodifier.ModifierSlider): def __init__(self, modifier, image, view): humanmodifier.ModifierSlider.__init__(self, min=0.0, max=1.0, modifier=modifier, style=gui3d.SliderStyle._replace(height=56, normal=image), thumbStyle=gui3d.SliderThumbStyle._replace(width = 32, height = 32, normal="slider2.png", focused="slider2_focused.png")) self.view = getattr(gui3d.app, view) def onFocus(self, event): humanmodifier.ModifierSlider.onFocus(self, event) self.view() def setPosition(self, position): humanmodifier.ModifierSlider.setPosition(self, position) self.thumb.setPosition([position[0], position[1] + self.style.height / 2 - self.thumbStyle.height / 2, position[2] + 0.01]) self.setValue(self.getValue()) class FaceTaskView(gui3d.TaskView): def __init__(self, category): 
gui3d.TaskView.__init__(self, category, 'Face') features = [ ('head', [('data/targets/head/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 'data/targets/head/images/', i[3]) for i in [ ('head-age', 'less', 'more', 'frontView'), ('head-angle', 'in', 'out', 'rightView'), ('head-scale-depth', 'less', 'more', 'rightView'), ('head-scale-horiz', 'less', 'more', 'frontView'), ('head-scale-vert', 'more', 'less', 'frontView'), ('head-trans', 'in', 'out', 'frontView'), ('head-trans', 'down', 'up', 'frontView'), ('head-trans', 'forward', 'backward', 'rightView'), ]]), ('neck', [('data/targets/neck/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 'data/targets/neck/images/', i[3]) for i in [ ('neck-scale-depth', 'less', 'more', 'rightView'), ('neck-scale-horiz', 'less', 'more', 'frontView'), ('neck-scale-vert', 'more', 'less', 'frontView'), ('neck-trans', 'in', 'out', 'frontView'), ('neck-trans', 'down', 'up', 'frontView'), ('neck-trans', 'forward', 'backward', 'rightView'), ]]), ('right eye', [('data/targets/eyes/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 'data/targets/eyes/images/', i[3]) for i in [ ('r-eye-height1', 'min', 'max', 'frontView'), ('r-eye-height2', 'min', 'max', 'frontView'), ('r-eye-height3', 'min', 'max', 'frontView'), ('r-eye-push1', 'in', 'out', 'frontView'), ('r-eye-push2', 'in', 'out', 'frontView'), ('r-eye-move', 'in', 'out', 'frontView'), ('r-eye-move', 'up', 'down', 'frontView'), ('r-eye', 'small', 'big', 'frontView'), ('r-eye-corner1', 'up', 'down', 'frontView'), ('r-eye-corner2', 'up', 'down', 'frontView') ]]), ('left eye', [('data/targets/eyes/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 'data/targets/eyes/images/', i[3]) for i in [ ('l-eye-height1', 'min', 'max', 'frontView'), ('l-eye-height2', 'min', 'max', 'frontView'), ('l-eye-height3', 'min', 'max', 'frontView'), ('l-eye-push1', 'in', 'out', 'frontView'), ('l-eye-push2', 'in', 
'out', 'frontView'), ('l-eye-move', 'in', 'out', 'frontView'), ('l-eye-move', 'up', 'down', 'frontView'), ('l-eye', 'small', 'big', 'frontView'), ('l-eye-corner1', 'up', 'down', 'frontView'), ('l-eye-corner2', 'up', 'down', 'frontView'), ]]), ('nose features', [('data/targets/nose/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 'data/targets/nose/images/', i[3]) for i in [ ('nose', 'compress', 'uncompress', 'rightView'), ('nose', 'convex', 'concave', 'rightView'), ('nose', 'greek', 'ungreek', 'rightView'), ('nose', 'hump', 'unhump', 'rightView'), ('nose', 'potato', 'point', 'rightView'), ('nose-nostrils', 'point', 'unpoint', 'frontView'), ('nose-nostrils', 'up', 'down', 'rightView'), ('nose-point', 'up', 'down', 'rightView'), ]]), ('nose size details', [('data/targets/nose/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 'data/targets/nose/images/', i[3]) for i in [ ('nose-nostril-width', 'min', 'max', 'frontView'), ('nose-height', 'min', 'max', 'rightView'), ('nose-width1', 'min', 'max', 'frontView'), ('nose-width2', 'min', 'max', 'frontView'), ('nose-width3', 'min', 'max', 'frontView'), ('nose-width', 'min', 'max', 'frontView'), ]]), ('nose size', [('data/targets/nose/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 'data/targets/nose/images/', i[3]) for i in [ ('nose-trans', 'up', 'down', 'frontView'), ('nose-trans', 'forward', 'backward', 'rightView'), ('nose-trans', 'in', 'out', 'frontView'), ('nose-scale-vert', 'incr', 'decr', 'frontView'), ('nose-scale-horiz', 'incr', 'decr', 'frontView'), ('nose-scale-depth', 'incr', 'decr', 'rightView'), ]]), ('mouth size', [('data/targets/mouth/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 'data/targets/mouth/images/', i[3]) for i in [ ('mouth-scale-horiz', 'incr', 'decr', 'frontView'), ('mouth-scale-vert', 'incr', 'decr', 'frontView'), ('mouth-scale-depth', 'incr', 'decr', 'rightView'), ('mouth-trans', 'in', 'out', 
'frontView'), ('mouth-trans', 'up', 'down', 'frontView'), ('mouth-trans', 'forward', 'backward', 'rightView'), ]]), ('mouth size details', [('data/targets/mouth/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 'data/targets/mouth/images/', i[3]) for i in [ ('mouth-lowerlip-height', 'min', 'max', 'frontView'), ('mouth-lowerlip-middle', 'up', 'down', 'frontView'), ('mouth-lowerlip-width', 'min', 'max', 'frontView'), ('mouth-upperlip-height', 'min', 'max', 'frontView'), ('mouth-upperlip-width', 'min', 'max', 'frontView'), ]]), ('mouth features', [('data/targets/mouth/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 'data/targets/mouth/images/', i[3]) for i in [ ('mouth-lowerlip-ext', 'up', 'down', 'frontView'), ('mouth-angles', 'up', 'down', 'frontView'), ('mouth-lowerlip-middle', 'up', 'down', 'frontView'), ('mouth-lowerlip', 'deflate', 'inflate', 'rightView'), ('mouth-philtrum', 'up', 'down', 'frontView'), ('mouth-philtrum', 'increase', 'decrease', 'rightView'), ('mouth', 'up', 'down', 'frontView'), ('mouth-upperlip', 'deflate', 'inflate', 'rightView'), ('mouth-upperlip-ext', 'up', 'down', 'frontView'), ('mouth-upperlip-middle', 'up', 'down', 'frontView'), ]]), ('right ear', [('data/targets/ears/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 'data/targets/ears/images/', i[3]) for i in [ ('r-ear', 'backward', 'forward', 'rightView'), ('r-ear', 'big', 'small', 'rightView'), ('r-ear', 'down', 'up', 'rightView'), ('r-ear-height', 'min', 'max', 'rightView'), ('r-ear-lobe', 'min', 'max', 'rightView'), ('r-ear', 'pointed', 'triangle', 'rightView'), ('r-ear-rot', 'backward', 'forward', 'rightView'), ('r-ear', 'square', 'round', 'rightView'), ('r-ear-width', 'max', 'min', 'rightView'), ('r-ear-wing', 'out', 'in', 'frontView'), ('r-ear-flap', 'out', 'in', 'frontView'), ]]), ('left ear', [('data/targets/ears/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 
'data/targets/ears/images/', i[3]) for i in [ ('l-ear', 'backward', 'forward', 'leftView'), ('l-ear', 'big', 'small', 'leftView'), ('l-ear', 'down', 'up', 'leftView'), ('l-ear-height', 'min', 'max', 'leftView'), ('l-ear-lobe', 'min', 'max', 'leftView'), ('l-ear', 'pointed', 'triangle', 'leftView'), ('l-ear-rot', 'backward', 'forward', 'leftView'), ('l-ear', 'square', 'round', 'leftView'), ('l-ear-width', 'max', 'min', 'leftView'), ('l-ear-wing', 'out', 'in', 'frontView'), ('l-ear-flap', 'out', 'in', 'frontView'), ]]), ('chin', [('data/targets/chin/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 'data/targets/chin/images/', i[3]) for i in [ ('chin', 'in', 'out', 'rightView'), ('chin-width', 'min', 'max', 'frontView'), ('chin-height', 'min', 'max', 'frontView'), ('chin', 'squared', 'round', 'frontView'), ('chin', 'prognathism1', 'prognathism2', 'rightView'), ]]), ('cheek', [('data/targets/cheek/${ethnic}/${gender}_${age}/%s-${value}.target' % (i[0]), i[0], i[1], i[2], 'data/targets/cheek/images/', i[3]) for i in [ ('l-cheek', 'in', 'out', 'frontView'), ('l-cheek-bones', 'out', 'in', 'frontView'), ('r-cheek', 'in', 'out', 'frontView'), ('r-cheek-bones', 'out', 'in', 'frontView'), ]]) ] features2 = [ ('head shape', [('data/targets/head/${ethnic}/${gender}_${age}/%s.target' % (i[0]), i[0], 'data/targets/head/images/', i[1]) for i in [ ('head-oval', 'frontView'), ('head-round', 'frontView'), ('head-rectangular', 'frontView'), ('head-square', 'frontView'), ('head-triangular', 'frontView'), ('head-invertedtriangular', 'frontView'), ('head-diamond', 'frontView'), ]]) ] y = 80 self.groupBoxes = [] self.radioButtons = [] self.sliders = [] self.modifiers = {} self.categoryBox = self.addView(gui3d.GroupBox([650, y, 9.0], 'Category')) y += 25 for name, templates in features2: for index, template in enumerate(templates): if index % 12 == 0: if len(templates) <= 12: title = name.capitalize() else: title = '%s %d' % (name.capitalize(), index / 12 + 1) # 
Create box box = self.addView(gui3d.GroupBox([10, 80, 9.0], title, gui3d.GroupBoxStyle._replace(width=128+112+4))) self.groupBoxes.append(box) # Create radiobutton radio = self.categoryBox.addView(GroupBoxRadioButton(self.radioButtons, title, box, selected=len(self.radioButtons) == 0)) y += 24 # Create sliders modifier = humanmodifier.GenderAgeEthnicModifier2(template[0]) self.modifiers['%s%d' % (name, index + 1)] = modifier slider = box.addView( (FaceSlider2(modifier, '%s%s.png' % (template[2], template[1]), template[3]))) self.sliders.append(slider) for name, templates in features: for index, template in enumerate(templates): if index % 12 == 0: if len(templates) <= 12: title = name.capitalize() else: title = '%s %d' % (name.capitalize(), index / 12 + 1) # Create box box = self.addView(gui3d.GroupBox([10, 80, 9.0], title, gui3d.GroupBoxStyle._replace(width=128+112+4))) self.groupBoxes.append(box) # Create radiobutton radio = self.categoryBox.addView(GroupBoxRadioButton(self.radioButtons, title, box, selected=len(self.radioButtons) == 0)) y += 24 # Create sliders modifier = humanmodifier.GenderAgeEthnicAsymmetricModifier(template[0], 'value', template[2], template[3], False) self.modifiers['%s%d' % (name, index + 1)] = modifier slider = box.addView(FaceSlider(modifier, '%s%s-%s-%s.png' % (template[4], template[1], template[2], template[3]), template[5])) self.sliders.append(slider) y += 16 self.hideAllBoxes() self.groupBoxes[0].show() def hideAllBoxes(self): for box in self.groupBoxes: box.hide() def onShow(self, event): gui3d.TaskView.onShow(self, event) gui3d.app.setFaceCamera() for slider in self.sliders: slider.update() def onResized(self, event): self.categoryBox.setPosition([event.width - 150, self.categoryBox.getPosition()[1], 9.0]) def onHumanChanged(self, event): human = event.human for slider in self.sliders: slider.update() def loadHandler(self, human, values): if values[0] == 'face': modifier = self.modifiers.get(values[1].replace("-", " "), None) if 
modifier: modifier.setValue(human, float(values[2])) def saveHandler(self, human, file): for name, modifier in self.modifiers.iteritems(): value = modifier.getValue(human) if value: file.write('face %s %f\n' % (name.replace(" ", "-"), value)) def load(app): category = app.getCategory('Modelling') taskview = category.addView(FaceTaskView(category)) app.addLoadHandler('face', taskview.loadHandler) app.addSaveHandler(taskview.saveHandler) print 'Face loaded' def unload(app): pass
StarcoderdataPython
3236724
<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Order ``rule`` objects by position and annotate the position field.

    Auto-generated migration; follows 0007_auto_20141124_1229.
    """

    dependencies = [
        ('disclaimrwebadmin', '0007_auto_20141124_1229'),
    ]

    operations = [
        # Default queryset ordering: rules sorted by their position.
        migrations.AlterModelOptions(
            name='rule',
            options={'ordering': ['position']},
        ),
        # Re-declare the position field with help text and a verbose name.
        migrations.AlterField(
            model_name='rule',
            name='position',
            field=models.PositiveIntegerField(default=0, help_text='The position inside the rule processor', verbose_name='position'),
            preserve_default=True,
        ),
    ]
StarcoderdataPython
3269992
# coding=UTF-8 from flask import url_for def test_index(client): res = client.get(url_for('index')) assert res.status_code == 200 assert b'Hello, World!' in res.data #def test_push_hook(client): # res = client.post('/postreceive', '{"foo":"bar"}') # assert res.status_code == 400
StarcoderdataPython
136303
<gh_stars>0 #!/usr/bin/env python # stdlib imports import os.path from datetime import datetime import re import logging # third party imports import numpy as np from scipy import constants import pandas as pd import pkg_resources # local from gmprocess.core.stationstream import StationStream from gmprocess.core.stationtrace import StationTrace, PROCESS_LEVELS from gmprocess.io.seedname import get_channel_name from gmprocess.io.utils import is_binary TIMEFMT = "%m/%d/%Y %H:%M:%S.%f" FLOATRE = r"[-+]?[0-9]*\.?[0-9]+" # INTRE = "[-+]?[0-9]*" INTRE = r"(\+|-)?\d+" # 2/27/2010 2:45:46.000 TIME_RE = r"[0-9]{1,2}/[0-9]{1,2}/[0-9]{4} [0-9]{1,2}:[0-9]{2}:" r"[0-9]{2}\.?[0-9]*" TIME_RE2 = "[0-9]{1,2}/[0-9]{1,2}/[0-9]{4} [0-9]{1,2}:[0-9]{2}:[0-9]{2}" TEXT_HDR_ROWS = 13 INT_HEADER_ROWS = 7 FLOAT_HEADER_ROWS = 7 NCOLS = 10 SOURCE = "UNIVERSIDAD DE CHILE - RENADIC" SOURCE_FORMAT = "RENADIC" NETWORK = "C" LEVELS = {"VOL1DS": "V1"} DECIG_TO_GALS = (constants.g * 100) / 10 MARKER = "UNIVERSIDAD DE CHILE - RENADIC" ENCODING = "ISO-8859-1" NORTH_CHANNELS = ["NS", "NZ", "L"] # calling these north channels WEST_CHANNELS = ["EW", "T"] VERTICAL_CHANNELS = ["Z", "V"] G10_TO_GALS = 980 / 10.0 def is_renadic(filename, config=None): """Check to see if file is Chilean RENADIC format. Args: filename (str): Path to file to check. config (dict): Dictionary containing configuration. Returns: bool: True if Chilean RENADIC supported, otherwise False. """ if is_binary(filename): return False with open(filename, "rt", encoding=ENCODING) as f: lines = [next(f) for x in range(TEXT_HDR_ROWS)] if MARKER in lines[7]: return True return False def read_renadic(filename, config=None, **kwargs): """Read the Chilean RENADIC strong motion data format. Args: filename (str): path to RENADIC data file. config (dict): Dictionary containing configuration. kwargs (ref): Other arguments will be ignored. Returns: list: Sequence of one StationStream object containing 3 StationTrace objects. 
""" # This network does not include station coordinates in the data files, # but they did provide a PDF table with information about each station, # including structure type (free field or something else) and the # coordinates data_dir = pkg_resources.resource_filename("gmprocess", "data") tablefile = os.path.join(data_dir, "station_coordinates.xlsx") table = pd.read_excel(tablefile, engine="openpyxl") with open(filename, "rt", encoding=ENCODING) as f: lines1 = [next(f) for x in range(TEXT_HDR_ROWS)] header1 = _read_header(lines1, filename, table) ndata_rows = int(np.ceil((header1["npts"] * 2) / NCOLS)) skip_rows = TEXT_HDR_ROWS + INT_HEADER_ROWS + FLOAT_HEADER_ROWS data1 = _read_data(filename, skip_rows, header1["npts"]) skip_rows += ndata_rows + 1 with open(filename, "rt", encoding=ENCODING) as f: [next(f) for x in range(skip_rows)] lines2 = [next(f) for x in range(TEXT_HDR_ROWS)] header2 = _read_header(lines2, filename, table) skip_rows += TEXT_HDR_ROWS + INT_HEADER_ROWS + FLOAT_HEADER_ROWS data2 = _read_data(filename, skip_rows, header1["npts"]) skip_rows += ndata_rows + 1 with open(filename, "rt", encoding=ENCODING) as f: [next(f) for x in range(skip_rows)] lines3 = [next(f) for x in range(TEXT_HDR_ROWS)] header3 = _read_header(lines3, filename, table) skip_rows += TEXT_HDR_ROWS + INT_HEADER_ROWS + FLOAT_HEADER_ROWS data3 = _read_data(filename, skip_rows, header1["npts"]) trace1 = StationTrace(data=data1, header=header1) response = {"input_units": "counts", "output_units": "cm/s^2"} trace1.setProvenance("remove_response", response) trace2 = StationTrace(data=data2, header=header2) trace2.setProvenance("remove_response", response) trace3 = StationTrace(data=data3, header=header3) trace3.setProvenance("remove_response", response) stream = StationStream(traces=[trace1, trace2, trace3]) return [stream] def _read_data(filename, skip_rows, npts): floatrows = (npts * 2) / NCOLS introws = int(floatrows) data = np.genfromtxt( filename, skip_header=skip_rows, 
max_rows=introws, delimiter=10 * [7], encoding=ENCODING, ) data = data.flatten() if floatrows > introws: data2 = np.genfromtxt( filename, skip_header=skip_rows + introws, max_rows=1, delimiter=10 * [7], encoding=ENCODING, ) data2 = data2.flatten() data = np.concatenate((data, data2)) data = data[1::2] data *= G10_TO_GALS data = data[0:npts] return data def _read_header(lines, filename, table): header = {} standard = {} coords = {} format_specific = {} # fill out the standard dictionary standard["source"] = SOURCE standard["source_format"] = SOURCE_FORMAT standard["instrument"] = "" standard["sensor_serial_number"] = "" standard["process_level"] = PROCESS_LEVELS["V1"] standard["process_time"] = lines[0].split(":")[1].strip() # station name line can look like this: # <NAME> S/N 675 sparts = lines[5].split() station_name = " ".join(sparts[0 : sparts.index("S/N")]) standard["station_name"] = station_name # this table gives us station coordinates and structure type station_row = table[table["Name"] == station_name] if not len(station_row): logging.warning("Unknown structure type.") standard["structure_type"] = "" else: row = station_row.iloc[0] standard["structure_type"] = row["Structure Type"] standard["corner_frequency"] = np.nan standard["units"] = "cm/s^2" standard["units_type"] = "acc" inst_dict = {} for part in lines[9].split(","): key, value = part.split("=") fvalue_str = re.search(FLOATRE, value.strip()).group() inst_dict[key.strip()] = float(fvalue_str) standard["instrument_period"] = inst_dict["INSTR PERIOD"] standard["instrument_damping"] = inst_dict["DAMPING"] standard["horizontal_orientation"] = np.nan standard["vertical_orientation"] = np.nan standard["comments"] = " ".join(lines[11:13]).replace("\n", "") head, tail = os.path.split(filename) standard["source_file"] = tail or os.path.basename(head) # this field can be used for instrument correction # when data is in counts standard["instrument_sensitivity"] = inst_dict["SENSITIVITY"] # fill out the stats 
stuff try: stimestr = re.search(TIME_RE, lines[3]).group() except AttributeError: try: stimestr = re.search(TIME_RE2, lines[3]).group() except AttributeError: logging.warning("Setting time to epoch.") stimestr = "01/01/1970 00:00:00.000" # 2/27/2010 2:45:46.000 GMT stime = datetime.strptime(stimestr, TIMEFMT) # it appears that sometimes the trigger time is set to Jan 1, 1980 # by default. if stime.year == 1980 and stime.month == 1 and stime.day == 1: fmt = "Trigger time set to %s in file %s" logging.warning(fmt % (str(stime), standard["source_file"])) header["starttime"] = stime npts, duration = re.findall(FLOATRE, lines[10]) npts = int(npts) duration = float(duration) header["npts"] = npts header["delta"] = duration / (npts - 1) header["sampling_rate"] = (npts - 1) / duration header["duration"] = duration raw_channel = lines[6][9:11].strip() if raw_channel in NORTH_CHANNELS: channel = get_channel_name(header["sampling_rate"], True, False, True) elif raw_channel in WEST_CHANNELS: channel = get_channel_name(header["sampling_rate"], True, False, False) elif raw_channel in VERTICAL_CHANNELS: channel = get_channel_name(header["sampling_rate"], True, True, False) else: raise KeyError(f"Channel name {raw_channel} not defined") header["channel"] = channel header["station"] = lines[5].split()[-1] header["location"] = "--" header["network"] = NETWORK # these files seem to have all zeros for station coordinates! 
if not len(station_row): logging.warning(f"Could not find station match for {station_name}") coordparts = lines[4].split() lat = float(re.search(FLOATRE, coordparts[2]).group()) lon = float(re.search(FLOATRE, coordparts[3]).group()) if lon == 0 or lat == 0: logging.warning("Latitude or Longitude values are 0") if "S" in coordparts[2]: lat = -1 * lat if "W" in coordparts[3]: lon = -1 * lon else: row = station_row.iloc[0] lat = row["Lat"] lon = row["Lon"] altitude = 0.0 logging.warning("Setting elevation to 0.0") coords = {"latitude": lat, "longitude": lon, "elevation": altitude} header["coordinates"] = coords header["standard"] = standard header["format_specific"] = format_specific return header
StarcoderdataPython
9796
import numpy as np
import torch
import torch.nn as nn
from mmcv.runner import obj_from_dict
from mmcv.utils.config import Config

from mmedit.models import build_model
from mmedit.models.losses import L1Loss
from mmedit.models.registry import COMPONENTS


@COMPONENTS.register_module()
class BP(nn.Module):
    """A simple BP network for testing LIIF.

    Args:
        in_dim (int): Input dimension.
        out_dim (int): Output dimension.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.layer = nn.Linear(in_dim, out_dim)

    def forward(self, x):
        # Flatten all leading dims, apply the linear layer over the last
        # dim, then restore the original leading shape.
        shape = x.shape[:-1]
        x = self.layer(x.view(-1, x.shape[-1]))
        return x.view(*shape, -1)


def test_liif():
    """End-to-end smoke test of the LIIF restorer (CPU, and GPU if available)."""
    model_cfg = dict(
        type='LIIF',
        generator=dict(
            type='EDSR',
            in_channels=3,
            out_channels=3,
            mid_channels=8,
            num_blocks=1),
        imnet=dict(type='BP', in_dim=8, out_dim=3),
        local_ensemble=True,
        feat_unfold=True,
        cell_decode=True,
        rgb_mean=(0.4488, 0.4371, 0.4040),
        rgb_std=(1., 1., 1.),
        eval_bsize=30000,
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))

    scale_max = 4
    train_cfg = None
    test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale_max))

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'LIIF'
    assert isinstance(restorer.imnet, BP)
    assert isinstance(restorer.pixel_loss, L1Loss)

    # prepare data
    inputs = torch.rand(1, 3, 22, 11)
    targets = torch.rand(1, 128 * 64, 3)
    coord = torch.rand(1, 128 * 64, 2)
    cell = torch.rand(1, 128 * 64, 2)
    data_batch = {'lq': inputs, 'gt': targets, 'coord': coord, 'cell': cell}

    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=restorer.parameters()))

    # test train_step and forward_test (cpu)
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert outputs['results']['lq'].shape == data_batch['lq'].shape
    assert outputs['results']['gt'].shape == data_batch['gt'].shape
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 128 * 64, 3)

    # test train_step and forward_test (gpu)
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'coord': coord.cuda(),
            'cell': cell.cuda()
        }

        # train_step
        optimizer = obj_from_dict(optim_cfg, torch.optim,
                                  dict(params=restorer.parameters()))
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert outputs['results']['lq'].shape == data_batch['lq'].shape
        assert outputs['results']['gt'].shape == data_batch['gt'].shape
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 128 * 64, 3)

        # val_step
        result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
        assert isinstance(result, dict)
        assert isinstance(result['eval_result'], dict)
        assert result['eval_result'].keys() == set({'PSNR', 'SSIM'})
        assert isinstance(result['eval_result']['PSNR'], np.float64)
        assert isinstance(result['eval_result']['SSIM'], np.float64)
StarcoderdataPython