text
string
size
int64
token_count
int64
import os
from enum import Enum


class AppPackage(Enum):
    """Known Azure Batch application packages."""
    # Add packages here as required.
    LASTOOLS = "lastools"


def getPackageDirectory(package: AppPackage, version: str = None) -> str:
    """Gets the directory where the specified package is installed.

    Azure Batch exposes each installed application package through an
    ``AZ_BATCH_APP_PACKAGE_<name>[#<version>]`` environment variable.

    Args:
        package: The application package to locate.
        version: Optional specific package version; when omitted, the
            variable for the default version is read.

    Returns:
        The installation directory recorded in the environment.

    Raises:
        KeyError: If the corresponding environment variable is not set
            (package/version not deployed to this node).
    """
    varName = f'AZ_BATCH_APP_PACKAGE_{package.value}'
    # Idiom fix: compare against None with `is not None`, not `!=`.
    if version is not None:
        varName = f'{varName}#{version}'
    return os.environ[varName]
410
130
""" This file should be imported at the bottom of configure.py TODO: All of this may be moved into a single function in the future so people can choose a reactor in configure.py """ from twisted.internet import reactor from twisted.internet.task import LoopingCall from threading import currentThread, Thread # Check to see if main thread is alive mainthread = currentThread() def checkExit(): if not mainthread.isAlive(): reactor.stop() # Every second, make sure that the interface thread is alive. LoopingCall(checkExit).start(1) # start the network loop in a new thread Thread(target=lambda : reactor.run(installSignalHandlers=0)).start()
680
200
import random
import time

import requests


class Account:
    """One Freewar account: logs in, trains an ability, picks up oil, logs out.

    All requests go through the world-specific ``basic_url`` with a fixed
    browser-like header; the session cookie captured at login is reused for
    every subsequent request.
    """

    def __init__(self, language, world, user, password, ability):
        # Session/credential state.
        self.cookie = ""
        self.language = language
        self.world = world
        self.user = user
        self.password = password
        self.ability = ability
        # Preparing header and basic url for GET and POST requests.
        # NOTE: an unknown language leaves basic_url/header unset (as in the
        # original); only "de" and "en" are supported.
        if language == "de":
            self.basic_url = "http://welt" + self.world + ".freewar.de/freewar/internal/"
            self.header = {"Host": "welt" + self.world + ".freewar.de",
                           "Connection": "keep-alive",
                           "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64)"}
        elif language == "en":
            self.basic_url = "http://world" + self.world + ".freewar.com/freewar/internal/"
            self.header = {"Host": "world" + self.world + ".freewar.com",
                           "Connection": "keep-alive",
                           "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64)"}

    def login(self):
        """POST the credentials and keep the session cookie. Returns 0."""
        print("\t[*] Logging in")
        login_url = self.basic_url + "index.php"
        # The submit button label is localized, so it depends on the server.
        if self.language == "de":
            login_submit = "Einloggen"
        elif self.language == "en":
            login_submit = "Login"
        # Login payload / POST parameters.
        login_payload = {"name": self.user,
                         "password": self.password,
                         "submit": login_submit}
        login_request = requests.post(login_url, data=login_payload, headers=self.header)
        # Necessary for session management in the other requests.
        self.cookie = login_request.cookies
        print("\t[+] Login successful")
        return 0

    def redirect(self):
        """Load the frameset page; necessary to access the other internal links."""
        print("\t[*] Redirecting")
        redirect_url = self.basic_url + "frset.php"
        requests.get(redirect_url, headers=self.header, cookies=self.cookie)
        print("\t[+] Redirect successful")
        return 0

    def train(self):
        """Trigger one training round and report the ability's level.

        Returns 0 on success, 1 when the level could not be parsed.
        """
        # The training sequence.
        print("\t[*] Training")
        train_url = self.basic_url + "ability.php"
        train_payload = {"action": "train", "ability_id": self.ability}
        requests.get(train_url, params=train_payload, headers=self.header, cookies=self.cookie)
        print("\t[+] Training successful")
        # Preparing for the training status request.
        status_payload = {"action": "show_ability", "ability_id": self.ability}
        # Requesting content of the main frame.
        status_request = requests.get(train_url, params=status_payload,
                                      headers=self.header, cookies=self.cookie)
        if self.language == "de":
            search_parameters = ["Aktuelle Stufe: ", "Maximale Stufe: "]  # TODO: verify exact wording online
        elif self.language == "en":
            search_parameters = ["actual level: ", "maximal level: "]
        output = "\t[*] Actual level: "
        first = True
        # Looking for the search parameters in the HTTP response.
        for search_text in search_parameters:
            position = status_request.text.find(search_text)
            if position == -1:
                print("\t[-] Could not found ability level.")
                return 1
            text_length = len(search_text)
            ability_level = status_request.text[position + text_length:
                                               position + text_length + 3]
            # BUG FIX: the original stripped "<", "/" and "b" one after the
            # other, which left e.g. "5</" as "5<"; strip the whole markup
            # character set in one pass instead.
            ability_level = ability_level.strip("</b")
            output += ability_level
            if first:
                first = False
                output += " / "
        print(output)
        return 0

    def oil(self):
        """Pick up the account's oil if it stands on the right field.

        Returns 0 on success, 1 when no checkid was found on the page.
        """
        print("\t[*] Picking up oil")
        # Requesting content of the main frame.
        main_url = self.basic_url + "main.php"
        main_request = requests.get(main_url, headers=self.header, cookies=self.cookie)
        position = main_request.text.find("checkid=")
        if position == -1:
            print("\t[-] Oil isn't ready yet or account is on the wrong position.")
            return 1
        # Picking up the oil; the 7-character checkid follows "checkid=".
        oil_url = self.basic_url + "main.php"
        oil_payload = {"arrive_eval": "drink",
                       "checkid": main_request.text[position + 8: position + 15]}
        requests.get(oil_url, params=oil_payload, headers=self.header, cookies=self.cookie)
        return 0

    def logout(self):
        """End the session cleanly. Returns 0."""
        print("\t[*] Logging out")
        logout_url = self.basic_url + "logout.php"
        requests.get(logout_url, headers=self.header, cookies=self.cookie)
        print("\t[+] Logged out")
        return 0

    def automatic_sit(self):
        """Run the full login→train→oil→logout cycle.

        Returns 0 on success, 1 on any error (falsy/truthy contract kept
        for ManageAccounts.manage()).
        """
        try:
            self.login()
            self.redirect()
            self.train()
            self.oil()
            self.logout()
        # Narrowed from a bare `except:`; still covers network and parsing
        # failures without swallowing KeyboardInterrupt/SystemExit.
        except Exception:
            print("[!] Connection Error.")
            return 1
        return 0


class ManageAccounts:
    """Reads credentials from a file and sits every account, one world at a time."""

    def __init__(self, account_path):
        self.accounts = []
        self.later = []
        # Filling the list of credentials; one account per line:
        # "language, world, user, password, ability".
        with open(account_path, "r") as account_file:
            for line in account_file:
                splitted_line = line.strip("\n").split(", ")
                if len(splitted_line) == 5:
                    self.accounts.append(splitted_line)

    def manage(self):
        """Process all accounts, deferring same-world duplicates to a later pass.

        Returns 1 as soon as one account fails, None when all are done.
        """
        while len(self.accounts) > 0:
            # BUG FIX: iterate over a snapshot — the body removes entries
            # from self.accounts, and mutating a list while iterating it
            # silently skips elements.
            for entry in list(self.accounts):
                if entry not in self.accounts:
                    # Already deferred to a later pass by a previous iteration.
                    continue
                language, world, user, password, ability = entry
                # Skipping other credentials of the same world for this pass.
                skip = False
                for account in list(self.accounts):
                    if (account[1] == world) and (account[2] != user):
                        self.later.append(account)
                        self.accounts.remove(account)
                        skip = True
                if skip:
                    continue
                # If not skipped, handling the credential.
                print("\n[*] World: " + world + " Account: " + user + " Server: " + language)
                fw_account = Account(language, world, user, password, ability)
                if fw_account.automatic_sit():
                    return 1
            # Writing memorized credentials back to be handled.
            if len(self.later) > 0:
                random_time = random.randint(180, 300)
                print("[*] Wating " + str(random_time) + " Seconds to log other accounts savely.")
                time.sleep(random_time)
                # BUG FIX: the original did `self.accounts = self.later`
                # followed by `self.later.clear()`, which cleared BOTH lists
                # (same object) and dropped the deferred accounts. Copy first.
                self.accounts = list(self.later)
                self.later.clear()
            else:
                self.accounts.clear()
7,170
1,969
from django.shortcuts import render
from django.views import View


# Create your views here.
class CoreView(View):
    """Render the static core landing page."""

    template_name = 'core/home.html'

    def get(self, request, *args, **kwargs):
        """Handle GET by rendering the configured template with an empty context."""
        context = {}
        response = render(request, self.template_name, context)
        return response
251
74
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""hashvis by Peter Hosey

Reads from standard input or files, and prints what it reads, along with
colorized versions of any hashes or signatures found in each line.

The goal here is visual comparability. You should be able to tell whether two
hashes are the same at a glance, rather than having to closely compare digits
(or, more probably, not bother and just assume the hashes match!).

The more obvious of the two methods used is shaping the output: Each hash will
be represented as a rectangle of an aspect ratio determined by the hash. You
may thus end up with one that's tall and one that's wide, or one that's square
(if the hash length is a square number) and one that isn't.

If two hashes are the same shape (or if you passed --oneline), another
difference is that each byte is represented by a different pair of foreground
and background colors. You should thus be able to compare the color-patterns
rather than having to look at individual digits.
"""

# #mark - Imports and utilities

# PORT: this file was Python 2 (`range = xrange`, print statements, implicit
# integer division, bytes-returning b2a_hex). It is now Python 3 with
# identical observable behavior per the self-tests below.

import sys
import os
import re
import base64
import binascii
import cmath as math  # cmath.sqrt never raises on negatives; .real is taken below.


def factors(n):
    """Yield every pair of factors of n (x,y where n//x == y and n//y == x),
    except for (1,n) and (n,1)... which are in fact yielded for n == 1 and
    primes — the docstring's exceptions are filtered later by except_one."""
    limit = math.sqrt(n).real
    if n == 1:
        yield (1, 1)
        return
    for i in range(1, int(limit + 1)):
        if n % i == 0:
            pair = (i, n // i)
            yield pair
            opposite_pair = (pair[1], pair[0])
            # If n is square, one of the pairs will be (sqrt, sqrt). We want
            # to yield that only once. All other pairs, both ways round.
            if pair != opposite_pair:
                yield opposite_pair


def except_one(pairs):
    """Given a sequence of pairs (x, y), yield every pair where neither x nor y is 1."""
    for pair in pairs:
        if 1 not in pair:
            yield pair


# #mark - Parsing

MD5_exp = re.compile(r'^MD5 \(.*\) = ([0-9a-fA-F]+)')
fingerprint_exp = re.compile(r'^(?:R|ECD)SA key fingerprint is (?:(?:MD5:)?(?P<hex>[:0-9a-fA-F]+)|SHA256:(?P<base64>[+/0-9a-zA-Z]+))\.')
commit_exp = re.compile(r'^commit ([0-9a-fA-F]+)')
more_base64_padding_than_anybody_should_ever_need = '=' * 64


def extract_hash_from_line(input_line):
    """Return a tuple of the extracted hash as hex, and whether it was
    originally hex (vs, say, base64). The hash may be None if none was found
    in the input; it is '' when a recognized prefix failed to parse fully."""
    if input_line[:1] == 'M':
        match = MD5_exp.match(input_line)
        if match:
            return match.group(1), True
        else:
            return '', False
    elif input_line[:1] in 'RE':
        # NOTE: an empty line also lands here ('' in 'RE' is True) and
        # returns ('', False) — same as the original behavior.
        match = fingerprint_exp.match(input_line)
        if match:
            hex_digits = match.group('hex')
            if hex_digits:
                return hex_digits, True
            b64str = match.group('base64')
            if b64str:
                # Pacify the base64 module, which wants *some* padding (at
                # least sometimes) but doesn't care how much.
                b64str += more_base64_padding_than_anybody_should_ever_need
                # Re-encode to hex for processing downstream; decode because
                # b2a_hex returns bytes on Python 3.
                return binascii.b2a_hex(base64.b64decode(b64str)).decode('ascii'), False
        return '', False
    elif input_line[:7] == 'commit ':
        match = commit_exp.match(input_line)
        if match:
            return match.group(1), True
    if input_line:
        # Generic case: take the first whitespace-separated field.
        try:
            candidate, _rest = input_line.split(None, 1)
        except ValueError:
            # Insufficient fields: no whitespace in the line; use it all.
            candidate = input_line
        candidate = candidate.strip().replace('-', '')
        try:
            int(candidate, 16)
        except ValueError:
            # Not a hex number.
            return None, False
        else:
            return candidate, True


def parse_hex(digits):
    """Yield the bytes of a hex string, skipping ':' and '-' separators."""
    digits = digits.lstrip(':-')
    while digits:
        byte_hex, digits = digits[:2], digits[2:].lstrip(':-')
        yield int(byte_hex, 16)


# #mark - Representation

def fgcolor(idx, deep_color=False):
    """ANSI foreground escape for a byte; high nibble picks the 16-color entry."""
    if deep_color:
        return '\x1b[38;5;{0}m'.format(idx)
    idx = (idx >> 4) & 0xf
    # 90 is bright foreground; 30 is dull foreground.
    if idx < 0x8:
        base = 30
    else:
        base = 90
        idx = idx - 0x8
    return '\x1b[{0}m'.format(base + idx)


def bgcolor(idx, deep_color=False):
    """ANSI background escape for a byte; low nibble picks the 16-color entry."""
    if deep_color:
        idx = ((idx & 0xf) << 4) | ((idx & 0xf0) >> 4)
        # This add 128 and mod 256 is important, because it ensures
        # double-digits such as 00 remain different colors.
        return '\x1b[48;5;{0}m'.format((idx + 128) % 256)
    else:
        idx = idx & 0xf
        # 100 is bright background; 40 is dull background.
        if idx < 0x8:
            base = 40
        else:
            base = 100
            idx = idx - 0x8
        return '\x1b[{0}m'.format(base + idx)


BOLD = '\x1b[1m'
RESET = '\x1b[0m'


def hash_to_pic(hash, only_ever_one_line=False, represent_as_hex=False,
                deep_color=False, _underlying_fgcolor=fgcolor,
                _underlying_bgcolor=bgcolor):
    """Yield the lines of the colorized rectangle representing `hash`."""
    def fgcolor(idx):
        return _underlying_fgcolor(idx, deep_color)

    def bgcolor(idx):
        return _underlying_bgcolor(idx, deep_color)

    byte_values = parse_hex(hash)
    characters = list('0123456789abcdef') if represent_as_hex else [
        '▚', '▞', '▀', '▌',
    ]

    def find_character(b):
        return characters[b % len(characters)]

    if not only_ever_one_line:
        pairs = [(w, h) for (w, h) in except_one(factors(len(hash) // 2)) if w >= h]
        if not pairs:
            # Prefer (w, 1) over (1, h) if we have that choice.
            pairs = [(w, h) for (w, h) in factors(len(hash) // 2) if w >= h]

    output_chunks = []
    last_byte = 0
    for b in byte_values:
        if not represent_as_hex:
            output_chunks.append(fgcolor(b) + bgcolor(b) + find_character(b))
        else:
            output_chunks.append(fgcolor(b) + bgcolor(b)
                                 + find_character(b >> 4) + find_character(b & 0xf))
        last_byte = b

    if only_ever_one_line:
        pixels_per_row, num_rows = len(hash) // 2, 1
    else:
        # The last byte of the hash chooses the rectangle's aspect ratio.
        pixels_per_row, num_rows = pairs[last_byte % len(pairs)]

    while output_chunks:
        yield BOLD + ''.join(output_chunks[:pixels_per_row]) + RESET
        del output_chunks[:pixels_per_row]


if __name__ == '__main__':
    # #mark - Self-tests
    run_tests = False
    if run_tests:
        # A square number. Should contain a diagonal pair (in this case, (16,16)).
        factors_of_256 = set(factors(256))
        assert factors_of_256 == set([(256, 1), (16, 16), (8, 32), (2, 128), (64, 4), (1, 256), (32, 8), (128, 2), (4, 64)])
        # A rectangular number: not square, but still composite. No diagonal pair here.
        factors_of_12 = set(factors(12))
        assert factors_of_12 == set([(2, 6), (12, 1), (1, 12), (6, 2), (4, 3), (3, 4)])
        assert (1, 256) in factors_of_256
        assert (256, 1) in factors_of_256
        assert (1, 256) not in except_one(factors_of_256)
        assert (256, 1) not in except_one(factors_of_256)
        # A prime number. Should have exactly one pair of factors.
        factors_of_5 = set(factors(5))
        assert factors_of_5 == set([(1, 5), (5, 1)])

        assert list(parse_hex('ab15e')) == [0xab, 0x15, 0x0e]
        assert list(parse_hex(':::ab:15:e')) == [0xab, 0x15, 0x0e]

        assert extract_hash_from_line('RSA key fingerprint is b8:79:03:7d:00:44:98:6e:67:a0:59:1a:01:21:36:38.\n') == ('b8:79:03:7d:00:44:98:6e:67:a0:59:1a:01:21:36:38', True)
        assert extract_hash_from_line('RSA key fingerprint is b8:79:03:7d:00:44:98:6e:67:a0:59:1a:01:21:36:38.') == ('b8:79:03:7d:00:44:98:6e:67:a0:59:1a:01:21:36:38', True)
        # Alternate output example from https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Authentication_Keys :
        assert extract_hash_from_line('RSA key fingerprint is MD5:10:4a:ec:d2:f1:38:f7:ea:0a:a0:0f:17:57:ea:a6:16.') == ('10:4a:ec:d2:f1:38:f7:ea:0a:a0:0f:17:57:ea:a6:16', True)
        # Also from https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Authentication_Keys :
        assert extract_hash_from_line('ECDSA key fingerprint is SHA256:LPFiMYrrCYQVsVUPzjOHv+ZjyxCHlVYJMBVFerVCP7k.\n') == ('2cf162318aeb098415b1550fce3387bfe663cb10879556093015457ab5423fb9', False), extract_hash_from_line('ECDSA key fingerprint is SHA256:LPFiMYrrCYQVsVUPzjOHv+ZjyxCHlVYJMBVFerVCP7k.\n')
        assert extract_hash_from_line('ECDSA key fingerprint is SHA256:LPFiMYrrCYQVsVUPzjOHv+ZjyxCHlVYJMBVFerVCP7k.') == ('2cf162318aeb098415b1550fce3387bfe663cb10879556093015457ab5423fb9', False), extract_hash_from_line('ECDSA key fingerprint is SHA256:LPFiMYrrCYQVsVUPzjOHv+ZjyxCHlVYJMBVFerVCP7k.')
        # Mix and match RSA and ECDSA with MD5 and SHA256:
        assert extract_hash_from_line('ECDSA key fingerprint is MD5:10:4a:ec:d2:f1:38:f7:ea:0a:a0:0f:17:57:ea:a6:16.') == ('10:4a:ec:d2:f1:38:f7:ea:0a:a0:0f:17:57:ea:a6:16', True)
        assert extract_hash_from_line('RSA key fingerprint is SHA256:LPFiMYrrCYQVsVUPzjOHv+ZjyxCHlVYJMBVFerVCP7k.\n') == ('2cf162318aeb098415b1550fce3387bfe663cb10879556093015457ab5423fb9', False), extract_hash_from_line('RSA key fingerprint is SHA256:LPFiMYrrCYQVsVUPzjOHv+ZjyxCHlVYJMBVFerVCP7k.\n')
        # UUID
        assert extract_hash_from_line('E6CD379E-12CD-4E00-A83A-B06E74CF03B8') == ('E6CD379E12CD4E00A83AB06E74CF03B8', True), extract_hash_from_line('E6CD379E-12CD-4E00-A83A-B06E74CF03B8')
        assert extract_hash_from_line('e6cd379e-12cd-4e00-a83a-b06e74cf03b8') == ('e6cd379e12cd4e00a83ab06e74cf03b8', True), extract_hash_from_line('e6cd379e-12cd-4e00-a83a-b06e74cf03b8')

        assert extract_hash_from_line('MD5 (hashvis.py) = e21c7b846f76826d52a0ade79ef9cb49\n') == ('e21c7b846f76826d52a0ade79ef9cb49', True)
        assert extract_hash_from_line('MD5 (hashvis.py) = e21c7b846f76826d52a0ade79ef9cb49') == ('e21c7b846f76826d52a0ade79ef9cb49', True)
        assert extract_hash_from_line('8b948e9c85fdf68f872017d7064e839c hashvis.py\n') == ('8b948e9c85fdf68f872017d7064e839c', True)
        assert extract_hash_from_line('8b948e9c85fdf68f872017d7064e839c hashvis.py') == ('8b948e9c85fdf68f872017d7064e839c', True)
        assert extract_hash_from_line('2c9997ce32cb35823b2772912e221b350717fcb2d782c667b8f808be44ae77ba1a7b94b4111e386c64a2e87d15c64a2fc2177cd826b9a0fba6b348b4352ed924 hashvis.py\n') == ('2c9997ce32cb35823b2772912e221b350717fcb2d782c667b8f808be44ae77ba1a7b94b4111e386c64a2e87d15c64a2fc2177cd826b9a0fba6b348b4352ed924', True)
        assert extract_hash_from_line('2c9997ce32cb35823b2772912e221b350717fcb2d782c667b8f808be44ae77ba1a7b94b4111e386c64a2e87d15c64a2fc2177cd826b9a0fba6b348b4352ed924 hashvis.py') == ('2c9997ce32cb35823b2772912e221b350717fcb2d782c667b8f808be44ae77ba1a7b94b4111e386c64a2e87d15c64a2fc2177cd826b9a0fba6b348b4352ed924', True)
        assert extract_hash_from_line('#!/usr/bin/python\n')[0] is None

        # Protip: Use vis -co to generate these.
        (line,) = hash_to_pic('78', represent_as_hex=True, deep_color=False)
        assert line == '\033[1m\033[37m\033[100m78\033[0m', repr(line)
        (line,) = hash_to_pic('7f', represent_as_hex=True, deep_color=False)
        assert line == '\033[1m\033[37m\033[107m7f\033[0m', repr(line)
        assert list(hash_to_pic('aebece', deep_color=False)) != list(hash_to_pic('deeefe', deep_color=False)), (list(hash_to_pic('aebece', deep_color=False)), list(hash_to_pic('deeefe', deep_color=False)))
        assert list(hash_to_pic('eaebec', deep_color=False)) != list(hash_to_pic('edeeef', deep_color=False)), (list(hash_to_pic('eaebec', deep_color=False)), list(hash_to_pic('edeeef', deep_color=False)))
        sys.exit(0)

    # #mark - Main

    use_256color = os.getenv('TERM') == 'xterm-256color'

    import argparse
    parser = argparse.ArgumentParser(description="Visualize hexadecimal input (hashes, UUIDs, etc.) as an arrangement of color blocks.")
    parser.add_argument('--one-line', '--oneline', action='store_true', help="Unconditionally produce a rectangle 1 character tall. The default is to choose a pair of width and height based upon one of the bytes of the input.")
    parser.add_argument('--color-test', '--colortest', action='store_true', help="Print the 16-color, 256-color foreground, and 256-color background color palettes, then exit.")
    options, args = parser.parse_known_args()

    if options.color_test:
        for x in range(16):
            # end=' ' mimics the Python 2 trailing-comma prints.
            print(fgcolor(x, deep_color=False), end=' ')
            print(bgcolor(x, deep_color=False), end=' ')
        else:
            print()
        for x in range(256):
            sys.stdout.write(fgcolor(x, deep_color=True) + bgcolor(x, deep_color=True) + '%02x' % (x,))
        else:
            print(RESET)
        sys.exit(0)

    import fileinput
    for input_line in fileinput.input(args):
        print(input_line.rstrip('\n'))
        hash, is_hex = extract_hash_from_line(input_line)
        if hash:
            for output_line in hash_to_pic(hash,
                                           only_ever_one_line=options.one_line,
                                           represent_as_hex=is_hex,
                                           deep_color=use_256color):
                print(output_line)
12,075
5,813
# https://projecteuler.net/problem=7
import math


def sieve(xmax):
    """Return all primes <= xmax, in ascending order.

    Uses a classic sieve of Eratosthenes over a bytearray. The original
    built a set of all candidates and subtracted a freshly-built set of
    multiples for every i, which allocates tens of millions of temporary
    set entries for xmax around 2 million; the flag-array form does the
    same work with cheap slice assignments.

    Args:
        xmax: Inclusive upper bound; values below 2 yield an empty list.

    Returns:
        Sorted list of all primes in [2, xmax].
    """
    if xmax < 2:
        return []
    size = xmax + 1
    is_prime = bytearray([1]) * size
    is_prime[0] = is_prime[1] = 0
    for i in range(2, int(math.sqrt(xmax)) + 1):
        if is_prime[i]:
            # Strike every multiple of i starting at i*i; smaller multiples
            # were already struck by smaller primes.
            is_prime[i * i::i] = bytes(len(range(i * i, size, i)))
    return [n for n in range(size) if is_prime[n]]


print(sum(sieve(2000000)))
270
124
"""Add new types Revision ID: 32053847c4db Revises: 05a62958a9cc Create Date: 2019-06-11 10:36:14.456629 """ from alembic import context from sqlalchemy.orm import sessionmaker # revision identifiers, used by Alembic. revision = '32053847c4db' down_revision = '05a62958a9cc' branch_labels = None depends_on = None all_commands = [ (""" ALTER TABLE data_source CHANGE `format` `format` ENUM( 'CSV','CUSTOM','GEO_JSON','HAR_IMAGE_FOLDER','HDF5','DATA_FOLDER', 'IMAGE_FOLDER', 'JDBC','JSON','NETCDF4','PARQUET','PICKLE','SHAPEFILE', 'TAR_IMAGE_FOLDER','TEXT', 'VIDEO_FOLDER', 'UNKNOWN','XML_FILE') CHARSET utf8 COLLATE utf8_unicode_ci NOT NULL;""", """ ALTER TABLE data_source CHANGE `format` `format` ENUM( 'CSV','CUSTOM','GEO_JSON','HDF5','JDBC','JSON', 'NETCDF4','PARQUET','PICKLE','SHAPEFILE','TEXT', 'UNKNOWN','XML_FILE') CHARSET utf8 COLLATE utf8_unicode_ci NOT NULL;""" ), (""" ALTER TABLE `storage` CHANGE `type` `type` ENUM( 'HDFS','OPHIDIA','ELASTIC_SEARCH','MONGODB','POSTGIS','HBASE', 'CASSANDRA','JDBC','LOCAL') CHARSET utf8 COLLATE utf8_unicode_ci NOT NULL;""", """ ALTER TABLE `storage` CHANGE `type` `type` ENUM( 'HDFS','OPHIDIA','ELASTIC_SEARCH','MONGODB','POSTGIS','HBASE', 'CASSANDRA','JDBC') CHARSET utf8 COLLATE utf8_unicode_ci NOT NULL;""", ), ( """ALTER TABLE `model` CHANGE `type` `type` ENUM( 'KERAS','SPARK_ML_REGRESSION','SPARK_MLLIB_CLASSIFICATION', 'SPARK_ML_CLASSIFICATION','UNSPECIFIED') CHARSET utf8 COLLATE utf8_unicode_ci NOT NULL; """, """ALTER TABLE `model` CHANGE `type` `type` ENUM( 'KERAS','SPARK_ML_REGRESSION','SPARK_MLLIB_CLASSIFICATION', 'SPARK_ML_CLASSIFICATION','UNSPECIFIED') CHARSET utf8 COLLATE utf8_unicode_ci NOT NULL; """ ) ] def upgrade(): ctx = context.get_context() session = sessionmaker(bind=ctx.bind)() connection = session.connection() try: for cmd in all_commands: if isinstance(cmd[0], (unicode, str)): connection.execute(cmd[0]) elif isinstance(cmd[0], list): for row in cmd[0]: connection.execute(row) else: cmd[0]() except: 
session.rollback() raise session.commit() def downgrade(): ctx = context.get_context() session = sessionmaker(bind=ctx.bind)() connection = session.connection() connection.execute('SET foreign_key_checks = 0;') try: for cmd in reversed(all_commands): if isinstance(cmd[1], (unicode, str)): connection.execute(cmd[1]) elif isinstance(cmd[1], list): for row in cmd[1]: connection.execute(row) else: cmd[1]() except: session.rollback() raise connection.execute('SET foreign_key_checks = 1;') session.commit()
3,066
1,095
from advanced_tools.IO_path_utils import * from advanced_tools.algorithm_utils import *
88
25
import os
import pickle
import functools
import errno
import hashlib
import shutil
from urllib.request import urlopen
#import definitions
# NOTE(review): read_config() below references definitions.Parser, but the
# import above is commented out, so calling read_config() raises NameError.
# Presumably `import definitions` should be restored — confirm against the
# full project before relying on read_config().


def read_config(schema='data/schema.yaml', name='sets'):
    """Locate and parse the user's config file.

    Looks for '.<name>rc' in the current directory, then the home directory,
    then the path named by the <NAME>_CONFIG environment variable; falls back
    to an empty config validated against the schema.
    """
    filename = '.{}rc'.format(name)
    paths = [
        os.path.join(os.curdir, filename),
        os.path.expanduser(os.path.join('~', filename)),
        os.environ.get('{}_CONFIG'.format(name.upper())),
    ]
    schema = os.path.join(os.path.dirname(__file__), schema)
    parser = definitions.Parser(schema)
    for path in paths:
        if path and os.path.isfile(path):
            return parser(path)
    return parser('{}')


def disk_cache(basename, directory, method=False):
    """
    Function decorator for caching pickleable return values on disk. Uses a
    digest computed from the function arguments for invalidation. If
    'method', skip the first argument, usually being self or cls. The cache
    filepath is 'directory/basename-hash.pickle'.
    """
    directory = os.path.expanduser(directory)
    ensure_directory(directory)

    def wrapper(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            # BUG FIX: `method` used to drop key[1:], i.e. the WHOLE args
            # tuple rather than just self/cls; slice args instead.
            pos_args = args[1:] if method else args
            # Sort kwargs so keyword order does not change the cache key.
            key = (tuple(pos_args), tuple(sorted(kwargs.items())))
            # BUG FIX: built-in hash() is randomized per interpreter run
            # (PYTHONHASHSEED since Python 3.3), so the old cache files could
            # never be found again after a restart. Use a stable digest.
            # Note: repr()-based keys assume arguments with stable reprs.
            digest = hashlib.sha1(repr(key).encode('utf-8')).hexdigest()
            filename = '{}-{}.pickle'.format(basename, digest)
            filepath = os.path.join(directory, filename)
            if os.path.isfile(filepath):
                with open(filepath, 'rb') as handle:
                    return pickle.load(handle)
            result = func(*args, **kwargs)
            with open(filepath, 'wb') as handle:
                pickle.dump(result, handle)
            return result
        return wrapped
    return wrapper


def download(url, directory, filename=None):
    """
    Download a file and return its filename on the local file system. If the
    file is already there, it will not be downloaded again. The filename is
    derived from the url if not provided. Return the filepath.
    """
    if not filename:
        _, filename = os.path.split(url)
    directory = os.path.expanduser(directory)
    ensure_directory(directory)
    filepath = os.path.join(directory, filename)
    if os.path.isfile(filepath):
        return filepath
    print('Download', filepath)
    with urlopen(url) as response, open(filepath, 'wb') as file_:
        shutil.copyfileobj(response, file_)
    return filepath


def ensure_directory(directory):
    """
    Create the directories along the provided directory path that do not
    exist. Existing directories are left untouched.
    """
    directory = os.path.expanduser(directory)
    # exist_ok replaces the old try/except-EEXIST dance (and its `raise e`,
    # which rewrote the traceback).
    os.makedirs(directory, exist_ok=True)
2,836
838
# Webcam motion detection: background-subtract each frame and draw one
# bounding box around all moving contours. Press 'q' to quit.
import cv2
import time
import numpy as np
import imutils

# 0 selects the default system camera.
camera= 0
cam = cv2.VideoCapture(camera)
# MOG2 background subtractor; varThreshold=0 makes it very sensitive,
# shadows are not marked separately.
fgbg = cv2.createBackgroundSubtractorMOG2(history=1000,varThreshold=0,detectShadows=False)
# Working frame size for resize and the full-frame box check below.
width=600
height=480
fps_time = 0
while True:
    ret_val,image = cam.read()
    # NOTE(review): ret_val is never checked — cam.read() presumably succeeds
    # here; a failed grab would make resize() raise. Confirm for headless use.
    image = cv2.resize(image,(width,height))
    # Blur to suppress pixel noise before background subtraction.
    image = cv2.GaussianBlur(image, (5, 5), 0)
    fgmask = fgbg.apply(image)
    # image = fgbg.apply(image,learningRate=0.001)
    # image = imutils.resize(image, width=500)
    # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cnts = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # NOTE(review): this OpenCV-2-vs-3 tuple unpacking breaks on OpenCV 4,
    # where findContours again returns (contours, hierarchy); consider
    # imutils.grab_contours(cnts) instead — confirm installed cv2 version.
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    # Accumulate one bounding box covering every contour.
    # -1 marks "no contour seen yet".
    x_left = -1
    y_left = -1
    x_right = -1
    y_right = -1
    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        # if cv2.contourArea(c) > 500:
        #     continue
        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        # Seed the running box with the first contour's top-left corner.
        if x_left ==-1 :
            x_left = x
            y_left = y
        # Expand the running box to include this contour.
        if x < x_left:
            x_left = x
        if y < y_left:
            y_left = y
        if x+w > x_right:
            x_right = x+w
        if y+h > y_right:
            y_right = y+h
        # cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
    # Skip drawing when the box covers the whole frame (i.e. the entire
    # scene changed — usually noise or lighting, not a moving object).
    if (x_left==0 and y_left==0 and x_right==width and y_right==height)==False:
        cv2.rectangle(image, (x_left, y_left), (x_right, y_right), (0, 255, 0), 2)
    # cv2.putText(image,
    #     "FPS: %f [press 'q'to quit]" % (1.0 / (time.time() - fps_time)),
    #     (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
    #     (0, 255, 0), 2)
    cv2.imshow('tf-pose-estimation result',fgmask)
    cv2.imshow('tf-pose-estimation result2',image)
    fps_time = time.time()
    # NOTE(review): waitKey() may return keycodes with high bits set on some
    # platforms; the common idiom is `cv2.waitKey(1) & 0xFF == ord('q')`.
    if cv2.waitKey(1)==ord('q'):
        cam.release()
        cv2.destroyAllWindows()
        break
1,984
835
#Given a positive integer num, write a function which returns True if num is a perfect square else False.
class Solution(object):
    """Perfect-square test without using any square-root function."""

    def isPerfectSquare(self, num):
        """Return True iff num is a perfect square.

        Binary-searches the candidate root in [0, num]; O(log num) time.
        """
        lo, hi = 0, num
        while lo <= hi:
            # Midpoint of the remaining candidate range.
            mid = (lo + hi) // 2
            square = mid * mid
            if square == num:
                return True
            # Shrink the range toward the root.
            if square < num:
                lo = mid + 1
            else:
                hi = mid - 1
        return False
999
234
from minescrubber_core import abstract
from . import mainwindow


class UI(abstract.UI):
    """Qt (PySide2) front-end: a thin adapter that forwards the framework's
    abstract UI hooks to the main window widget."""

    def __init__(self):
        self.main_window = mainwindow.MainWidget()

    def init_board(self, board):
        # Build the initial board display.
        self.main_window.init_board(board)

    def refresh(self, board, init_image=True):
        # Redraw the board; init_image controls whether the image is rebuilt.
        self.main_window.refresh(board=board, init_image=init_image)

    def game_over(self, board):
        self.main_window.game_over(board=board)

    def game_solved(self, board):
        self.main_window.game_solved(board=board)

    def run(self):
        # Only shows the window; the Qt event loop itself is started by
        # Controller.post_callback below.
        self.main_window.show()

    @property
    def new_game_signal(self):
        # Qt signal emitted when the user requests a new game.
        return self.main_window.NEW_GAME_SIGNAL

    @property
    def cell_selected_signal(self):
        # Qt signal emitted when a cell is uncovered.
        return self.main_window.CELL_SELECTED_SIGNAL

    @property
    def cell_flagged_signal(self):
        # Qt signal emitted when a cell is flagged.
        return self.main_window.CELL_FLAGGED_SIGNAL

    @property
    def wiring_method_name(self):
        # Signals are wired with <signal>.connect(<slot>) in Qt.
        return 'connect'


class Controller(abstract.Controller):
    """Controller hooks that bracket the framework's setup with Qt
    application lifecycle management."""

    def pre_callback(self):
        # A QApplication must exist before any widget is constructed.
        # NOTE(review): the instance is not stored here; presumably Qt's
        # singleton (retrieved via QApplication.instance() below) keeps it
        # reachable — confirm it is not garbage-collected.
        import sys
        from PySide2 import QtWidgets
        QtWidgets.QApplication(sys.argv)

    def post_callback(self):
        # Enter the Qt event loop, reusing the QApplication created in
        # pre_callback when it is still available.
        import sys
        from PySide2 import QtWidgets
        app = (
            QtWidgets.QApplication.instance()
            or QtWidgets.QApplication(sys.argv)
        )
        sys.exit(app.exec_())


def run():
    """Entry point: run the minescrubber controller with the Qt UI."""
    controller = Controller()
    controller.run(ui_class=UI)
1,432
456
from .power_group import PowerGroup
36
10
""" .. module:: elementtype.py :platform: Linux .. moduleauthor:: Michael Schilonka <michael@schilonka.de> """ import logging class ElementType(object): ''' The ElementType class is an in-memory representation of a graph element type. It provides some functions to operate on all entities of the same type and keeps the description of the structured attributes. ''' def __init__(self, type_svr, type_name, graph_name): self._type_svr = type_svr self._typename = type_name self._graphname = graph_name def get_type_definition(self): ''' Returns a dictionary comprising the structured attributes of this graph element type. Return: The created type declaration (dict). ''' return self._type_svr.get_type_definition(self._graphname, self._typename) def get_type_name(self): ''' Returns the type name of this object. Return: The type name (str). ''' return self._typename def count(self): ''' Returns the number of graph elements associated with this type. Return: Count of related graph elements (int). ''' return self._type_svr.count(self._graphname, self._typename) #TDOD provide search method on these elements class VertexType(ElementType, object): ''' The VertexType. ''' def __init__(self, type_svr, vertex_type, graph_name): vertex_type = 'vertex:' + vertex_type super(VertexType, self).__init__(type_svr, vertex_type, graph_name) def get_vertices(self): pass def remove(self): ''' Removes this element type and all associated elements. ''' self._type_svr.remove(self._graphname, self._typename) class EdgeType(ElementType, object): ''' The EdgeType. ''' def __init__(self, type_svr, edge_type, graph_name): edge_type = 'edge:' + edge_type super(EdgeType, self).__init__(type_svr, edge_type, graph_name) def get_edges(self): pass def remove(self): ''' Removes this element type and all associated elements. ''' self._type_svr.remove(self._graphname, self._typename)
2,386
674
# coding=utf-8 from OTLMOW.OTLModel.Classes.Put import Put from OTLMOW.OTLModel.Classes.PutRelatie import PutRelatie from OTLMOW.GeometrieArtefact.VlakGeometrie import VlakGeometrie # Generated with OTLClassCreator. To modify: extend, do not edit class Infiltratievoorziening(Put, PutRelatie, VlakGeometrie): """Voorziening voor infiltratie van onvervuild water.""" typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Infiltratievoorziening' """De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI.""" def __init__(self): Put.__init__(self) PutRelatie.__init__(self) VlakGeometrie.__init__(self)
672
241
import numpy as np
import scipy.linalg

# get the training data D, sample the Generator with random z to produce r
N = X_train
z = np.random.uniform(-1, 1, (1, z_dim))
r = G.predict_on_batch(z)

# define our distance measure S to be L1
S = lambda n, r: np.sum(np.abs(n - r))

# compute the distances between the reference r and the samples in N.
# BUG FIX: the original called D(n, r), but D names the training data; the
# callable L1 measure defined above is S.
distances = [S(n, r) for n in N]

# find the indices of the k most similar samples and select them from N.
# BUG FIX: np.argpartition returns a full index permutation; only its first
# k entries index the k smallest distances.
nearest_neighbors_index = np.argpartition(distances, k)[:k]
nearest_neighbors_images = N[nearest_neighbors_index]

# generate fake images from the generator
n_fake_images = 5000
z = np.random.uniform(-1, 1, (n_fake_images, z_dim))
x = G.predict_on_batch(z)


def compute_inception_score(x, inception_model, n_fake_images, z_dim):
    """Inception Score: exp( E_x[ KL(p(y|x) || p(y)) ] ).

    x: batch of generated images; inception_model: classifier whose
    predict_on_batch returns per-class probabilities p(y|x).
    """
    # probability of y given x
    p_y_given_x = inception_model.predict_on_batch(x)
    # marginal probability of y
    q_y = np.mean(p_y_given_x, axis=0)
    # BUG FIX: the original line was missing its closing parenthesis.
    kl_terms = p_y_given_x * (np.log(p_y_given_x) - np.log(q_y))
    # KL divergence sums over classes; the score averages over images.
    kl_per_image = np.sum(kl_terms, axis=1)
    return np.exp(np.mean(kl_per_image))


def get_mean_and_covariance(data):
    """Sample mean and covariance of activations; rows are samples."""
    mean = np.mean(data, axis=0)
    # rowvar=False: each column is a variable, each row an observation.
    covariance = np.cov(data, rowvar=False)
    return mean, covariance


def compute_frechet_inception_distance(mean_r, mean_f, cov_r, cov_f):
    """FID = |mu_r - mu_f|^2 + Tr(C_r + C_f - 2*(C_r C_f)^{1/2})."""
    l2_mean = np.sum((mean_r - mean_f) ** 2)
    # BUG FIX: sqrtm returns the matrix square root itself; the original
    # tried to tuple-unpack np.trace of it.
    cov_mean = scipy.linalg.sqrtm(cov_r.dot(cov_f))
    if np.iscomplexobj(cov_mean):
        # Numerical error can introduce tiny imaginary components.
        cov_mean = cov_mean.real
    # BUG FIX: `l2_mu` was undefined; the squared mean distance is l2_mean.
    return l2_mean + np.trace(cov_r) + np.trace(cov_f) - 2 * np.trace(cov_mean)
1,517
622
import os
import boto3
# import subprocess
from subprocess import Popen, PIPE
from time import sleep
import json
import ast
from datetime import datetime, time, timedelta, date
import logging
import logging.handlers
import sys, getopt
import glob
import shutil

# Module-level fallback logger; main() replaces it with a file-rotating one.
logger = logging.getLogger()
logger.setLevel(logging.INFO)

def main():
    """Entry point: parse CLI options, load config, then dispatch on mode."""
    ### Order of tasks
    # # 0 check disks are here, catch output
    # # 1 sync to replica disk, catch output
    # # 2 sync to aws, catch output
    # # 3 compare disks files vs replica, catch oputput
    # # 4 compare disks files vs s3, catch out
    # #
    # Run option
    # -l, --system : only analyze_disks & get_server_metrics , every 5m
    # -a, --analyze : analyze_s3_files & analyze_local_files, every 1 or 3 hours
    # -s, --sync : run_s3_syncs & run_local_syncs, every night
    # -d, --syncdelete : run_s3_syncs & run_local_syncs with delete no cron
    ####
    # exception handling in logger:
    sys.excepthook = handle_exception
    valid_modes = ["system","analyze","sync","syncdelete","synclocal","syncs3","backup","osbackup","init_config"]
    mode = ''
    config = ''
    usage_message = 'naspi -c /path/to/config.json -m <system|analyze|sync|syncdelete|synclocal|syncs3|backup|osbackup|init_config>'
    try:
        opts, args = getopt.getopt(sys.argv[1:],"hm:c:",["mode=","config="])
    # except getopt.GetoptError:
    except Exception as e:
        print(usage_message)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(usage_message)
            sys.exit()
        elif opt in ("-m", "--mode"):
            mode = arg
        elif opt in ("-c", "--config"):
            config = arg
    # # # checking values passed
    if not mode:
        print("Error, mode is mandatory !!")
        print(usage_message)
        sys.exit(2)
    elif not config:
        print("Error, config file is mandatory !!")
        print(usage_message)
        sys.exit(2)
    elif mode not in valid_modes:
        print("Wrong mode selected, correct modes are : {}".format(valid_modes))
        print(usage_message)
        sys.exit(2)
    # logger.info("Context info : ")
    # logger.info(os.getcwd())
    # logger.info(__file__)
    if mode == "init_config":
        # init_config only writes a config skeleton; no logging setup needed.
        output = init_config_file(config)
        sys.exit(0)
    else:
        #### Configuration loading
        disks_list,folder_to_sync_locally,folders_to_sync_s3,configuration = load_configuration(config)
        # Shared as globals because several helpers read them directly.
        global NUMBER_DAYS_RETENTION
        global MIN_DELAY_BETWEEN_SYNCS_SECONDS
        global working_dir
        NUMBER_DAYS_RETENTION = configuration.get('NUMBER_DAYS_RETENTION')
        MIN_DELAY_BETWEEN_SYNCS_SECONDS = configuration.get('MIN_DELAY_BETWEEN_SYNCS_SECONDS')
        working_dir = configuration.get('working_dir')
        home_dir = os.environ['HOME']
        global export_path_cmd
        # Prefix prepended to shell commands so pip-installed CLIs (aws) resolve.
        export_path_cmd = 'export PATH={}/.local/bin:$PATH'.format(home_dir)
        ### Logging setup
        # Change root logger level from WARNING (default) to NOTSET in order for all messages to be delegated.
        logging.getLogger('').setLevel(logging.NOTSET)
        # Add file rotatin handler, with level DEBUG
        rotatingHandler = logging.handlers.RotatingFileHandler(filename='{}/nas_monitor.log'.format(working_dir), maxBytes=1000000, backupCount=5)
        rotatingHandler.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        rotatingHandler.setFormatter(formatter)
        logging.getLogger('').addHandler(rotatingHandler)
        global logger
        logger = logging.getLogger("naspi." + __name__)
        logger.info("")
        logger.info("")
        logger.info("----------------------------------------------------------------------------------------")
        logger.info("----------------------------------------------------------------------------------------")
        logger.info("### Starting Nas Monitor")
        logger.info('Mode is {} and config file is {}'.format(mode,config))
        # Status dict persisted per day; reloaded so runs within a day accumulate.
        output = open_or_init_output_file(working_dir)
        if mode == "backup":
            output = backup_naspi(configuration['backup'],output)
        if mode == "osbackup":
            output = os_backup(configuration['backup'],output)
        if mode == "system":
            output = analyze_disks(disks_list,output)
            output = get_server_metrics(output)
        if mode == "synclocal":
            # Analyze before and after so the delta reflects the sync result.
            output = analyze_local_files(folder_to_sync_locally, output)
            output = run_local_syncs(folder_to_sync_locally,configuration,output)
            output = analyze_local_files(folder_to_sync_locally, output)
            # File stored to s3 once per hour like local sync (TODO can be improved with a dedicated mode and cron)
            res_s3 = write_and_cleanup_output_file_to_s3(output,'archive-fgi')
        if mode == "syncs3":
            output = analyze_s3_files(folders_to_sync_s3, output)
            output = run_s3_syncs(folders_to_sync_s3,configuration,output)
            output = analyze_s3_files(folders_to_sync_s3, output)
        if mode == "sync":
            output = run_s3_syncs(folders_to_sync_s3,configuration,output)
            output = run_local_syncs(folder_to_sync_locally,configuration,output)
        if mode == "analyze" or mode == "sync":
            output = analyze_s3_files(folders_to_sync_s3, output)
            output = analyze_local_files(folder_to_sync_locally, output)
        result = write_and_cleanup_output_file(output,configuration)
        # res_s3 = write_and_cleanup_output_file_to_s3(output,'archive-fgi')
        logger.info(json.dumps(output))

####
#### function defs
####

def handle_exception(exc_type, exc_value, exc_traceback):
    """sys.excepthook: route uncaught exceptions to the logger (but let Ctrl-C through)."""
    if issubclass(exc_type, KeyboardInterrupt):
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
        return
    logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))

def load_configuration(conf_file):
    """Read the JSON config file and return its four top-level sections."""
    try:
        f = open(conf_file, "r")
        dict_conf = json.loads(f.read())
        f.close()
        return( dict_conf['disks_list'], dict_conf['folder_to_sync_locally'], dict_conf['folders_to_sync_s3'], dict_conf['naspi_configuration'] )
    except FileNotFoundError as e:
        print("Conf file not found, provide a file named {}".format(conf_file))
        raise(e)
        # sys.exit(2)

def today_time():
    """Current local timestamp as 'YYYY-MM-DD HH:MM:SS'."""
    today = datetime.today()
    d1 = today.strftime("%Y-%m-%d %H:%M:%S")
    return(d1)

def today_date():
    """Current local date as 'YYYY-MM-DD'."""
    today = datetime.today()
    d1 = today.strftime("%Y-%m-%d")
    return(d1)

def date_diff_in_seconds(dt2, dt1):
    """Whole seconds between two datetimes (dt2 - dt1)."""
    timediff = dt2 - dt1
    return timediff.days * 24 * 3600 + timediff.seconds

def run_shell_command(command):
    """Run `command` through the shell, poll until done, return (retcode, text).

    On failure the returned text is stderr; on success it is stdout.
    """
    message = ""
    logger.info("### Running {}".format(command))
    df_out = Popen(command, shell=True, stdout=PIPE, stderr=PIPE )
    sleep(.2)
    retcode = df_out.poll()
    while retcode is None:
        # Process running
        # logger.info("### Process not finished, waiting...")
        sleep(10)
        retcode = df_out.poll()
    # Here, `proc` has finished with return code `retcode`
    if retcode != 0:
        """Error handling."""
        logger.info("### Error !")
        message = df_out.stderr.read().decode("utf-8")
        logger.info(retcode)
        logger.info(message)
        return(retcode,message)
    message = df_out.stdout.read().decode("utf-8")
    logger.info(retcode)
    logger.info(message)
    return(retcode,message)

def open_or_init_output_file(working_dir):
    """Load today's status JSON, or build an empty status skeleton if absent."""
    today = today_date()
    try:
        f = open("{}/naspi_status_{}.json".format(working_dir,today), "r")
        dict_output = json.loads(f.read())
        f.close()
    except FileNotFoundError:
        logger.info("File for today does not exist, initializing it")
        dict_output = {}
        dict_output['disks'] = {}
        dict_output['disks']['disk-list'] = []
        dict_output['local_sync'] = {}
        dict_output['local_sync']['success'] = True
        dict_output['s3_sync'] = {}
        dict_output['s3_sync']['success'] = True
        dict_output['server'] = {}
    return(dict_output)

def init_config_file(file_name):
    """Write an empty config skeleton to `file_name`; refuse to overwrite."""
    print("initializing config file {}".format(file_name))
    if os.path.exists(file_name):
        print("Error, config file {} already exists !!".format(file_name))
        sys.exit(2)
    else:
        dict_conf = {}
        dict_conf['disks_list'] = []
        dict_conf['folder_to_sync_locally'] = []
        dict_conf['folders_to_sync_s3'] = []
        dict_conf['naspi_configuration'] = {}
        dict_conf['naspi_configuration']['working_dir'] = ""
        dict_conf['naspi_configuration']['NUMBER_DAYS_RETENTION'] = 7
        dict_conf['naspi_configuration']['MIN_DELAY_BETWEEN_SYNCS_SECONDS'] = 14400
        dict_conf['naspi_configuration']['backup'] = {}
        dict_conf['naspi_configuration']['backup']['files_to_backup'] = []
        dict_conf['naspi_configuration']['backup']['backup_location'] = ""
        dict_conf['naspi_configuration']['backup']['os_backup_location'] = ""
        f = open("{}".format(file_name), "w")
        f.write(json.dumps(dict_conf,indent=4))
        f.close()
    return("ok")

def write_and_cleanup_output_file_to_s3(output,bucket):
    """Upload today's status dict as JSON to s3://<bucket>/status/."""
    s3_client = boto3.client('s3',region_name='eu-west-1')
    today = today_date()
    response = s3_client.put_object( Body=json.dumps(output), Bucket=bucket, Key="status/naspi_status_{}.json".format(today) )
    return(response)

def write_and_cleanup_output_file(output,configuration):
    """Persist the status dict to disk and prune files older than the retention window."""
    NUMBER_DAYS_RETENTION = configuration.get('NUMBER_DAYS_RETENTION')
    working_dir = configuration.get('working_dir')
    today = today_date()
    f = open("{}/naspi_status_{}.json".format(working_dir,today), "w")
    f.write(json.dumps(output,indent=4))
    f.close()
    # Lexicographic sort == chronological sort thanks to the YYYY-MM-DD suffix.
    existing_output_files = glob.glob('{}/naspi_status_*.json'.format(working_dir))
    existing_output_files.sort()
    for out_file in existing_output_files:
        if out_file not in existing_output_files[-NUMBER_DAYS_RETENTION:]:
            logger.info("Deleting {}".format(out_file))
            os.remove(out_file)
    return("done")

def analyze_disks(disks_list,output):
    """Check every expected disk appears in `df` output; record usage per disk."""
    output['disks']['all_disks_ok'] = True
    output['disks']['disk-list'] = []
    retcode,message = run_shell_command('df -kh | tail -n +2')
    #logger.info(message)
    all_disks_present = True
    for disk in disks_list:
        disk_output = {}
        if disk in message:
            logger.info("### disk {} is here".format(disk))
            # The 4 characters preceding the mount point in df output are the Use% column.
            usage = message.split(disk)[0][-4:]
            logger.info("### usage : {}".format(usage))
            disk_output['name'] = disk
            disk_output['occupied_%'] = usage
            disk_output['present'] = True
            output['disks']['disk-list'].append(disk_output)
        else:
            logger.info("### disk {} not here".format(disk))
            all_disks_present = False
            disk_output['name'] = disk
            disk_output['occupied_%'] = "NA"
            disk_output['present'] = False
            output['disks']['disk-list'].append(disk_output)
    if not all_disks_present:
        logger.info("### some disks are missing")
        output['disks']['all_disks_ok'] = False
    output['disks']['last_run'] = today_time()
    return(output)

def acquire_sync_lock(output,local_or_s3,configuration):
    """Best-effort file-based lock: refuse to run if a recent sync is still flagged as locked.

    Returns (can_run, output). A stale lock older than
    MIN_DELAY_BETWEEN_SYNCS_SECONDS is overridden.
    """
    # Make sure only one sync process runs at a time
    can_run = True
    MIN_DELAY_BETWEEN_SYNCS_SECONDS = configuration.get('MIN_DELAY_BETWEEN_SYNCS_SECONDS')
    if 'last_started' in output[local_or_s3]:
        started_time = datetime.strptime(output[local_or_s3]['last_started'], '%Y-%m-%d %H:%M:%S')
    else:
        # Arbitrary far-past default so a first run always acquires the lock.
        started_time = datetime.strptime('2020-12-25 12:00:00', '%Y-%m-%d %H:%M:%S')
    now_time = datetime.now()
    logger.info(" %d seconds from previous run" %(date_diff_in_seconds(now_time, started_time)))
    if 'locked' in output[local_or_s3] and output[local_or_s3]['locked'] == True and date_diff_in_seconds(now_time, started_time) < MIN_DELAY_BETWEEN_SYNCS_SECONDS:
        logger.info("Can't run sync as another process might be running")
        can_run = False
    else:
        logger.info("Acquiring lock for {}".format(local_or_s3))
        output[local_or_s3]['locked'] = True
        output[local_or_s3]['last_started'] = today_time()
        logger.info(output)
        # Acquire lock and write it to disk:
        result = write_and_cleanup_output_file(output,configuration)
    return(can_run,output)

def run_s3_syncs(folders_to_sync_s3,configuration, output):
    """`aws s3 sync` each configured folder to its bucket (DEEP_ARCHIVE storage)."""
    can_run,output = acquire_sync_lock(output, 's3_sync',configuration)
    if can_run:
        success = True
        for folder in folders_to_sync_s3:
            exclusions_flags = ''
            if 'exclude' in folder:
                for exclusion in folder['exclude']:
                    exclusions_flags = exclusions_flags + ' --exclude "{}/*" '.format(exclusion)
            # command = 'aws s3 sync {} {} {} --storage-class DEEP_ARCHIVE --dryrun'.format(folder['source_folder'],folder['dest_folder'],exclusions_flags)
            command = 'aws s3 sync {} {} {} --storage-class DEEP_ARCHIVE --only-show-errors'.format(folder['source_folder'],folder['dest_folder'],exclusions_flags)
            ret,msg = run_shell_command('{}; {}'.format(export_path_cmd,command))
            if ret != 0:
                success = False
        output['s3_sync']['success'] = success
        output['s3_sync']['last_run'] = today_time()
        output['s3_sync']['locked'] = False
    else:
        logger.info("/!\ Cant run the sync, there is a sync process ongoing")
    return(output)

def count_files_in_dir(folder,exclude_list):
    """Recursively count files under `folder`, skipping directory names in `exclude_list`."""
    exclude_directories = set(exclude_list) #directory (only names) want to exclude
    total_file = 0
    for dname, dirs, files in os.walk(folder): #this loop though directies recursively
        dirs[:] = [d for d in dirs if d not in exclude_directories] # exclude directory if in exclude list
        total_file += len(files)
    logger.info("Files in {} : {}".format(folder,total_file))
    return(total_file)

def analyze_s3_files(folders_to_sync_s3, output):
    """Compare local file counts against S3 object counts for each synced folder."""
    output['s3_sync']['files_source'] = 0
    output['s3_sync']['files_dest'] = 0
    output['s3_sync']['folders'] = []
    for folder in folders_to_sync_s3:
        one_folder = {}
        one_folder['source_folder'] = folder['source_folder']
        # Get local files count
        if 'exclude' in folder:
            exclude_directories = set(folder['exclude']) #directory (only names) want to exclude
        else:
            exclude_directories = []
        total_file = 0
        for dname, dirs, files in os.walk(folder['source_folder']): #this loop though directies recursively
            dirs[:] = [d for d in dirs if d not in exclude_directories] # exclude directory if in exclude list
            # print(len(files))
            total_file += len(files)
        logger.info("Files in {} : {}".format(folder['source_folder'],total_file))
        one_folder['source_count'] = total_file
        output['s3_sync']['files_source'] += total_file
        # Get s3 files count
        # Parses the "Total Objects: N" summary line from `aws s3 ls --summarize`.
        ret,msg = run_shell_command('{}; aws s3 ls {} --recursive --summarize | grep "Total Objects"'.format(export_path_cmd,folder['dest_folder']))
        output['s3_sync']['files_dest'] += int(msg.split(': ')[1])
        one_folder['dest_folder'] = folder['dest_folder']
        one_folder['dest_count'] = int(msg.split(': ')[1])
        output['s3_sync']['folders'].append(one_folder)
    output['s3_sync']['files_delta'] = output['s3_sync']['files_source'] - output['s3_sync']['files_dest']
    logger.info("Analyze s3 file output : {}".format(json.dumps(output)))
    return(output)

def run_local_syncs(folder_to_sync_locally,configuration, output):
    """rsync each configured folder to its local replica destination."""
    # rsync -anv dir1 dir2 # n = dryrun, v = verbose
    # will create dir2/dir1
    can_run,output = acquire_sync_lock(output, 'local_sync', configuration)
    if can_run:
        success = True
        for folder in folder_to_sync_locally:
            delete = ""
            if folder['delete']:
                delete = "--delete"
            ret,msg = run_shell_command('mkdir -p {}'.format(folder['dest_folder']))
            ret,msg = run_shell_command('rsync -aq {} {} {}'.format(folder['source_folder'],folder['dest_folder'],delete))
            if ret != 0:
                success = False
        output['local_sync']['success'] = success
        output['local_sync']['last_run'] = today_time()
        output['local_sync']['locked'] = False
    else:
        logger.info("/!\ Cant run the sync, there is a sync process ongoing")
    return(output)

def analyze_local_files(folder_to_sync_locally, output):
    """Compare file counts between each source folder and its local replica."""
    output['local_sync']['files_source'] = 0
    output['local_sync']['files_dest'] = 0
    output['local_sync']['folders'] = []
    for folder in folder_to_sync_locally:
        one_folder = {}
        one_folder['source_folder'] = folder['source_folder']
        src_count = count_files_in_dir(folder['source_folder'],[''])
        output['local_sync']['files_source'] += src_count
        one_folder['source_count'] = src_count
        # rsync without a trailing slash creates dest_folder/<basename of source>.
        dest_folder = "{}/{}".format(folder['dest_folder'],folder['source_folder'].split("/")[-1])
        one_folder['dest_folder'] = dest_folder
        dest_count = count_files_in_dir(dest_folder,[''])
        output['local_sync']['files_dest'] += dest_count
        one_folder['dest_count'] = dest_count
        output['local_sync']['folders'].append(one_folder)
    output['local_sync']['files_delta'] = output['local_sync']['files_source'] - output['local_sync']['files_dest']
    logger.info("Analyze local file output : {}".format(json.dumps(output)))
    return(output)

def get_server_metrics(output):
    """Collect CPU, RAM and temperature readings via shell one-liners (Raspberry Pi)."""
    # get cpu usage
    ret,msg = run_shell_command('top -bn 1 | grep Cpu | head -c 14 | tail -c 5')
    output['server']['cpu_%'] = msg
    ret,msg = run_shell_command('free -m | grep Mem | head -c 32 | tail -c 5')
    output['server']['ram_Mo'] = msg
    # vcgencmd is Raspberry-Pi-specific firmware tooling.
    ret,msg = run_shell_command('vcgencmd measure_temp | head -c 11 | tail -c 6')
    output['server']['temp_c'] = msg
    output['server']['last_run'] = today_time()
    return(output)

def backup_naspi(backup,output):
    """rsync the configured files/dirs into a dated backup dir; keep the last 10 backups."""
    backup_location = backup.get('backup_location')
    backup_dir = "{}{}".format(backup_location,today_date())
    ret,msg = run_shell_command('mkdir -p {}'.format(backup_dir))
    files_to_backup = backup.get("files_to_backup")
    for entry in files_to_backup:
        if os.path.isdir(entry):
            # -R preserves the full relative path inside the backup dir.
            ret,msg = run_shell_command('rsync -aqR {} {}'.format(entry,backup_dir))
        else:
            subdir = entry.rsplit('/',1)[0]
            ret,msg = run_shell_command('mkdir -p {}{}'.format(backup_dir,subdir))
            ret,msg = run_shell_command('rsync -aq {} {}{}'.format(entry,backup_dir,entry))
    # old bkp cleanup
    existing_backup_dir = glob.glob('{}/*'.format(backup_location))
    existing_backup_dir.sort()
    for out_file in existing_backup_dir:
        if out_file not in existing_backup_dir[-10:]:
            print("Deleting {}".format(out_file))
            shutil.rmtree(out_file,ignore_errors=True)
    return(output)

def os_backup(backup,output):
    """Image the SD card with dd, shrink it with PiShrink, keep the last 4 images."""
    os_backup_location = backup.get('os_backup_location')
    backup_name = "osbkp-{}.img".format(today_date())
    # sudo dd if=/dev/mmcblk0 of=/disks/Elements/os_bkp/osbkp18082021.img bs=1M
    # sudo ./pishrink.sh -z osbkp18082021.img
    ret,msg = run_shell_command('sudo dd if=/dev/mmcblk0 of={}/{} bs=1M'.format(os_backup_location,backup_name))
    if not os.path.exists("{}/pishrink.sh".format(working_dir)):
        # Fetch PiShrink on first use.
        ret,msg = run_shell_command('wget https://raw.githubusercontent.com/Drewsif/PiShrink/master/pishrink.sh -P {}'.format(working_dir))
        # wget https://raw.githubusercontent.com/Drewsif/PiShrink/master/pishrink.sh
        ret,msg = run_shell_command('sudo chmod +x {}/pishrink.sh'.format(working_dir))
        # sudo chmod +x pishrink.sh
    ret,msg = run_shell_command('sudo bash {}/pishrink.sh -z {}/{}'.format(working_dir,os_backup_location,backup_name))
    ret,msg = run_shell_command('sudo chown pi:pi *.img.gz')
    # old bkp cleanup
    existing_backup_dir = glob.glob('{}/*'.format(os_backup_location))
    existing_backup_dir.sort()
    for out_file in existing_backup_dir:
        if out_file not in existing_backup_dir[-4:]:
            print("Deleting {}".format(out_file))
            shutil.rmtree(out_file,ignore_errors=True)
    return(output)

if __name__=='__main__':
    main()
    # main(sys.argv[1:])
20,923
6,835
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date    : 2020-10-21
# @Author  : Emily Wenger (ewenger@uchicago.edu)

import time

import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from keras.utils import Progbar


class FawkesMaskGeneration:
    """Generates Fawkes image "cloaks": small perturbations that push an image's
    feature-space representation away from the original across one or more
    bottleneck (feature-extractor) models.

    BUGFIX in this revision: the two uses of the removed alias ``np.bool``
    (deleted in NumPy 1.24, raising AttributeError) are replaced with plain
    ``bool``-dtype arrays; behavior is otherwise unchanged.
    """

    # if the attack is trying to mimic a target image or a neuron vector
    MIMIC_IMG = True
    # number of iterations to perform gradient descent
    MAX_ITERATIONS = 10000
    # larger values converge faster to less accurate results
    LEARNING_RATE = 1e-2
    # the initial constant c to pick as a first guess
    INITIAL_CONST = 1
    # pixel intensity range
    INTENSITY_RANGE = 'imagenet'
    # threshold for distance
    L_THRESHOLD = 0.03
    # whether keep the final result or the best result
    KEEP_FINAL = False
    # max_val of image
    MAX_VAL = 255
    MAXIMIZE = False
    IMAGE_SHAPE = (224, 224, 3)
    RATIO = 1.0
    LIMIT_DIST = False
    LOSS_TYPE = 'features'  # use features (original Fawkes) or gradients (Witches Brew) to run Fawkes?

    def __init__(self, bottleneck_model_ls, mimic_img=MIMIC_IMG, batch_size=1, learning_rate=LEARNING_RATE,
                 max_iterations=MAX_ITERATIONS, initial_const=INITIAL_CONST, intensity_range=INTENSITY_RANGE,
                 l_threshold=L_THRESHOLD, max_val=MAX_VAL, keep_final=KEEP_FINAL, maximize=MAXIMIZE,
                 image_shape=IMAGE_SHAPE, verbose=1, ratio=RATIO, limit_dist=LIMIT_DIST, loss_method=LOSS_TYPE):
        assert intensity_range in {'raw', 'imagenet', 'inception', 'mnist'}

        # constant used for tanh transformation to avoid corner cases
        self.it = 0
        self.tanh_constant = 2 - 1e-6
        self.MIMIC_IMG = mimic_img
        self.LEARNING_RATE = learning_rate
        self.MAX_ITERATIONS = max_iterations
        self.initial_const = initial_const
        self.batch_size = batch_size
        self.intensity_range = intensity_range
        self.l_threshold = l_threshold
        self.max_val = max_val
        self.keep_final = keep_final
        self.verbose = verbose
        self.maximize = maximize
        self.learning_rate = learning_rate
        self.ratio = ratio
        self.limit_dist = limit_dist
        self.single_shape = list(image_shape)
        self.bottleneck_models = bottleneck_model_ls
        self.loss_method = loss_method
        self.input_shape = tuple([self.batch_size] + self.single_shape)
        self.bottleneck_shape = tuple([self.batch_size] + self.single_shape)

        # the variable we're going to optimize over
        self.modifier = tf.Variable(np.ones(self.input_shape, dtype=np.float32) * 1e-6)
        self.const = tf.Variable(np.ones(batch_size) * self.initial_const, dtype=np.float32)
        # BUGFIX: np.bool was removed from NumPy; build a bool-dtype array directly.
        self.mask = tf.Variable(np.ones(batch_size, dtype=bool))

    @staticmethod
    def resize_tensor(input_tensor, model_input_shape):
        """Resize a batch of images to the model's spatial input size (no-op if it matches)."""
        if input_tensor.shape[1:] == model_input_shape or model_input_shape[1] is None:
            return input_tensor
        resized_tensor = tf.image.resize(input_tensor, model_input_shape[:2])
        return resized_tensor

    def input_space_process(self, img):
        """Map raw pixels to the model's input space (imagenet: subtract BGR channel means)."""
        if self.intensity_range == 'imagenet':
            mean = np.repeat([[[[103.939, 116.779, 123.68]]]], self.batch_size, axis=0)
            raw_img = (img - mean)
        else:
            raw_img = img
        return raw_img

    def reverse_input_space_process(self, img):
        """Inverse of input_space_process (imagenet: add the channel means back)."""
        if self.intensity_range == 'imagenet':
            mean = np.repeat([[[[103.939, 116.779, 123.68]]]], self.batch_size, axis=0)
            raw_img = (img + mean)
        else:
            raw_img = img
        return raw_img

    def clipping(self, imgs):
        """Clip images to [0, max_val] in raw pixel space, then re-normalize."""
        imgs = self.reverse_input_space_process(imgs)
        imgs = np.clip(imgs, 0, self.max_val)
        imgs = self.input_space_process(imgs)
        return imgs

    def calc_dissim(self, source_raw, source_mod_raw):
        """Input-space dissimilarity term — currently disabled (returns zeros)."""
        return 0.0, 0.0, 0.0
        # msssim_split = tf.image.ssim(source_raw, source_mod_raw, max_val=255.0)
        # dist_raw = (1.0 - tf.stack(msssim_split)) / 2.0
        # dist = tf.maximum(dist_raw - self.l_threshold, 0.0)
        # # dist_raw_sum = tf.reduce_sum(tf.where(self.mask, dist_raw, tf.zeros_like(dist_raw)))
        # dist_raw_sum = tf.reduce_sum(dist_raw)
        # # dist_sum = tf.reduce_sum(tf.where(self.mask, dist, tf.zeros_like(dist)))
        # dist_sum = tf.reduce_sum(dist)
        # return dist, dist_sum, dist_raw_sum

    def calc_bottlesim(self, tape, source_raw, target_raw, source_filtered, original_raw):
        """ original Fawkes loss function.

        Sums, over all bottleneck models, the scaled squared feature-space
        distance of both the perturbed and the blur-filtered perturbed image
        from the original. Returns (per-sample loss, scalar sum).
        """
        bottlesim = 0.0
        bottlesim_sum = 0.0
        # make sure everything is the right size.
        model_input_shape = self.single_shape
        cur_aimg_input = self.resize_tensor(source_raw, model_input_shape)
        cur_source_filtered = self.resize_tensor(source_filtered, model_input_shape)
        # cur_timg_input = self.resize_tensor(target_raw, model_input_shape)
        for bottleneck_model in self.bottleneck_models:
            if tape is not None:
                try:
                    tape.watch(bottleneck_model.variables)
                except AttributeError:
                    # some wrappers keep the Keras model one level down
                    tape.watch(bottleneck_model.model.variables)
            # get the respective feature space reprs.
            bottleneck_a = bottleneck_model(cur_aimg_input)
            bottleneck_filter = bottleneck_model(cur_source_filtered)
            bottleneck_s = bottleneck_model(original_raw)
            # compute the differences.
            bottleneck_diff = bottleneck_a - bottleneck_s
            bottleneck_diff_filter = bottleneck_filter - bottleneck_s
            # get scale factor.
            scale_factor = tf.sqrt(tf.reduce_sum(tf.square(bottleneck_s), axis=1))
            scale_factor_filter = tf.sqrt(tf.reduce_sum(tf.square(bottleneck_diff_filter), axis=1))
            # compute the loss
            cur_bottlesim = tf.reduce_sum(tf.square(bottleneck_diff), axis=1)
            cur_bottlesim_filter = tf.reduce_sum(tf.square(bottleneck_diff_filter), axis=1)
            cur_bottlesim = cur_bottlesim / scale_factor
            cur_bottlesim_filter = cur_bottlesim_filter / scale_factor_filter
            bottlesim += cur_bottlesim + cur_bottlesim_filter
            bottlesim_sum += tf.reduce_sum(cur_bottlesim) + tf.reduce_sum(cur_bottlesim_filter)
        return bottlesim, bottlesim_sum

    def compute_feature_loss(self, tape, aimg_raw, simg_raw, aimg_input, timg_input, simg_input, aimg_filtered):
        """ Compute input space + feature space loss.

        Returns (loss_sum, input_dist, feature_dist, input_dist_sum,
        input_dist_raw_sum, feature_dist_sum); the input-space terms are
        currently zeroed because calc_dissim is disabled.
        """
        input_space_loss, input_space_loss_sum, input_space_loss_raw_sum = self.calc_dissim(aimg_raw, simg_raw)
        feature_space_loss, feature_space_loss_sum = self.calc_bottlesim(tape, aimg_input, timg_input, aimg_filtered,
                                                                         simg_input)
        if self.maximize:
            loss = self.const * input_space_loss - feature_space_loss
        else:
            if self.it < self.MAX_ITERATIONS:
                loss = self.const * input_space_loss + 1000 * feature_space_loss  # - feature_space_loss_orig
            else:
                loss = self.const * 100 * input_space_loss + feature_space_loss
        # loss_sum = tf.reduce_sum(tf.where(self.mask, loss, tf.zeros_like(loss)))
        loss_sum = tf.reduce_sum(loss)
        # return loss_sum, input_space_loss, feature_space_loss, input_space_loss_sum, input_space_loss_raw_sum, feature_space_loss_sum
        return loss_sum, 0, feature_space_loss, 0, 0, feature_space_loss_sum

    def attack(self, source_imgs, target_imgs, weights=None):
        """ Main function that runs cloak generation.

        Splits the inputs into batches of `batch_size` and runs attack_batch
        on each; returns the stacked cloaked images.
        """
        if weights is None:
            weights = np.ones([source_imgs.shape[0]] + list(self.bottleneck_shape[1:]))

        assert weights.shape[1:] == self.bottleneck_shape[1:]
        assert source_imgs.shape[1:] == self.input_shape[1:]
        assert source_imgs.shape[0] == weights.shape[0]
        if self.MIMIC_IMG:
            assert target_imgs.shape[1:] == self.input_shape[1:]
            assert source_imgs.shape[0] == target_imgs.shape[0]
        else:
            assert target_imgs.shape[1:] == self.bottleneck_shape[1:]
            assert source_imgs.shape[0] == target_imgs.shape[0]

        start_time = time.time()
        adv_imgs = []
        print('%d batches in total' % int(np.ceil(len(source_imgs) / self.batch_size)))
        for idx in range(0, len(source_imgs), self.batch_size):
            # print('processing image %d at %s' % (idx + 1, datetime.datetime.now()))
            adv_img = self.attack_batch(source_imgs[idx:idx + self.batch_size],
                                        target_imgs[idx:idx + self.batch_size])
            adv_imgs.extend(adv_img)

        elapsed_time = time.time() - start_time
        print('protection cost %f s' % elapsed_time)
        return np.array(adv_imgs)

    def attack_batch(self, source_imgs, target_imgs):
        """ TF2 method to generate the cloak.

        Runs signed-gradient descent on an additive modifier, keeping the best
        (lowest/highest feature distance, depending on `maximize`) blurred
        version seen per image.
        """
        # preprocess images.
        global progressbar
        nb_imgs = source_imgs.shape[0]
        # pad the mask out to batch_size when the last batch is short
        mask = [True] * nb_imgs + [False] * (self.batch_size - nb_imgs)
        # BUGFIX: np.bool removed from NumPy; use the builtin bool dtype.
        self.mask = np.array(mask, dtype=bool)

        LR = self.learning_rate

        # make sure source/target images are an array
        source_imgs = np.array(source_imgs, dtype=np.float32)
        target_imgs = np.array(target_imgs, dtype=np.float32)

        # metrics to test
        best_bottlesim = [0] * nb_imgs if self.maximize else [np.inf] * nb_imgs
        best_adv = np.zeros(source_imgs.shape)
        total_distance = [0] * nb_imgs
        finished_idx = set()

        # make the optimizer
        optimizer = tf.keras.optimizers.Adam(self.learning_rate)
        # optimizer = tf.keras.optimizers.Adadelta(self.learning_rate)

        # get the modifier
        self.modifier = tf.Variable(np.ones(self.input_shape, dtype=np.float32) * 1e-4)
        # self.modifier = tf.Variable(np.random.uniform(-8.0, 8.0, self.input_shape), dtype=tf.float32)

        if self.verbose == 0:
            progressbar = Progbar(self.MAX_ITERATIONS, width=30, verbose=1)

        # watch relevant variables.
        simg_tanh = tf.Variable(source_imgs, dtype=np.float32)
        timg_tanh = tf.Variable(target_imgs, dtype=np.float32)
        # simg_tanh = self.reverse_input_space_process(simg_tanh)
        # timg_tanh = self.reverse_input_space_process(timg_tanh)

        # run the attack
        self.it = 0
        below_thresh = False
        while self.it < self.MAX_ITERATIONS:
            self.it += 1
            with tf.GradientTape(persistent=True) as tape:
                tape.watch(self.modifier)
                tape.watch(simg_tanh)
                tape.watch(timg_tanh)

                aimg_raw = simg_tanh + self.modifier
                # blurred variants: strongly filtered for the loss, lightly for the output
                aimg_filtered_raw = simg_tanh + tfa.image.gaussian_filter2d(self.modifier, [7, 7], 3.0)
                final_filtered_raw = simg_tanh + tfa.image.gaussian_filter2d(self.modifier, [1, 1], 2.0)
                simg_raw = simg_tanh
                timg_raw = timg_tanh

                # Convert further preprocess for bottleneck
                aimg_input = self.input_space_process(aimg_raw)
                aimg_filtered = self.input_space_process(aimg_filtered_raw)
                timg_input = self.input_space_process(timg_raw)
                simg_input = self.input_space_process(simg_raw)
                # aimg_input = aimg_raw
                # timg_input = timg_raw
                # simg_input = simg_raw

                # get the feature space loss.
                loss, input_dist, internal_dist, input_dist_sum, input_dist_raw_sum, internal_dist_sum = self.compute_feature_loss(
                    tape, aimg_raw, simg_raw, aimg_input, timg_input, simg_input, aimg_filtered)

            # compute gradients
            grad = tape.gradient(loss, [self.modifier])
            # grad[0] = grad[0] * 1e11
            # fixed-size signed step instead of the Adam update
            grad[0] = tf.sign(grad[0]) * 0.6375
            # optimizer.apply_gradients(zip(grad, [self.modifier]))
            self.modifier = self.modifier - grad[0]
            self.modifier = tf.clip_by_value(self.modifier, -12.0, 12.0)

            # keep the best lightly-filtered image seen so far for each sample
            for e, (feature_d, mod_img) in enumerate(zip(internal_dist, final_filtered_raw)):
                if e >= nb_imgs:
                    break
                if (feature_d < best_bottlesim[e] and (not self.maximize)) or (
                        feature_d > best_bottlesim[e] and self.maximize):
                    # print('found improvement')
                    best_bottlesim[e] = feature_d
                    best_adv[e] = mod_img

            # compute whether or not your perturbation is too big.
            # thresh_over = input_dist_sum / self.batch_size / self.l_threshold * 100
            # if self.it != 0 and (self.it % (self.MAX_ITERATIONS // 3) == 0):
            #     LR = LR * 0.8  # np.array([LR * 0.8])
            #     optimizer.learning_rate = LR
            #     print("LR: {}".format(LR))

            # print iteration result
            # if self.it % 10 == 0:
            if self.verbose == 1:
                thresh_over = input_dist_sum / self.batch_size / self.l_threshold * 100
                # import pdb
                # pdb.set_trace()
                print(
                    "ITER {:0.0f}  Total Loss: {:.4f} perturb: {:0.4f} ({:0.4f} over, {:0.4f} raw); sim: {:.4f}".format(
                        self.it, loss, input_dist_sum, thresh_over, input_dist_raw_sum, internal_dist_sum / nb_imgs))
            if self.verbose == 0:
                progressbar.update(self.it)

        # DONE: print results
        if self.verbose == 1:
            thresh_over = input_dist_sum / self.batch_size / self.l_threshold * 100
            print(
                "END after {} iterations: Total Loss: {} perturb: {:0.4f} ({:0.4f} over, {:0.4f} raw); sim: {}".format(
                    self.it, loss, input_dist_sum, thresh_over, input_dist_raw_sum, internal_dist_sum / nb_imgs))
        print("\n")
        best_adv = self.clipping(best_adv[:nb_imgs])
        return best_adv
14,328
4,849
from abc import ABCMeta, abstractmethod
import database
from . import w_l


class IncomingClass(metaclass=ABCMeta):
    """Base class for incoming provenance items.

    Subclasses must set self.graph and self.named_graph_uri (e.g. in valid()
    / determine_uri()) before stored() can persist the item.
    """

    @abstractmethod
    def __init__(self, request):
        self.request = request
        self.graph = None
        self.uri = None
        self.named_graph_uri = None
        self.error_messages = None

    @abstractmethod
    def valid(self):
        """Validate the incoming request; implemented by subclasses."""
        pass

    @abstractmethod
    def determine_uri(self):
        """Work out this item's URI; implemented by subclasses."""
        pass

    def stored(self):
        """ Add an item to PROMS.

        Returns True on success; on failure returns False with
        self.error_messages set to a list of messages.
        """
        if self.graph is None or self.named_graph_uri is None:
            # BUGFIX: corrected the 'named_grapoh_uri' typo in the message.
            msg = 'The graph and the named_graph_uri properties of this class instance must not be None when trying ' \
                  'to store this instance in the provenance DB.'
            # BUGFIX: error_messages is consistently a list (the except branch
            # below already used one; this branch previously stored a bare str).
            self.error_messages = [msg]
            return False
        try:
            w_l(str(self.graph))
            w_l(str(self.named_graph_uri))
            database.insert(self.graph, self.named_graph_uri)
            return True
        except Exception:
            self.error_messages = ['Could not connect to the provenance database']
            return False
1,124
303
from typing import Callable

# Marker attributes used to tag callables for the scheduler.
_BLOCKING_ATTR = "_ow_blocking"
_NONBLOCKING_ATTR = "_ow_nonblocking"


def blocking(func: Callable):
    """Decorator: tag ``func`` as blocking and return it unchanged."""
    setattr(func, _BLOCKING_ATTR, True)
    return func


def is_blocking(func: Callable):
    """True iff ``func`` was decorated with @blocking."""
    flag = getattr(func, _BLOCKING_ATTR, False)
    return flag is True


def nonblocking(func: Callable) -> Callable:
    """Decorator: tag ``func`` as non-blocking and return it unchanged."""
    setattr(func, _NONBLOCKING_ATTR, True)
    return func


def is_nonblocking(func: Callable) -> bool:
    """True iff ``func`` was decorated with @nonblocking."""
    flag = getattr(func, _NONBLOCKING_ATTR, False)
    return flag is True
418
143
"""Packaging configuration for the ramCOH distribution."""
import setuptools

# Ship every package in the tree except the examples directory.
_packages = setuptools.find_packages(exclude=['examples'])

setuptools.setup(
    name='ramCOH',
    version='0.1',
    description='...',
    author='Thomas van Gerve',
    packages=_packages,
    # package_dir= {'' : 'petroPy'},
    # Bundle the static data files with the package.
    package_data={'ramCOH': ['static/*']},
    install_requires=[
        'pandas',
        'matplotlib',
        'numpy',
        'scipy',
        'csaps',
    ],
)
404
149
from chess_game._board import make_board
from chess_game.chess_game import ChessGame
from chess_game.play_game import get_user_input, game_event_loop

if __name__ == "__main__":
    # Build the initial board, show the rules, ask for player names and
    # hand control to the game event loop.
    game_board = make_board()
    print('Chess')
    print(' : Rules')
    # FIX: the original used adjacent literals 'piece''s which concatenated
    # to "pieces" (lost apostrophe), and misspelled "through".
    print(" : input - piece's position x,y, second x,y = destination")
    print(" : x = row number 1 through 8")
    print(" : y = column number 1 through 8")
    player1_name = get_user_input(' : Enter player one name', is_move=False)
    player2_name = get_user_input(' : Enter player two name', is_move=False)
    print('------------------------------------------------')
    chess_game = ChessGame(game_board, player1_name, player2_name)
    game_event_loop(chess_game)
798
271
""" Given a binary tree, find its maximum depth. The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node. Note: A leaf is a node with no children. Example: Given binary tree [3,9,20,null,null,15,7], 3 / \ 9 20 / \ 15 7 return its depth = 3. """ import Math class Solution: def findDeep(self,root): if not root: return 0 if not root.left or root.right: return 1 return 1+ Math.max(self.findDeep(root.left),self.findDeep(root.right))
576
196
class Solution:
    def strStr(self, source, target):
        """Return the first index at which *target* occurs in *source*.

        Returns 0 when target is empty, -1 when either argument is None or
        when target does not occur.

        FIX: the previous hand-rolled scan tracked a single
        `last_target_begining_match` fallback index, which is not a correct
        KMP failure function and mishandles targets with repeated prefixes.
        Delegate to the C-level `str.find`, which is both correct and fast.
        """
        if source is None or target is None:
            return -1
        return source.find(target)
1,589
406
# -*- coding: cp1252 -*- # -*- coding: utf-8 -*- """ Algoritmos y Estructuras de Datos Proyecto Final Antonio Reyes #17273 Esteban Cabrera #17781 Miguel #17102 """ import random import xlrd file_location = "C:/Users/Antonio/Desktop/Recommendation-System-python-neo4J-master/Database.xlsx" workbook = xlrd.open_workbook(file_location) sheet = workbook.sheet_by_index(0) from neo4jrestclient.client import GraphDatabase db = GraphDatabase("http://localhost:7474",username="neo4j", password="1111") dataB = db.labels.create("Database") gen = db.labels.create("Genero") #se crea un diccionario (como vimos en hashmaps) database = {} #donde se guardan los generos de las series que ya se vieron historial = [] #en el for se puede poner sheet.nrows para imprimir todo def add_Excel(): lista_gen = [] for x in range(sheet.nrows): name = sheet.cell_value(x,0) gen1 = sheet.cell_value(x,1) gen2 = sheet.cell_value(x,2) gen3 = sheet.cell_value(x,3) lista_gen = [] lista_gen.append(gen1) lista_gen.append(gen2) lista_gen.append(gen3) lista_gen.sort() gen1 = lista_gen[0] gen2 = lista_gen[1] gen3 = lista_gen[2] generos = [] generos.append(gen1) generos.append(gen2) generos.append(gen3) database[name] = generos unidad = db.nodes.create(nombre=name, genero1=gen1, genero2=gen2, genero3=gen3) dataB.add(unidad) try: unidad.relationships.create("contains", gen.get(genero=gen1)[0]) gen.get(genero=gen1)[0].relationships.create("contains", unidad) except Exception: genNode = db.nodes.create(genero=gen1) gen.add(genNode) unidad.relationships.create("contains", gen.get(genero=gen1)[0]) gen.get(genero=gen1)[0].relationships.create("contains", unidad) try: unidad.relationships.create("contains", gen.get(genero=gen2)[0]) gen.get(genero=gen2)[0].relationships.create("contains", unidad) except Exception: genNode = db.nodes.create(genero=gen2) gen.add(genNode) unidad.relationships.create("contains", gen.get(genero=gen2)[0]) gen.get(genero=gen2)[0].relationships.create("contains", unidad) try: 
unidad.relationships.create("contains", gen.get(genero=gen3)[0]) gen.get(genero=gen3)[0].relationships.create("contains", unidad) except Exception: genNode = db.nodes.create(genero=gen3) gen.add(genNode) unidad.relationships.create("contains", gen.get(genero=gen3)[0]) gen.get(genero=gen3)[0].relationships.create("contains", unidad) def add_database(): listaOrden = [] name = raw_input("Insert name: ") gen1 = raw_input("Insert genre1 ") gen2 = raw_input("Insert genre2: ") gen3 = raw_input("Insert genre3: ") listaOrden.append(gen1) listaOrden.append(gen2) listaOrden.append(gen3) listaOrden.sort() gen1 = listaOrden[0] gen2 = listaOrden[1] gen3 = listaOrden[2] unidad = db.nodes.create(nombre=name, genero1=gen1, genero2=gen2, genero3=gen3) dataB.add(unidad) try: unidad.relationships.create("contains", gen.get(genero=gen1)[0]) gen.get(genero=gen1)[0].relationships.create("contains", unidad) except Exception: genNode = db.nodes.create(genero=gen1) gen.add(genNode) unidad.relationships.create("contains", gen.get(genero=gen1)[0]) gen.get(genero=gen1)[0].relationships.create("contains", unidad) try: unidad.relationships.create("contains", gen.get(genero=gen2)[0]) gen.get(genero=gen2)[0].relationships.create("contains", unidad) except Exception: genNode = db.nodes.create(genero=gen2) gen.add(genNode) unidad.relationships.create("contains", gen.get(genero=gen2)[0]) gen.get(genero=gen2)[0].relationships.create("contains", unidad) try: unidad.relationships.create("contains", gen.get(genero=gen3)[0]) gen.get(genero=gen3)[0].relationships.create("contains", unidad) except Exception: genNode = db.nodes.create(genero=gen3) gen.add(genNode) unidad.relationships.create("contains", gen.get(genero=gen3)[0]) gen.get(genero=gen3)[0].relationships.create("contains", unidad) database[name] = [gen1,gen2,gen3] def watch(): name = raw_input("Insert name: ") try: query = "MATCH (n:Database) WHERE n.nombre='"+name+"' RETURN n.genero1, n.genero2, n.genero3" results = db.query(query, 
data_contents=True) a = results.rows for x in a: historial.append(x[0]) historial.append(x[1]) historial.append(x[2]) except Exception: print("The movie or TV show you were looking for is not in the database, you can add it by going to option 1") popular_topics(name) #se utiliza el código mostrado en este link para mostrar los generos que se repiten más veces #https://stackoverflow.com/questions/3594514/how-to-find-most-common-elements-of-a-list def popular_topics(name): nombre = name #diccionario que determinará cuales son los 5 generos más vistos top_5 = [] #por cada genero en la lista.... word_counter = {} for word in historial: if word in word_counter: word_counter[word] += 1 else: word_counter[word] = 1 popular_words = sorted(word_counter, key = word_counter.get, reverse = True) top_5 = popular_words[:5] #se ordenan los generos en orden alfabetico lista = [] print "Most watched genres: " for x in top_5: lista.append(x) print x print "We recommend: " print "-----------------" print "-----------------" try: query = "match (n:Database{nombre:'"+nombre+"'})-[:contains*1..3]->(a:Database{genero1:'"+top_5[0]+"'}) return collect(distinct a.nombre)" #query = "MATCH (n:Database {genero1:'"+top_5[0]+"', genero2:'"+top_5[1]+"', genero3:'"+top_5[2]+"'}) RETURN n.nombre" results = db.query(query, data_contents=True) #print results a = results.rows #print len(a[0][0]) b = [] print a[0][0][0] for x in a[0][0]: if x not in b: b.append(x) valor = random.sample(range(0, len(b)+1), 3) print b[valor[0]] print b[valor[1]] print b[valor[2]] except Exception: pass try: query = "match (n:Database{nombre:'"+nombre+"'})-[:contains*1..3]->(a:Database{genero2:'"+top_5[0]+"'}) return collect(distinct a.nombre)" #query = "MATCH (n:Database {genero1:'"+top_5[0]+"', genero2:'"+top_5[1]+"', genero3:'"+top_5[2]+"'}) RETURN n.nombre" results = db.query(query, data_contents=True) #print results a = results.rows #print len(a[0][0]) b = [] print a[0][0][0] for x in a[0][0]: if x not in b: 
b.append(x) valor = random.sample(range(0, len(b)+1), 3) print b[valor[0]] print b[valor[1]] print b[valor[2]] except Exception: pass try: query = "match (n:Database{nombre:'"+nombre+"'})-[:contains*1..3]->(a:Database{genero3:'"+top_5[0]+"'}) return collect(distinct a.nombre)" #query = "MATCH (n:Database {genero1:'"+top_5[0]+"', genero2:'"+top_5[1]+"', genero3:'"+top_5[2]+"'}) RETURN n.nombre" results = db.query(query, data_contents=True) #print results a = results.rows #print len(a[0][0]) b = [] print a[0][0][0] for x in a[0][0]: if x not in b: b.append(x) valor = random.sample(range(0, len(b)+1), 3) print b[valor[0]] print b[valor[1]] print b[valor[2]] except Exception: pass try: query = "match (n:Database{nombre:'"+nombre+"'})-[:contains*1..3]->(a:Database{genero1:'"+top_5[1]+"'}) return collect(distinct a.nombre)" #query = "MATCH (n:Database {genero1:'"+top_5[0]+"', genero2:'"+top_5[1]+"', genero3:'"+top_5[2]+"'}) RETURN n.nombre" results = db.query(query, data_contents=True) #print results a = results.rows #print len(a[0][0]) b = [] print a[0][0][0] for x in a[0][0]: if x not in b: b.append(x) valor = random.sample(range(0, len(b)+1), 3) print b[valor[0]] print b[valor[1]] print b[valor[2]] except Exception: pass try: query = "match (n:Database{nombre:'"+nombre+"'})-[:contains*1..3]->(a:Database{genero2:'"+top_5[1]+"'}) return collect(distinct a.nombre)" #query = "MATCH (n:Database {genero1:'"+top_5[0]+"', genero2:'"+top_5[1]+"', genero3:'"+top_5[2]+"'}) RETURN n.nombre" results = db.query(query, data_contents=True) #print results a = results.rows #print len(a[0][0]) b = [] print a[0][0][0] for x in a[0][0]: if x not in b: b.append(x) valor = random.sample(range(0, len(b)+1), 3) print b[valor[0]] print b[valor[1]] print b[valor[2]] except Exception: pass try: query = "match (n:Database{nombre:'"+nombre+"'})-[:contains*1..3]->(a:Database{genero3:'"+top_5[1]+"'}) return collect(distinct a.nombre)" #query = "MATCH (n:Database {genero1:'"+top_5[0]+"', 
genero2:'"+top_5[1]+"', genero3:'"+top_5[2]+"'}) RETURN n.nombre" results = db.query(query, data_contents=True) #print results a = results.rows #print len(a[0][0]) b = [] print a[0][0][0] for x in a[0][0]: if x not in b: b.append(x) valor = random.sample(range(0, len(b)+1), 3) print b[valor[0]] print b[valor[1]] print b[valor[2]] except Exception: pass try: query = "match (n:Database{nombre:'"+nombre+"'})-[:contains*1..3]->(a:Database{genero1:'"+top_5[2]+"'}) return collect(distinct a.nombre)" #query = "MATCH (n:Database {genero1:'"+top_5[0]+"', genero2:'"+top_5[1]+"', genero3:'"+top_5[2]+"'}) RETURN n.nombre" results = db.query(query, data_contents=True) #print results a = results.rows #print len(a[0][0]) b = [] print a[0][0][0] for x in a[0][0]: if x not in b: b.append(x) valor = random.sample(range(0, len(b)+1), 3) print b[valor[0]] print b[valor[1]] print b[valor[2]] except Exception: pass try: query = "match (n:Database{nombre:'"+nombre+"'})-[:contains*1..3]->(a:Database{genero2:'"+top_5[2]+"'}) return collect(distinct a.nombre)" #query = "MATCH (n:Database {genero1:'"+top_5[0]+"', genero2:'"+top_5[1]+"', genero3:'"+top_5[2]+"'}) RETURN n.nombre" results = db.query(query, data_contents=True) #print results a = results.rows #print len(a[0][0]) b = [] print a[0][0][0] for x in a[0][0]: if x not in b: b.append(x) valor = random.sample(range(0, len(b)+1), 3) print b[valor[0]] print b[valor[1]] print b[valor[2]] except Exception: pass try: query = "match (n:Database{nombre:'"+nombre+"'})-[:contains*1..3]->(a:Database{genero3:'"+top_5[2]+"'}) return collect(distinct a.nombre)" #query = "MATCH (n:Database {genero1:'"+top_5[0]+"', genero2:'"+top_5[1]+"', genero3:'"+top_5[2]+"'}) RETURN n.nombre" results = db.query(query, data_contents=True) #print results a = results.rows #print len(a[0][0]) b = [] print a[0][0][0] for x in a[0][0]: if x not in b: b.append(x) valor = random.sample(range(0, len(b)+1), 3) print b[valor[0]] print b[valor[1]] print b[valor[2]] except 
Exception: pass #YourList.OrderBy(x => rnd.Next()).Take(5) #recomendation(name, top_5[0], top_5[1], top_5[2], top_5[3]) #método para mostrar todas las series y peliculas de un genero def show_genre(): genre = raw_input("Insert genre: ") try: query = "MATCH (n:Database {genero1:'"+genre+"'}) RETURN n.nombre" results = db.query(query, data_contents=True) a = results.rows b = [] for x in a: if x not in b: b.append(x) print x except Exception: pass try: query = "MATCH (n:Database {genero2:'"+genre+"'}) RETURN n.nombre" results = db.query(query, data_contents=True) a = results.rows b = [] for x in a: if x not in b: b.append(x) print x except Exception: pass try: query = "MATCH (n:Database {genero3:'"+genre+"'}) RETURN n.nombre" results = db.query(query, data_contents=True) a = results.rows b = [] for x in a: if x not in b: b.append(x) print x except Exception: pass #****************************************************************************************************** #******************************************************************************************************* def menu(): print("0. Add movies and TV shows to from Excel to Database") print("1. Add move or TV show to Database") print("2. Watch movie or TV Show") print("3. List of movies and TV shows by genre") print("9. 
Exit") menu() opcion = input("Option: ") print ("**********************************") print ("**********************************") while(opcion != 9): if(opcion == 0): add_Excel() print ("**********************************") print ("**********************************") print ("Values added to Database") menu() opcion = input("Option: ") elif(opcion == 1): add_database() print ("**********************************") print ("**********************************") menu() opcion = input("Option: ") elif(opcion == 2): watch() print ("**********************************") print ("**********************************") menu() opcion = input("Option: ") elif(opcion == 3): show_genre() print ("**********************************") print ("**********************************") menu() opcion = input("Option: ") else: print("This option is not valid") print ("**********************************") print ("**********************************") menu() opcion = input("Option: ") print ("Thanks for using the program")
15,633
5,653
from flask import Flask
import time
from _thread import get_ident

app = Flask(__name__)


@app.route("/")
def hello_world():
    """Deliberately slow endpoint (20 s sleep); echoes the serving thread id."""
    time.sleep(20)
    return "hello world!{0}".format(get_ident())


@app.route("/index")
def hello():
    """Fast endpoint (1 s sleep); echoes the serving thread id."""
    time.sleep(1)
    return "Hello{0}".format(get_ident())


if __name__ == "__main__":
    app.run(port=6003)
324
124
import unittest

import util.strings as strings


class TestStrings(unittest.TestCase):
    """Unit tests for the index helpers in util.strings."""

    def test_first_index_of(self):
        for expected, needle, haystack in [
            (1, '1', "0103003004"),
            (20, 'f', "post this text on a form"),
        ]:
            self.assertEqual(expected, strings.first_index_of(needle, haystack))

    def test_last_index_of(self):
        for expected, needle, haystack in [
            (9, '1', "01030030014"),
            (20, 'f', "post this text on a form"),
        ]:
            self.assertEqual(expected, strings.last_index_of(needle, haystack))

    def test_indexes_of(self):
        for expected, needle, haystack in [
            ([1, 9], '1', "01030030014"),
            ([20, 30], 'f', "post this text on a fantastic form"),
        ]:
            self.assertEqual(expected, strings.indexes_of(needle, haystack))


if __name__ == '__main__':
    unittest.main()
724
273
# Data sources database( thermoLibraries = ['primaryThermoLibrary'], reactionLibraries = [('C3', False)], seedMechanisms = ['GRI-Mech3.0'], kineticsDepositories = ['training'], kineticsFamilies = 'default', kineticsEstimator = 'rate rules', ) # List of species species( label='ethane', reactive=True, structure=SMILES("CC"), ) species( label='N2', reactive=False, structure=adjacencyList(""" 1 N u0 p1 c0 {2,T} 2 N u0 p1 c0 {1,T} """), ) # Reaction systems simpleReactor( temperature=(1350,'K'), pressure=(1.0,'bar'), initialMoleFractions={ "ethane": 0.1, "N2": 0.9 }, terminationConversion={ 'ethane': 0.9, }, terminationTime=(1e6,'s'), ) simulator( atol=1e-16, rtol=1e-8, ) model( toleranceKeepInEdge=0.0, toleranceMoveToCore=0.1, toleranceInterruptSimulation=0.1, maximumEdgeSpecies=100000, ) options( units='si', saveRestartPeriod=None, generateOutputHTML=True, generatePlots=False, saveEdgeSpecies=True, saveSimulationProfiles=True, verboseComments=True, ) pressureDependence( method='modified strong collision', maximumGrainSize=(0.5,'kcal/mol'), minimumNumberOfGrains=250, temperatures=(300,2200,'K',2), pressures=(0.01,100,'bar',3), interpolation=('Chebyshev', 6, 4), maximumAtoms=15, )
1,395
552
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer # Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual # contributors (see AUTHORS file for details). All rights reserved. __version__ = "1.3.0" __author__ = "smeinecke" import os sb_dir = os.path.dirname(os.path.realpath(__file__)) from simbench.converter import * from simbench.networks import *
438
147
import assistantResume
from speak_module import speak
from database import speak_is_on


def output(o):
    """Show *o* on the command line as the assistant's reply; speak it aloud
    as well when text-to-speech is enabled."""
    if speak_is_on():
        speak(o)
    print("{0}: {1}\n".format(assistantResume.name, o))
215
68
import vk_api
from vk_api.utils import get_random_id

from vk_bot.core.sql.vksql import *


def relationmeet(text, vk, event):
    """Send a dating proposal from the message author to a mentioned user.

    Returns an error string when the proposal is not allowed; on success a
    notification is sent to the target user and None is returned (as before).
    Rewritten with guard clauses and `is None` instead of `== None`.
    """
    if checkrelation('waitmeet', event.object.from_id) is not None:
        return "Ты уже отправил приглашение!"
    if checkrelation('relation', event.object.from_id) is not None:
        return "Ай-яй-яй! Изменять нехорошо"
    # extract the numeric id from a "[id123|Name]" style mention
    userid = "".join(text[2][3:])
    userid = userid.split('|')[0]
    if checkrelation('relation', userid) is not None:
        return "Этот пользователь уже встречается с кем-то!"
    if checkrelation('waitmeet', userid) is not None:
        return "Этому пользователю уже кто-то предложил встречатся!"
    tableadd("waitmeet", "id, id2", (f"{event.object.from_id}, {userid}"))
    vk.messages.send(user_id=int(userid), random_id=get_random_id(),
                     message=f"*id{event.object.from_id}(Пользователь) предложил тебе встречаться!\nНапиши: '/отношения принять' или '/отношения отклонить'")


def reject(event, vk):
    """Reject a pending dating proposal addressed to the message author."""
    # FIX: the original called checktable() twice for the same row.
    entry = checktable('waitmeet', 'id2', event.object.from_id)
    if entry is None:
        return 'У тебя нет предложений встречаться!'
    vk.messages.send(user_id=int(entry['id']), random_id=get_random_id(),
                     message=f"*id{event.object.from_id}(Пользователь) отклонил твое предложение :()")
    tablerm('waitmeet', "id2", event.object.from_id)
    return "Вы отклонили предложение"


def accept(event, vk):
    """Accept a pending dating proposal addressed to the message author."""
    if checktable('waitmeet', 'id2', event.object.from_id) is None:
        return 'У тебя нет предложений встречаться!'
    relationaccept(event.object.from_id)
    tablerm('waitmeet', "id2", event.object.from_id)
    userid = checktable('relation', 'id2', event.object.from_id)
    vk.messages.send(user_id=int(userid['id']), random_id=get_random_id(),
                     message=f"*id{event.object.from_id}(Пользователь) принял твое предложение! Поздравляем!")
    return "Вы приняли предложение! Поздравляем!"
def test(event, vk, message, case):
    """Build a reply naming the author's partner, or say they are single.

    *case* is the VK grammatical name case passed to users.get.
    """
    if checkrelation('relation', event.object.from_id) is None:
        return {'message': 'Ты ни с кем не встречаешься :('}
    # the author may be stored in either the 'id' or the 'id2' column
    userid = checktable('relation', 'id', event.object.from_id)
    if userid is None:
        userid = checktable('relation', 'id2', event.object.from_id)
    if userid['id2'] == event.object.from_id:
        partner = f"*id{userid['id']}({vk.users.get(user_ids=userid['id'], name_case=case)[0]['first_name']})"
        return {'message': f"{message} {partner}"}
    elif userid['id'] == event.object.from_id:
        partner = f"*id{userid['id2']}({vk.users.get(user_ids=userid['id2'], name_case=case)[0]['first_name']})"
        return {'message': f"{message} {partner}"}
    # NOTE(review): falls through to an implicit None when neither column
    # matches the author -- confirm this cannot happen with the DB schema.


def relation(event, vk, text):
    """Dispatch '/отношения ...' subcommands to the matching handler.

    A bare '/отношения' (no subcommand -> IndexError on text[1]) shows the
    author's current partner.
    """
    try:
        if text[1] == "принять":
            return {"message": accept(event, vk)}
        elif text[1] == "отклонить":
            return {"message": reject(event, vk)}
        elif text[:2] == ['/отношения', 'встречаться']:
            return {"message": relationmeet(text, vk, event)}
    except IndexError:
        return test(event, vk, "Ты встречаешься с", "ins")
3,545
1,185
"""Wallpaper Downloader Main Module.""" import argparse import asyncio import logging import sys from datetime import datetime from wallpaperdownloader.downloader import download, LOGGER_NAME def abort(*args): """Print message to the stderr and exit the program.""" print(*args, file=sys.stderr) sys.exit(1) def check_args(args): """Check if arguments are valid.""" month, year = (args.month, args.year) if month < 1 or month > 12: abort("Invalid month number %d", month) date_string = f"{year:04}{month:02}" if date_string < "201205": abort("There are no wallpapers older than May 2012") if date_string > datetime.now().strftime("%Y%M"): abort("Too early... come a bit later") def configure_logger(level): """Configure console log output.""" logger = logging.getLogger(LOGGER_NAME) handler = logging.StreamHandler() logger.setLevel(level) handler.setLevel(level) logger.addHandler(handler) def main(): """Run WD main routine.""" parser = argparse.ArgumentParser( description="Download wallpapers from www.smashingmagazine.com" ) parser.add_argument("month", type=int, help="Month number") parser.add_argument("year", type=int, help="Year") parser.add_argument("resolution", type=str, help="Image resolution") parser.add_argument( "-v", "--verbose", action="store_true", help="Enable verbose output" ) args = parser.parse_args() check_args(args) configure_logger(logging.DEBUG if args.verbose else logging.INFO) year, month, res = (args.year, args.month, args.resolution) asyncio.get_event_loop().run_until_complete(download(year, month, res)) if __name__ == "__main__": main()
1,746
553
""" Functions shared across the main window, the welcome window and the system tray. """ import os import qcrash.api as qcrash from PyQt5 import QtWidgets from hackedit.app import templates, settings from hackedit.app.dialogs.dlg_about import DlgAbout from hackedit.app.dialogs.dlg_template_answers import DlgTemplateVars from hackedit.app.dialogs.preferences import DlgPreferences from hackedit.app.wizards.new import WizardNew def show_about(window): """ Shows the about dialog on the parent window :param window: parent window. """ DlgAbout.show_about(window) def check_for_update(*args, **kwargs): """ Checks for update. :param window: parent window :param show_up_to_date_msg: True to show a message box when the app is up to date. """ # todo: improve this: make an update wizard that update both hackedit # and its packages (to ensure compatiblity) # if pip_tools.check_for_update('hackedit', __version__): # answer = QtWidgets.QMessageBox.question( # window, 'Check for update', # 'A new version of HackEdit is available...\n' # 'Would you like to install it now?') # if answer == QtWidgets.QMessageBox.Yes: # try: # status = pip_tools.graphical_install_package( # 'hackedit', autoclose_dlg=True) # except RuntimeError as e: # QtWidgets.qApp.processEvents() # QtWidgets.QMessageBox.warning( # window, 'Update failed', # 'Failed to update hackedit: %r' % e) # else: # QtWidgets.qApp.processEvents() # if status: # QtWidgets.QMessageBox.information( # window, 'Check for update', # 'Update completed with sucess, the application ' # 'will now restart...') # window.app.restart() # else: # QtWidgets.QMessageBox.warning( # window, 'Update failed', # 'Failed to update hackedit') # else: # _logger().debug('HackEdit up to date') # if show_up_to_date_msg: # QtWidgets.QMessageBox.information( # window, 'Check for update', 'HackEdit is up to date.') pass def open_folder(window, app): path = QtWidgets.QFileDialog.getExistingDirectory( window, _('Open directory'), settings.last_open_dir()) if 
path: settings.set_last_open_dir(os.path.dirname(path)) app.open_path(path, sender=window) def report_bug(window, title='', traceback=None, issue_description=''): qcrash.show_report_dialog( issue_title=title, traceback=traceback, parent=window, include_log=traceback is not None, include_sys_info=traceback is not None, issue_description=issue_description) return True def edit_preferences(window, app): DlgPreferences.edit_preferences(window, app) def not_implemented_action(window): QtWidgets.QMessageBox.information( window, _('Not implementeded'), _('This action has not been implemented yet...')) def create_new(app, window, current_project=None): source, template, dest_dir, single_file = WizardNew.get_parameters( window, current_project) if source is not None: create_new_from_template(source, template, dest_dir, single_file, window, app) def create_new_from_template(source, template, dest_dir, single_file, window, app): from .main_window import MainWindow try: variables = template['variables'] except KeyError: answers = {} else: answers = DlgTemplateVars.get_answers(variables, parent=window) if answers is None: # canceled by user return None files = templates.create(template, dest_dir, answers) if not files: # should not happen unless the template is empty return None if single_file: path = files[0] else: path = dest_dir from hackedit.app.welcome_window import WelcomeWindow if isinstance(window, WelcomeWindow): sender = None else: sender = window if single_file and isinstance(window, MainWindow): window.open_file(path) else: app.open_path(path, sender=sender) return path
4,512
1,297
""" pygame-menu https://github.com/ppizarror/pygame-menu LOCALS Local constants. License: ------------------------------------------------------------------------------- The MIT License (MIT) Copyright 2017-2021 Pablo Pizarro R. @ppizarror Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
------------------------------------------------------------------------------- """ __all__ = [ # Alignment 'ALIGN_CENTER', 'ALIGN_LEFT', 'ALIGN_RIGHT', # Data types 'INPUT_FLOAT', 'INPUT_INT', 'INPUT_TEXT', # Positioning 'POSITION_CENTER', 'POSITION_EAST', 'POSITION_NORTH', 'POSITION_NORTHEAST', 'POSITION_SOUTHWEST', 'POSITION_SOUTH', 'POSITION_SOUTHEAST', 'POSITION_NORTHWEST', 'POSITION_WEST', # Orientation 'ORIENTATION_HORIZONTAL', 'ORIENTATION_VERTICAL', # Scrollarea 'SCROLLAREA_POSITION_BOTH_HORIZONTAL', 'SCROLLAREA_POSITION_BOTH_VERTICAL', 'SCROLLAREA_POSITION_FULL', # Cursors 'CURSOR_ARROW', 'CURSOR_CROSSHAIR', 'CURSOR_HAND', 'CURSOR_IBEAM', 'CURSOR_NO', 'CURSOR_SIZEALL', 'CURSOR_SIZENESW', 'CURSOR_SIZENS', 'CURSOR_SIZENWSE', 'CURSOR_SIZEWE', 'CURSOR_WAIT', 'CURSOR_WAITARROW', # Event compatibility 'FINGERDOWN', 'FINGERMOTION', 'FINGERUP' ] import pygame as __pygame # Alignment ALIGN_CENTER = 'align-center' ALIGN_LEFT = 'align-left' ALIGN_RIGHT = 'align-right' # Input data type INPUT_FLOAT = 'input-float' INPUT_INT = 'input-int' INPUT_TEXT = 'input-text' # Position POSITION_CENTER = 'position-center' POSITION_EAST = 'position-east' POSITION_NORTH = 'position-north' POSITION_NORTHEAST = 'position-northeast' POSITION_NORTHWEST = 'position-northwest' POSITION_SOUTH = 'position-south' POSITION_SOUTHEAST = 'position-southeast' POSITION_SOUTHWEST = 'position-southwest' POSITION_WEST = 'position-west' # Menu ScrollArea position SCROLLAREA_POSITION_BOTH_HORIZONTAL = 'scrollarea-position-both-horizontal' SCROLLAREA_POSITION_BOTH_VERTICAL = 'scrollarea_position-both-vertical' SCROLLAREA_POSITION_FULL = 'scrollarea-position-full' # Orientation ORIENTATION_HORIZONTAL = 'orientation-horizontal' ORIENTATION_VERTICAL = 'orientation-vertical' # Cursors CURSOR_ARROW = None if not hasattr(__pygame, 'SYSTEM_CURSOR_ARROW') else __pygame.SYSTEM_CURSOR_ARROW CURSOR_CROSSHAIR = None if not hasattr(__pygame, 'SYSTEM_CURSOR_CROSSHAIR') else __pygame.SYSTEM_CURSOR_CROSSHAIR 
CURSOR_HAND = None if not hasattr(__pygame, 'SYSTEM_CURSOR_HAND') else __pygame.SYSTEM_CURSOR_HAND CURSOR_IBEAM = None if not hasattr(__pygame, 'SYSTEM_CURSOR_IBEAM') else __pygame.SYSTEM_CURSOR_IBEAM CURSOR_NO = None if not hasattr(__pygame, 'SYSTEM_CURSOR_NO') else __pygame.SYSTEM_CURSOR_NO CURSOR_SIZEALL = None if not hasattr(__pygame, 'SYSTEM_CURSOR_SIZEALL') else __pygame.SYSTEM_CURSOR_SIZEALL CURSOR_SIZENESW = None if not hasattr(__pygame, 'SYSTEM_CURSOR_SIZENESW') else __pygame.SYSTEM_CURSOR_SIZENESW CURSOR_SIZENS = None if not hasattr(__pygame, 'SYSTEM_CURSOR_SIZENS') else __pygame.SYSTEM_CURSOR_SIZENS CURSOR_SIZENWSE = None if not hasattr(__pygame, 'SYSTEM_CURSOR_SIZENWSE') else __pygame.SYSTEM_CURSOR_SIZENWSE CURSOR_SIZEWE = None if not hasattr(__pygame, 'SYSTEM_CURSOR_SIZEWE') else __pygame.SYSTEM_CURSOR_SIZEWE CURSOR_WAIT = None if not hasattr(__pygame, 'SYSTEM_CURSOR_WAIT') else __pygame.SYSTEM_CURSOR_WAIT CURSOR_WAITARROW = None if not hasattr(__pygame, 'SYSTEM_CURSOR_WAITARROW') else __pygame.SYSTEM_CURSOR_WAITARROW # Events compatibility with lower pygame versions FINGERDOWN = -1 if not hasattr(__pygame, 'FINGERDOWN') else __pygame.FINGERDOWN FINGERMOTION = -1 if not hasattr(__pygame, 'FINGERMOTION') else __pygame.FINGERMOTION FINGERUP = -1 if not hasattr(__pygame, 'FINGERUP') else __pygame.FINGERUP
4,778
1,903
import os
import sys
from enum import Enum
from enum import unique
from typing import List

# Set system constants based on the current platform.
# Each branch reads an environment variable that is expected to exist on
# that platform (APPDATA on Windows, HOME elsewhere); a missing variable
# raises KeyError at import time.
if sys.platform.startswith("win32"):
    DEFAULT_SYSTEM_CONFIG_PATH = os.path.join(os.environ["APPDATA"], "config")
elif sys.platform.startswith("linux"):
    DEFAULT_SYSTEM_CONFIG_PATH = os.path.join(os.environ["HOME"], ".config")
elif sys.platform.startswith("darwin"):
    DEFAULT_SYSTEM_CONFIG_PATH = os.path.join(
        os.environ["HOME"], "Library", "Application Support"
    )
else:
    # Fallback for any other platform.
    DEFAULT_SYSTEM_CONFIG_PATH = os.path.join(os.environ["HOME"], "config")

# System configuration
PACKAGE_BASE_CONFIG_FOLDER = "ventress-functions"
PACKAGE_CONFIG_DIR_PATH = os.path.join(
    DEFAULT_SYSTEM_CONFIG_PATH, PACKAGE_BASE_CONFIG_FOLDER
)

DEFAULT_LOG_FILENAME = "functions.log"
DEFAULT_LOG_FILEPATH = os.path.join(PACKAGE_CONFIG_DIR_PATH, DEFAULT_LOG_FILENAME)

# Project constants
PROJECT_VENDOR = "ventress"
PROJECT_MARK = "ventress-functions"


class ConfigName(str, Enum):
    """Represents various available names for a config file"""

    BASE = "config.json"


class RequiredFile(str, Enum):
    """Enum for required file names in a function's directory"""

    CONFIG = "config.json"
    DOCKERFILE = "Dockerfile"
    DOCKERIGNORE = ".dockerignore"
    ENTRY_POINT = "main.py"
    REQUIREMENTS = "requirements.txt"


class LoggingLevel(str, Enum):
    """Supported logging verbosity levels."""

    DEBUG = "debug"
    ERROR = "error"
    INFO = "info"
    WARNING = "warning"


class FunctionType(str, Enum):
    """Represents the various types of functions that can be run"""

    HTTP = "http"
    PUBSUB = "pubsub"

    @classmethod
    def options(cls) -> List[str]:
        """Returns a list of all the function types"""
        return [enum.value for enum in cls]


class LocalStatus(str, Enum):
    """Represents the status of a function locally"""

    ADDED = "added"
    BUILT = "new build"
    INVALID = "invalid"
    NEW = "new"
    REMOVED = "removed"
    RUNNING = "running"
    STOPPED = "stopped"
    UNKNOWN = "unknown"

    @classmethod
    def build_statuses(cls) -> List[str]:
        """Returns a list of statuses which mean that the image is built"""
        # NOTE(review): enum members (not .value strings) are returned; they
        # are str subclasses, so the List[str] annotation still holds.
        return [
            cls.BUILT,
            cls.RUNNING,
            cls.STOPPED,
        ]


class CloudStatus(str, Enum):
    """Represents the status of a function on the cloud"""

    DELETED = "deleted"
    DEPLOYED = "deployed"
    UNKNOWN = "unknown"

    @property
    def is_deployed(self) -> bool:
        # True only for the DEPLOYED member.
        return self == CloudStatus.DEPLOYED


@unique
class CloudProvider(str, Enum):
    """Represents the various cloud providers supported by the functions package"""

    # AWS = "aws"
    # AZURE = "azure"
    GCP = "gcp"
    # LOCAL = "local"
    # OPENFASS = "openfass"
    # OPENSTACK = "openstack"

    @classmethod
    def all(cls) -> List[str]:
        """Returns all the available service types"""
        return [enum.value for enum in cls]


@unique
class CloudServiceType(str, Enum):
    # Cloud service flavors a function can be deployed as.
    CLOUD_FUNCTION = "cloud_function"

    @classmethod
    def all(cls) -> List[str]:
        """Returns all the available service types"""
        return [enum.value for enum in cls]
3,184
1,085
#- Copyright 2014 GOTO 10.
#- Licensed under the Apache License, Version 2.0 (see LICENSE).

## Utilities used for creating build extensions.

from abc import ABCMeta, abstractmethod


# Abstract superclass of the tool sets loaded implicitly into each context.
# There can be many of these, one for each context.
class ToolSet(object):
  # NOTE(review): the "__metaclass__" attribute only has effect on Python 2;
  # Python 3 ignores it, so abstractness is not enforced there. Kept as-is
  # since the target interpreter version is not known from this file.
  __metaclass__ = ABCMeta

  def __init__(self, context):
    """Binds this tool set to the given context."""
    self.context = context

  # Returns the context this tool set belongs to.
  def get_context(self):
    return self.context


# Controller for this kind of extension. There is only one of these for each
# kind of extension.
class ToolController(object):
  # NOTE(review): Python-2-only metaclass declaration; see ToolSet above.
  __metaclass__ = ABCMeta

  def __init__(self, env):
    """Binds this controller to the given build environment."""
    self.env = env

  # Returns the build environment.
  def get_environment(self):
    return self.env

  # Gives this controller an opportunity to add some extra custom flags. By
  # default does nothing.
  def add_custom_flags(self, parser):
    pass

  # Returns a toolset instance, given a concrete context.
  @abstractmethod
  def get_tools(self, context):
    pass
1,076
313
# -*- coding: utf-8 -*-
'''
Dynamic models, process/measurement noise matrices and trajectory helpers
for target tracking.

REFERENCES:
[1] Y. Bar-Shalom, X. R. Li, and T. Kirubarajan, "Estimation with Applications
    to Tracking and Navigation," New York: John Wiley and Sons, Inc, 2001.
[2] R. A. Singer, "Estimating Optimal Tracking Filter Performance for Manned
    Maneuvering Targets," in IEEE Transactions on Aerospace and Electronic
    Systems, vol. AES-6, no. 4, pp. 473-483, July 1970.
[3] X. Rong Li and V. P. Jilkov, "Survey of maneuvering target tracking.
    Part I. Dynamic models," in IEEE Transactions on Aerospace and Electronic
    Systems, vol. 39, no. 4, pp. 1333-1364, Oct. 2003.
[4] W. Koch, "Tracking and Sensor Data Fusion: Methodological Framework and
    Selected Applications," Heidelberg, Germany: Springer, 2014.
[5] Mo Longbin, Song Xiaoquan, Zhou Yiyu, Sun Zhong Kang and Y. Bar-Shalom,
    "Unbiased converted measurements for tracking," in IEEE Transactions on
    Aerospace and Electronic Systems, vol. 34, no. 3, pp. 1023-1027, July 1998
'''
from __future__ import division, absolute_import, print_function


__all__ = [
    'F_poly', 'F_singer', 'F_van_keuk', 'Q_poly_dc', 'Q_poly_dd', 'Q_singer',
    'Q_van_keuk', 'H_pos_only', 'R_pos_only', 'F_cv', 'f_cv', 'f_cv_jac',
    'Q_cv_dc', 'Q_cv_dd', 'H_cv', 'h_cv', 'h_cv_jac', 'R_cv', 'F_ca', 'f_ca',
    'f_ca_jac', 'Q_ca_dc', 'Q_ca_dd', 'H_ca', 'h_ca', 'h_ca_jac', 'R_ca',
    'F_ct', 'f_ct', 'f_ct_jac', 'Q_ct', 'h_ct', 'h_ct_jac', 'R_ct',
    'convert_meas', 'model_switch', 'trajectory_cv', 'trajectory_ca',
    'trajectory_ct', 'trajectory_generator', 'trajectory_with_pd',
    'trajectory_to_meas'
]

import numbers
import numpy as np
import scipy.linalg as lg
import scipy.stats as st
import scipy.special as sl
from tracklib.utils import sph2cart, pol2cart


def F_poly(order, axis, T):
    '''
    This polynomial transition matrix is used with discretized continuous-time
    models as well as direct discrete-time models. See sections 6.2 and 6.3
    in [1].

    Parameters
    ----------
    order : int
        The order of the filter. If order=2, then it is constant velocity,
        3 means constant acceleration, 4 means constant jerk, etc.
    axis : int
        Motion directions in Cartesian coordinate. If axis=1, it means x-axis,
        2 means x-axis and y-axis, etc.
    T : float
        The time-duration of the propagation interval.

    Returns
    -------
    F : ndarray
        The state transition matrix under a linear dynamic model of the given
        order and axis.
    '''
    assert (order >= 1)
    assert (axis >= 1)

    # First row of the single-axis block is T^k / k!; each subsequent row
    # is the previous one shifted right (upper-triangular Taylor structure).
    F_base = np.zeros((order, order))
    tmp = np.arange(order)
    F_base[0, :] = T**tmp / sl.factorial(tmp)
    for row in range(1, order):
        F_base[row, row:] = F_base[0, :order - row]
    # Replicate the block once per axis along the diagonal.
    F = np.kron(np.eye(axis), F_base)

    return F


def F_singer(axis, T, tau=20):
    '''
    Get the Singer model transition matrix, see section 8.2 in [1].

    Parameters
    ----------
    axis : int
        Motion directions in Cartesian coordinate. If axis=1, it means x-axis,
        2 means x-axis and y-axis, etc.
    T : float
        The time-duration of the propagation interval.
    tau : float
        The time constant of the target acceleration autocorrelation, that is,
        the decorrelation time is approximately 2*tau. A reasonable range of
        tau for Singer's model is between 5 and 20 seconds. Typical values of
        tau for aircraft are 20s for slow turn and 5s for an evasive maneuver.
        If this parameter is omitted, the default value of 20 is used. The
        time constant is assumed the same for all dimensions of motion, so
        this parameter is scalar.

    Returns
    -------
    F : ndarray
        The state transition matrix under a Gauss-Markov dynamic model of the
        given axis.
    '''
    assert (axis >= 1)

    alpha = 1 / tau
    F_base = np.zeros((3, 3))
    aT = alpha * T
    eaT = np.exp(-aT)
    F_base[0, 0] = 1
    F_base[0, 1] = T
    F_base[0, 2] = (aT - 1 + eaT) * tau**2
    F_base[1, 1] = 1
    F_base[1, 2] = (1 - eaT) * tau
    F_base[2, 2] = eaT
    F = np.kron(np.eye(axis), F_base)

    return F


def F_van_keuk(axis, T, tau=20):
    '''
    Get the state transition matrix for the van Keuk dynamic model. This is a
    direct discrete-time model such that the acceleration advances in each
    dimension over time as
    a[k+1] = exp(-T/tau)*a[k] + std*sqrt(1 - exp(-2*T/tau))*v[k],
    see section 2.2.1 in [4].

    Parameters
    ----------
    axis : int
        Motion directions in Cartesian coordinate. If axis=1, it means x-axis,
        2 means x-axis and y-axis, etc.
    T : float
        The time-duration of the propagation interval.
    tau : float
        The time constant of the target acceleration autocorrelation (see
        F_singer for typical values); default 20. Scalar, shared by all axes.

    Returns
    -------
    F : ndarray
        The state transition matrix under a Gauss-Markov dynamic model of the
        given axis.
    '''
    assert (axis >= 1)

    # Constant-acceleration block with an exponentially decaying acceleration.
    F_base = F_poly(3, 1, T)
    F_base[-1, -1] = np.exp(-T / tau)
    F = np.kron(np.eye(axis), F_base)

    return F


def Q_poly_dc(order, axis, T, std):
    '''
    Process noise covariance matrix used with discretized continuous-time
    models. See section 6.2 in [1].

    Parameters
    ----------
    order : int
        The order of the filter. If order=2, then it is constant velocity,
        3 means constant acceleration, 4 means constant jerk, etc.
    axis : int
        Motion directions in Cartesian coordinate. If axis=1, it means x-axis,
        2 means x-axis and y-axis, etc.
    T : float
        The time-duration of the propagation interval.
    std : number, list
        The standard deviation (square root of intensity) of continuous-time
        process noise. A scalar is broadcast to all axes.

    Returns
    -------
    Q : ndarray
        Process noise covariance
    '''
    assert (order >= 1)
    assert (axis >= 1)

    if isinstance(std, numbers.Number):
        std = [std] * axis
    sel = np.arange(order - 1, -1, -1)
    col, row = np.meshgrid(sel, sel)
    # Entry (i, j) of the base block is T^(p+q+1) / (p! * q! * (p+q+1)).
    Q_base = T**(col + row + 1) / (sl.factorial(col) * sl.factorial(row) * (col + row + 1))
    Q = np.kron(np.diag(std)**2, Q_base)

    return Q


def Q_poly_dd(order, axis, T, std, ht=0):
    '''
    Process noise covariance matrix used with direct discrete-time models.
    See section 6.3 in [1].

    Parameters
    ----------
    order : int
        The order of the filter. If order=2, then it is constant velocity,
        3 means constant acceleration, 4 means constant jerk, etc.
    axis : int
        Motion directions in Cartesian coordinate. If axis=1, it means x-axis,
        2 means x-axis and y-axis, etc.
    T : float
        The time-duration of the propagation interval.
    std : number, list
        The standard deviation of discrete-time process noise. A scalar is
        broadcast to all axes.
    ht : int
        ht means that the order of the noise is `ht` greater than the highest
        order of the state, e.g., if the highest order of state is
        acceleration, then ht=0 means that the noise is of the same order as
        the highest order of state, that is, the noise is acceleration and the
        model is DWPA, see section 6.3.3 in [1]. If the highest order is
        velocity, then ht=1 means the noise is acceleration and the model is
        DWNA, see section 6.3.2 in [1].

    Returns
    -------
    Q : ndarray
        Process noise covariance

    Notes
    -----
    For the model to which the alpha filter applies, we have order=0, ht=2.
    Likewise, for the alpha-beta filter, order=1, ht=1 and for the alpha-
    beta-gamma filter, order=2, ht=0
    '''
    assert (order >= 1)
    assert (axis >= 1)

    if isinstance(std, numbers.Number):
        std = [std] * axis
    # Q_base = L * L' with L the noise gain vector of the chosen order.
    sel = np.arange(ht + order - 1, ht - 1, -1)
    L = T**sel / sl.factorial(sel)
    Q_base = np.outer(L, L)
    Q = np.kron(np.diag(std)**2, Q_base)

    return Q


def Q_singer(axis, T, std, tau=20):
    '''
    Process noise covariance matrix used with Singer models. See section 8.2
    in [1].

    Parameters
    ----------
    axis : int
        Motion directions in Cartesian coordinate. If axis=1, it means x-axis,
        2 means x-axis and y-axis, etc.
    T : float
        The time-duration of the propagation interval.
    std : number, list
        std is the instantaneous standard deviation of the acceleration known
        as an Ornstein-Uhlenbeck process, which can be obtained by assuming
        it to be
        1. Equal to a maximum acceleration a_M with probability p_M and -a_M
           with the same probability
        2. Equal to zero with probability p_0
        3. Uniformly distributed in [-a_M, a_M] with the remaining
           probability mass
        All parameters mentioned above are chosen by the designer. So the
        expected std^2 is (a_M^2 / 3)*(1 + 4*p_M - p_0)
    tau : float
        The time constant of the target acceleration autocorrelation (see
        F_singer for typical values); default 20. Scalar, shared by all axes.

    Returns
    -------
    Q : ndarray
        Process noise covariance
    '''
    assert (axis >= 1)

    if isinstance(std, numbers.Number):
        std = [std] * axis
    alpha = 1 / tau
    aT = alpha * T
    eaT = np.exp(-aT)
    e2aT = np.exp(-2 * aT)
    # Closed-form entries of the symmetric 3x3 Singer covariance block.
    q11 = tau**4 * (1 - e2aT + 2 * aT + 2 * aT**3 / 3 - 2 * aT**2 - 4 * aT * eaT)
    q12 = tau**3 * (e2aT + 1 - 2 * eaT + 2 * aT * eaT - 2 * aT + aT**2)
    q13 = tau**2 * (1 - e2aT - 2 * aT * eaT)
    q22 = tau**2 * (4 * eaT - 3 - e2aT + 2 * aT)
    q23 = tau * (e2aT + 1 - 2 * eaT)
    q33 = 1 - e2aT
    Q_base = np.array([[q11, q12, q13], [q12, q22, q23], [q13, q23, q33]],
                      dtype=float)
    Q = np.kron(np.diag(std)**2, Q_base)

    return Q


def Q_van_keuk(axis, T, std, tau=20):
    '''
    Process noise covariance matrix for a van Keuk dynamic model, see section
    2.2.1 in [4].

    Parameters
    ----------
    axis : int
        Motion directions in Cartesian coordinate. If axis=1, it means x-axis,
        2 means x-axis and y-axis, etc.
    T : float
        The time-duration of the propagation interval.
    std : number, list
        The instantaneous standard deviation of the acceleration; see
        Q_singer for how it may be chosen. A scalar is broadcast to all axes.
    tau : float
        The time constant of the target acceleration autocorrelation (see
        F_singer for typical values); default 20. Scalar, shared by all axes.

    Returns
    -------
    Q : ndarray
        Process noise covariance
    '''
    assert (axis >= 1)

    if isinstance(std, numbers.Number):
        std = [std] * axis
    # Noise only enters through the acceleration component.
    Q_base = np.diag([0., 0., 1.])
    Q_base = (1 - np.exp(-2 * T / tau)) * Q_base
    Q = np.kron(np.diag(std)**2, Q_base)

    return Q


def H_pos_only(order, axis):
    '''
    Position-only measurement matrix used with discretized continuous-time
    models as well as direct discrete-time models. See section 6.5 in [1].

    Parameters
    ----------
    order : int
        The order of the filter. If order=2, then it is constant velocity,
        3 means constant acceleration, 4 means constant jerk, etc.
    axis : int
        Motion directions in Cartesian coordinate. If axis=1, it means x-axis,
        2 means x-axis and y-axis, etc.

    Returns
    -------
    H : ndarray
        The measurement or observation matrix
    '''
    assert (order >= 1)
    assert (axis >= 1)

    # Keep every `order`-th row of the identity: the position components.
    H = np.eye(order * axis)
    H = H[::order]

    return H


def R_pos_only(axis, std):
    '''
    Position-only measurement noise covariance matrix; the noise of each axis
    is assumed to be uncorrelated.

    Parameters
    ----------
    axis : int
        Motion directions in Cartesian coordinate. If axis=1, it means x-axis,
        2 means x-axis and y-axis, etc.
    std : number, list
        The measurement noise standard deviation per axis. A scalar is
        broadcast to all axes.

    Returns
    -------
    R : ndarray
        The measurement noise covariance matrix
    '''
    assert (axis >= 1)

    if isinstance(std, numbers.Number):
        std = [std] * axis
    R = np.diag(std)**2

    return R


# --- Constant-velocity (CV) model shortcuts -------------------------------

def F_cv(axis, T):
    # CV transition matrix: order-2 polynomial model.
    return F_poly(2, axis, T)


def f_cv(axis, T):
    # Returns the CV state propagation function x -> F x.
    F = F_cv(axis, T)
    def f(x, u=None):
        return np.dot(F, x)
    return f


def f_cv_jac(axis, T):
    # Jacobian of the (linear) CV model: constant F.
    F = F_cv(axis, T)
    def fjac(x, u=None):
        return F
    return fjac


def Q_cv_dc(axis, T, std):
    # CV process noise, discretized continuous-time form.
    return Q_poly_dc(2, axis, T, std)


def Q_cv_dd(axis, T, std):
    # CV process noise, direct discrete-time form (DWNA: noise one order
    # above velocity).
    return Q_poly_dd(2, axis, T, std, ht=1)


def H_cv(axis):
    # Position-only measurement matrix for the CV state layout.
    return H_pos_only(2, axis)


def h_cv(axis):
    # Returns the CV measurement function x -> H x.
    H = H_cv(axis)
    def h(x):
        return np.dot(H, x)
    return h


def h_cv_jac(axis):
    # Jacobian of the (linear) CV measurement: constant H.
    H = H_cv(axis)
    def hjac(x):
        return H
    return hjac


def R_cv(axis, std):
    # Position-only measurement noise covariance for the CV model.
    return R_pos_only(axis, std)


# --- Constant-acceleration (CA) model shortcuts ---------------------------

def F_ca(axis, T):
    # CA transition matrix: order-3 polynomial model.
    return F_poly(3, axis, T)


def f_ca(axis, T):
    # Returns the CA state propagation function x -> F x.
    F = F_ca(axis, T)
    def f(x, u=None):
        return np.dot(F, x)
    return f


def f_ca_jac(axis, T):
    # Jacobian of the (linear) CA model: constant F.
    F = F_ca(axis, T)
    def fjac(x, u=None):
        return F
    return fjac


def Q_ca_dc(axis, T, std):
    # CA process noise, discretized continuous-time form.
    return Q_poly_dc(3, axis, T, std)


def Q_ca_dd(axis, T, std):
    # CA process noise, direct discrete-time form (DWPA: noise at the same
    # order as acceleration).
    return Q_poly_dd(3, axis, T, std, ht=0)


def H_ca(axis):
    # Position-only measurement matrix for the CA state layout.
    return H_pos_only(3, axis)


def h_ca(axis):
    # Returns the CA measurement function x -> H x.
    H = H_ca(axis)
    def h(x):
        return np.dot(H, x)
    return h


def h_ca_jac(axis):
    # Jacobian of the (linear) CA measurement: constant H.
    H = H_ca(axis)
    def hjac(x):
        return H
    return hjac


def R_ca(axis, std):
    # Position-only measurement noise covariance for the CA model.
    return R_pos_only(axis, std)


# --- Coordinated-turn (CT) model ------------------------------------------

def F_ct(axis, turnrate, T):
    # CT transition matrix for a known turn rate (in degrees per time unit).
    # State layout: [x, vx, y, vy] (+ [z, vz] appended when axis == 3).
    assert (axis >= 2)

    omega = np.deg2rad(turnrate)
    # Near-zero turn rate degenerates to the CV limit to avoid division
    # by (almost) zero.
    if np.fabs(omega) >= np.sqrt(np.finfo(omega).eps):
        wt = omega * T
        sin_wt = np.sin(wt)
        cos_wt = np.cos(wt)
        sin_div = sin_wt / omega
        cos_div = (cos_wt - 1) / omega
    else:
        sin_wt = 0
        cos_wt = 1
        sin_div = T
        cos_div = 0
    F = np.array([[1, sin_div, 0, cos_div], [0, cos_wt, 0, -sin_wt],
                  [0, -cos_div, 1, sin_div], [0, sin_wt, 0, cos_wt]],
                 dtype=float)
    if axis == 3:
        # The z axis follows an independent CV model.
        zblock = F_cv(1, T)
        F = lg.block_diag(F, zblock)
    return F


def f_ct(axis, T):
    # Returns the CT propagation function for the augmented state
    # [x, vx, y, vy, omega(deg)] (+ [z, vz] when axis == 3), where the turn
    # rate is part of the state (x[4]).
    assert (axis >= 2)

    def f(x, u=None):
        omega = np.deg2rad(x[4])
        # Same near-zero guard as F_ct.
        if np.fabs(omega) >= np.sqrt(np.finfo(omega).eps):
            wt = omega * T
            sin_wt = np.sin(wt)
            cos_wt = np.cos(wt)
            sin_div = sin_wt / omega
            cos_div = (cos_wt - 1) / omega
        else:
            sin_wt = 0
            cos_wt = 1
            sin_div = T
            cos_div = 0
        F = np.array([[1, sin_div, 0, cos_div], [0, cos_wt, 0, -sin_wt],
                      [0, -cos_div, 1, sin_div], [0, sin_wt, 0, cos_wt]],
                     dtype=float)
        # The turn-rate component itself propagates as the identity.
        F = lg.block_diag(F, 1)
        if axis == 3:
            zblock = F_cv(1, T)
            F = lg.block_diag(F, zblock)
        return np.dot(F, x)
    return f


def f_ct_jac(axis, T):
    # Returns the Jacobian of the CT propagation with state-dependent turn
    # rate; the last column holds the partial derivatives w.r.t. omega.
    assert (axis >= 2)

    def fjac(x, u=None):
        omega = np.deg2rad(x[4])
        if np.fabs(omega) >= np.sqrt(np.finfo(omega).eps):
            wt = omega * T
            sin_wt = np.sin(wt)
            cos_wt = np.cos(wt)
            sin_div = sin_wt / omega
            cos_div = (cos_wt - 1) / omega
            f0 = np.deg2rad(((wt * cos_wt - sin_wt) * x[1] + (1 - cos_wt - wt * sin_wt) * x[3]) / omega**2)
            f1 = np.deg2rad((-x[1] * sin_wt - x[3] * cos_wt) * T)
            f2 = np.deg2rad((wt * (x[1] * sin_wt + x[3] * cos_wt) - (x[1] * (1 - cos_wt) + x[3] * sin_wt)) / omega**2)
            f3 = np.deg2rad((x[1]*cos_wt - x[3]*sin_wt) * T)
        else:
            # Limits of the partials as omega -> 0.
            sin_wt = 0
            cos_wt = 1
            sin_div = T
            cos_div = 0
            f0 = np.deg2rad(-x[3] * T**2 / 2)
            f1 = np.deg2rad(-x[3] * T)
            f2 = np.deg2rad(x[1] * T**2 / 2)
            f3 = np.deg2rad(x[1] * T)
        F = np.array([[1, sin_div, 0, cos_div], [0, cos_wt, 0, -sin_wt],
                      [0, -cos_div, 1, sin_div], [0, sin_wt, 0, cos_wt]],
                     dtype=float)
        F = lg.block_diag(F, 1)
        F[0, -1] = f0
        F[1, -1] = f1
        F[2, -1] = f2
        F[3, -1] = f3
        if axis == 3:
            zblock = F_cv(1, T)
            F = lg.block_diag(F, zblock)
        return F
    return fjac


def Q_ct(axis, T, std):
    # CT process noise covariance, including a noise term for the turn-rate
    # component of the augmented state.
    assert (axis >= 2)

    if isinstance(std, numbers.Number):
        std = [std] * (axis + 1)    # omega
    block = np.array([T**2 / 2, T], dtype=float).reshape(-1, 1)
    L = lg.block_diag(block, block, T)
    Q = np.diag(std)**2
    if axis == 3:
        L = lg.block_diag(L, block)
    return L @ Q @ L.T


def h_ct(axis):
    # Position-only measurement function for the CT state layout; the extra
    # zero column skips the turn-rate component at index 4.
    assert (axis >= 2)

    if axis == 3:
        H = H_pos_only(2, 3)
    else:
        H = H_pos_only(2, 2)
    H = np.insert(H, 4, 0, axis=1)
    def h(x):
        return np.dot(H, x)
    return h


def h_ct_jac(axis):
    # Jacobian of the (linear) CT measurement: constant H.
    assert (axis >= 2)

    if axis == 3:
        H = H_pos_only(2, 3)
    else:
        H = H_pos_only(2, 2)
    H = np.insert(H, 4, 0, axis=1)
    def hjac(x):
        return H
    return hjac
def R_ct(axis, std):
    # Position-only measurement noise covariance for the CT model.
    assert (axis >= 2)

    return R_pos_only(axis, std)


def convert_meas(z, R, elev=False):
    '''
    Unbiased conversion of a polar/spherical measurement and its covariance
    into Cartesian coordinates, see [5].

    Parameters
    ----------
    z : array_like
        Measurement [range, azimuth] or, when elev=True,
        [range, azimuth, elevation] (angles in radians — assumed from the
        use of sin/cos without conversion; TODO confirm against callers).
    R : ndarray
        Measurement noise covariance in the polar/spherical frame; only the
        diagonal is used.
    elev : bool
        If True, convert a 3D spherical measurement; otherwise a 2D polar one.

    Returns
    -------
    z_cart, R_cart : ndarray
        Converted Cartesian measurement and its covariance.
    '''
    if elev:
        # converted measurement
        r, az, el = z[0], z[1], z[2]
        var_r, var_az, var_el = R[0, 0], R[1, 1], R[2, 2]
        # Bias-compensation factors from the angle-noise variances.
        lamb_az = np.exp(-var_az / 2)
        lamb_el = np.exp(-var_el / 2)
        z_cart = np.array(sph2cart(r, az, el), dtype=float)
        z_cart[0] = z_cart[0] / lamb_az / lamb_el
        z_cart[1] = z_cart[1] / lamb_az / lamb_el
        z_cart[2] = z_cart[2] / lamb_el
        # converted covariance
        r11 = (1 / (lamb_az * lamb_el)**2 - 2) * (r * np.cos(az) * np.cos(el))**2 + (r**2 + var_r) * (1 + lamb_az**4 * np.cos(2 * az)) * (1 + lamb_el**4 * np.cos(2 * el)) / 4
        r22 = (1 / (lamb_az * lamb_el)**2 - 2) * (r * np.sin(az) * np.cos(el))**2 + (r**2 + var_r) * (1 - lamb_az**4 * np.cos(2 * az)) * (1 + lamb_el**4 * np.cos(2 * el)) / 4
        r33 = (1 / lamb_el**2 - 2) * (r * np.sin(el))**2 + (r**2 + var_r) * (1 - lamb_el**4 * np.cos(2 * el)) / 2
        r12 = (1 / (lamb_az * lamb_el)**2 - 2) * r**2 * np.sin(az) * np.cos(az) * np.cos(el)**2 + (r**2 + var_r) * lamb_az**4 * np.sin(2 * az) * (1 + lamb_el**4 * np.cos(2 * el)) / 4
        r13 = (1 / (lamb_az * lamb_el**2) - 1 / lamb_az - lamb_az) * r**2 * np.cos(az) * np.sin(el) * np.cos(el) + (r**2 + var_r) * lamb_az * lamb_el**4 * np.cos(az) * np.sin(2 * el) / 2
        r23 = (1 / (lamb_az * lamb_el**2) - 1 / lamb_az - lamb_az) * r**2 * np.sin(az) * np.sin(el) * np.cos(el) + (r**2 + var_r) * lamb_az * lamb_el**4 * np.sin(az) * np.sin(2 * el) / 2
        R_cart = np.array([[r11, r12, r13], [r12, r22, r23], [r13, r23, r33]],
                          dtype=float)
    else:
        # converted measurement
        r, az = z[0], z[1]
        var_r, var_az = R[0, 0], R[1, 1]
        lamb_az = np.exp(-var_az / 2)
        z_cart = np.array(pol2cart(r, az), dtype=float) / lamb_az
        # converted covariance
        r11 = (r**2 + var_r) / 2 * (1 + lamb_az**4 * np.cos(2 * az)) + (1 / lamb_az**2 - 2) * (r * np.cos(az))**2
        r22 = (r**2 + var_r) / 2 * (1 - lamb_az**4 * np.cos(2 * az)) + (1 / lamb_az**2 - 2) * (r * np.sin(az))**2
        r12 = (r**2 + var_r) / 2 * lamb_az**4 * np.sin(2 * az) + (1 / lamb_az**2 - 2) * r**2 * np.sin(az) * np.cos(az)
        R_cart = np.array([[r11, r12], [r12, r22]], dtype=float)
    return z_cart, R_cart


def state_switch(state, type_in, type_out):
    '''
    Convert a state vector between the 'cv', 'ca' and 'ct' state layouts by
    inserting zero components or dropping components, keeping the shared
    position/velocity entries. Raises ValueError on unknown types.
    '''
    dim = len(state)
    state = state.copy()

    if type_in == 'cv':
        axis = dim // 2
        if type_out == 'cv':
            return state
        elif type_out == 'ca':
            # Insert zero acceleration components.
            ca_dim = 3 * axis
            sel = np.setdiff1d(range(ca_dim), range(2, ca_dim, 3))
            slct = np.eye(ca_dim)[:, sel]
            stmp = np.dot(slct, state)
            return stmp
        elif type_out == 'ct':
            # Append a zero turn-rate component.
            slct = np.eye(5, 4)
            if axis == 3:
                slct = lg.block_diag(slct, np.eye(2))
            stmp = np.dot(slct, state)
            return stmp
        else:
            raise ValueError('unknown output type: %s' % type_out)
    elif type_in == 'ca':
        axis = dim // 3
        if type_out == 'cv':
            # Drop the acceleration components.
            ca_dim = 3 * axis
            sel = np.setdiff1d(range(ca_dim), range(2, ca_dim, 3))
            slct = np.eye(ca_dim)[sel]
            stmp = np.dot(slct, state)
            return stmp
        elif type_out == 'ca':
            return state
        elif type_out == 'ct':
            # ca to cv
            ca_dim = 3 * axis
            sel = np.setdiff1d(range(ca_dim), range(2, ca_dim, 3))
            slct = np.eye(ca_dim)[sel]
            stmp = np.dot(slct, state)
            # cv to ct
            slct = np.eye(5, 4)
            if axis == 3:
                slct = lg.block_diag(slct, np.eye(2))
            stmp = np.dot(slct, stmp)
            return stmp
        else:
            raise ValueError('unknown output type: %s' % type_out)
    elif type_in == 'ct':
        axis = dim // 2
        if type_out == 'cv':
            # Drop the turn-rate component.
            slct = np.eye(4, 5)
            if axis == 3:
                slct = lg.block_diag(slct, np.eye(2))
            stmp = np.dot(slct, state)
            return stmp
        elif type_out == 'ca':
            # ct to cv
            slct = np.eye(4, 5)
            if axis == 3:
                slct = lg.block_diag(slct, np.eye(2))
            stmp = np.dot(slct, state)
            # cv to ca
            ca_dim = 3 * axis
            sel = np.setdiff1d(range(ca_dim), range(2, ca_dim, 3))
            slct = np.eye(ca_dim)[:, sel]
            stmp = np.dot(slct, stmp)
            return stmp
        elif type_out == 'ct':
            return state
        else:
            raise ValueError('unknown output type: %s' % type_out)
    else:
        raise ValueError('unknown input type: %s' % type_in)


def cov_switch(cov, type_in, type_out):
    '''
    Convert a covariance matrix between the 'cv', 'ca' and 'ct' layouts,
    mirroring state_switch. Components that are introduced (acceleration,
    turn rate) get a fixed variance of `uncertainty` since nothing is known
    about them. Raises ValueError on unknown types.
    '''
    dim = len(cov)
    cov = cov.copy()
    # Variance assigned to newly introduced state components.
    uncertainty = 100

    if type_in == 'cv':
        axis = dim // 2
        if type_out == 'cv':
            return cov
        elif type_out == 'ca':
            ca_dim = 3 * axis
            sel_diff = range(2, ca_dim, 3)
            sel = np.setdiff1d(range(ca_dim), sel_diff)
            slct = np.eye(ca_dim)[:, sel]
            ctmp = slct @ cov @ slct.T
            ctmp[sel_diff, sel_diff] = uncertainty
            return ctmp
        elif type_out == 'ct':
            slct = np.eye(5, 4)
            if axis == 3:
                slct = lg.block_diag(slct, np.eye(2))
            ctmp = slct @ cov @ slct.T
            ctmp[4, 4] = uncertainty
            return ctmp
        else:
            raise ValueError('unknown output type: %s' % type_out)
    elif type_in == 'ca':
        axis = dim // 3
        if type_out == 'cv':
            ca_dim = 3 * axis
            sel = np.setdiff1d(range(ca_dim), range(2, ca_dim, 3))
            slct = np.eye(ca_dim)[sel]
            ctmp = slct @ cov @ slct.T
            return ctmp
        elif type_out == 'ca':
            return cov
        elif type_out == 'ct':
            # ca to cv
            ca_dim = 3 * axis
            sel = np.setdiff1d(range(ca_dim), range(2, ca_dim, 3))
            slct = np.eye(ca_dim)[sel]
            ctmp = slct @ cov @ slct.T
            # cv to ct
            slct = np.eye(5, 4)
            if axis == 3:
                slct = lg.block_diag(slct, np.eye(2))
            ctmp = slct @ ctmp @ slct.T
            ctmp[4, 4] = uncertainty
            return ctmp
        else:
            raise ValueError('unknown output type: %s' % type_out)
    elif type_in == 'ct':
        axis = dim // 2
        if type_out == 'cv':
            slct = np.eye(4, 5)
            if axis == 3:
                slct = lg.block_diag(slct, np.eye(2))
            ctmp = slct @ cov @ slct.T
            return ctmp
        elif type_out == 'ca':
            # ct to cv
            slct = np.eye(4, 5)
            if axis == 3:
                slct = lg.block_diag(slct, np.eye(2))
            ctmp = slct @ cov @ slct.T
            # cv to ca
            ca_dim = 3 * axis
            sel_diff = range(2, ca_dim, 3)
            sel = np.setdiff1d(range(ca_dim), sel_diff)
            slct = np.eye(ca_dim)[:, sel]
            ctmp = slct @ ctmp @ slct.T
            ctmp[sel_diff, sel_diff] = uncertainty
            return ctmp
        elif type_out == 'ct':
            return cov
        else:
            raise ValueError('unknown output type: %s' % type_out)
    else:
        raise ValueError('unknown input type: %s' % type_in)


def model_switch(x, type_in, type_out):
    '''
    Dispatch a model-layout conversion: a 1-D ndarray is treated as a state
    vector, a 2-D ndarray as a covariance matrix, and any other indexable
    object as a (state, covariance) pair, converting both.
    '''
    # NOTE(review): `dim` is computed but never used below.
    dim = len(x)
    if isinstance(x, np.ndarray):
        if len(x.shape) == 1:
            state = state_switch(x, type_in, type_out)
            return state
        elif len(x.shape) == 2:
            cov = cov_switch(x, type_in, type_out)
            return cov
        else:
            raise ValueError("shape of 'x' must be 1 or 2")
    elif hasattr(x, '__getitem__'):
        state = state_switch(x[0], type_in, type_out)
        cov = cov_switch(x[1], type_in, type_out)
        return state, cov
    else:
        raise TypeError("error 'x' type: '%s'" % x.__class__.__name__)


def trajectory_cv(state, interval, length, velocity):
    '''
    Propagate a CV-layout state for `length` steps of duration `interval`.

    `velocity` may be a scalar speed (applied along the current velocity
    direction) or a per-axis list in which None keeps the current component.
    Returns the generated trajectory and the (re-initialized) start state.
    '''
    head = state.copy()
    dim = head.size
    order = 2
    axis = dim // order
    traj_cv = np.zeros((length, dim))
    vel = velocity
    cur_vel = head[1:dim:order]
    if isinstance(vel, numbers.Number):
        # Scalar speed: keep direction, rescale magnitude.
        vel *= (cur_vel / lg.norm(cur_vel))
    else:
        vel = [cur_vel[i] if vel[i] is None else vel[i] for i in range(axis)]
    cur_vel[:] = vel      # it will also change the head
    head_cv = head
    F = F_cv(axis, interval)
    for i in range(length):
        head = np.dot(F, head)
        traj_cv[i] = head
    return traj_cv, head_cv


def trajectory_ca(state, interval, length, acceleration):
    '''
    Propagate a CA-layout state for `length` steps of duration `interval`.

    `acceleration` may be a scalar (applied along the current velocity
    direction) or a per-axis list in which None keeps the current component.
    Returns the generated trajectory and the (re-initialized) start state.
    '''
    head = state.copy()
    dim = state.size
    order = 3
    axis = dim // order
    traj_ca = np.zeros((length, dim))
    acc = acceleration
    cur_vel = head[1:dim:order]
    cur_acc = head[2:dim:order]
    if isinstance(acc, numbers.Number):
        # Scalar acceleration: directed along the current velocity.
        acc *= (cur_vel / lg.norm(cur_vel))
    else:
        acc = [cur_acc[i] if acc[i] is None else acc[i] for i in range(axis)]
    cur_acc[:] = acc      # it will also change the head
    head_ca = head
    F = F_ca(axis, interval)
    for i in range(length):
        head = np.dot(F, head)
        traj_ca[i] = head
    return traj_ca, head_ca


def trajectory_ct(state, interval, length, turnrate, velocity=None):
    '''
    Propagate a CV-layout state through a coordinated turn of the given
    `turnrate` (degrees per time unit) for `length` steps of duration
    `interval`. Optional `velocity` re-initializes the speed as in
    trajectory_cv. Returns the trajectory and the start state.
    '''
    head = state.copy()
    dim = state.size
    order = 2
    axis = dim // order
    traj_ct = np.zeros((length, dim))
    if velocity is not None:
        vel = velocity
        cur_vel = head[1:dim:order]
        if isinstance(vel, numbers.Number):
            vel *= (cur_vel / lg.norm(cur_vel))
        else:
            vel = [cur_vel[i] if vel[i] is None else vel[i] for i in range(axis)]
        cur_vel[:] = vel
    head_ct = head
    F = F_ct(axis, turnrate, interval)
    for i in range(length):
        head = np.dot(F, head)
        traj_ct[i] = head
    return traj_ct, head_ct


def trajectory_generator(record):
    '''
    Generate 3D trajectories (states in CA layout) and their noisy
    position measurements from a declarative description, e.g.:

    record = {
        'interval': [1, 1],
        'start': [
            [0, 0, 0],
            [0, 5, 0]
        ],
        'pattern': [
            [
                {'model': 'cv', 'length': 100, 'velocity': [250, 250, 0]},
                {'model': 'ct', 'length': 25, 'turnrate': 30}
            ],
            [
                {'model': 'cv', 'length': 100, 'velocity': [250, 250, 0]},
                {'model': 'ct', 'length': 30, 'turnrate': 30, 'velocity': 30}
            ]
        ],
        'noise': [
            10 * np.eye(3),
            10 * np.eye(3)
        ],
        'pd': [
            0.9,
            0.9
        ],
        'entries': 2
    }

    Returns (trajs_state, trajs_meas), one entry per target.
    '''
    # Fixed 3-axis CA state layout; index helpers select sub-components.
    dim, order, axis = 9, 3, 3
    ca_sel = range(dim)
    acc_sel = range(2, dim, order)
    cv_sel = np.setdiff1d(ca_sel, acc_sel)
    # NOTE(review): ct_sel is identical to cv_sel; kept for readability of
    # the 'ct' branch below.
    ct_sel = np.setdiff1d(ca_sel, acc_sel)
    insert_sel = [2, 4, 6]

    interval = record['interval']
    start = record['start']
    pattern = record['pattern']
    noise = record['noise']
    entries = record['entries']

    trajs_state = []
    for i in range(entries):
        # Expand the start position into a full CA state (zero vel/acc).
        head = np.kron(start[i], [1., 0., 0.])
        state = np.kron(start[i], [1., 0., 0.]).reshape(1, -1)
        for pat in pattern[i]:
            if pat['model'] == 'cv':
                ret, head_cv = trajectory_cv(head[cv_sel], interval[i], pat['length'], pat['velocity'])
                # Re-insert zero acceleration columns into the CV output.
                ret = np.insert(ret, insert_sel, 0, axis=1)
                head = ret[-1, ca_sel]
                state[-1, acc_sel] = 0      # set the acceleration of previous state to zero
                state[-1, cv_sel] = head_cv     # change the velocity of previous state
                state = np.vstack((state, ret))
            elif pat['model'] == 'ca':
                ret, head_ca = trajectory_ca(head, interval[i], pat['length'], pat['acceleration'])
                head = ret[-1, ca_sel]
                state[-1, ca_sel] = head_ca     # change the acceleration of previous state
                state = np.vstack((state, ret))
            elif pat['model'] == 'ct':
                if 'velocity' in pat:
                    ret, head_ct = trajectory_ct(head[ct_sel], interval[i], pat['length'], pat['turnrate'], pat['velocity'])
                else:
                    ret, head_ct = trajectory_ct(head[ct_sel], interval[i], pat['length'], pat['turnrate'])
                ret = np.insert(ret, insert_sel, 0, axis=1)
                head = ret[-1, ca_sel]
                state[-1, acc_sel] = 0
                state[-1, ct_sel] = head_ct
                state = np.vstack((state, ret))
            else:
                raise ValueError('invalid model')
        trajs_state.append(state)

    # add noise
    trajs_meas = []
    for i in range(entries):
        H = H_ca(axis)
        traj_len = trajs_state[i].shape[0]
        noi = st.multivariate_normal.rvs(cov=noise[i], size=traj_len)
        trajs_meas.append(np.dot(trajs_state[i], H.T) + noi)

    return trajs_state, trajs_meas


def trajectory_with_pd(trajs_meas, pd=0.8):
    '''
    Simulate missed detections: each measurement survives with probability
    `pd`; missed rows are overwritten with NaN in place. Returns the same
    list of (mutated) arrays.
    '''
    for traj in trajs_meas:
        traj_len = traj.shape[0]
        remove_idx = st.uniform.rvs(size=traj_len) >= pd
        traj[remove_idx] = np.nan
    return trajs_meas


def trajectory_to_meas(trajs_meas, lamb=0):
    '''
    Re-organize per-target measurement arrays into per-scan measurement
    lists, skipping NaN (missed) rows and adding Poisson(lamb)-distributed
    clutter points drawn uniformly over the measurements' bounding box.
    Returns a list (one entry per scan) of (k, 3) arrays.
    '''
    trajs_num = len(trajs_meas)
    # Bounding box of all measurements, used as the clutter region.
    min_x, max_x = np.inf, -np.inf
    min_y, max_y = np.inf, -np.inf
    min_z, max_z = np.inf, -np.inf
    max_traj_len = 0
    for traj in trajs_meas:
        min_x, max_x = min(min_x, traj[:, 0].min()), max(max_x, traj[:, 0].max())
        min_y, max_y = min(min_y, traj[:, 1].min()), max(max_y, traj[:, 1].max())
        min_z, max_z = min(min_z, traj[:, 2].min()), max(max_z, traj[:, 2].max())
        max_traj_len = max(max_traj_len, len(traj))

    trajs = []
    for i in range(max_traj_len):
        tmp = []
        for j in range(trajs_num):
            # Skip targets that have ended or were not detected this scan.
            if i >= len(trajs_meas[j]) or np.any(np.isnan(trajs_meas[j][i])):
                continue
            tmp.append(trajs_meas[j][i])
        clutter_num = st.poisson.rvs(lamb)
        for j in range(clutter_num):
            x = np.random.uniform(min_x, max_x)
            y = np.random.uniform(min_y, max_y)
            z = np.random.uniform(min_z, max_z)
            tmp.append(np.array([x, y, z], dtype=float))
        tmp = np.array(tmp, dtype=float).reshape(-1, 3)
        trajs.append(tmp)
    return trajs
32,864
12,303
# Generated by Django 3.2.4 on 2021-06-19 08:48 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('orders', '0004_auto_20210619_0847'), ] operations = [ migrations.AlterField( model_name='dinner_platters', name='Tops', field=models.ManyToManyField(related_name='DPAddons', to='orders.Topping'), ), migrations.AlterField( model_name='regular_pizza', name='toppings', field=models.ManyToManyField(blank=True, related_name='rpAddons', to='orders.Topping'), ), migrations.AlterField( model_name='sicilian_pizza', name='Tops', field=models.ManyToManyField(blank=True, related_name='spAddons', to='orders.Topping'), ), ]
848
280
""" Orchestrate experiment execution. """ import typing as tp import attr from benchbuild.experiment import Experiment from benchbuild.project import Project from benchbuild.utils import actions, tasks ExperimentCls = tp.Type[Experiment] Experiments = tp.List[ExperimentCls] ProjectCls = tp.Type[Project] Projects = tp.List[ProjectCls] ExperimentProject = tp.Tuple[ExperimentCls, ProjectCls] Actions = tp.Sequence[actions.Step] StepResults = tp.List[actions.StepResult] @attr.s class Experimentator: experiments: Experiments = attr.ib() projects: Projects = attr.ib() _plan: tp.Sequence[actions.Step] = attr.ib(init=False, default=None) def plan(self) -> Actions: if not self._plan: self._plan = tasks.generate_plan(self.experiments, self.projects) return self._plan @property def num_actions(self) -> int: p = self.plan() return sum([len(child) for child in p]) def start(self) -> StepResults: p = self.plan() # Prepare project environment. return tasks.execute_plan(p) def print_plan(self) -> None: p = self.plan() print("Number of actions to execute: {}".format(self.num_actions)) print(*p)
1,229
394
""" Mask R-CNN Dataset functions and classes. Copyright (c) 2017 Matterport, Inc. Licensed under the MIT License (see LICENSE for details) Written by Waleed Abdulla """ import numpy as np import tensorflow as tf import keras.backend as KB import keras.layers as KL import keras.initializers as KI import keras.engine as KE import mrcnn.utils as utils from mrcnn.loss import smooth_l1_loss import pprint pp = pprint.PrettyPrinter(indent=2, width=100) ##----------------------------------------------------------------------- ## FCN loss ##----------------------------------------------------------------------- def fcn_loss_graph(target_masks, pred_masks): # def fcn_loss_graph(input): # target_masks, pred_masks = input """Mask binary cross-entropy loss for the masks head. target_masks: [batch, height, width, num_classes]. pred_masks: [batch, height, width, num_classes] float32 tensor """ # Reshape for simplicity. Merge first two dimensions into one. print('\n fcn_loss_graph ' ) print(' target_masks shape :', target_masks.get_shape()) print(' pred_masks shape :', pred_masks.get_shape()) mask_shape = tf.shape(target_masks) print(' mask_shape shape :', mask_shape.shape) target_masks = KB.reshape(target_masks, (-1, mask_shape[1], mask_shape[2])) print(' target_masks shape :', target_masks.shape) pred_shape = tf.shape(pred_masks) print(' pred_shape shape :', pred_shape.shape) pred_masks = KB.reshape(pred_masks, (-1, pred_shape[1], pred_shape[2])) print(' pred_masks shape :', pred_masks.get_shape()) # Compute binary cross entropy. If no positive ROIs, then return 0. 
# shape: [batch, roi, num_classes] # Smooth-L1 Loss loss = KB.switch(tf.size(target_masks) > 0, smooth_l1_loss(y_true=target_masks, y_pred=pred_masks), tf.constant(0.0)) loss = KB.mean(loss) loss = KB.reshape(loss, [1, 1]) print(' loss type is :', type(loss)) return loss ##----------------------------------------------------------------------- ## FCN loss for L2 Normalized graph ##----------------------------------------------------------------------- def fcn_norm_loss_graph(target_masks, pred_masks): ''' Mask binary cross-entropy loss for the masks head. target_masks: [batch, height, width, num_classes]. pred_masks: [batch, height, width, num_classes] float32 tensor ''' print(type(target_masks)) pp.pprint(dir(target_masks)) # Reshape for simplicity. Merge first two dimensions into one. print('\n fcn_norm_loss_graph ' ) print(' target_masks shape :', target_masks.shape) print(' pred_masks shape :', pred_masks.shape) print('\n L2 normalization ------------------------------------------------------') output_shape=KB.int_shape(pred_masks) print(' output shape is :' , output_shape, ' ', pred_masks.get_shape(), pred_masks.shape, tf.shape(pred_masks)) output_flatten = KB.reshape(pred_masks, (pred_masks.shape[0], -1, pred_masks.shape[-1]) ) output_norm1 = KB.l2_normalize(output_flatten, axis = 1) output_norm = KB.reshape(output_norm1, KB.shape(pred_masks) ) print(' output_flatten : ', KB.int_shape(output_flatten) , ' Keras tensor ', KB.is_keras_tensor(output_flatten) ) print(' output_norm1 : ', KB.int_shape(output_norm1) , ' Keras tensor ', KB.is_keras_tensor(output_norm1) ) print(' output_norm final : ', KB.int_shape(output_norm) , ' Keras tensor ', KB.is_keras_tensor(output_norm) ) pred_masks1 = output_norm print('\n L2 normalization ------------------------------------------------------') gauss_flatten = KB.reshape(target_masks, (target_masks.shape[0], -1, target_masks.shape[-1]) ) gauss_norm1 = KB.l2_normalize(gauss_flatten, axis = 1) gauss_norm = 
KB.reshape(gauss_norm1, KB.shape(target_masks)) print(' guass_flatten : ', KB.int_shape(gauss_flatten), 'Keras tensor ', KB.is_keras_tensor(gauss_flatten) ) print(' gauss_norm shape : ', KB.int_shape(gauss_norm1) , 'Keras tensor ', KB.is_keras_tensor(gauss_norm1) ) print(' gauss_norm final shape: ', KB.int_shape(gauss_norm) , 'Keras tensor ', KB.is_keras_tensor(gauss_norm) ) print(' complete') target_masks1 = gauss_norm mask_shape = tf.shape(target_masks1) print(' mask_shape shape :', mask_shape.shape) target_masks1 = KB.reshape(target_masks1, (-1, mask_shape[1], mask_shape[2])) print(' target_masks shape :', target_masks1.shape) pred_shape = tf.shape(pred_masks1) print(' pred_shape shape :', pred_shape.shape) pred_masks1 = KB.reshape(pred_masks1, (-1, pred_shape[1], pred_shape[2])) print(' pred_masks shape :', pred_masks1.get_shape()) # Compute binary cross entropy. If no positive ROIs, then return 0. # shape: [batch, roi, num_classes] # Smooth-L1 Loss loss = KB.switch(tf.size(target_masks1) > 0, smooth_l1_loss(y_true=target_masks1, y_pred=pred_masks1), tf.constant(0.0)) loss = KB.mean(loss) loss = KB.reshape(loss, [1, 1]) print(' loss type is :', type(loss)) return loss class FCNLossLayer(KE.Layer): """ Returns: ------- """ def __init__(self, config=None, **kwargs): super().__init__(**kwargs) print('>>> FCN Loss Layer : initialization') self.config = config def call(self, inputs): print('\n FCN Loss Layer : call') print(' target_masks .shape/type :', inputs[0].shape) # , type(inputs[0])) print(' pred_masks shape/type :', inputs[1].shape) # , type(inputs[1])) target_masks = inputs[0] pred_masks = inputs[1] loss = KB.placeholder(shape=(1), dtype = 'float32', name = 'fcn_loss') norm_loss = KB.placeholder(shape=(1), dtype = 'float32', name = 'fcn_norm_loss') loss = fcn_loss_graph(target_masks, pred_masks) norm_loss = fcn_norm_loss_graph(target_masks, pred_masks) return [loss, norm_loss] def compute_output_shape(self, input_shape): # may need to change dimensions of 
first return from IMAGE_SHAPE to MAX_DIM return [(1), (1)]
6,751
2,288
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns


def plot_well_map(df_logs, fig_size=(10, 10)):
    """Simple map of locations of nearby wells.

    df_logs: DataFrame with at least 'HACKANAME', 'X', 'Y' columns
             (one row per log sample; duplicates per well are dropped).
    Returns the (figure, axes) pair.
    """
    f, ax = plt.subplots(figsize=fig_size)
    df = df_logs.drop_duplicates(subset=['HACKANAME', 'X', 'Y'])
    plt.scatter(df['X'], df['Y'])
    plt.axis('scaled')
    # Label each well next to its marker.
    for label, x, y in zip(df['HACKANAME'], df['X'], df['Y']):
        plt.annotate(label, xy=(x, y), xytext=(-10, 10),
                     textcoords='offset points')
    return f, ax


def make_log_plot(df_logs, well_name, cols=('GR', 'DT', 'CALI'),
                  ztop=None, zbot=None, fig_size=(8, 12)):
    """Single-well log plot, one track per curve in *cols*.

    Bug fix: depth limits are now compared with `is None` -- previously a
    depth of 0 (sea level in TVDSS terms) was treated as "not supplied".
    The default for *cols* is now a tuple to avoid a mutable default.

    df_logs:   DataFrame with 'HACKANAME', 'TVDSS' and the curve columns.
    well_name: well to select (matched against 'HACKANAME').
    cols:      curve names, one subplot each (assumes len(cols) > 1 so
               plt.subplots returns an array of axes -- TODO confirm).
    ztop/zbot: optional depth window; defaults to the well's full range.
    """
    logs = df_logs[df_logs['HACKANAME'] == well_name]
    logs = logs.sort_values(by='TVDSS')
    if ztop is None:
        ztop = logs.TVDSS.min()
    if zbot is None:
        zbot = logs.TVDSS.max()

    f, ax = plt.subplots(nrows=1, ncols=len(cols), figsize=fig_size)
    for i, log_name in enumerate(cols):
        ax[i].scatter(logs[log_name], logs['TVDSS'], marker='+')
        ax[i].set_xlabel(log_name)
        ax[i].set_ylim(ztop, zbot)
        ax[i].invert_yaxis()  # depth increases downwards
        ax[i].grid()
        ax[i].locator_params(axis='x', nbins=3)
        if i > 0:
            ax[i].set_yticklabels([])

    f.suptitle('Well: {}'.format(well_name), fontsize=14, y=0.94)
    return f, ax


def add_predictions(ax, predictions):
    """Add predicted bands onto plt axes.

    predictions: DataFrame with 'value', 'TVDSS' and 'model_name' columns;
                 presumably model_name contains 'high'/'low' bound rows --
                 TODO confirm against the caller.
    """
    # Scatter plot of the point predictions.
    ax.scatter(predictions['value'], predictions['TVDSS'], marker='+')
    # Shaded band between the 'low' and 'high' model outputs.
    tvds = predictions[predictions.model_name == 'high']['TVDSS']
    x_hi = predictions[predictions.model_name == 'high']['value']
    x_lo = predictions[predictions.model_name == 'low']['value']
    ax.fill(np.concatenate([x_lo, x_hi[::-1]]),
            np.concatenate([tvds, tvds[::-1]]), alpha=0.5)
2,150
847
#!/usr/bin/env python
"""
cc_plugin_eustace.eustace_global_attrs

Compliance Test Suite: Check core global attributes in EUSTACE files
"""

import os

from netCDF4 import Dataset

# Import base objects from compliance checker
from compliance_checker.base import Result, BaseNCCheck, GenericFile

# Restrict which vocabs will load (for efficiency)
os.environ["ESSV_VOCABS_ACTIVE"] = "eustace-team"

# Import checklib
import checklib.register.nc_file_checks_register as check_package


class EUSTACEGlobalAttrsCheck(BaseNCCheck):
    """Checks EUSTACE global attributes against regex/vocabulary rules.

    Each public ``check_crXX`` method is one rule; the repeated check
    construction is factored into the private helpers below.
    """

    register_checker = True
    name = 'eustace-global-attrs'
    _cc_spec = 'eustace-global-attrs'
    _cc_spec_version = '0.2'
    supported_ds = [GenericFile, Dataset]

    _cc_display_headers = {
        3: 'Required',
        2: 'Recommended',
        1: 'Suggested'
    }

    def setup(self, ds):
        pass

    def _regex_check(self, ds, regex, attribute, level):
        """Run a GlobalAttrRegexCheck of *attribute* against *regex*."""
        return check_package.GlobalAttrRegexCheck(
            kwargs={'regex': regex, 'attribute': attribute},
            level=level,
            vocabulary_ref="")(ds)

    def _vocab_check(self, ds, attribute, level):
        """Run a GlobalAttrVocabCheck of *attribute* against the EUSTACE vocab."""
        return check_package.GlobalAttrVocabCheck(
            kwargs={'attribute': attribute, 'vocab_lookup': 'canonical_name'},
            level=level,
            vocabulary_ref="eustace-team:eustace")(ds)

    def check_cr01(self, ds):
        # File name must encode institution/realm/frequency, '_'-delimited.
        return check_package.ValidGlobalAttrsMatchFileNameCheck(
            kwargs={'delimiter': '_',
                    'order': 'institution_id,realm,frequency',
                    'extension': '.nc'},
            level="HIGH",
            vocabulary_ref="eustace-team:eustace")(ds)

    def check_cr02(self, ds):
        return self._regex_check(ds, r'CF-1\.6', 'Conventions', "HIGH")

    def check_cr03(self, ds):
        return self._regex_check(ds, '.{4,}', 'source', "HIGH")

    def check_cr04(self, ds):
        return self._regex_check(ds, 'EUSTACE', 'project_id', "HIGH")

    def check_cr05(self, ds):
        return self._regex_check(ds, '.{4,}', 'contact', "HIGH")

    def check_cr06(self, ds):
        return self._regex_check(ds, '.{4,}', 'history', "MEDIUM")

    def check_cr07(self, ds):
        return self._regex_check(ds, '.{4,}', 'references', "MEDIUM")

    def check_cr08(self, ds):
        return self._regex_check(ds, '.{1,}', 'product_version', "HIGH")

    def check_cr09(self, ds):
        return self._regex_check(ds, '.{4,}', 'title', "HIGH")

    def check_cr10(self, ds):
        return self._regex_check(ds, '.{20,}', 'summary', "HIGH")

    def check_cr11(self, ds):
        return self._regex_check(ds, '.{4,}', 'creator_name', "HIGH")

    def check_cr12(self, ds):
        return self._regex_check(ds, r'.+@.+\..+', 'creator_email', "HIGH")

    def check_cr13(self, ds):
        return self._vocab_check(ds, 'frequency', "LOW")

    def check_cr14(self, ds):
        return self._vocab_check(ds, 'institution_id', "HIGH")

    def check_cr15(self, ds):
        # ISO 8601-style timestamp, e.g. 2017-01-01T00:00:00.
        return self._regex_check(
            ds, r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.*',
            'creation_date', "MEDIUM")
5,244
1,478
import unittest

import translator


class TestTranslator(unittest.TestCase):
    """Round-trip sanity checks for the English<->French translator."""

    def test_one_e2f(self):
        # (Local variable typo "respopnse" fixed during restyle.)
        response = translator.english_to_french('IBM Translator')
        self.assertEqual(response, 'Traducteur IBM')

    def test_un_f2e(self):
        self.assertEqual(
            translator.french_to_english('Traducteur IBM'),
            'IBM Translator')

    def test_hello_2f(self):
        self.assertEqual(translator.english_to_french('Hello'), 'Bonjour')

    def test_bonjour_2e(self):
        self.assertEqual(translator.french_to_english('Bonjour'), 'Hello')


if __name__ == "__main__":
    unittest.main()
705
261
class Student:
    """Tiny demo class contrasting class and instance variables."""

    # Class variable: one binding shared by every Student.
    school_name = 'ABC School'

    def __init__(self, name, age):
        # Instance variables: unique per object.
        self.name = name
        self.age = age


s1 = Student("Harry", 12)

# Read the instance variables.
print('Student:', s1.name, s1.age)
# Read the class variable via the class itself.
print('School name:', Student.school_name)

# Rebind the instance variables on this one object.
s1.name = 'Jessa'
s1.age = 14
print('Student:', s1.name, s1.age)

# Rebind the class variable -- the change is visible to all instances.
Student.school_name = 'XYZ School'
print('School name:', Student.school_name)
556
187
# -*- coding: utf-8 -*- from . import base from .audit_log import * from .channel import * from .emoji import * from .gateway import * from .guild import * from .invite import * from .oauth import * from .user import * from .voice import * from .webhook import *
264
88
#!/usr/bin/python # -*- coding: utf-8 -*- import os import datetime import hamster.client import reports import argparse import pdfkit import gettext gettext.install('brainz', '../datas/translations/') # custom settings: reportTitle = "My Activities Report" activityFilter = "unfiled" def valid_date(s): try: return datetime.datetime.strptime(s, "%Y-%m-%d").date() except ValueError: msg = "Not a valid date: '{0}'.".format(s) raise argparse.ArgumentTypeError(msg) # find dates: today = datetime.date.today() first = today.replace(day=1) previousLast = first - datetime.timedelta(days=1) previousFirst = previousLast.replace(day=1) # assign arguments: parser = argparse.ArgumentParser(description="export the hamster database to pdf") parser.add_argument("--thismonth", action="store_true", help="export this month's records") parser.add_argument("--lastmonth", action="store_true", help="export last month's records") parser.add_argument("-s", dest="startDate", default=today, help="start date (default: today)", type=valid_date) parser.add_argument("-e", dest="endDate", default=today, help="end date (default: today)", type=valid_date) parser.add_argument("-o", dest="reportFile", default="report.pdf", help="output file (default: report.pdf)") # parse arguments: args = parser.parse_args() if args.thismonth: args.startDate = first args.endDate = today if args.lastmonth: args.startDate = previousFirst args.endDate = previousLast # prepare filenames: htmlFilename = os.path.splitext(args.reportFile)[0]+".html" pdfFilename = os.path.splitext(args.reportFile)[0]+".pdf" storage = hamster.client.Storage() facts = storage.get_facts(args.startDate, args.endDate) # generate report reports.simple(facts, args.startDate, args.endDate, htmlFilename) # convert .html to .pdf file: pdfkit.from_file(htmlFilename, pdfFilename)
1,878
610
from functools import wraps

# created by PL
# git hello world


def single_ton(cls):
    """Class decorator: construct each decorated class at most once.

    The first call's arguments win; later calls return the cached instance.
    """
    _instances = {}

    @wraps(cls)
    def wrapper(*args, **kwargs):
        # EAFP: only build the instance when the cache misses.
        try:
            return _instances[cls]
        except KeyError:
            obj = _instances[cls] = cls(*args, **kwargs)
            return obj

    return wrapper


@single_ton
class SingleTon(object):
    val = 123

    def __init__(self, a):
        self.a = a


if __name__ == '__main__':
    s = SingleTon(1)
    t = SingleTon(2)
    print(s is t)
    print(s.a, t.a)
    print(s.val, t.val)
    print('test')
    print("git test")
589
231
# Custo da viagem (trip cost): R$0.50 per km up to 200 km,
# discounted to R$0.45 per km for longer trips.
distancia = float(input('Qual a distância da sua viagem? '))

# Bug fix: the message reported the distance in "Km/h" (a speed unit);
# a trip length is measured in Km.
print('Você está prestes a começar uma viagem de {}Km.'.format(distancia))

# Choose the applicable fare once instead of precomputing both.
preco = distancia * 0.50 if distancia <= 200 else distancia * 0.45
print('O preço de sua passagem será de R${:.2f}.'.format(preco))
374
150
"""Difference classes.""" __all__ = [ 'BaseDifference', 'Missing', 'Extra', 'Invalid', 'Deviation', ] from cmath import isnan from datetime import timedelta from ._compatibility.builtins import * from ._compatibility import abc from ._compatibility.contextlib import suppress from ._utils import _make_token from ._utils import pretty_timedelta_repr NOVALUE = _make_token( 'NoValueType', '<no value>', 'Token to mark when a value does not exist.', truthy=False, ) NANTOKEN = _make_token( 'NanTokenType', '<nan token>', 'Token for comparing differences that contain not-a-number values.', ) def _nan_to_token(value): """Return NANTOKEN if *value* is NaN else return value unchanged.""" def func(x): with suppress(TypeError): if isnan(x): return NANTOKEN return x if isinstance(value, tuple): return tuple(func(x) for x in value) return func(value) def _safe_isnan(x): """Wrapper for isnan() so it won't fail on non-numeric values.""" try: return isnan(x) except TypeError: return False class BaseDifference(abc.ABC): """The base class for "difference" objects---all other difference classes are derived from this base. """ __slots__ = () @property @abc.abstractmethod def args(self): """The tuple of arguments given to the difference constructor. Some difference (like :class:`Deviation`) expect a certain number of arguments and assign a special meaning to the elements of this tuple, while others are called with only a single value. """ # Concrete method should return tuple of args used in __init__(). raise NotImplementedError def __eq__(self, other): if self.__class__ != other.__class__: return False self_args = tuple(_nan_to_token(x) for x in self.args) other_args = tuple(_nan_to_token(x) for x in other.args) return self_args == other_args def __ne__(self, other): # <- For Python 2.x support. There is return not self.__eq__(other) # no implicit relationship between # __eq__() and __ne__() in Python 2. 
def __hash__(self): try: return hash((self.__class__, self.args)) except TypeError as err: msg = '{0} in args tuple {1!r}'.format(str(err), self.args) hashfail = TypeError(msg) hashfail.__cause__ = getattr(err, '__cause__', None) # getattr for 2.x support raise hashfail def __repr__(self): cls_name = self.__class__.__name__ args_repr = ', '.join( getattr(x, '__name__', repr(x)) for x in self.args) return '{0}({1})'.format(cls_name, args_repr) class Missing(BaseDifference): """Created when *value* is missing from the data under test. In the following example, the required value ``'A'`` is missing from the data under test:: data = ['B', 'C'] requirement = {'A', 'B', 'C'} datatest.validate(data, requirement) Running this example raises the following error: .. code-block:: none :emphasize-lines: 2 ValidationError: does not satisfy set membership (1 difference): [ Missing('A'), ] """ __slots__ = ('_args',) def __init__(self, value): self._args = (value,) @property def args(self): return self._args class Extra(BaseDifference): """Created when *value* is unexpectedly found in the data under test. In the following example, the value ``'C'`` is found in the data under test but it's not part of the required values:: data = ['A', 'B', 'C'] requirement = {'A', 'B'} datatest.validate(data, requirement) Running this example raises the following error: .. code-block:: none :emphasize-lines: 2 ValidationError: does not satisfy set membership (1 difference): [ Extra('C'), ] """ __slots__ = ('_args',) def __init__(self, value): self._args = (value,) @property def args(self): return self._args class Invalid(BaseDifference): """Created when a value does not satisfy a function, equality, or regular expression requirement. In the following example, the value ``9`` does not satisfy the required function:: data = [2, 4, 6, 9] def is_even(x): return x % 2 == 0 datatest.validate(data, is_even) Running this example raises the following error: .. 
code-block:: none :emphasize-lines: 2 ValidationError: does not satisfy is_even() (1 difference): [ Invalid(9), ] """ __slots__ = ('_invalid', '_expected') def __init__(self, invalid, expected=NOVALUE): try: is_equal = invalid == expected except TypeError: is_equal = False if is_equal: msg = 'expects unequal values, got {0!r} and {1!r}' raise ValueError(msg.format(invalid, expected)) self._invalid = invalid self._expected = expected @property def args(self): if self._expected is NOVALUE: return (self._invalid,) return (self._invalid, self._expected) @property def invalid(self): """The invalid value under test.""" return self._invalid @property def expected(self): """The expected value (optional).""" return self._expected def __repr__(self): cls_name = self.__class__.__name__ invalid_repr = getattr(self._invalid, '__name__', repr(self._invalid)) if self._expected is not NOVALUE: expected_repr = ', expected={0}'.format( getattr(self._expected, '__name__', repr(self._expected))) else: expected_repr = '' return '{0}({1}{2})'.format(cls_name, invalid_repr, expected_repr) def _slice_datetime_repr_prefix(obj_repr): """Takes a default "datetime", "date", or "timedelta" repr and returns it with the module prefix sliced-off:: >>> _slice_datetime_repr_prefix('datetime.date(2020, 12, 25)') 'date(2020, 12, 25)' """ # The following implementation (using "startswith" and "[9:]") # may look clumsy but it can run up to 10 times faster than a # more concise "re.compile()" and "regex.sub()" approach. In # some situations, this function can get called many, many # times. DON'T GET CLEVER--KEEP THIS FUNCTION FAST. if obj_repr.startswith('datetime.datetime(') \ or obj_repr.startswith('datetime.date(') \ or obj_repr.startswith('datetime.timedelta('): return obj_repr[9:] return obj_repr class Deviation(BaseDifference): """Created when a quantative value deviates from its expected value. 
In the following example, the dictionary item ``'C': 33`` does not satisfy the required item ``'C': 30``:: data = {'A': 10, 'B': 20, 'C': 33} requirement = {'A': 10, 'B': 20, 'C': 30} datatest.validate(data, requirement) Running this example raises the following error: .. code-block:: none :emphasize-lines: 2 ValidationError: does not satisfy mapping requirement (1 difference): { 'C': Deviation(+3, 30), } """ __slots__ = ('_deviation', '_expected') def __init__(self, deviation, expected): try: if deviation + expected == expected: msg = 'deviation quantity must not be empty, got {0!r}' exc = ValueError(msg.format(deviation)) raise exc except TypeError: msg = ('Deviation arguments must be quantitative, ' 'got deviation={0!r}, expected={1!r}') exc = TypeError(msg.format(deviation, expected)) exc.__cause__ = None raise exc self._deviation = deviation self._expected = expected @property def args(self): return (self._deviation, self._expected) @property def deviation(self): """Quantative deviation from expected value.""" return self._deviation @property def expected(self): """The expected value.""" return self._expected def __repr__(self): cls_name = self.__class__.__name__ deviation = self._deviation if _safe_isnan(deviation): deviation_repr = "float('nan')" elif isinstance(deviation, timedelta): deviation_repr = pretty_timedelta_repr(deviation) else: try: deviation_repr = '{0:+}'.format(deviation) # Apply +/- sign except (TypeError, ValueError): deviation_repr = repr(deviation) expected = self._expected if _safe_isnan(expected): expected_repr = "float('nan')" else: expected_repr = repr(expected) if expected_repr.startswith('datetime.'): expected_repr = _slice_datetime_repr_prefix(expected_repr) return '{0}({1}, {2})'.format(cls_name, deviation_repr, expected_repr) def _make_difference(actual, expected, show_expected=True): """Returns an appropriate difference for *actual* and *expected* values that are known to be unequal. 
Setting *show_expected* to False, signals that the *expected* argument should be omitted when creating an Invalid difference (this is useful for reducing duplication when validating data against a single function or object). """ if actual is NOVALUE: return Missing(expected) if expected is NOVALUE: return Extra(actual) if isinstance(expected, bool) or isinstance(actual, bool): if show_expected: return Invalid(actual, expected) return Invalid(actual) try: deviation = actual - expected return Deviation(deviation, expected) except (TypeError, ValueError): if show_expected: return Invalid(actual, expected) return Invalid(actual)
10,254
2,991
from django.conf.urls import include, url, re_path
from rest_framework import routers

from . import views

# DRY improvement: the slug character class was repeated in every one of
# the ~25 routes below. Build each pattern from shared fragments so it is
# defined exactly once; string concatenation yields byte-identical regexes.
SLUG = r"(?P<slug>[a-zA-Z0-9-_]+)"
API_PREFIX = r"^api/dashboard/" + SLUG + r"/"

urlpatterns = [
    re_path(API_PREFIX + r"versiontime", views.get_last_processed_time),
    re_path(API_PREFIX + r"players", views.get_player_list),
    re_path(API_PREFIX + r"sessions", views.get_player_to_session_map),
    re_path(API_PREFIX + r"puzzles", views.get_puzzles),
    re_path(API_PREFIX + r"puzzlekeys", views.get_puzzle_keys),
    re_path(API_PREFIX + r"snapshotsperpuzzle", views.get_snapshot_metrics),
    re_path(API_PREFIX + r"attempted", views.get_attempted_puzzles),
    re_path(API_PREFIX + r"completed", views.get_completed_puzzles),
    re_path(API_PREFIX + r"timeperpuzzle", views.get_time_per_puzzle),
    re_path(API_PREFIX + r"funnelperpuzzle", views.get_funnel_per_puzzle),
    re_path(API_PREFIX + r"shapesperpuzzle", views.get_shapes_per_puzzle),
    re_path(API_PREFIX + r"modesperpuzzle", views.get_modes_per_puzzle),
    re_path(API_PREFIX + r"levelsofactivity", views.get_levels_of_activity),
    re_path(API_PREFIX + r"sequencebetweenpuzzles", views.get_sequence_between_puzzles),
    re_path(API_PREFIX + r"mloutliers", views.get_machine_learning_outliers),
    re_path(API_PREFIX + r"persistence", views.get_persistence_by_attempt_data),
    re_path(API_PREFIX + r"puzzlepersistence", views.get_persistence_by_puzzle_data),
    re_path(API_PREFIX + r"insights", views.get_insights),
    re_path(API_PREFIX + r"difficulty", views.get_puzzle_difficulty_mapping),
    re_path(API_PREFIX + r"misconceptions", views.get_misconceptions_data),
    re_path(API_PREFIX + r"competency", views.get_competency_data),
    # NOTE: the dated report route must stay before the bare "report" route
    # so the more specific pattern matches first.
    re_path(API_PREFIX + r"report/(?P<start>[0-9]+)/(?P<end>[0-9]+)", views.get_report_summary),
    re_path(API_PREFIX + r"report", views.get_report_summary),
    re_path(API_PREFIX + r"(?P<player>[a-zA-Z0-9-_.]+)/(?P<level>[a-zA-Z0-9-_.]+)/replayurls", views.get_replay_urls),

    # Non-API dashboard pages.
    re_path(r"^" + SLUG + r"/dashboard/", views.dashboard),
    re_path(r"^" + SLUG + r"/thesisdashboard/", views.thesis_dashboard)
]
2,735
1,273
from typing import Any, Dict, List, Sequence

import numpy as np
import torch

from detectron2.engine import DefaultPredictor


class SiamPredictor(DefaultPredictor):
    """DefaultPredictor variant that feeds the model (image, reference-crop)
    pairs instead of single images."""

    def __call__(
        self,
        original_images: Sequence[np.ndarray],
        visual_crops: Sequence[np.ndarray],
    ) -> List[Dict[str, Any]]:
        """
        Args:
            original_images (np.ndarray): a list of images of shape (H, W, C)
                (in BGR order).
            visual_crops (np.ndarray): a list of images of shape (H, W, C)
                (in BGR order)

        Returns:
            predictions (list[dict]): the output of the model for a list of
                images. See :doc:`/tutorials/models` for details about the
                format.
        """

        def as_model_input(img: np.ndarray, crop: np.ndarray) -> Dict[str, Any]:
            """Preprocess one (image, crop) pair into the model's input dict."""
            # Flip BGR -> RGB when the model expects RGB inputs.
            if self.input_format == "RGB":
                img = img[:, :, ::-1]
                crop = crop[:, :, ::-1]
            h, w = img.shape[:2]
            resized = self.aug.get_transform(img).apply_image(img)
            return {
                "image": torch.as_tensor(resized.astype("float32").transpose(2, 0, 1)),
                "height": h,
                "width": w,
                "reference": torch.as_tensor(crop.astype("float32").transpose(2, 0, 1)),
            }

        # https://github.com/sphinx-doc/sphinx/issues/4258
        with torch.no_grad():
            batch = [
                as_model_input(img, crop)
                for img, crop in zip(original_images, visual_crops)
            ]
            return self.model(batch)
1,945
535
from user_agent2.base import ( generate_user_agent, generate_navigator, generate_navigator_js, )
109
37
import glob
import random
import uuid
import numpy as np
from multiprocessing import Pool

from sklearn.metrics import (
    recall_score, precision_score, accuracy_score, f1_score, mean_squared_error)

from mutagene.io.profile import read_profile_file, write_profile, read_signatures
from mutagene.signatures.identify import NegLogLik
from mutagene.benchmark.deconstructsigs import deconstruct_sigs_custom
from mutagene.benchmark.generate_benchmark import *
# from mutagene.identify import decompose_mutational_profile_counts
# NOTE(review): `decompose_mutational_profile_counts`, `write_decomposition`,
# `read_decomposition` and `isfile` are used below but not imported explicitly;
# presumably they come in via the star import above -- TODO confirm against
# mutagene.benchmark.generate_benchmark.


def multiple_benchmark_helper(j):
    # Generate one batch of synthetic benchmark samples.  `j` is only the worker
    # index supplied by Pool.map and is otherwise unused.  For each reference
    # signature set size i: draw a sparse random exposure vector h0, sample a
    # small mutational profile from it, write both to disk, then decompose the
    # profile with deconstructSigs, MLE and MLEZ and write those results too.
    dirname = "data/benchmark/multiple"

    # for i in [5, 10, 30]:
    for i in [30, ]:  # i = number of signatures in the reference set
        W, signature_names = read_signatures(i)
        N = W.shape[1]

        # r = random.randrange(2, i // 3 + 2)
        r = random.randrange(2, min(i + 1, 15))  # number of "active" signatures
        # print(np.random.choice(N, r), .05 + np.random.dirichlet(np.ones(r), 1))
        # Rejection-sample until exactly r components exceed the 0.05 floor.
        while True:
            h0 = np.zeros(N)
            h0[np.random.choice(N, r)] = 0.05 + np.random.dirichlet(np.ones(r), 1)
            if np.greater(h0, 0.05).sum() == r:
                break
        h0 /= h0.sum()
        v0 = W.dot(h0)  # expected profile implied by the sampled exposures
        # print(h0)
        n_mutations = random.randrange(10, 50)
        v0_counts = np.random.multinomial(n_mutations, v0 / v0.sum())
        # print(v0_counts)

        # File name encodes: <sigtype>_<n_active>_<n_mutations>_<random tag>
        random_name = str(uuid.uuid4())[:4]
        fname = dirname + "/{:02d}_{}_{}_{}".format(i, r, n_mutations, random_name)
        print(fname)

        profile_fname = fname + ".profile"
        info_fname = fname + ".info"
        mle_info = fname + ".MLE.info"
        mlez_info = fname + ".MLEZ.info"
        ds_info = fname + ".ds.info"

        write_profile(profile_fname, v0_counts)
        write_decomposition(info_fname, h0, signature_names)

        ##################################################
        results = deconstruct_sigs_custom(profile_fname, signatures=i)
        write_decomposition(ds_info, results, signature_names)
        ##################################################

        profile = read_profile_file(profile_fname)
        for method, method_fname in [("MLE", mle_info), ("MLEZ", mlez_info)]:
            _, _, results = decompose_mutational_profile_counts(
                profile,
                (W, signature_names),
                method,
                debug=False,
                others_threshold=0.0)
            write_decomposition(method_fname, results, signature_names)


def multiple_benchmark():
    # Run the sample generator across 100 worker invocations.
    # pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
    random.seed(13425)  # reproducible benchmark generation
    with Pool(10) as p:
        p.map(multiple_benchmark_helper, range(100))


def multiple_benchmark_run_helper(data):
    # Decompose a single on-disk profile with each configured method and write
    # one `.<METHOD>.info` result file next to it; skips files that already
    # exist unless `force` is set.
    fname, signature_ids, W, force = data
    # methods = ['MLE', 'MLEZ', 'AICc', 'BIC', 'AICcZ', 'BICZ']
    methods = ['AICc', 'AICcZ']
    # print(fname)
    profile = read_profile_file(fname)
    for method in methods:
        info = "{}.{}.info".format(fname.split(".")[0], method)
        if isfile(info) and not force:
            continue
        print(info)
        _, _, results = decompose_mutational_profile_counts(
            profile,
            (W, signature_ids),
            method,
            debug=False,
            others_threshold=0.0)
        # Reorder the per-signature scores to match `signature_ids`.
        exposure_dict = {x['name']: x['score'] for x in results}
        exposure = [exposure_dict[name] for name in signature_ids]
        write_decomposition(info, np.array(exposure), signature_ids)


def multiple_benchmark_run(N, signature_ids, W, force=False):
    # Fan the decomposition of all generated profiles for signature-set size N
    # out over a process pool (chunksize 100).
    def get_iterator():
        for fname in glob.glob("data/benchmark/multiple/{:02d}_*.profile".format(N), recursive=True):
            yield (fname, signature_ids, W, force)

    random.seed(13425)
    with Pool(10) as p:
        p.map(multiple_benchmark_run_helper, get_iterator(), 100)


def aggregate_multiple_benchmarks():
    # Sweep every generated profile, compare each method's recovered exposures
    # against the ground-truth `.info` file, and emit one TSV row of error /
    # likelihood / classification metrics per (profile, method) pair.
    methods = {
        "mle": ".MLE.info",
        "mlez": ".MLEZ.info",
        "ds": ".ds.info",
        'aicc': '.AICc.info',
        'bic': '.BIC.info',
        'aiccz': '.AICcz.info',
        'bicz': '.BICz.info',
    }
    # NOTE(review): multiple_benchmark_run_helper writes suffixes '.AICcZ.info'
    # (capital Z), but this map looks for '.AICcz.info' / '.BICz.info'; on a
    # case-sensitive filesystem those entries will never match -- TODO confirm.
    # signatures_thresholds = {
    #     5: 0.06,
    #     10: 0.03,
    #     30: 0.01,
    # }
    signatures_thresholds = {
        5: 0.06,
        10: 0.06,
        30: 0.06,
    }
    # signatures_thresholds = {
    #     5: 0.0001,
    #     10: 0.0001,
    #     30: 0.0001,
    # }
    # only report the signature 2 value (as in DeconstructSigs benchmark)
    with open("data/benchmark/multiple/res1.txt", 'w') as o:
        o.write("file_id\tsigtype\tnsig\tnmut\tmethod\tSRMSE\tPRMSE\tSTRMSE\tLLIK\tLLIK0\tTLLIK\tTLLIK0\tprecision\trecall\taccuracy\tf1\n")
        for fname in glob.glob("data/benchmark/multiple/*.profile", recursive=True):
            # File name encodes <sigtype>_<r>_<nmut>_<replica> (see generator).
            file_id = fname.split("/")[-1].split(".")[0]
            sigtype, r, nmut, replica = fname.split("/")[-1].split(".")[0].split("_")
            sigtype = int(sigtype)
            if sigtype != 30:
                continue
            W, signature_names = read_signatures(sigtype)
            info_fname = fname.split(".")[0] + '.info'
            orig_profile = read_profile_file(fname)
            h0, names = read_decomposition(info_fname)  # ground-truth exposures
            # threshold = 0.06
            threshold = 0.06
            # threshold = 1.0 / np.sqrt(int(nmut)) if method != "ds" else 0.06
            h0_threshold = np.where(h0 > threshold, h0, 0.0)  # zero below threshold
            h0_binary = np.array(h0_threshold) > 0.0  # true / false for threshold
            nsig = np.count_nonzero(h0_binary)
            # Warn when thresholding the ground truth disagrees with the nominal
            # number of active signatures encoded in the file name.
            if nsig < int(r):
                print("LESS", sigtype, nsig, r)
            if nsig > int(r):
                print("MORE", sigtype, nsig, r)
            if nsig <= 1:
                continue
            if nsig > 10:
                continue
            for method in methods:
                method_fname = fname.split(".")[0] + methods[method]
                values, names = read_decomposition(method_fname)
                # print(method_fname)
                if values is None:
                    continue
                h = np.array(values)
                if h.sum() == 0:
                    continue
                h_threshold = np.where(h > threshold, h, 0.0)  # zero below threshold
                reconstructed_profile = W.dot(h)
                # print(h)
                # print(reconstructed_profile)
                # Profile-space and signature-space RMSE (raw and thresholded).
                PRMSE = np.sqrt(mean_squared_error(
                    np.array(orig_profile) / np.array(orig_profile).sum(),
                    np.array(reconstructed_profile) / np.array(reconstructed_profile).sum()))
                SRMSE = np.sqrt(mean_squared_error(h0, h))
                STRMSE = np.sqrt(mean_squared_error(h0_threshold, h_threshold))
                # Log-likelihoods of truth vs. recovered exposures under the model.
                LLIK0 = - NegLogLik(h0, W, orig_profile)
                TLLIK0 = - NegLogLik(h0_threshold, W, orig_profile)
                LLIK = - NegLogLik(h, W, orig_profile)
                TLLIK = - NegLogLik(h_threshold, W, orig_profile)
                # print(h0.sum())
                # print(h.sum())
                h_binary = np.array(h_threshold) > 0.0  # true / false for threshold
                # Signature detection treated as binary classification.
                precision = precision_score(h0_binary, h_binary)
                recall = recall_score(h0_binary, h_binary)
                accuracy = accuracy_score(h0_binary, h_binary)
                f1 = f1_score(h0_binary, h_binary)
                o.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
                    file_id, sigtype, nsig, nmut, method,
                    SRMSE, PRMSE, STRMSE,
                    LLIK, LLIK0, TLLIK, TLLIK0,
                    precision, recall, accuracy, f1))
7,626
2,662
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-01 13:47
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Initial schema for the `restaurant` app.

    Creates Category, Collection, Restaurant, RestaurantTiming and WeekDay
    tables, then wires up the FK/M2M relations.  Auto-generated migration --
    behavioral changes belong in a new migration, not edits here.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
            ],
            options={
                'verbose_name': 'category',
                'verbose_name_plural': 'categories',
                'default_related_name': 'categories',
            },
        ),
        migrations.CreateModel(
            name='Collection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('collection_name', models.CharField(max_length=255)),
            ],
            options={
                'default_related_name': 'collections',
            },
        ),
        migrations.CreateModel(
            name='Restaurant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('restaurant_name', models.CharField(max_length=255)),
                # Falls back to a bundled placeholder image when none is uploaded.
                ('restaurant_image', models.ImageField(default='restaurant_pic/images/no-name.jpg', upload_to='images/restaurant_pic/')),
            ],
            options={
                'default_related_name': 'restaurant',
            },
        ),
        migrations.CreateModel(
            name='RestaurantTiming',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_time', models.TimeField()),
                ('end_time', models.TimeField()),
                ('restaurant', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='restaurant_timing', to='restaurant.Restaurant')),
            ],
            options={
                'verbose_name': 'Restaurant Timing',
                'verbose_name_plural': 'Restaurant Timings',
                'default_related_name': 'restaurant_timing',
            },
        ),
        migrations.CreateModel(
            name='WeekDay',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('day', models.CharField(max_length=255)),
            ],
            options={
                'verbose_name': 'WeekDay',
                'verbose_name_plural': 'WeekDays',
                'default_related_name': 'week_day',
            },
        ),
        # Added after CreateModel to avoid forward references between tables.
        migrations.AddField(
            model_name='restauranttiming',
            name='working_days',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='restaurant_timing', to='restaurant.WeekDay'),
        ),
        migrations.AddField(
            model_name='collection',
            name='restaurant',
            field=models.ManyToManyField(related_name='collections', to='restaurant.Restaurant'),
        ),
        migrations.AddField(
            model_name='category',
            name='restaurant',
            field=models.ManyToManyField(related_name='categories', to='restaurant.Restaurant'),
        ),
    ]
3,542
986
import argparse
import os
import sys

from ckl.values import (
    ValueList,
    ValueString,
    NULL
)
from ckl.errors import (
    CklSyntaxError,
    CklRuntimeError
)
from ckl.interpreter import Interpreter


def main():
    """Entry point for the CKL run command.

    Parses command-line options, prepares the interpreter environment
    (script arguments, script name, module path), runs the given script and
    prints its result, or a formatted error report on failure.
    """
    parser = argparse.ArgumentParser(description="CKL run command")
    parser.add_argument("-s", "--secure", action="store_true")
    parser.add_argument("-l", "--legacy", action="store_true")
    parser.add_argument("-m", "--modulepath", nargs="?")
    parser.add_argument("script")
    parser.add_argument("args", nargs="*")
    args = parser.parse_args(sys.argv[1:])

    # Optional module search path, handed to the script via its environment.
    modulepath = ValueList()
    if args.modulepath:
        modulepath.addItem(ValueString(args.modulepath))

    interpreter = Interpreter(args.secure, args.legacy)

    if not os.path.exists(args.script):
        print(f"File not found '{args.script}'", file=sys.stderr)
        sys.exit(1)

    # Expose the remaining CLI arguments and metadata to the interpreted program.
    script_args = ValueList()
    for raw_arg in args.args:
        script_args.addItem(ValueString(raw_arg))
    interpreter.environment.put("args", script_args)
    interpreter.environment.put("scriptname", ValueString(args.script))
    interpreter.environment.put("checkerlang_module_path", modulepath)

    with open(args.script, encoding="utf-8") as infile:
        source = infile.read()

    try:
        outcome = interpreter.interpret(source, args.script)
        if outcome != NULL:
            print(str(outcome))
    except CklRuntimeError as e:
        # Runtime errors carry a value, message, position and stack trace.
        print(str(e.value.asString().value) + ": " + e.msg + " (Line " + str(e.pos) + ")")
        if e.stacktrace:
            for frame in e.stacktrace:
                print(str(frame))
    except CklSyntaxError as e:
        print(e.msg + ((" (Line " + str(e.pos) + ")") if e.pos else ""))


if __name__ == "__main__":
    main()
1,807
577
import datetime
import fnmatch
import hashlib
import json
import time
import arrow
import os
from botocore.exceptions import ClientError
from boto.s3.key import Key
from security_monkey.alerters import custom_alerter
from security_monkey.common.sts_connect import connect
from security_monkey import app, db
from security_monkey.datastore import Account
from security_monkey.task_scheduler.alert_scheduler import schedule_krampus_alerts

# NOTE(review): this module mixes boto2 (Key) and botocore APIs and uses some
# Python-2-only constructs (`__metaclass__`, `map()` for side effects,
# `hashlib.update()` on str) -- presumably written for Python 2; several spots
# flagged below would misbehave under Python 3. TODO confirm target runtime.


class Notify:
    """Notification for resources outside of the Justice Engine."""

    # Symbolic action codes for downstream consumers.
    KILL = 0
    DISABLE = 1

    def __init__(self):
        # S3 connection state; populated by s3connect() using env configuration.
        self.conn = None
        self.bucket = None
        self.key = None
        self.s3connect(os.getenv('AWS_ACCOUNT_NAME'), os.getenv('KRAMPUS_BUCKET'))

    def s3connect(self, account, bucket):
        """
        s3connect will attempt to connect to an s3 bucket resource.
        If the resource does not exist it will attempt to create it

        :param account: string the aws account you are connecting to
        :param bucket: string the name of the bucket you wish to connect to
        :returns: Boolean of connection Status
        """
        self.conn = connect(
            account,
            's3'
        )
        if self.conn.lookup(bucket) is None:
            app.logger.debug("Bucket Does not exist. Creating one")
            self.bucket = self.conn.create_bucket(bucket)
        else:
            self.bucket = self.conn.get_bucket(bucket)
        self.key = Key(self.bucket)
        return True

    def get_s3_key(self, filename):
        """
        Return the key contents for a specific s3 object, creating an empty
        JSON object in s3 first if the key does not exist yet.

        :param filename: the file name of the s3 object
        :returns: data in the form of a Dict.
        """
        if self.bucket.lookup(filename) is None:
            self.key = self.bucket.new_key(filename)
            # Seed the new key with an empty JSON document ("{}").
            self.key.set_contents_from_string(json.dumps(json.loads('{}')))
        self.key.key = filename
        tmp = self.key.get_contents_as_string()
        return json.loads(tmp)

    def write_to_s3_object(self, filename, data):
        """
        Write to s3

        :param filename: the s3 object file name
        :param data: string of data to be written to the object
        :returns: Boolean of writing success
        """
        try:
            self.key.key = filename
            self.key.set_contents_from_string(data)
            return True
        except ClientError as e:
            app.logger.critical(
                "Unable to push information back to s3. :: {0}".format(e))
            return False


class Jury():
    """
    The Jury makes verdict based on evidence.
    The Jury class contains the methods used to convert items with issues
    into actionable jobs for Krampus to kill.
    """
    # Score thresholds and action delays, configured via environment variables.
    # NOTE(review): int(os.getenv(...)) raises TypeError at import time when an
    # env var is unset -- presumably guaranteed by deployment config.
    KILL_THRESHOLD = int(os.getenv('KILL_THRESHOLD'))
    DISABLE_THRESHOLD = int(os.getenv('DISABLE_THRESHOLD'))
    KILL_RESPONSE_DELTA = int(os.getenv('KILL_RESPONSE_DELTA'))
    DISABLE_RESPONSE_DELTA = int(os.getenv('DISABLE_RESPONSE_DELTA'))

    # Maps a Krampus action category to the security_monkey item indexes it
    # covers; the `None` bucket collects types Krampus has no handler for.
    SECMONKEY_KRAMPUS_ITEM_MAP = {
        's3': ['s3'],
        'ebs': ['ebssnapshot', 'ebsvolume'],
        'ec2': ['ec2image', 'ec2instance'],
        'rds': [
            'rdsclustersnapshot', 'rdsdbcluster', 'rdsdbinstance',
            'rdssecuritygroup', 'rdssnapshot', 'rdssubnetgroup'],
        'iam': [
            'iamgroup', 'iamrole', 'iamssl', 'iamuser', 'policy',
            'samlprovider', 'keypair'],
        'security_group': ['securitygroup'],
        None: [
            'acm', 'sqs', 'cloudtrail', 'config', 'configrecorder',
            'connection', 'virtual_gateway', 'elasticip',
            'elasticsearchservice', 'elb', 'alb', 'networkinterface',
            'gcefirewallrule', 'gcenetwork', 'gcsbucket', 'organization',
            'repository', 'team', 'glacier', 'kms', 'lambda', 'redshift',
            'route53', 'route53domains', 'ses', 'sns', 'dhcp', 'endpoint',
            'flowlog', 'natgateway', 'networkacl', 'peering', 'routetable',
            'subnet', 'vpc', 'vpn']}

    @staticmethod
    def calc_score(issues):
        """
        Helper method for calculating scores after an audit.

        :param issues: list of the item issues to be turned into a score
        :return: int of the score based on the item's issues
        """
        score = 0
        for i in issues:
            # Justified issues have been reviewed and accepted; skip them.
            if not i.justified:
                score += i.score
        return score

    @staticmethod
    def aws_object_type_mapper(aws_object_type):
        """
        maps an aws_object_type from sec-monkey into an actionable
        type for krampus

        :param aws_object_type: string of the sec-monkey type
        :return: None

        NOTE(review): `SECMONKEY_KRAMPUS_ITEM_MAP` is referenced unqualified
        inside a staticmethod; under Python 3 class attributes are not in
        scope here, so this raises NameError at call time unless the name also
        exists at module level -- should likely be
        `Jury.SECMONKEY_KRAMPUS_ITEM_MAP`. TODO confirm.
        """
        for key in SECMONKEY_KRAMPUS_ITEM_MAP:
            if aws_object_type in SECMONKEY_KRAMPUS_ITEM_MAP[key]:
                return key
        return None

    @staticmethod
    def s3_handler(item, issue):
        """
        Append information required for handling s3 resources

        :param item: the item to be handled
        :param issue: the issue to be handled
        :return: jobs based on this action
        """
        jobs = []
        # NOTE(review): assumes item.config['Grants'] maps principal -> grant
        # info; iteration yields the principals. TODO confirm config schema.
        for grants in item.config['Grants']:
            jobs.append({
                "s3_principal": grants,
                "s3_permission": item.config['Grants'][grants]
            })
        return jobs

    @staticmethod
    def ebs_handler(item, issue):
        """
        Append information required for handling ebs resources

        :param item: the item to be handled
        :param issue: the issue to be handled
        :return: jobs based on this action
        """
        # Placeholder: no extra fields needed for ebs yet.
        return []

    @staticmethod
    def ec2_handler(item, issue):
        """
        Append information required for handling ec2 resources

        :param item: the item to be handled
        :param issue: the issue to be handled
        :return: jobs based on this action
        """
        # Placeholder: no extra fields needed for ec2 yet.
        return []

    @staticmethod
    def rds_handler(item, issue):
        """
        Append information required for handling rds resources

        :param item: the item to be handled
        :param issue: the issue to be handled
        :return: jobs based on this action
        """
        # Placeholder: no extra fields needed for rds yet.
        return []

    @staticmethod
    def iam_handler(item, issue):
        """
        Append information required for handling iam resources

        :param item: the item to be handled
        :param issue: the issue to be handled
        :return: jobs based on this action
        """
        # Placeholder: no extra fields needed for iam yet.
        return []

    @staticmethod
    def sg_handler(item, issue):
        """
        Append information required for handling security group resources

        :param item: the item to be handled
        :param issue: the issue to be handled
        :return: jobs based on this action
        """
        jobs = []
        # We don't want to do anything to issues that have a scoring of 0
        if issue.score == 0:
            return []
        # Issue notes are expected to look like "<text>:<rule index>".
        if len(issue.notes.split(':')) != 2:
            return []
        rule_issue_id = issue.notes.split(':')[1]
        for rule in item.config.get('rules', []):
            if int(rule_issue_id) == int(rule.get("sg_index", -1)):
                jobs.append({
                    'cidr_ip': rule['cidr_ip'],
                    'from_port': rule['from_port'],
                    'to_port': rule['to_port'],
                    'proto': rule['ip_protocol'],
                    'direction': rule['rule_type']
                })
        return jobs

    @staticmethod
    def justice(score):
        """
        Determine the action taken for a specific score

        :param score: int of the score for a specific item
        :return: string of the action to be taken ("kill"/"disable"/"ignore")
        """
        int_score = int(score)
        if int_score >= Jury.KILL_THRESHOLD:
            return "kill"
        if int_score >= Jury.DISABLE_THRESHOLD:
            return "disable"
        else:
            return "ignore"

    @staticmethod
    def should_be_actioned(score):
        """
        Simple helper method to determine whether a job warrants action

        :param score: The int value
        :return: Boolean if job should be actioned.
        """
        if Jury.justice(score) == 'ignore':
            return False
        else:
            return True

    @staticmethod
    def get_current_time():
        """
        :return: float of current unix (seconds since epoch)
        """
        return time.time()

    @staticmethod
    def when_to_action(action):
        """
        returns an int of when to action a specific resource based on the
        action

        :param action: String of the action decided
        :return: int, representing the unix time the action should occur.
        :raises ValueError: when called for an "ignore" verdict.
        """
        if action == "kill":
            delta = Jury.KILL_RESPONSE_DELTA
            return Jury.get_current_time() + delta
        elif action == "disable":
            delta = Jury.DISABLE_RESPONSE_DELTA
            return Jury.get_current_time() + delta
        else:
            app.logger.error("when_to_action was invoked with an issue determined to be ignored.")
            raise ValueError("I can't serve Justice to those who have not committed injustice.")

    @staticmethod
    def gather_details_for_nuanced_actions(item, issues, object_type):
        """
        Append actions related to specific issues.
        If we are not completely deleting a resource, we need more information
        for Krampus to action the job generated. i.e. If 3 rules in a security
        group need to be removed it's really 3 jobs that need to be added to
        the task file.

        :param item: the security monkey item that is to be used for gathering details
        :param issues: the secmonkey item called
        :param object_type: string of the aws resource type of the item
        :return jobs: a list of the jobs required to action the item.

        NOTE(review): returns `{}` (a dict) for unhandled types but a list
        otherwise -- callers iterating the result tolerate both, but the
        inconsistency is fragile.
        """
        if object_type is None:
            app.logger.info("Krampus does not have a handler for item type {0}".format(item.index))
            return {}
        type_handler = {
            's3': Jury.s3_handler,
            'ebs': Jury.ebs_handler,
            'ec2': Jury.ec2_handler,
            'rds': Jury.rds_handler,
            'iam': Jury.iam_handler,
            'security_group': Jury.sg_handler
        }
        resource_details = []
        for issue in item.audit_issues:
            extra_fields_by_aws_type = type_handler[object_type](item, issue)
            # NOTE(review): this map() discards its result and, in Python 3,
            # never even runs (map is lazy) -- it validates nothing. Dead code.
            map(lambda x: (isinstance(x, dict)), extra_fields_by_aws_type)
            resource_details.extend(extra_fields_by_aws_type)
        return resource_details

    @staticmethod
    def get_case_insensitive_arn(item):
        """
        get_case_insensitive_arn will return the arn if it exists within the
        provided item. there was some historical inconsistency here so this is
        just a safety class for older versions.

        param item: the secmonkey item containing the arn
        :return: string the arn result, or None when absent.
        """
        for key in ['arn', 'Arn']:
            if item.config.get(key, False):
                return item.config[key]
        app.logger.debug("Arn & arn not in config for {0} of type :: {1}".format(item.name, item.index))
        return None

    @staticmethod
    def get_account_of_item(item):
        """
        returns the string of the account id hosting a specific item.
        This helps with S3 resources.

        :param item: the secmonkey item containing the arn
        :return: string account id result.
        """
        # base_arn = Jury.get_case_insensitive_arn(item)
        return str(db.session.query(Account.identifier).filter(
            Account.name == item.account).one()[0])

    @staticmethod
    def build_krampus_jobs_for_item(score, item, current_tasks, whitelist):
        """
        build_krampus_jobs_for_item will create actionable jobs for krampus
        for a given aws resource.
        * if krampus is not going to delete the aws resource entirely,
        multiple jobs might be produced.

        :param score: int representing how 'bad' the resource is according to sec_monkey.
        :param item: the secmonkey item that needs jobs built
        :param current_tasks: dict of the current_tasks for krampus
        :param whitelist: dict of the krampus whitelist
        :return: list of the jobs for this item to be actioned by krampus.
        """
        arn = Jury.get_case_insensitive_arn(item)
        if arn is None:
            return []
        action = Jury.justice(score)
        # Flatten the item's issues into one human-readable string.
        issues = ""
        for issue in item.audit_issues:
            issues += "{0}::{1}\t{2}\n".format(issue.issue, issue.notes, issue.score)
        job = {
            'score': score,
            'action': action,
            'action_time': Jury.when_to_action(action),
            'audited_time': Jury.get_current_time(),
            'aws_resource_name': arn,
            'aws_account': Jury.get_account_of_item(item),
            'aws_region': item.region,
            'aws_object_type': Jury.aws_object_type_mapper(item.index),
            'human_readable_name': item.name,
            'secmonkey_id': item.db_item.id,
            'issues': issues,
        }
        # Only create jobs for the item if it's actually workable my Krampus
        if job['aws_resource_name'] is not None:
            if job['aws_object_type'] is None:
                # Unhandled type: record it but mark whitelisted so it is
                # never actioned.
                job["unique_id"] = Jury.hash_job(job)
                job['is_whitelisted'] = True
                return [job]
            if job['action'] == 'disable':
                jobs = Jury.gather_details_for_nuanced_actions(
                    item, job['issues'], job['aws_object_type'])
                # NOTE(review): under Python 3 these two map() calls are lazy
                # and never execute, so the per-rule jobs are never merged
                # with the base job fields or given unique_ids -- only valid
                # on Python 2. TODO confirm runtime.
                map(lambda x: x.update(job), jobs)
                map(lambda x: x.update({"unique_id": Jury.hash_job(job)}), jobs)
                for job in jobs:
                    job['is_whitelisted'] = Jury.whitelist_match(arn, whitelist) or Jury.convicted(job['unique_id'], current_tasks)
                return jobs
            else:
                job["unique_id"] = Jury.hash_job(job)
                job['is_whitelisted'] = Jury.whitelist_match(arn, whitelist) or Jury.convicted(job['unique_id'], current_tasks)
                return [job]
        return []

    @staticmethod
    def hash_job(job):
        """
        hash_job creates a unique id to compare jobs.

        :param job: the job to be hashed
        :return: string hash representation uniquely identifying the job

        NOTE(review): hashlib.update() requires bytes on Python 3; these str
        arguments only work on Python 2. TODO confirm runtime.
        """
        hasher = hashlib.sha1()
        hasher.update(job['aws_resource_name'])
        hasher.update(str(job['score']))
        hasher.update(str(job['issues']))
        hasher.update(job['human_readable_name'])
        return hasher.hexdigest()

    @staticmethod
    def make_local_from_timestamp(timestamp, timezone='US/Mountain'):
        """
        make_local_from_timestamp returns a local string representation of a
        unix timestamp

        :param timestamp: int unix timestamp
        :param timezone: string timezone matching a tzdb entry from iana
        :return: human readable string representing a local timestamp.
        """
        utc = arrow.get(timestamp)
        local_time = utc.to(timezone)
        return local_time.strftime('%a %I:%M %p')

    @staticmethod
    def make_utc_from_timestamp(timestamp):
        """
        make_utc_from_timestamp returns a human readable string representing
        a UTC timestamp

        :param timestamp: unix timestamp (seconds since epoch)
        :return: string formatted as %Y-%m-%d %H:%M:%S
        """
        utc_time = datetime.datetime.utcfromtimestamp(timestamp)
        return utc_time.strftime('%Y-%m-%d %H:%M:%S')

    @staticmethod
    def remove_if_in_current_tasks(arn, current_tasks):
        """
        remove_if_in_current_tasks will remove a job if it exists within the
        current_tasks hash

        :param arn: string AWS Resource Name to check for in current_tasks
        :param current_tasks: dict of the current_tasks for krampus (mutated
            in place)

        NOTE(review): removes from the list while iterating it, which can
        skip the element following each removal when several tasks share an
        arn. TODO confirm whether duplicates are possible here.
        """
        for task in current_tasks:
            if task['aws_resource_name'] == arn:
                current_tasks.remove(task)

    @staticmethod
    def convicted(unique_id, current_tasks):
        """
        convicted returns whether the current job in question has already
        been judged and needs to be actioned by krampus

        :param unique_id: string unique_id hash representation of a job
        :param current_tasks: dict of the current_tasks in krampus
        :return: boolean of whether the aws resource is to be actioned
        """
        for task in current_tasks:
            if task.get('unique_id', '') == unique_id:
                return True
        return False

    @staticmethod
    def whitelist_match(arn, whitelist):
        """
        whitelist_match returns whether the whitelist has a fn-match of the
        arn in question.

        :param arn: string AWS Resource Name to check for in current_tasks
        :param whitelist: dict of the krampus whitelist
        :return: booelean of whether the arn is on the whitelist.
        """
        for pattern in whitelist.keys():
            if fnmatch.fnmatch(arn, pattern):
                return True
        return False


class Justice(object):
    """
    The Judge that serves the Jury's verdict to Krampus.
    The Judge class facilitates the actions to be made for any set of issues
    found for a security_monkey item.
    """
    # NOTE(review): `__metaclass__` is Python-2-only syntax; under Python 3 it
    # is ignored and the alerter registration it presumably performs will not
    # happen. TODO confirm runtime.
    __metaclass__ = custom_alerter.AlerterType

    # S3 object names / payload keys, configured via environment variables.
    TASK_KEY = os.getenv('TASK_KEY')
    TASKS_FILE_NAME = os.getenv('TASKS_FILE_NAME')
    WHITELIST_KEY = os.getenv('WHITELIST_KEY')
    WHITELIST_FILE_NAME = os.getenv('WHITELIST_FILE_NAME')
    # One log object per day; evaluated once at import time.
    LOGS_FILE_NAME = "{0}.json".format(datetime.datetime.now().strftime('%Y-%m-%d'))

    def report_watcher_changes(self, watcher):
        """
        report_watcher_changes must exist for report_auditor_changes to be
        invoked within the SecMonkey Auditor. This mimics the existing custom
        alerter documentation in SecurityMonkey:Develop as alerters can still
        work to perfom actions with watcher events as well as auditor events.
        """
        # Intentionally a no-op: only auditor events are acted upon.
        for item in watcher.changed_items:
            pass

    def report_auditor_changes(self, auditor):
        """
        Primary Driver for the Justice Engine.
        We accumulate scores for a specific resource and determine if it
        needs to be actioned.
        Alerters only use the confirmed_new_issues and confirmed_fixed_issues
        item fields.

        The Game Plan:
        1. Gather the current tasks
        2. Remove the fixed items from the current tasks
        3. Calculate the current score from new and existing issues for all items
        4 If the current score is larger than or equal to the required
          thresholds we will update the tasks file.
        """
        notify = Notify()
        app.logger.debug("S3 Connection established.")
        app.logger.debug("Collecting existing items.")
        current_tasks = notify.get_s3_key(Justice.TASKS_FILE_NAME)
        if not current_tasks:
            current_tasks = {Justice.TASK_KEY: []}
        app.logger.debug("Collecting whitelisted items.")
        whitelist = notify.get_s3_key(Justice.WHITELIST_FILE_NAME)
        if not whitelist:
            whitelist = {Justice.WHITELIST_KEY: {}}
        app.logger.debug("Collecting log file \"{0}\"".format(Justice.LOGS_FILE_NAME))
        logs = notify.get_s3_key(Justice.LOGS_FILE_NAME)
        if not logs:
            logs = []
        new_tasks = []
        app.logger.debug("Beginning current audit")
        current_run_audit_time = Jury.get_current_time()
        for item in auditor.items:
            app.logger.debug("changes in {0}. Auditing".format(item.name))
            score = Jury.calc_score(item.audit_issues)
            # remove_if_in_current_tasks lets Krampus ignore those who have atoned
            Jury.remove_if_in_current_tasks(Jury.get_case_insensitive_arn(item), current_tasks[Justice.TASK_KEY])
            if Jury.should_be_actioned(score):
                jobs = Jury.build_krampus_jobs_for_item(score, item, current_tasks[Justice.TASK_KEY], whitelist)
                logs.extend(jobs)
                # NOTE(review): `new_tasks.extend(jobs)` inside this loop adds
                # the FULL jobs list once per non-whitelisted job, producing
                # duplicates when an item yields several jobs -- likely meant
                # `new_tasks.append(job)`. TODO confirm.
                for job in jobs:
                    if not job['is_whitelisted']:
                        new_tasks.extend(jobs)
        new_tasks.extend(current_tasks[Justice.TASK_KEY])
        app.logger.debug("Tasks are updated locally.")
        app.logger.debug("{0} Tasks to be processed".format(
            len(new_tasks)))
        if new_tasks != []:
            app.logger.debug("Pushing tasks to s3.")
            notify.write_to_s3_object(Justice.TASKS_FILE_NAME, json.dumps({Justice.TASK_KEY: new_tasks}))
        if logs != []:
            app.logger.debug("Pushing logs to s3")
            notify.write_to_s3_object(Justice.LOGS_FILE_NAME, json.dumps(logs))
        app.logger.debug("Sending Alerts to Account Owners.")
        # NOTE(review): .s() only builds a Celery signature; nothing invokes
        # it (no .delay()/.apply_async()), so no alert task is ever queued.
        # TODO confirm intent.
        schedule_krampus_alerts.s(current_run_audit_time)
        app.logger.debug("Justice Engine Complete. Closing.")
5,965
# noinspection PyPackageRequirements
from telegram import InlineKeyboardMarkup, InlineKeyboardButton


class InlineKeyboard:
    """Factory for the bot's inline keyboards."""

    # Sentinels understood by the message-sending layer.
    HIDE = None
    REMOVE = None

    @staticmethod
    def static_animated_switch(animated=False):
        """Build the normal/animated pack-type toggle.

        The currently selected option carries the filled check mark (✅),
        the other one the hollow mark (☑️).
        """
        selected, unselected = '✅', '☑️'
        row = [
            InlineKeyboardButton(
                '{} normal'.format(unselected if animated else selected),
                callback_data='packtype:static'
            ),
            InlineKeyboardButton(
                '{} animated'.format(selected if animated else unselected),
                callback_data='packtype:animated'
            ),
        ]
        return InlineKeyboardMarkup([row])
623
182
import urllib2
import json

# NOTE(review): Python 2 only module (urllib2, print statements).

# Package names presumably used elsewhere for faked/demo results; not
# referenced in this module -- verify external users before removing.
FAKE_PACKAGES = (
    'south',
    'django-debug-toolbar',
    'django-extensions',
    'django-social-auth',
)


class GuitarWebAPI(object):
    """Thin HTTP client for the Guitar web API."""

    def __init__(self, url):
        # `url` is the API root; paths are appended directly, so it must end
        # with a slash.
        self.url = url

    def search(self, q):
        """Return the decoded JSON search results for query `q`.

        NOTE(review): `q` is interpolated into the URL without escaping, so
        queries containing spaces or URL metacharacters will break.
        """
        url = self.url + 'search/' + q + '/'
        res = urllib2.urlopen(url)
        return json.loads(res.read())

    def get_config(self, package, version=None):
        """Fetch configuration for `package`.

        NOTE(review): looks unfinished -- `version` is accepted but ignored,
        the URL targets the 'search/' endpoint rather than a config endpoint,
        and the response is printed but never returned (implicit None).
        """
        url = self.url + 'search/' + package + '/'
        print url
        res = urllib2.urlopen(url)
        print res


# Module-level convenience client pointed at the local dev server.
fetcher = GuitarWebAPI('http://localhost:8000/api/v1/')
596
213
import zipfile
from getpass import getpass
import os
import stat
import tempfile
from os import path

from .crypto import encrypt


def compile_ruleset(ruleset_path, ruleset_encryption_password=None, output_path=None):
    """Compile the Python package at ``ruleset_path`` into an encrypted
    ``<name>.dpgr`` artifact.

    :param ruleset_path: directory containing the ruleset package sources
    :param ruleset_encryption_password: key used to encrypt the compiled
        archive; interactively prompted for when omitted
    :param output_path: destination directory (defaults to the current
        working directory)
    """
    destination_dir = output_path or os.getcwd()
    password = ruleset_encryption_password or getpass('Password (used to encrypt compiled ruleset):')
    ruleset_name = path.basename(ruleset_path)

    with tempfile.SpooledTemporaryFile() as workspace:
        # Byte-compile the ruleset sources into an in-memory zip archive.
        with zipfile.PyZipFile(workspace, mode='w') as archive:
            archive.writepy(pathname=ruleset_path)
        workspace.seek(0)
        ciphertext = encrypt(workspace.read(), key=password)

        target = path.join(destination_dir, '{ruleset}.dpgr'.format(ruleset=ruleset_name))
        with open(target, 'wb') as compiled_file:
            # Mark the artifact read-only; writing still succeeds through the
            # already-open handle.
            os.chmod(target, stat.S_IREAD)
            compiled_file.write(ciphertext)
1,005
324
from typing import List


# LeetCode 746: Min Cost Climbing Stairs.
# Paying cost[i] lets you advance one or two steps from step i; you may start
# at step 0 or step 1, and the goal is to step past the last stair cheaply.
class Solution:

    def minCostClimbingStairs_1(self, cost: List[int]) -> int:
        """Tabulated DP: table[i] is the cheapest total paid by a path whose
        last paid step is i.  Answer is the cheaper of finishing from either
        of the top two steps.  O(n) time, O(n) space; assumes len(cost) >= 2.
        """
        n = len(cost)
        table = [0] * n
        table[0], table[1] = cost[0], cost[1]
        for step in range(2, n):
            table[step] = min(table[step - 1], table[step - 2]) + cost[step]
        return min(table[-1], table[-2])

    def minCostClimbingStairs_2(self, cost: List[int]) -> int:
        """Rolling-variable version of the same recurrence: keeps only the
        best costs for the two most recent steps.  O(n) time, O(1) space.
        """
        two_back = one_back = 0
        for fee in cost:
            two_back, one_back = one_back, min(two_back, one_back) + fee
        return min(two_back, one_back)
553
229
""" File: pages/page.py Author: Luke Mason Description: Main part of the application, the actual graph page. """ # Application imports from message import log, error, success from settings import APP_NAME, COLOR, FONT, FONT_SIZE, SCREEN_WIDTH, SCREEN_HEIGHT, WIDTH, HEIGHT, PAD, _QUIT from sprites.vertex import Vertex from sprites.edge import Edge from pages.page import Page from graph import Graph as G # Pygame imports from pygame import draw, sprite, event, mouse, display, init, key, MOUSEBUTTONUP, MOUSEBUTTONDOWN, MOUSEMOTION, QUIT, \ KEYDOWN, K_BACKSPACE, K_DELETE, KMOD_SHIFT # Python imports from math import atan2, degrees, cos, sin class GraphPage(Page): def __init__(self, screen): Page.__init__(self, screen) self.second_click = False self.moving = False self.collision = False self.selected_vertices = [] self.selected_edges = [] self.vertices = sprite.Group() self.edges = [] # Edges arent sprites in the same way that vertices are self.last_clicked_vertex = None self.show_labels = False self.graph = G() # Actual graph logic def add_vertex(self, x: int, y: int): """ Attempts to add a new vertex, returns True if successful, False if it is colliding with an existing vertex. 
""" new_v = Vertex(x=x, y=y) self.collision = False for v in self.vertices: if sprite.collide_rect(new_v, v): error("Vertex placement collision detected!") self.collision = True if not self.collision: success(f'Adding vertex {new_v}') self.vertices.add(new_v) return not self.collision def add_edge(self, v1: Vertex, v2: Vertex) -> None: """ Adds an edge between vertices v1 and v2 Here edges in the list are a dict={'edge': edge, 'count': n} """ e = Edge(v1, v2) found = False # Try to find in list and update count for _e in self.edges: if _e.get('edge') == e: # We can do this with the __eq__ definition on the Edge class _e.update({'count': int(_e.get('count'))+1}) # log(f'{_e} update count={_e.get("count")}') found = True break # Otherwise insert with count=1 if not found: self.edges.append({'edge': e, 'count': 1}) # log(f'{e} insert count=1') v1.edges.append(e) v2.edges.append(e) success(f'Add edge {e}') def edge_count(self): """ Since self.edges is a list of dicts defining parallel edges, simply len(self.edges) is misleading. 
""" total_count = 0 for edge in self.edges: total_count += edge.get('count') return total_count def remove_edge(self, edge) -> bool: """ Removes an edge from the edge list """ found = False for e in self.edges: if e.get('edge') == edge: self.edges.remove(e) found = True break return found def delete_vertices(self): for sv in self.selected_vertices: log('deleting sv :', sv) x, y = sv.get_pos() self.vertices.remove(sv) # Remove any edges connected to this removed vertex for e in self.edges: if e.get('edge') in sv.edges: self.edges.remove(e) self.last_clicked_vertex = None def delete_edges(self): for se in self.selected_edges: for e in self.edges: if e.get('edge') == se: log('deleteing se:', se) self.edges.remove(e) def stats(self, font): """ Draws the graph stats stats, i.e., total vertex and edge count """ v_count = f'N={len(self.vertices)}' # N e_count = f'M={self.edge_count()}' # M v_count_rendered = font.render(str(v_count), False, COLOR.get('white'), True) e_count_rendered = font.render(str(e_count), False, COLOR.get('white'), True) return {'text': v_count_rendered, 'size': font.size(str(v_count))}, \ {'text': e_count_rendered, 'size': font.size(str(e_count))} def handle_click(self, x, y): """ Handles the logic when mouse is clicked, this logic is quite complex as it includes, - placing a vertex (single click anywhere on app window where there does not already exist a vertex) - moving a vertex (click and drag a vertex) - adding an edge between two vertices (single click two vertices in a row) """ self.collision = False button_clicked = False edge_clicked = False for b in self.buttons: if b.hovered(x, y): log(f'button clicked={b}') b.onclick() button_clicked = True if not button_clicked: for e in self.edges: edge = e.get('edge') if edge.hovered(x, y): edge_clicked = True if not button_clicked and not edge_clicked: for v in self.vertices: if v.rect.collidepoint(x, y): self.collision = True log('====== vertex click:', v) # Handles vertex move (self.moving and 
v.drag flipped on MOUSEBUTTONUP) self.moving = True v.drag = True # Click to select v.selected = True v.set_color(COLOR.get('focus')) self.selected_vertices.clear() self.selected_edges.clear() self.selected_vertices.append(v) # If last clicked vertex if self.last_clicked_vertex and v and self.last_clicked_vertex != v: self.add_edge(self.last_clicked_vertex, v) self.last_clicked_vertex = None log('clear last clicked 1') elif self.last_clicked_vertex and v and self.last_clicked_vertex == v: log('ADD LOOP!') else: self.last_clicked_vertex = v log('set last clicked') # If selected vertex and not a collision, clear selected vertex if not self.collision and len(self.selected_vertices) > 0: self.selected_vertices.clear() # If selected edge and not a collision, clear selected edge elif not self.collision and len(self.selected_edges) > 0: self.selected_edges.clear() # Otherwise add new vertex elif not self.collision: self.add_vertex(x, y) # Mousedown not moving, add vertex self.last_clicked_vertex = None def poll_events(self): """ Graph page event polling (Handles any sort of input) - Single click anywhere on screen to add a new vertex - Delete or backspace to delete selected vertex """ x, y = mouse.get_pos() for e in event.get(): if e.type == QUIT: return _QUIT # Mouse down elif e.type == MOUSEBUTTONDOWN: self.handle_click(x, y) # Mouse up elif e.type == MOUSEBUTTONUP: # If mouse release and vertex is being dragged, stop dragging (placing a moved vertex) dragging = False for v in self.vertices: if v.drag: dragging = True v.drag = False self.moving = False if v.rect.collidepoint(x, y) and self.last_clicked_vertex and v and self.last_clicked_vertex != v: self.add_edge(self.last_clicked_vertex, v) # Handling edge placement on mouse button up, so we do not place an edge when draggin a vertex if not dragging: for e in self.edges: edge = e.get('edge') if edge.hovered(x, y): self.selected_edges.clear() self.selected_vertices.clear() self.selected_edges.append(edge) # Mouse moving 
elif e.type == MOUSEMOTION: for v in self.vertices: # Handles vertex drag as it is being dragged if v.drag: v.set_pos(x, y) # Focus if mouseover if v.rect.collidepoint(x, y): v.set_color(COLOR.get('focus')) elif v not in self.selected_vertices: v.set_color(COLOR.get('white')) for _e in self.edges: edge = _e.get('edge') if edge.hovered(x, y): edge.set_color(COLOR.get('focus')) elif edge not in self.selected_edges: edge.set_color(COLOR.get('white')) elif e.type == KEYDOWN: # (Delete or backspace key) Delete selected vertices if e.key == K_BACKSPACE or e.key == K_DELETE: self.delete_vertices() self.delete_edges() self.moving = False def draw_edges(self): """ Draw the edges (have to do this manually as pygame sprite did not quite fit for this use case) """ mult = 6 # distance between edges for e in self.edges: total_count = e.get('count') for c in range(0, e.get('count')): edge = e.get('edge') p1, p2 = edge.v1.get_pos(), edge.v2.get_pos() ang = degrees(atan2(p2[1] - p1[1], p2[0] - p1[0])) # Logic to place parallel edges in clear visible manner despite angle between # the vertices. (This angle will change as user moves vertices around) x_mult, y_mult = self.handle_point_angle_eq(ang, mult) p1 = (p1[0] + edge.v1.radius + x_mult*c, p1[1] + edge.v1.radius + y_mult*c) p2 = (p2[0] + edge.v2.radius + x_mult*c, p2[1] + edge.v2.radius + y_mult*c) draw.line(self.screen, edge.color, p1, p2) def handle_point_angle_eq(self, ang, dist) -> (int, int): """ Handles the angle point code to keep draw_edges function clean It returns x, y multiple for distance between parallel edges based on the angle between the vertices so that parallel edges can always be displayed as parallel. """ # Handles sign of ranges we check to reduce repeated code sign = 1 if ang < 0: sign = -1 # This algorithm is likely really ugly... I know there exists a more elegant way # to do this. 
if 45 <= ang <= 135 or -135 <= ang <= -45: return dist, 0 elif -45 <= ang <= 45 or ang >= 135 or ang <= -135: return 0, dist else: print('======== other ang?') return dist, dist def toggle_labels(self): print('======== toggling labels') self.show_labels = not self.show_labels def draw_vertices(self, font): """ Draws the vertices and handles vertex labels """ self.vertices.draw(self.screen) # Draw vertices if self.show_labels: i = 1 for v in self.vertices: x, y = v.get_pos() text = font.render(str(i), False, COLOR.get('white'), True) self.screen.blit(text, (x + PAD*1.5, y - PAD*1.5)) i += 1 def think(self, font): """ Graph page think function, this function is called every tick """ q = self.poll_events() n, m = self.stats(font) # n, m are dicts, take a look at render_stats to see structure self.screen.fill(COLOR.get('black')) # Background color self.draw_vertices(font) self.draw_edges() # Draw edges self.draw_buttons(font) # Draw buttons (inherited from Page class) self.screen.blit(n.get('text'), (PAD, PAD)) # Draw N=vertex count and M=edge count self.screen.blit(m.get('text'), (WIDTH - PAD - m.get('size')[0], PAD)) # Set to right side of screen display.flip() # Weird pygame call required to display window if q == _QUIT: return q
10,289
4,273
import brownie


def test_update(n1, barb, barb2, owner):
    """The owner can re-capitalize a summoner's name; event and state agree."""
    receipt = n1.update_capitalization(1, "coNan", {'from': owner})
    emitted = receipt.events["NameUpdated"].values()
    assert emitted == (1, "Conan", "coNan")
    # The stored name now carries the requested capitalization.
    assert n1.summoner_name(1) == "coNan"


def test_update_fails(n1, barb, barb2, owner, accounts):
    """Re-capitalization reverts for strangers and for genuinely different names."""
    stranger = accounts[5]
    with brownie.reverts("!owner or approved name"):
        n1.update_capitalization(1, "coNan", {'from': stranger})
    with brownie.reverts("name different"):
        n1.update_capitalization(1, "Conan1", {'from': owner})
521
201
# Decompiled helper (originally Python 2.7 bytecode, file <tahm1d>,
# decompiled Thu Sep 10 23:29:38 2020; code timestamp 2020-09-02 17:33:14).
import os, sys, time
from os import system
from time import sleep


def htrprint(text):
    """Type *text* (plus a trailing newline) to stdout one character at a time."""
    for ch in text + '\n':
        sys.stdout.write(ch)
        sys.stdout.flush()
        # Small delay per character produces the "typewriter" effect.
        sleep(0.01)


def menu():
    """Remove compiled leftovers and slowly print the usage banner."""
    system('rm -rf *.pyc *.dis')
    htrprint(' \x1b[1;96mHello Bro !!')
    htrprint('\n \x1b[1;96mExcute \x1b[1;92mpython2 crack.py \x1b[1;96mto run this tool !\x1b[1;97m')
    sleep(1)


if __name__ == '__main__':
    menu()
603
285
import os
import numpy as np
import pandas as pd
import torch as th
from mstarhe.core.nn.models import PrettyFeedForward
from MstarHe2R.components.dataloader import Mstar2RDataLoader

# Flattened input size: MSTAR chips are 128x128 pixels.
__IMG_SIZE__ = 128 * 128


class MSTARNet(PrettyFeedForward):
    """Feed-forward classifier for MSTAR SAR image chips.

    Tracks per-epoch accuracy/loss curves during validation and dumps
    per-sample predictions to a TSV after a test-mode evaluation pass.
    Training mechanics (train/eval loops, checkpointing) come from
    PrettyFeedForward; only hooks are specialized here.
    """

    data_loader_class = Mstar2RDataLoader
    # model_graph_class = ANNetGraph
    model_graph_class = None
    optimizer_class = th.optim.Adam
    loss_func_class = th.nn.NLLLoss
    loader_params = {
        "train": {},
        "test": {}
    }
    # hyper-parameters
    lr = 1e-3  # learning rate
    l1_lambda = 0.5  # l1-penalty coef
    l2_lambda = 0.01  # l2-penalty coef
    step = 10  # measure_progress step k
    patient = 3  # early stopping patient
    alpha = 0.5  # early stopping threshold

    def __init__(self, ofea, **kwargs):
        # ofea: number of output classes; input features are fixed to the
        # flattened 128x128 image size.
        super(MSTARNet, self).__init__(ifea=__IMG_SIZE__, ofea=ofea, **kwargs)
        # Checkpoint filename template, e.g. 'cp<GraphName>ep%s.tar'.
        self.CHECK_POINT = 'cp{}ep%s.tar'.format(self.model_graph_class.__name__)
        self._acc = list()        # per-batch accuracies of the current pass
        self.acc_curve = list()   # validation accuracy per epoch
        self._loss = list()       # per-batch losses of the current pass
        self.vloss_curve = list() # validation loss per epoch
        self.tloss_curve = list() # training loss per epoch
        self.eval_ret = list()    # stacked [label, pred, prob] rows (test mode)
        self.pre_accuracy = None  # final test-set accuracy
        self.test_samples_ = list()  # sample identifiers of the test split

    def get_data_loader(self, train):
        """Build the train or test loader; in test mode also record sample ids."""
        p = self.loader_params['train'] if train else self.loader_params['test']
        loader_factory = self.data_loader_class(train=train)
        if train:
            p["split"] = True
            return loader_factory(**p)
        # Test loader: deterministic order so predictions align with samples.
        p["shuffle"] = False
        loader = loader_factory(**p)
        self.test_samples_ = np.array(loader_factory.mstar.samples).reshape(-1, 1)
        return loader

    @property
    def epoch_acc(self):
        # Mean of the per-batch accuracies collected so far.
        return np.mean(self._acc)

    @property
    def epoch_loss(self):
        # Mean of the per-batch losses collected so far.
        return np.mean(self._loss)

    def analysis(self, label, ypre, preP):
        """
        Record per-batch accuracy; in test mode also keep raw predictions.

        :param label: size(batch) true class
        :param ypre: size(batch) pre class
        :param preP: size(batch) pre prob
        :return:
        """
        self._acc.append(self.accuracy(ypre, label).item())
        if not getattr(self, 'validate', False):
            # Rows are [label, prediction, probability] per sample.
            self.eval_ret.append(th.stack([label.float(), ypre.float(), preP], dim=1))

    def train_batch(self, dl):
        # Delegate, then record the epoch's mean training loss.
        super(MSTARNet, self).train_batch(dl)
        self.tloss_curve.append(self.epoch_loss)

    def eval_batch(self, dl):
        self._acc = list()  # eval testing or validating batch
        super(MSTARNet, self).eval_batch(dl)
        print('Average Accuracy: %s' % self.epoch_acc)
        if getattr(self, 'validate', False):
            # Validation pass: extend the epoch curves.
            self.acc_curve.append(self.epoch_acc)
            self.vloss_curve.append(self.epoch_loss)
        else:
            # Test pass: persist per-sample predictions.
            ret = th.cat(self.eval_ret, dim=0)
            # NOTE(review): ret rows are [label, pred, prob]; ret[0]/ret[1]
            # index the first two *rows*, not the label/pred columns —
            # ret[:, 0], ret[:, 1] looks intended. Confirm before relying on
            # pre_accuracy.
            self.pre_accuracy = self.accuracy(ret[0], ret[1])
            path = os.path.join(self.csv_path, 'EvalCurves%s.txt' % self.model_graph_class.__name__)
            pd.DataFrame(np.hstack([self.test_samples_, ret.cpu().numpy()]),
                         columns=['objects', 'labels', 'predict', 'prob'])\
                .to_csv(path, sep='\t', index=True, header=True)

    def model_persistence(self):
        """Persist the model, then dump the per-epoch curves to a TSV."""
        super(MSTARNet, self).model_persistence()
        curves = {
            "Accaracy": self.acc_curve,
            "TrLoss": self.tloss_curve,
            "VaLoss": self.vloss_curve
        }
        path = os.path.join(self.csv_path, 'EpochCurves%s.txt' % self.model_graph_class.__name__)
        # NOTE(review): pd.DataFrame over a dict_values view may need
        # list(curves.values()) on some pandas versions — verify.
        df = pd.DataFrame(curves.values()).T
        df.columns = curves.keys()
        df.to_csv(path, sep='\t', index=True, header=True)


def _example():
    """Smoke-test entry point: train the net for each configured graph."""
    Net = MSTARNet
    Net.device = None
    from components.graphs.graph2 import TestL4MSTARANNetGraph
    G = [TestL4MSTARANNetGraph]
    # NOTE(review): G holds bare classes, but the loop unpacks (g, params)
    # pairs — as written this raises TypeError. G presumably should be a list
    # of (graph_class, params_dict) tuples; confirm intended structure.
    for g, params in G:
        Net.model_graph_class = g
        Net.alpha = params["aph"]
        Net.step = params["stp"]
        net = Net(3, reg=None, dropout=False)
        print(net.graph.__class__.__name__)
        # print(net.get_data_loader(False))
        # print(len(net.test_samples_))
        net.train(params['n'], 'PQ', checkpoint=params['cp'])


if __name__ == '__main__':
    _example()
4,165
1,437
from abc import ABC, abstractmethod

# Observer-pattern event system. The original solution used plain functions as
# handlers; here listener classes implement `update`, so different handler
# behaviors are expressed as EventListener subclasses (closer to the classic
# pattern description, while Python first-class functions remain an option).


class EventListener(ABC):
    """Interface for anything that wants to be notified of events."""

    @abstractmethod
    def update(self, data):
        """Handle *data* published for an event this listener subscribed to."""


class EventSystem:
    """Registry mapping event names to their subscribed listeners."""

    def __init__(self):
        # event name -> list of EventListener instances, in subscription order
        self.subscribers = {}

    def add_subscriber(self, event: str, subscriber: EventListener):
        """Register *subscriber* for *event*, creating the event on first use."""
        self.subscribers.setdefault(event, []).append(subscriber)

    def trigger_event(self, event: str, data):
        """Notify every subscriber of *event* with *data*.

        Triggering an event nobody subscribed to is a no-op (previously this
        raised KeyError on the missing dict entry).
        """
        for subscriber in self.subscribers.get(event, []):
            subscriber.update(data)
996
267
#! /usr/bin/env python
import time
import os
import argparse
import json
import cv2
import sys
sys.path += [os.path.abspath('keras-yolo3-master')]
from utils.utils import get_yolo_boxes, makedirs
from utils.bbox import draw_boxes
from tensorflow.keras.models import load_model
from tqdm import tqdm
import numpy as np
from panel_disconnect import disconnect


def _main_(args):
    """Run YOLO inference over the input image(s), post-process detections with
    `disconnect`, draw the boxes, and write the annotated images plus the mean
    prediction time to the output directory.

    :param args: parsed CLI namespace with .conf, .input and .output paths.
    """
    config_path = args.conf
    input_path = args.input
    output_path = args.output

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)
    makedirs(output_path)

    ###############################
    #   Set some parameter
    ###############################
    net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.5, 0.3

    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Predict bounding boxes
    ###############################
    image_paths = []
    if os.path.isdir(input_path):
        for inp_file in os.listdir(input_path):
            image_paths += [input_path + inp_file]
    else:
        image_paths += [input_path]
    # Keep only recognized image extensions (suffix match on the last 4 chars).
    image_paths = [inp_file for inp_file in image_paths
                   if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])]

    # the main loop
    times = []
    images = [cv2.imread(image_path) for image_path in image_paths]
    start = time.time()

    # predict the bounding boxes, then drop low-confidence detections
    boxes = get_yolo_boxes(infer_model, images, net_h, net_w,
                           config['model']['anchors'], obj_thresh, nms_thresh)
    boxes = [[box for box in boxes_image if box.get_score() > obj_thresh]
             for boxes_image in boxes]

    print('Elapsed time = {}'.format(time.time() - start))
    times.append(time.time() - start)

    # Per-image disconnect post-processing (z-score threshold 1.8).
    boxes_disc = [disconnect(image, boxes_image, z_thresh=1.8)
                  for image, boxes_image in zip(images, boxes)]

    for image_path, image, boxes_image in zip(image_paths, images, boxes_disc):
        # draw bounding boxes on the image using labels
        draw_boxes(image, boxes_image, ["disconnect"], obj_thresh)
        # write the image with bounding boxes to file
        cv2.imwrite(output_path + image_path.split('/')[-1], np.uint8(image))

    # Persist the average prediction time.
    # Fix: use a context manager instead of an unclosed handle bound to the
    # name `file` (which also shadowed the builtin).
    with open(args.output + '/time.txt', 'w') as time_file:
        time_file.write('Tiempo promedio:' + str(np.mean(times)))


if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='Predict with a trained yolo model')
    argparser.add_argument('-c', '--conf', help='path to configuration file')
    argparser.add_argument('-i', '--input',
                           help='path to an image, a directory of images, a video, or webcam')
    argparser.add_argument('-o', '--output', default='output/',
                           help='path to output directory')
    args = argparser.parse_args()
    _main_(args)
3,056
1,022
import tensorflow.keras.constraints as constraints
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import ReLU
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Lambda
from tensorflow.keras.layers import Layer
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Add
from tensorflow_addons.layers import InstanceNormalization
from tensorflow_addons.layers import GroupNormalization
from tenning.generic_utils import get_object_config
from tenning.activations import Swish
import tensorflow as tf


class ResnetBlock(Layer):
    """Pre-activation residual block: three norm -> activation -> conv stages
    (1x1 reduce, kxk identity/down/up-sample, 1x1 expand) plus a shortcut that
    matches the main path's shape, with an optional squeeze-and-excitation
    branch gating the output channels.

    mode:
        'identity'   - spatial size unchanged
        'downsample' - strided valid Conv2D shrinks height/width
        'upsample'   - Conv2DTranspose grows height/width
    """

    def __init__(self, out_channels, strides=1, kernel_size=3, trainable=True, mode='identity', initializer='he_normal', normalization='instance_norm', activation='leaky_relu', groups=None, squeeze_excitation=False, squeeze_ratio=16, **kwargs):
        # NOTE(review): conv_constraint / dense_constraint (and their argument
        # lists) arrive via **kwargs and are ALSO forwarded to Layer.__init__
        # below — confirm the base Layer tolerates these extra kwargs.
        super().__init__(trainable=trainable, **kwargs)
        allowed_normalizations = ['batch_norm', 'instance_norm', 'group_norm']
        allowed_modes = ['identity', 'downsample', 'upsample']
        assert mode in allowed_modes, f"Invalid mode!"
        assert normalization in allowed_normalizations, f"Invalid normalization!"
        # Optional weight constraints are given by *name* and looked up on
        # tf.keras.constraints, then instantiated with the argument lists.
        conv_constraint = kwargs.get('conv_constraint', None)
        conv_constraint_arguments = kwargs.get('conv_constraint_arguments', [])
        dense_constraint = kwargs.get('dense_constraint', None)
        dense_constraint_arguments = kwargs.get('dense_constraint_arguments', [])
        if conv_constraint_arguments:
            if not isinstance(conv_constraint_arguments, list):
                raise TypeError(f"'conv_constraint_arguments' must be a list")
        if dense_constraint_arguments:
            if not isinstance(dense_constraint_arguments, list):
                raise TypeError(f"'dense_constraint_arguments' must be a list")
        if conv_constraint:
            conv_constraint = getattr(constraints, conv_constraint, None)(*conv_constraint_arguments)
        if dense_constraint:
            dense_constraint = getattr(constraints, dense_constraint, None)(*dense_constraint_arguments)
        self.out_channels = out_channels
        self.initializer = initializer
        self.mode = mode
        self.kernel_size = kernel_size
        self.strides = strides
        self.normalization = normalization
        self.groups = groups
        self.squeeze_excitation = squeeze_excitation
        self.squeeze_ratio = squeeze_ratio
        self.conv_constraint = conv_constraint
        self.dense_constraint = dense_constraint
        # One normalization layer per norm->act->conv stage.
        if normalization == 'group_norm':
            self.norm1 = GroupNormalization(groups=self.groups, name=self.name + '/norm1', trainable=self.trainable)
            self.norm2 = GroupNormalization(groups=self.groups, name=self.name + '/norm2', trainable=self.trainable)
            self.norm3 = GroupNormalization(groups=self.groups, name=self.name + '/norm3', trainable=self.trainable)
        elif normalization == 'instance_norm':
            self.norm1 = InstanceNormalization(name=self.name + '/norm1', trainable=self.trainable)
            self.norm2 = InstanceNormalization(name=self.name + '/norm2', trainable=self.trainable)
            self.norm3 = InstanceNormalization(name=self.name + '/norm3', trainable=self.trainable)
        else:
            self.norm1 = BatchNormalization(name=self.name + '/norm1', trainable=self.trainable)
            self.norm2 = BatchNormalization(name=self.name + '/norm2', trainable=self.trainable)
            self.norm3 = BatchNormalization(name=self.name + '/norm3', trainable=self.trainable)
        # One activation per stage.
        if activation == 'swish':
            self.relu1 = Swish(name=self.name + '/activation1')
            self.relu2 = Swish(name=self.name + '/activation2')
            self.relu3 = Swish(name=self.name + '/activation3')
        elif activation == 'leaky_relu':
            self.relu1 = LeakyReLU(name=self.name + '/activation1')
            self.relu2 = LeakyReLU(name=self.name + '/activation2')
            self.relu3 = LeakyReLU(name=self.name + '/activation3')
        else:
            self.relu1 = ReLU(name=self.name + '/activation1')
            self.relu2 = ReLU(name=self.name + '/activation2')
            self.relu3 = ReLU(name=self.name + '/activation3')
        # 1x1 bottleneck reduction to out_channels // 2.
        self.in_conv = Conv2D(self.out_channels // 2,
                              kernel_size=1,
                              name=self.name + '/in_conv',
                              strides=1,
                              kernel_constraint=conv_constraint,
                              trainable=self.trainable,
                              padding='same',
                              kernel_initializer=self.initializer)
        if mode == 'identity':
            # Keeps image dimensions (height and width) intact
            self.mid_conv = Conv2D(self.out_channels // 2,
                                   kernel_size=1,
                                   name=self.name + '/mid_conv',
                                   strides=1,
                                   trainable=self.trainable,
                                   padding='same',
                                   kernel_constraint=conv_constraint,
                                   kernel_initializer=self.initializer)
        elif mode == 'downsample':
            # Causes a reduction over image dimensions. The new dimensions are calculated as follows:
            # new_dim = floor((old_dim - kernel_size)/stride + 1)
            # where new_dim and old_dim are either image height or width
            self.mid_conv = Conv2D(self.out_channels // 2,
                                   kernel_size=self.kernel_size,
                                   name=self.name + '/mid_conv',
                                   strides=self.strides,
                                   trainable=self.trainable,
                                   padding='valid',
                                   kernel_constraint=conv_constraint,
                                   kernel_initializer=self.initializer)
        else:
            # Causes an increase over image dimensions. The new dimensions are calculated as follows:
            # new_dim = old_dim * stride + max(kernel_size - stride, 0)
            # where new_dim and old_dim are either image height or width
            self.mid_conv = Conv2DTranspose(self.out_channels // 2,
                                            kernel_size=self.kernel_size,
                                            name=self.name + '/mid_conv',
                                            strides=self.strides,
                                            trainable=self.trainable,
                                            padding='valid',
                                            kernel_constraint=conv_constraint,
                                            kernel_initializer=self.initializer)
        # Squeeze-and-excitation branch (only built when requested).
        self.global_pool = None
        self.squeeze_dense1 = None
        self.squeeze_dense2 = None
        if self.squeeze_excitation:
            self.global_pool = GlobalAveragePooling2D(name=self.name + "/global_pool")
            self.squeeze_dense1 = Dense(self.out_channels // self.squeeze_ratio,
                                        activation='relu',
                                        kernel_initializer=self.initializer,
                                        kernel_constraint=dense_constraint,
                                        trainable=self.trainable,
                                        name=self.name + "/squeeze_dense1")
            self.squeeze_dense2 = Dense(self.out_channels,
                                        activation='sigmoid',
                                        kernel_constraint=dense_constraint,
                                        kernel_initializer=self.initializer,
                                        trainable=self.trainable,
                                        name=self.name + "/squeeze_dense2")
        # 1x1 expansion back to out_channels.
        self.out_conv = Conv2D(self.out_channels,
                               kernel_size=1,
                               name=self.name + '/out_conv',
                               strides=1,
                               trainable=self.trainable,
                               padding='same',
                               kernel_constraint=conv_constraint,
                               kernel_initializer=self.initializer)

    def build(self, input_shape):
        """Create the shortcut branch so its output shape matches the main path."""
        if self.mode == 'identity':
            if input_shape[-1] != self.out_channels:
                # This mode is used when the image dimensions (height and width) don't change, but only its channel dimension
                self.shortcut = Conv2D(self.out_channels,
                                       kernel_size=1,
                                       name=self.name + '/shortcut',
                                       strides=1,
                                       trainable=self.trainable,
                                       padding='same',
                                       kernel_constraint=self.conv_constraint,
                                       kernel_initializer=self.initializer)
            else:
                # If the shapes are equal then returns the input data itself
                self.shortcut = Lambda(lambda x: x, output_shape=input_shape, name=self.name + '/shortcut')
        elif self.mode == 'downsample':
            self.shortcut = Conv2D(self.out_channels,
                                   kernel_size=self.kernel_size,
                                   name=self.name + '/shortcut',
                                   strides=self.strides,
                                   trainable=self.trainable,
                                   padding='valid',
                                   kernel_constraint=self.conv_constraint,
                                   kernel_initializer=self.initializer)
        else:
            self.shortcut = Conv2DTranspose(self.out_channels,
                                            kernel_size=self.kernel_size,
                                            name=self.name + '/shortcut',
                                            strides=self.strides,
                                            trainable=self.trainable,
                                            padding='valid',
                                            kernel_constraint=self.conv_constraint,
                                            kernel_initializer=self.initializer)

    def call(self, input_tensor, training=True):
        """Forward pass: main path + optional SE gating + shortcut addition."""
        norm1 = self.norm1(input_tensor, training=training)
        relu1 = self.relu1(norm1)
        in_conv = self.in_conv(relu1)
        norm2 = self.norm2(in_conv, training=training)
        relu2 = self.relu2(norm2)
        mid_conv = self.mid_conv(relu2)
        norm3 = self.norm3(mid_conv, training=training)
        relu3 = self.relu3(norm3)
        out_conv = self.out_conv(relu3)
        if self.squeeze_excitation:
            # Channel-wise gating: pooled descriptor -> bottleneck -> sigmoid scale.
            global_pool = self.global_pool(out_conv)
            squeeze_dense1 = self.squeeze_dense1(global_pool)
            squeeze_dense2 = self.squeeze_dense2(squeeze_dense1)
            out_conv = tf.keras.layers.Multiply()([out_conv, squeeze_dense2])
        shortcut = self.shortcut(input_tensor)
        add = Add(name=self.name + '/add')([out_conv, shortcut])
        return add

    def get_config(self):
        """Serialize constructor arguments plus sub-layer configs."""
        config = super().get_config()
        config.update({'out_channels': self.out_channels,
                       'initializer': self.initializer,
                       'mode': self.mode,
                       'kernel_size': self.kernel_size,
                       'strides': self.strides,
                       'trainable': self.trainable,
                       'normalization': self.normalization,
                       'groups': self.groups,
                       'squeeze_excitation': self.squeeze_excitation,
                       'squeeze_ratio': self.squeeze_ratio,
                       # 'conv_constraint': self.conv_constraint,
                       # 'dense_constraint': self.dense_constraint,
                       'name': self.name,
                       'norm1': get_object_config(self.norm1),
                       'norm2': get_object_config(self.norm2),
                       'norm3': get_object_config(self.norm3),
                       'relu1': get_object_config(self.relu1),
                       'relu2': get_object_config(self.relu2),
                       'relu3': get_object_config(self.relu3),
                       'global_pool': get_object_config(self.global_pool),
                       'squeeze_dense1': get_object_config(self.squeeze_dense1),
                       'squeeze_dense2': get_object_config(self.squeeze_dense2),
                       'in_conv': get_object_config(self.in_conv),
                       'mid_conv': get_object_config(self.mid_conv),
                       'out_conv': get_object_config(self.out_conv)})
        return config
11,655
3,348
# coding=utf-8
from operator import xor
import os
import scrypt
import time
from libs.rediswrapper import UserHelper

# Py2/py3 shim for xrange.
try:
    xrange
except NameError:
    xrange = range


class Token(object):
    """Generates and validates encrypted user-verification tokens.

    NOTE(review): this module is Python-2 only as written —
    bytes.encode('base_64') does not exist on Python 3, and generate_hmac's
    operator.xor on two strings raises TypeError on any version; confirm
    before reuse.

    @param user_id:
    @type user_id:
    @param password:
    @type password:
    """
    __BLOCK_SIZE = 256
    # Pre-translated pads for the HMAC construction (0x5c outer / 0x36 inner).
    __TRANS_5C = "".join(chr(x ^ 0x5c) for x in xrange(256))
    __TRANS_36 = "".join(chr(x ^ 0x36) for x in xrange(256))
    # Per-process random salts (regenerated on every interpreter start).
    __I_SALT = os.urandom(16).encode('base_64')
    __O_SALT = os.urandom(16).encode('base_64')

    def __init__(self, user_id, password=None):
        self.user_id = user_id
        # get or create some password to encrypt the user verification token
        self.password = password  # if password else self.redis.get('token_pass')
        if not self.password:
            salt = os.urandom(16).encode('base_64')
            self.password = scrypt.hash(os.urandom(24).encode('base_64'), salt)

    def generate_token(self):
        """
        Generates an encrypted token for validating a user
        @return: the encrypted token (a random value and the date as a timestamp
        @rtype: str
        """
        # random value, user_id, timestamp
        values = '%s,%s,%s' % (os.urandom(16).encode('base_64'), self.user_id, time.time())
        return scrypt.encrypt(values, self.password)

    def generate_hmac(self, key, message):
        """
        @param key: The user's generated password
        @type key: str
        @param message: message to hash for client-server authentication
        @type message: str
        @return: the hash based message auth code (to verify against the client sent one)
        @rtype: str
        @see: http://en.wikipedia.org/wiki/Hash-based_message_authentication_code
        """
        # Shrink an over-long key, then zero-pad it to the block size.
        if len(key) > self.__BLOCK_SIZE:
            salt = os.urandom(16).encode('base_64')
            key = scrypt.hash(key, salt)
        key += chr(0) * (self.__BLOCK_SIZE - len(key))
        # NOTE(review): operator.xor on two str operands raises TypeError —
        # the intent appears to be a per-character XOR (cf. the translation
        # tables above); confirm this code path was ever exercised.
        o_key_pad = xor(self.__TRANS_5C, key)
        i_key_pad = xor(self.__TRANS_36, key)
        return scrypt.hash(o_key_pad + scrypt.hash(i_key_pad + message, self.__I_SALT), self.__O_SALT)

    def validate_token(self, client_token, server_token, expire_time=15):
        """
        @param client_token:
        @type client_token: str
        @param server_token:
        @type server_token: str
        @param expire_time:
        @type expire_time: int
        @return: True if still valid
        @rtype: bool
        """
        if client_token != server_token:
            return False
        # Token layout is 'random,user_id,timestamp' (see generate_token).
        tokens = scrypt.decrypt(client_token, self.password).split(',')
        if len(tokens) != 3:
            return False
        # NOTE(review): tokens[1] is the user_id, not the timestamp — the
        # expiry check presumably wants tokens[2]; confirm.
        expired = ((time.time() - int(tokens[1])) / 3600) >= expire_time
        if expired:
            return False
        return True


class RedisToken(Token):
    """Token variant whose encryption password is stored in redis.

    @param user_id:
    @type user_id: int
    @param redis_connection:
    @type redis_connection: StrictRedis
    @param password:
    @type password: str
    """

    def __init__(self, user_id, redis_connection, password=None):
        """
        @param user_id:
        @type user_id: int
        @param redis_connection
        @type redis_connection: StrictRedis
        @param password:
        @type password: str
        @return:
        @rtype:
        """
        # get or create some password to encrypt the user verification token
        self.redis = UserHelper(redis_connection, user_id)
        self.password = password if password else self.redis.get('token_pass')
        # NOTE(review): super() re-derives self.password from the raw
        # `password` argument (possibly None), clobbering the value just
        # loaded from redis — confirm this ordering is intended.
        super(RedisToken, self).__init__(user_id, password)
1,164
import json
from collections import defaultdict

import fastavro
import pandas as pd
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.urls import reverse

from datasets.models import Connection
from users.models import User


def get_supported_file_types():
    """Return a list of the viable file type extensions (lower-case, no dot)."""
    return ["csv", "avro", "parquet", "xlsx", "xls", "xlsm", "xlsb"]


def initialize_connection(datastore, connection_name, connection_owner_id, connection_type, request):
    """Create a connection and save the datastore on the connection object for later use.

    Note: `datastore` is attached as a plain attribute after save — it is not a
    persisted model field.
    """
    owner = User.objects.get(id=connection_owner_id)
    connection = Connection.objects.create(name=connection_name, owner=owner, type=connection_type)
    connection.datastore = datastore
    connection.save()
    messages.success(request, "Connection was created.")
    return HttpResponseRedirect(reverse("datasets:index"))


def get_query(dataset, query):
    """Pick the effective SQL: the explicit query, else the dataset's own, else SELECT *."""
    if query:
        return query
    if dataset.query:
        return dataset.query
    return f"SELECT * FROM {dataset.table}"


def structure_tables_views(table, views):
    """Return a structured dictionary containing the given tables and views.

    Shape: {"Tables": {schema: [{"value", "display"}, ...]},
            "Views":  {schema: [...]}}.
    (Fix: the original built these via side-effect list comprehensions; plain
    loops express the intent without discarding a list of Nones.)
    """
    table_dict = defaultdict(list)
    for schema, table_name in table:
        table_dict[schema].append({"value": f"{schema}.{table_name}", "display": table_name})
    view_dict = defaultdict(list)
    for schema, view in views:
        view_dict[schema].append({"value": f"{schema}.{view}", "display": view})
    return {"Tables": dict(table_dict), "Views": dict(view_dict)}


def convert_to_dataframe(file_type, data):
    """Convert the given bytes data into a dataframe based on the given file type."""
    if file_type == "csv":
        # sep=None asks pandas to sniff the delimiter (python parsing engine).
        df = pd.read_csv(data, sep=None)
    elif file_type == "avro":
        df = pd.DataFrame.from_records(fastavro.reader(data))
    elif file_type == "parquet":
        df = pd.read_parquet(data)
    else:
        # All remaining supported types are Excel variants.
        df = pd.read_excel(data)
    return df


def get_viable_blob_datasets(blobs, name_attr):
    """
    Used to get the viable datasets for blob datastores.
    Used for Google Cloud Storage, Azure Blob Storage, Azure Data Lake and
    Amazon S3 datastores.

    Returns {"Files": {parent_folder: [{"value": json, "display": filename}]}}.
    """
    supported = get_supported_file_types()  # hoisted out of the loop
    viable_blobs = [blob for blob in blobs
                    if getattr(blob, name_attr).split(".")[-1].lower() in supported]
    viable_datasets = defaultdict(list)
    for blob in viable_blobs:
        split_path = getattr(blob, name_attr).split("/")
        parent_folder = split_path[-2] if len(split_path) >= 2 else "root"
        value = json.dumps({"id": getattr(blob, name_attr), "name": split_path[-1].split(".")[0]})
        viable_datasets[parent_folder].append({"value": value, "display": split_path[-1]})
    return {"Files": dict(viable_datasets)}
2,961
888
#! /usr/bin/python


def solution(A, B, K):
    """Count the integers in [A, B] that are divisible by K.

    Args:
        A: lower bound (0 <= A <= B).
        B: upper bound.
        K: divisor (K >= 1).

    Returns:
        The number of i with A <= i <= B and i % K == 0, as an int.

    Uses floor division so the result is an exact integer on both Python 2
    and Python 3 (the original used `/`, which yields floats on Python 3),
    and replaces the four-way remainder case analysis with the closed form
    count = multiples up to B minus multiples strictly below A.
    """
    # Multiples of K in [1, B] minus multiples of K in [1, A].
    count = B // K - A // K
    if A % K == 0:
        # A itself is a multiple and was subtracted above; add it back.
        count += 1
    return count
637
274
import sys
import Sofa
import Tools


def MechanicalObjectVisitor(node):
    ## listing mechanical states, bottom-up from node
    # Recursively collects "path/stateName" strings for every mechanical
    # state found in the ancestors of `node`.
    ancestors = []
    # NOTE(review): `visited` is never appended to, so the membership test
    # below is always True and shared ancestors may be listed repeatedly.
    visited = []
    for p in node.getParents():
        path = p.getPathName()
        if not path in visited:
            state = p.getMechanicalState()
            if not state is None:
                ancestors.append( path+"/"+state.name )
            ancestors += MechanicalObjectVisitor( p )
    return ancestors


class Visitor(object):
    ## checking that mapping graph is equivalent to node graph
    ## checking that independent dofs are not under other dofs in the scene graph

    def __init__(self):
        #print "DAGValidationVisitor"
        # Accumulates human-readable error strings found during traversal.
        self.error = []

    def treeTraversal(self):
        #print 'ValidationVisitor treeTraversal'
        return -1  # dag

    def processNodeTopDown(self,node):
        #print node.name
        state = node.getMechanicalState()
        if state is None:
            # Nothing to validate in nodes without a mechanical state.
            return True
        mapping = node.getMechanicalMapping()
        if mapping is None:
            #independent dofs
            ancestors = MechanicalObjectVisitor(node)
            # NOTE(review): `len(...) is 0` relies on small-int identity;
            # `== 0` (or a truthiness test) is the correct comparison.
            if not len(ancestors) is 0:
                # an independent dof is under other dofs in the scene graph
                err = "ERROR "
                err += "mechanical state '"+state.getContext().getPathName()+"/"+state.name+"' is independent (no mapping)"
                err += " and should not be in the child node of other mechanical states ("+Tools.listToStr(ancestors)+")"
                self.error.append(err)
        else:
            # mapped dofs
            #print mapping.getName()
            # The mapping's source dofs must live exactly in the node's parents.
            from_dof = mapping.getFrom()
            parent_node = mapping.getContext().getParents()
            parent_node_path = []
            for p in parent_node:
                parent_node_path.append( p.getPathName() )
            from_node_path = []
            for f in from_dof:
                from_node_path.append( f.getContext().getPathName() )
            #print parent_node_path
            # Every mapping source must be a scene-graph parent...
            for f in from_node_path:
                #print f
                if not f in parent_node_path:
                    err = "ERROR "
                    err += "'"+mapping.getContext().getPathName()+"/"+mapping.name+"': "
                    err += "'"+ f + "' should be a parent node"
                    self.error.append(err)
                    #print err
            # ...and every scene-graph parent must be a mapping source.
            for p in parent_node_path:
                #print p
                if not p in from_node_path:
                    err = "ERROR "
                    err += "'"+mapping.getContext().getPathName()+"/"+mapping.name+"': "
                    err += "'"+p + "' should NOT be a parent node"
                    self.error.append(err)
                    #print err
        #print "==================="
        return True

    def processNodeBottomUp(self,node):
        return True


def test( node, silent=False ):
    ## checking that mapping graph is equivalent to node graph
    ## checking that independent dofs are not under other dofs in the scene graph
    ## return a list of errors
    if not silent:
        print ""
        print "====== SofaPython.DAGValidation.test ======================="
        print ""
        print "Validating scene from node '/" + node.getPathName() + "'..."
    vis = Visitor()
    node.executeVisitor(vis)
    if not silent:
        if len(vis.error) is 0:
            print "... VALIDATED"
        else:
            print "... NOT VALID"
            print ""
            for e in vis.error:
                print e
        print ""
        print "=============================================================="
    sys.stdout.flush()
    return vis.error
994
import time
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import f1_score
import stellargraph as sg
from stellargraph.mapper import CorruptedGenerator, HinSAGENodeGenerator
from stellargraph.layer import DeepGraphInfomax, HinSAGE
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import Model, optimizers, losses, metrics

'''
Runs the entire pipeline:
- Takes preprocessed data as input
- Outputs predictions on the test_set nodes.
'''


def DGIPipeline(v_sets, e_sets, v_data, e_data, core_targets, ext_targets, core_testing):
    """Train unsupervised HinSAGE node embeddings with Deep Graph Infomax,
    then classify each node type with a decision tree.

    Parameters
    ----------
    v_sets, e_sets : dicts of node/edge DataFrames keyed by type.
    v_data : DataFrame indexed by node id, provides ExtendedCaseGraphID labels.
    e_data, core_targets, ext_targets, core_testing : accepted for interface
        compatibility with the other pipelines; not used here.

    Side effects: writes per-type prediction CSVs and a combined
    ./output/full_predictions.csv.  Returns 1 on completion.
    """
    print("HINSAGE DGI FULL PIPELINE STARTED")
    tin = time.time()

    #? Sort based on testingFlag.
    # data_splits[i].iloc[INDEX].values[0] is assumed to give the row counts
    # for [0] testingFlag=NaN, [1] testingFlag=0, [2] testingFlag=1.
    # NOTE(review): Series.value_counts() drops NaN by default and sorts by
    # frequency, not by flag value — confirm this ordering assumption holds
    # for the preprocessed data.
    data_splits = dict()
    for i in v_sets:
        v_sets[i] = v_sets[i].sort_values('testingFlag')
        data_splits[i] = v_sets[i].testingFlag.value_counts().to_frame()
        v_sets[i] = v_sets[i].drop('testingFlag', axis=1)

    #? Removing ExtendedCaseGraphID (it is the prediction target, not a feature).
    for i in v_sets:
        v_sets[i] = v_sets[i].drop('ExtendedCaseGraphID', axis=1)

    #? Create the graph object.
    G = sg.StellarDiGraph(v_sets, e_sets)

    # HinSAGE predicts on one node type at a time (while still using the whole
    # graph to compute embeddings), so the algorithm runs once per node type.

    # Parameters
    batch_size = 200
    dropout = 0.4
    verbose = 1
    visualize = False

    def run_for_node_type(v_type, hinsage_layer_sizes, num_samples, activations, epochs):
        """Embed, train and predict for one node type; return the predictions."""
        # Bucket sizes per testingFlag value (see NOTE above on the ordering).
        nan_tflag = data_splits[v_type].iloc[0].values[0]
        train_tflag = data_splits[v_type].iloc[1].values[0]
        test_tflag = data_splits[v_type].iloc[2].values[0]

        # Rows were sorted by testingFlag, so plain slices pick the buckets.
        train_cv_set = v_sets[v_type][nan_tflag:nan_tflag + train_tflag]
        train_cv_ids = train_cv_set.index.values.tolist()
        train_cv_labels = v_data.loc[[int(node_id) for node_id in train_cv_ids]].ExtendedCaseGraphID
        test_set = v_sets[v_type][-test_tflag:]
        test_ids = test_set.index.values.tolist()

        generator = HinSAGENodeGenerator(
            G, batch_size, num_samples, head_node_type=v_type
        )
        hinsage = HinSAGE(
            layer_sizes=hinsage_layer_sizes,
            activations=activations,
            generator=generator,
            bias=True,
            normalize="l2",
            dropout=dropout
        )

        def run_deep_graph_infomax(base_model, generator, epochs):
            """Train *base_model* unsupervised with Deep Graph Infomax."""
            print(f"Starting training for {v_type} type: ")
            t0 = time.time()
            corrupted_generator = CorruptedGenerator(generator)
            gen = corrupted_generator.flow(G.nodes(node_type=v_type))
            infomax = DeepGraphInfomax(base_model, corrupted_generator)
            x_in, x_out = infomax.in_out_tensors()

            # Train with DGI; early-stop on the training loss plateau.
            model = Model(inputs=x_in, outputs=x_out)
            model.compile(loss=tf.nn.sigmoid_cross_entropy_with_logits, optimizer=Adam(lr=1e-3))
            es = EarlyStopping(monitor="loss", min_delta=0, patience=10)
            model.fit(gen, epochs=epochs, verbose=verbose, callbacks=[es])

            x_emb_in, x_emb_out = base_model.in_out_tensors()
            if generator.num_batch_dims() == 2:
                x_emb_out = tf.squeeze(x_emb_out, axis=0)

            t1 = time.time()
            print(f'Time required: {t1-t0:.2f} s ({(t1-t0)/60:.1f} min)')
            return x_emb_in, x_emb_out, model

        #? Train HinSAGE model:
        x_emb_in, x_emb_out, _model = run_deep_graph_infomax(hinsage, generator, epochs=epochs)
        emb_model = Model(inputs=x_emb_in, outputs=x_emb_out)
        train_cv_embs = emb_model.predict(
            generator.flow(train_cv_set.index.values)
        )

        #? Optional: plot embeddings of the training/CV set of the current
        # node type.  Visualization mode short-circuits classification.
        if visualize:
            train_cv_embs_2d = pd.DataFrame(
                TSNE(n_components=2).fit_transform(train_cv_embs),
                index=train_cv_set.index.values
            )
            label_map = {l: i * 10 for i, l in enumerate(np.unique(train_cv_labels), start=10) if pd.notna(l)}
            node_colours = [label_map[target] if pd.notna(target) else 0 for target in train_cv_labels]
            alpha = 0.7
            fig, ax = plt.subplots(figsize=(15, 15))
            ax.scatter(
                train_cv_embs_2d[0],
                train_cv_embs_2d[1],
                c=node_colours,
                cmap="jet",
                alpha=alpha,
            )
            ax.set(aspect="equal")
            plt.title(f"TSNE of HinSAGE {v_type} embeddings with DGI- coloring on ExtendedCaseGraphID")
            plt.show()
            return 1

        #? Split training and cross validation set using 80% 20% simple ordered split.
        n_embs = train_cv_embs.shape[0]
        train_size = int(n_embs * 0.80)
        cv_size = int(n_embs * 0.20)

        train_set = train_cv_embs[:train_size]
        train_labels = np.ravel(pd.DataFrame(train_cv_labels.values[:train_size]).fillna(0))
        cv_set = train_cv_embs[-cv_size:]
        cv_labels = np.ravel(pd.DataFrame(train_cv_labels.values[-cv_size:]).fillna(0))

        #? CLASSIFY
        print(f"Running Classifier for {v_type} type")
        classifier = DecisionTreeClassifier()
        classifier.fit(
            X=train_set,
            y=train_labels,
        )
        cv_pred = classifier.predict(cv_set)
        f1_avg = f1_score(cv_labels, cv_pred, average='weighted')
        acc = (cv_pred == cv_labels).mean()
        print(f"{v_type} CV Metrics: f1: {f1_avg:.6f} - acc: {acc:.6f}")

        #? Now run on the test set.
        test_embs = emb_model.predict(
            generator.flow(test_set.index.values)
        )
        test_pred = classifier.predict(test_embs)

        #? Save predictions.
        outdir = './output'
        outname = f"{v_type}_predictions.csv"
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        fullname = os.path.join(outdir, outname)
        output = pd.DataFrame(test_ids)
        output = output.rename(columns={0: 'node_id'})
        output['ExtendedCaseGraphID'] = test_pred
        output = output.set_index('node_id')
        output.to_csv(fullname)
        return output

    #? Run for each node type.
    full_predictions = pd.DataFrame()
    for v_type in v_sets:
        if v_type == 'Account':
            epochs = 12
            num_samples = [8, 4]
            hinsage_layer_sizes = [32, 32]
            activations = ['relu', 'relu']
        else:
            epochs = 30
            num_samples = [12]
            hinsage_layer_sizes = [72]
            activations = ['relu']
        if v_type != 'External Entity' and v_type != 'Address':
            predictions = run_for_node_type(v_type, hinsage_layer_sizes, num_samples, activations, epochs)
            full_predictions = full_predictions.append(predictions)

    full_predictions.to_csv("./output/full_predictions.csv")

    tout = time.time()
    # BUG FIX: elapsed time was computed as (tin - tout), printing a
    # negative number of minutes.
    print(f"HINSAGE DGI FULL PIPELINE COMPLETED: {(tout-tin)/60:.0f} min")
    return 1
7,216
2,585
from flask import Flask , render_template, request import google_news app = Flask(__name__) outFile = '' @app.route("/") def main(): print "Welcome!" return render_template('index.html') @app.route('/uploadFile', methods=['POST']) def upload(): global outputFile filedata = request.files['upload'] filename = filedata.filename print 'filename:' + filename inputFile = 'input/' + filename outputFile = 'output/' + filename + '_output' outputPath = 'templates/' + outputFile filedata.save(inputFile) print "Input Saved" print "processing starts" google_news.news(inputFile,outputPath) print "processing success" #processing return "success" @app.route('/download') def download(): print 'download' print outputFile return render_template(outputFile) if __name__ == "__main__": app.run()
878
263
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for mujoban_level.""" from absl.testing import absltest from physics_planning_games.mujoban import mujoban_level _LEVEL = """ ##### # @#### # $. # ###$.# # # $.# # # #$. # # ### ######""" _GRID_LEVEL = """******** *..P**** *..BG..* ***BG*.* *..BG*.* *.*BG..* *....*** ******** """ class MujobanLevelTest(absltest.TestCase): def test_ascii_to_text_grid_level(self): grid_level = mujoban_level._ascii_to_text_grid_level(_LEVEL) self.assertEqual(_GRID_LEVEL, grid_level) if __name__ == '__main__': absltest.main()
1,222
431
# Generated by Django 2.2.3 on 2019-07-12 12:51 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('TeamXapp', '0039_auto_20190712_1348'), ] operations = [ migrations.AddField( model_name='leavecalendar', name='leave_type', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='TeamXapp.LeaveStatus'), ), migrations.AlterField( model_name='allmembers', name='scrum_team_name', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='TeamXapp.ScrumTeam', verbose_name='Scrum team: '), ), migrations.AlterField( model_name='allmembers', name='scrum_team_roles', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='TeamXapp.ScrumTeamRole', verbose_name='Scrum Team Roles: '), ), ]
1,069
369
""" The given file contains the class to refer to the Site entity """ from quartic_sdk.core.entities.base import Base import quartic_sdk.utilities.constants as Constants class Site(Base): """ The given class refers to the site entity which is created based upon the site response returned by the API """ def __repr__(self): """ Override the method to return the site name """ return f"<{Constants.SITE_ENTITY}: {self.name}>" def assets(self): """ Get the assets belongs to a site """ raise NotImplementedError def edge_connectors(self): """ Get the edge_connectors belongs to a site """ raise NotImplementedError
742
203
# Generated by Django 2.0.5 on 2019-05-24 15:11 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('newapp', '0002_auto_20190524_1507'), ] operations = [ migrations.AlterField( model_name='course', name='additional_info', field=models.TextField(), ), migrations.AlterField( model_name='course', name='misc_links', field=models.TextField(), ), ]
525
175
from django.views import View from comment.models import BlockedUser, BlockedUserHistory, Comment from comment.mixins import CanBlockUsersMixin from comment.responses import UTF8JsonResponse, DABResponseData from comment.messages import BlockUserError class BaseToggleBlockingView(DABResponseData): response_class = None def get_response_class(self): assert self.response_class is not None, ( "'%s' should either include a `response_class` attribute, " "or override the `get_response_class()` method." % self.__class__.__name__ ) return self.response_class def post(self, request, *args, **kwargs): response_class = self.get_response_class() request_data = request.POST or getattr(request, 'data', {}) comment_id = request_data.get('comment_id', None) try: comment = Comment.objects.get(id=int(comment_id)) except (Comment.DoesNotExist, ValueError, TypeError): self.error = { 'detail': BlockUserError.INVALID } self.status = 400 return response_class(self.json(), status=self.status) blocked_user, created = BlockedUser.objects.get_or_create_blocked_user_for_comment(comment) if not created: blocked_user.blocked = not blocked_user.blocked blocked_user.save() reason = request_data.get('reason', None) if blocked_user.blocked and not reason: reason = comment.content BlockedUserHistory.objects.create_history( blocked_user=blocked_user, blocker=request.user, reason=reason ) self.data = { 'blocked_user': comment.get_username(), 'blocked': blocked_user.blocked, 'urlhash': comment.urlhash } return response_class(self.json()) class ToggleBlockingView(CanBlockUsersMixin, BaseToggleBlockingView, View): response_class = UTF8JsonResponse
2,027
551
def stable_api(x): print("X: {0}".format(x))
49
23
# -*- coding: utf-8 -*- # Copyright (c) 2015 Brad Newbold (wudan07 [at] gmail.com) # See LICENSE for details. # glyph.py # """wIcon library: glyph provides GlyphObject """ ##from handy import * ##from common import * ### represents a character in a glyphString class GlyphObject: def __init__(self, glyph): ### set to glyph value self.glyph = glyph ### will be an array of pixels, unique to each glyph self.coords = [] ### will be an adjustment to the next characters starting point - I eyeballed this. Sorry typographers! self.flash = 6 if glyph == 'A': self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 3]) self.coords.append([1, 4]) self.coords.append([1, 5]) self.coords.append([1, 6]) self.coords.append([2, 0]) self.coords.append([2, 1]) self.coords.append([2, 2]) self.coords.append([2, 6]) self.coords.append([3, 0]) self.coords.append([3, 1]) self.coords.append([3, 2]) self.coords.append([3, 6]) self.coords.append([4, 3]) self.coords.append([4, 4]) self.coords.append([4, 5]) self.coords.append([4, 6]) self.coords.append([5, 6]) self.coords.append([5, 7]) self.coords.append([5, 8]) self.flash = 7 elif glyph == 'a': self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([4, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) self.coords.append([4, 6]) self.coords.append([0, 7]) self.coords.append([3, 7]) self.coords.append([4, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'B': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 0]) self.coords.append([1, 4]) self.coords.append([1, 8]) self.coords.append([2, 0]) self.coords.append([2, 4]) 
self.coords.append([2, 8]) self.coords.append([3, 0]) self.coords.append([3, 4]) self.coords.append([3, 8]) self.coords.append([4, 1]) self.coords.append([4, 2]) self.coords.append([4, 3]) self.coords.append([4, 5]) self.coords.append([4, 6]) self.coords.append([4, 7]) self.flash = 6 elif glyph == 'b': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 4]) self.coords.append([1, 8]) self.coords.append([2, 3]) self.coords.append([2, 8]) self.coords.append([3, 3]) self.coords.append([3, 8]) self.coords.append([4, 4]) self.coords.append([4, 5]) self.coords.append([4, 6]) self.coords.append([4, 7]) self.flash = 6 elif glyph == 'C': self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([1, 1]) self.coords.append([1, 7]) self.coords.append([2, 0]) self.coords.append([2, 8]) self.coords.append([3, 0]) self.coords.append([3, 8]) self.coords.append([4, 0]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'c': self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([4, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.flash = 6 elif glyph == 'D': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 0]) self.coords.append([1, 8]) self.coords.append([2, 0]) self.coords.append([2, 8]) self.coords.append([3, 0]) self.coords.append([3, 8]) 
self.coords.append([4, 1]) self.coords.append([4, 7]) self.coords.append([5, 2]) self.coords.append([5, 3]) self.coords.append([5, 4]) self.coords.append([5, 5]) self.coords.append([5, 6]) self.flash = 7 elif glyph == 'd': self.coords.append([4, 0]) self.coords.append([4, 1]) self.coords.append([4, 2]) self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([4, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) self.coords.append([4, 6]) self.coords.append([0, 7]) self.coords.append([3, 7]) self.coords.append([4, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'E': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 0]) self.coords.append([1, 4]) self.coords.append([1, 8]) self.coords.append([2, 0]) self.coords.append([2, 4]) self.coords.append([2, 8]) self.coords.append([3, 0]) self.coords.append([3, 4]) self.coords.append([3, 8]) self.coords.append([4, 0]) self.coords.append([4, 4]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'e': self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([1, 5]) self.coords.append([2, 5]) self.coords.append([3, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([4, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.flash = 6 elif glyph == 'F': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) 
self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 0]) self.coords.append([1, 4]) self.coords.append([2, 0]) self.coords.append([2, 4]) self.coords.append([3, 0]) self.coords.append([3, 4]) self.coords.append([4, 0]) self.coords.append([4, 4]) self.flash = 6 elif glyph == 'f': self.coords.append([2, 1]) self.coords.append([3, 1]) self.coords.append([1, 2]) self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([1, 4]) self.coords.append([1, 5]) self.coords.append([1, 6]) self.coords.append([1, 7]) self.coords.append([1, 8]) self.flash = 5 elif glyph == 'G': self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([1, 1]) self.coords.append([1, 7]) self.coords.append([2, 0]) self.coords.append([2, 8]) self.coords.append([3, 0]) self.coords.append([3, 4]) self.coords.append([3, 8]) self.coords.append([4, 0]) self.coords.append([4, 4]) self.coords.append([4, 5]) self.coords.append([4, 6]) self.coords.append([4, 7]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'g': self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([4, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) self.coords.append([4, 6]) self.coords.append([0, 7]) self.coords.append([3, 7]) self.coords.append([4, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([4, 8]) self.coords.append([4, 9]) self.coords.append([1, 10]) self.coords.append([2, 10]) self.coords.append([3, 10]) self.flash = 6 elif glyph == 'H': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([0, 
8]) self.coords.append([1, 4]) self.coords.append([2, 4]) self.coords.append([3, 4]) self.coords.append([4, 0]) self.coords.append([4, 1]) self.coords.append([4, 2]) self.coords.append([4, 3]) self.coords.append([4, 4]) self.coords.append([4, 5]) self.coords.append([4, 6]) self.coords.append([4, 7]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'h': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) self.coords.append([4, 6]) self.coords.append([0, 7]) self.coords.append([4, 7]) self.coords.append([0, 8]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'I': self.coords.append([0, 0]) self.coords.append([0, 8]) self.coords.append([1, 0]) self.coords.append([1, 8]) self.coords.append([2, 0]) self.coords.append([2, 1]) self.coords.append([2, 2]) self.coords.append([2, 3]) self.coords.append([2, 4]) self.coords.append([2, 5]) self.coords.append([2, 6]) self.coords.append([2, 7]) self.coords.append([2, 8]) self.coords.append([3, 0]) self.coords.append([3, 8]) self.coords.append([4, 0]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'i': self.coords.append([1, 1]) self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([1, 4]) self.coords.append([1, 5]) self.coords.append([1, 6]) self.coords.append([1, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.flash = 4 elif glyph == 'J': self.coords.append([0, 8]) self.coords.append([1, 8]) self.coords.append([2, 0]) self.coords.append([2, 8]) self.coords.append([3, 0]) self.coords.append([3, 8]) self.coords.append([4, 0]) self.coords.append([4, 1]) self.coords.append([4, 2]) self.coords.append([4, 3]) self.coords.append([4, 4]) self.coords.append([4, 5]) self.coords.append([4, 6]) self.coords.append([4, 
7]) self.flash = 6 elif glyph == 'j': self.coords.append([2, 1]) self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([2, 4]) self.coords.append([2, 5]) self.coords.append([2, 6]) self.coords.append([2, 7]) self.coords.append([2, 8]) self.coords.append([0, 9]) self.coords.append([1, 9]) self.flash = 4 elif glyph == 'K': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 4]) self.coords.append([2, 3]) self.coords.append([2, 5]) self.coords.append([3, 1]) self.coords.append([3, 2]) self.coords.append([3, 6]) self.coords.append([4, 0]) self.coords.append([4, 7]) self.coords.append([5, 8]) self.flash = 7 elif glyph == 'k': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([3, 3]) self.coords.append([0, 4]) self.coords.append([2, 4]) self.coords.append([0, 5]) self.coords.append([1, 5]) self.coords.append([0, 6]) self.coords.append([2, 6]) self.coords.append([0, 7]) self.coords.append([3, 7]) self.coords.append([0, 8]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'L': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'l': self.coords.append([1, 1]) self.coords.append([1, 2]) self.coords.append([1, 3]) self.coords.append([1, 4]) self.coords.append([1, 5]) self.coords.append([1, 6]) self.coords.append([1, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.flash = 4 elif glyph == 'M': 
self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 1]) self.coords.append([1, 2]) self.coords.append([1, 3]) self.coords.append([2, 4]) self.coords.append([2, 5]) self.coords.append([2, 6]) self.coords.append([3, 1]) self.coords.append([3, 2]) self.coords.append([3, 3]) self.coords.append([4, 0]) self.coords.append([4, 1]) self.coords.append([4, 2]) self.coords.append([4, 3]) self.coords.append([4, 4]) self.coords.append([4, 5]) self.coords.append([4, 6]) self.coords.append([4, 7]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'm': self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([3, 3]) self.coords.append([0, 4]) self.coords.append([2, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([2, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) self.coords.append([2, 6]) self.coords.append([4, 6]) self.coords.append([0, 7]) self.coords.append([2, 7]) self.coords.append([4, 7]) self.coords.append([0, 8]) self.coords.append([2, 8]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'N': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 1]) self.coords.append([1, 2]) self.coords.append([2, 3]) self.coords.append([2, 4]) self.coords.append([2, 5]) self.coords.append([3, 6]) self.coords.append([3, 7]) self.coords.append([4, 0]) self.coords.append([4, 1]) self.coords.append([4, 2]) self.coords.append([4, 3]) self.coords.append([4, 4]) self.coords.append([4, 5]) self.coords.append([4, 6]) self.coords.append([4, 7]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'n': 
self.coords.append([0, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([0, 4]) self.coords.append([1, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) self.coords.append([4, 6]) self.coords.append([0, 7]) self.coords.append([4, 7]) self.coords.append([0, 8]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'O': self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([1, 1]) self.coords.append([1, 7]) self.coords.append([2, 0]) self.coords.append([2, 8]) self.coords.append([3, 0]) self.coords.append([3, 8]) self.coords.append([4, 1]) self.coords.append([4, 7]) self.coords.append([5, 2]) self.coords.append([5, 3]) self.coords.append([5, 4]) self.coords.append([5, 5]) self.coords.append([5, 6]) self.flash = 7 elif glyph == 'o': self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) self.coords.append([4, 6]) self.coords.append([0, 7]) self.coords.append([4, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.flash = 6 elif glyph == 'P': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 0]) self.coords.append([1, 5]) self.coords.append([2, 0]) self.coords.append([2, 5]) self.coords.append([3, 0]) self.coords.append([3, 4]) self.coords.append([4, 1]) self.coords.append([4, 2]) self.coords.append([4, 3]) self.flash = 6 elif glyph == 'p': self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([0, 4]) 
self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) self.coords.append([4, 6]) self.coords.append([0, 7]) self.coords.append([4, 7]) self.coords.append([0, 8]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.coords.append([0, 9]) self.coords.append([0, 10]) self.flash = 6 elif glyph == 'Q': self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([1, 1]) self.coords.append([1, 7]) self.coords.append([2, 0]) self.coords.append([2, 8]) self.coords.append([3, 0]) self.coords.append([3, 8]) self.coords.append([3, 9]) self.coords.append([4, 1]) self.coords.append([4, 7]) self.coords.append([4, 10]) self.coords.append([5, 2]) self.coords.append([5, 3]) self.coords.append([5, 4]) self.coords.append([5, 5]) self.coords.append([5, 6]) self.coords.append([5, 10]) self.coords.append([6, 10]) self.flash = 7 elif glyph == 'q': self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([4, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) self.coords.append([4, 6]) self.coords.append([0, 7]) self.coords.append([3, 7]) self.coords.append([4, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([4, 8]) self.coords.append([4, 9]) self.coords.append([4, 10]) self.flash = 6 elif glyph == 'R': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 0]) self.coords.append([1, 4]) self.coords.append([2, 0]) self.coords.append([2, 4]) self.coords.append([3, 0]) self.coords.append([3, 3]) self.coords.append([3, 5]) self.coords.append([3, 6]) 
self.coords.append([4, 1]) self.coords.append([4, 2]) self.coords.append([4, 7]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'r': self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([3, 3]) self.coords.append([4, 3]) self.coords.append([1, 4]) self.coords.append([2, 4]) self.coords.append([1, 5]) self.coords.append([1, 6]) self.coords.append([1, 7]) self.coords.append([0, 8]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.flash = 6 elif glyph == 'S': self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 8]) self.coords.append([1, 0]) self.coords.append([1, 4]) self.coords.append([1, 8]) self.coords.append([2, 0]) self.coords.append([2, 4]) self.coords.append([2, 8]) self.coords.append([3, 0]) self.coords.append([3, 5]) self.coords.append([3, 8]) self.coords.append([4, 6]) self.coords.append([4, 7]) self.flash = 6 elif glyph == 's': self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([1, 5]) self.coords.append([2, 5]) self.coords.append([3, 6]) self.coords.append([0, 7]) self.coords.append([4, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.flash = 6 elif glyph == 'T': self.coords.append([0, 0]) self.coords.append([1, 0]) self.coords.append([2, 0]) self.coords.append([3, 0]) self.coords.append([3, 1]) self.coords.append([3, 2]) self.coords.append([3, 3]) self.coords.append([3, 4]) self.coords.append([3, 5]) self.coords.append([3, 6]) self.coords.append([3, 7]) self.coords.append([3, 8]) self.coords.append([4, 0]) self.coords.append([5, 0]) self.coords.append([6, 0]) self.flash = 8 elif glyph == 't': self.coords.append([1, 1]) self.coords.append([1, 2]) self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([1, 4]) self.coords.append([1, 5]) 
self.coords.append([1, 6]) self.coords.append([1, 7]) self.coords.append([4, 7]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.flash = 6 elif glyph == 'U': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.coords.append([4, 0]) self.coords.append([4, 1]) self.coords.append([4, 2]) self.coords.append([4, 3]) self.coords.append([4, 4]) self.coords.append([4, 5]) self.coords.append([4, 6]) self.coords.append([4, 7]) self.flash = 6 elif glyph == 'u': self.coords.append([0, 3]) self.coords.append([4, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) self.coords.append([4, 6]) self.coords.append([0, 7]) self.coords.append([3, 7]) self.coords.append([4, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'V': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([1, 2]) self.coords.append([1, 3]) self.coords.append([1, 4]) self.coords.append([2, 5]) self.coords.append([2, 6]) self.coords.append([3, 7]) self.coords.append([3, 8]) self.coords.append([4, 4]) self.coords.append([4, 5]) self.coords.append([4, 6]) self.coords.append([5, 1]) self.coords.append([5, 2]) self.coords.append([5, 3]) self.coords.append([6, 0]) self.flash = 8 elif glyph == 'v': self.coords.append([0, 3]) self.coords.append([4, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([1, 5]) self.coords.append([3, 5]) self.coords.append([1, 6]) self.coords.append([3, 6]) self.coords.append([2, 7]) self.coords.append([2, 8]) self.flash = 6 elif glyph == 'W': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) 
self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([1, 5]) self.coords.append([1, 6]) self.coords.append([1, 7]) self.coords.append([1, 8]) self.coords.append([2, 3]) self.coords.append([2, 4]) self.coords.append([2, 5]) self.coords.append([3, 0]) self.coords.append([3, 1]) self.coords.append([3, 2]) self.coords.append([4, 3]) self.coords.append([4, 4]) self.coords.append([4, 5]) self.coords.append([5, 5]) self.coords.append([5, 6]) self.coords.append([5, 7]) self.coords.append([5, 8]) self.coords.append([6, 0]) self.coords.append([6, 1]) self.coords.append([6, 2]) self.coords.append([6, 3]) self.coords.append([6, 4]) self.flash = 8 elif glyph == 'w': self.coords.append([0, 3]) self.coords.append([4, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([2, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) self.coords.append([2, 6]) self.coords.append([4, 6]) self.coords.append([1, 7]) self.coords.append([3, 7]) self.coords.append([1, 8]) self.coords.append([3, 8]) self.flash = 6 elif glyph == 'X': self.coords.append([0, 0]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 1]) self.coords.append([1, 2]) self.coords.append([1, 5]) self.coords.append([1, 6]) self.coords.append([2, 3]) self.coords.append([2, 4]) self.coords.append([3, 1]) self.coords.append([3, 2]) self.coords.append([3, 5]) self.coords.append([3, 6]) self.coords.append([4, 0]) self.coords.append([4, 7]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'x': self.coords.append([0, 3]) self.coords.append([4, 3]) self.coords.append([1, 4]) self.coords.append([3, 4]) self.coords.append([2, 5]) self.coords.append([2, 6]) self.coords.append([1, 7]) self.coords.append([3, 7]) self.coords.append([0, 8]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'Y': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([1, 2]) self.coords.append([1, 3]) self.coords.append([2, 4]) 
self.coords.append([2, 5]) self.coords.append([2, 6]) self.coords.append([2, 7]) self.coords.append([2, 8]) self.coords.append([3, 2]) self.coords.append([3, 3]) self.coords.append([4, 0]) self.coords.append([4, 1]) self.flash = 6 elif glyph == 'y': self.coords.append([0, 3]) self.coords.append([4, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([1, 5]) self.coords.append([3, 5]) self.coords.append([1, 6]) self.coords.append([3, 6]) self.coords.append([2, 7]) self.coords.append([2, 8]) self.coords.append([1, 9]) self.coords.append([0, 10]) self.flash = 6 elif glyph == 'Z': self.coords.append([0, 0]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 0]) self.coords.append([1, 5]) self.coords.append([1, 6]) self.coords.append([1, 8]) self.coords.append([2, 0]) self.coords.append([2, 4]) self.coords.append([2, 8]) self.coords.append([3, 0]) self.coords.append([3, 2]) self.coords.append([3, 3]) self.coords.append([3, 8]) self.coords.append([4, 0]) self.coords.append([4, 1]) self.coords.append([4, 8]) self.flash = 6 elif glyph == 'z': self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([4, 3]) self.coords.append([3, 4]) self.coords.append([2, 5]) self.coords.append([1, 6]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.coords.append([4, 8]) self.flash = 6 elif glyph == '0': self.coords.append([1, 0]) self.coords.append([2, 0]) self.coords.append([3, 0]) self.coords.append([0, 1]) self.coords.append([4, 1]) self.coords.append([0, 2]) self.coords.append([4, 2]) self.coords.append([0, 3]) self.coords.append([3, 3]) self.coords.append([4, 3]) self.coords.append([0, 4]) self.coords.append([2, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([1, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) self.coords.append([4, 6]) 
self.coords.append([0, 7]) self.coords.append([4, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.flash = 6 elif glyph == '1': self.coords.append([2, 0]) self.coords.append([1, 1]) self.coords.append([2, 1]) self.coords.append([0, 2]) self.coords.append([2, 2]) self.coords.append([2, 3]) self.coords.append([2, 4]) self.coords.append([2, 5]) self.coords.append([2, 6]) self.coords.append([2, 7]) self.coords.append([0, 8]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.coords.append([4, 8]) self.flash = 6 elif glyph == '2': self.coords.append([1, 0]) self.coords.append([2, 0]) self.coords.append([3, 0]) self.coords.append([0, 1]) self.coords.append([4, 1]) self.coords.append([4, 2]) self.coords.append([4, 3]) self.coords.append([3, 4]) self.coords.append([2, 5]) self.coords.append([1, 6]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.coords.append([4, 8]) self.flash = 6 elif glyph == '3': self.coords.append([1, 0]) self.coords.append([2, 0]) self.coords.append([3, 0]) self.coords.append([0, 1]) self.coords.append([4, 1]) self.coords.append([4, 2]) self.coords.append([4, 3]) self.coords.append([2, 4]) self.coords.append([3, 4]) self.coords.append([4, 5]) self.coords.append([4, 6]) self.coords.append([0, 7]) self.coords.append([4, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.flash = 6 elif glyph == '4': self.coords.append([1, 0]) self.coords.append([3, 0]) self.coords.append([1, 1]) self.coords.append([3, 1]) self.coords.append([0, 2]) self.coords.append([3, 2]) self.coords.append([0, 3]) self.coords.append([3, 3]) self.coords.append([0, 4]) self.coords.append([1, 4]) self.coords.append([2, 4]) self.coords.append([3, 4]) self.coords.append([4, 4]) self.coords.append([3, 5]) self.coords.append([3, 6]) self.coords.append([3, 7]) self.coords.append([3, 8]) 
self.flash = 6 elif glyph == '5': self.coords.append([0, 0]) self.coords.append([1, 0]) self.coords.append([2, 0]) self.coords.append([3, 0]) self.coords.append([4, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([4, 5]) self.coords.append([4, 6]) self.coords.append([4, 7]) self.coords.append([0, 8]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.flash = 6 elif glyph == '6': self.coords.append([2, 0]) self.coords.append([3, 0]) self.coords.append([1, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) self.coords.append([4, 6]) self.coords.append([0, 7]) self.coords.append([4, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.flash = 6 elif glyph == '7': self.coords.append([0, 0]) self.coords.append([1, 0]) self.coords.append([2, 0]) self.coords.append([3, 0]) self.coords.append([4, 0]) self.coords.append([5, 0]) self.coords.append([0, 1]) self.coords.append([5, 1]) self.coords.append([5, 2]) self.coords.append([4, 3]) self.coords.append([4, 4]) self.coords.append([3, 5]) self.coords.append([3, 6]) self.coords.append([2, 7]) self.coords.append([2, 8]) self.flash = 7 elif glyph == '8': self.coords.append([1, 0]) self.coords.append([2, 0]) self.coords.append([3, 0]) self.coords.append([0, 1]) self.coords.append([4, 1]) self.coords.append([0, 2]) self.coords.append([4, 2]) self.coords.append([1, 3]) self.coords.append([2, 3]) self.coords.append([3, 3]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([0, 5]) self.coords.append([4, 5]) self.coords.append([0, 6]) 
self.coords.append([4, 6]) self.coords.append([0, 7]) self.coords.append([4, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.flash = 6 elif glyph == '9': self.coords.append([1, 0]) self.coords.append([2, 0]) self.coords.append([3, 0]) self.coords.append([0, 1]) self.coords.append([4, 1]) self.coords.append([0, 2]) self.coords.append([4, 2]) self.coords.append([0, 3]) self.coords.append([4, 3]) self.coords.append([1, 4]) self.coords.append([2, 4]) self.coords.append([3, 4]) self.coords.append([4, 4]) self.coords.append([0, 4]) self.coords.append([4, 4]) self.coords.append([4, 5]) self.coords.append([4, 6]) self.coords.append([3, 7]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.flash = 6 elif glyph == '-': self.coords.append([0, 4]) self.coords.append([1, 4]) self.coords.append([2, 4]) self.coords.append([3, 4]) self.flash = 6 elif glyph == '.': self.coords.append([0, 7]) self.coords.append([1, 7]) self.coords.append([0, 8]) self.coords.append([1, 8]) self.flash = 4 elif glyph == '!': self.coords.append([0, 0]) self.coords.append([1, 0]) self.coords.append([0, 1]) self.coords.append([1, 1]) self.coords.append([0, 2]) self.coords.append([1, 2]) self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([0, 4]) self.coords.append([1, 4]) self.coords.append([0, 5]) self.coords.append([1, 5]) self.coords.append([0, 7]) self.coords.append([1, 7]) self.coords.append([0, 8]) self.coords.append([1, 8]) self.flash = 4 elif glyph == ',': self.coords.append([0, 7]) self.coords.append([1, 7]) self.coords.append([0, 8]) self.coords.append([1, 8]) self.coords.append([1, 9]) self.coords.append([0, 10]) self.flash = 4 elif glyph == '\'': self.coords.append([0, 0]) self.coords.append([1, 0]) self.coords.append([0, 1]) self.coords.append([1, 1]) self.coords.append([1, 2]) self.flash = 4 elif glyph == '"': self.coords.append([0, 0]) self.coords.append([0, 1]) self.coords.append([0, 2]) self.coords.append([2, 0]) 
self.coords.append([2, 1]) self.coords.append([2, 2]) self.flash = 4 elif glyph == ' ': self.flash = 6 elif glyph == '\t': self.flash = 24 elif glyph == '(': self.coords.append([2, 0]) self.coords.append([1, 1]) self.coords.append([0, 2]) self.coords.append([0, 3]) self.coords.append([0, 4]) self.coords.append([0, 5]) self.coords.append([0, 6]) self.coords.append([0, 7]) self.coords.append([0, 8]) self.coords.append([1, 9]) self.coords.append([2, 10]) self.flash = 6 elif glyph == ')': self.coords.append([0, 0]) self.coords.append([1, 1]) self.coords.append([2, 2]) self.coords.append([2, 3]) self.coords.append([2, 4]) self.coords.append([2, 5]) self.coords.append([2, 6]) self.coords.append([2, 7]) self.coords.append([2, 8]) self.coords.append([1, 9]) self.coords.append([0, 10]) self.flash = 6 elif glyph == ')': self.coords.append([0, 0]) self.coords.append([1, 1]) self.coords.append([2, 2]) self.coords.append([2, 3]) self.coords.append([2, 4]) self.coords.append([2, 5]) self.coords.append([2, 6]) self.coords.append([2, 7]) self.coords.append([2, 8]) self.coords.append([1, 9]) self.coords.append([0, 10]) self.flash = 6 elif glyph == ':': self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([0, 4]) self.coords.append([1, 4]) self.coords.append([0, 7]) self.coords.append([1, 7]) self.coords.append([0, 8]) self.coords.append([1, 8]) self.flash = 5 elif glyph == ';': self.coords.append([0, 3]) self.coords.append([1, 3]) self.coords.append([0, 4]) self.coords.append([1, 4]) self.coords.append([0, 7]) self.coords.append([1, 7]) self.coords.append([1, 8]) self.coords.append([0, 9]) self.flash = 5 elif glyph == '_': self.coords.append([0, 8]) self.coords.append([1, 8]) self.coords.append([2, 8]) self.coords.append([3, 8]) self.coords.append([4, 8]) self.coords.append([5, 8]) self.flash = 7 else: self.flash = 6 def center(self, wide=6): glwide = self.flash - 2 adjust = (wide-glwide)/2 for cor in self.coords: cor[0] += adjust self._flash(wide+2) def 
_flash(self, flash): self.flash = flash def glyphstr_length(gls): """ Returns length of glyphstr gls """ length = 0 for gl in gls: length += gl.flash return length - 2 def glyphstr_monospace(gls, wide=6): """ for each GlyphObject in gls, calls .center(wide) """ for gl in gls: gl.center(wide) def glyphstr_center(gls, width=100): """ given a width of an area (such as column heading width) it will adjust the start point of each glyph in a glyphstr_, centering the string """ length = glyphstr_length(gls) glen = len(gls) #addlen = (width-length)/(glen)) print length print width - length hl = (width-length)/2 for i in range(0, glen): gl = gls[i] flash = gl.flash gl._flash(flash+hl) def glyphstr_justify(gls, width=100): """ given a width of an area (such as column heading width) it will adjust the start point of each glyph in a glyphstr_, justifying the string """ length = glyphstr_length(gls) glen = len(gls) #addlen = (width-length)/(glen)) print length print width - length ct = 0 for i in range(0, width-length): if ct >= glen-1: ct = 0 gl = gls[ct] flash = gl.flash gl._flash(flash+1) ct += 1 def glyphstr_bounds_get(string, mono=False): """ Returns 2 len integer array, size and height of string as glyphstr_ """ #xk = 0 #yk = 0 xz = 0 #yz = 10 vals = string.split('\n') yz = len(vals) * 10 for val in vals: gs = glyphstr_get(val) if mono: glyphstr_monospace(gs) sz = glyphstr_length(gs) if sz > xz: xz = sz return [xz, yz] def glyphstr_get(string): """ given a string, Returns glyphs, a list of glyphs """ glyphs = [] i = 0 while i < len(string): letter = string[i:i+1] glyphs.append(GlyphObject(letter)) i += 1 return glyphs
39,550
20,796
import json
from datetime import datetime

from paho.mqtt.client import Client
from subscriber import Subscriber


class MqttSender(Subscriber):
    """Subscriber that forwards every received message to a fixed MQTT topic as JSON."""

    def __init__(self, client: Client, topic: str):
        self.client = client
        self.topic = topic

    def on_next(self, message: dict):
        """Serialize *message* to JSON, log it with a timestamp, and publish it."""
        payload = json.dumps(message)
        stamp = datetime.now().isoformat()
        print(f'[{stamp}] Sending: {payload}')
        self.client.publish(self.topic, payload)
463
140
# Generated by Django 2.2.17 on 2020-12-28 08:13 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0003_auto_20201228_1605'), ] operations = [ migrations.CreateModel( name='Count', fields=[ ('user_id', models.IntegerField(primary_key=True, serialize=False, verbose_name='用户id')), ('name', models.CharField(blank=True, max_length=64, null=True, verbose_name='姓名')), ('day_customer', models.IntegerField(default=0, verbose_name='今天新增客户数量')), ('day_liaison', models.IntegerField(default=0, verbose_name='今天新增联系人数量')), ('day_record', models.IntegerField(default=0, verbose_name='今天新增拜访记录数量')), ('day_business', models.IntegerField(default=0, verbose_name='今天新增商机数量')), ('mouth_customer', models.IntegerField(default=0, verbose_name='本月新增客户数量')), ('mouth_liaison', models.IntegerField(default=0, verbose_name='本月新增联系人数量')), ('mouth_record', models.IntegerField(default=0, verbose_name='本月新增拜访记录数量')), ('mouth_business', models.IntegerField(default=0, verbose_name='本月新增商机数量')), ('all_customer', models.IntegerField(default=0, verbose_name='全部客户数量')), ('all_liaison', models.IntegerField(default=0, verbose_name='全部联系人数量')), ('all_record', models.IntegerField(default=0, verbose_name='全部拜访记录数量')), ('all_business', models.IntegerField(default=0, verbose_name='全部商机数量')), ], options={ 'verbose_name': '用户数据统计', 'verbose_name_plural': '用户数据统计', 'db_table': 'count', }, ), migrations.DeleteModel( name='UserCount', ), ]
1,914
690
from webium import BasePage, Finds, Find
from selenium.webdriver.common.by import By


class Homepage(BasePage):
    """Page object for the shop home page: catalogue navigation locators (webium)."""

    # Toggle/link that opens the catalogue menu in the page header.
    catalog_header = Find(by=By.CLASS_NAME, value="Header__BlockCatalogLink")
    # "Computers" top-level category link (matched by its href).
    computers_label = Find(by=By.CSS_SELECTOR, value="a[href='/kompyutery/']")
    # "Laptops and accessories" sub-category link (Russian label).
    laptops_accessories_label = Find(by=By.XPATH, value="//a[contains(.,'Ноутбуки и аксессуары')]")
    # "Laptops" leaf category link (Russian label).
    laptops_label = Find(by=By.LINK_TEXT, value="Ноутбуки")
430
162
from __future__ import print_function import argparse import sys import os import shutil import zipfile import urllib parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--bert_model_name", default = None, type = str, required = True, help = "Name of pretrained BERT model. Possible values: " "uncased_L-12_H-768_A-12,uncased_L-24_H-1024_A-16,cased_L-12_H-768_A-12," "multilingual_L-12_H-768_A-12,chinese_L-12_H-768_A-12") parser.add_argument("--model_dump_path", default = None, type = str, required = True, help = "Path to the output model.") parser.add_argument("--glue_data_path", default = None, type = str, required = True, help = "Path to store downloaded GLUE dataset") args = parser.parse_args() bert_model_url_map = { 'uncased_L-12_H-768_A-12': 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip', 'uncased_L-24_H-1024_A-16': 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip', 'cased_L-12_H-768_A-12': 'https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip', 'multilingual_L-12_H-768_A-12': 'https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip', 'chinese_L-12_H-768_A-12': 'https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip' } if args.bert_model_name not in bert_model_url_map: sys.stderr.write('Unknown BERT model name ' + args.bert_model_name) sys.exit(1) pretrained_model_url = bert_model_url_map.get(args.bert_model_name) # make local directory for pretrained tensorflow BERT model tensorflow_model_dir = './tensorflow_model' if not os.path.exists(tensorflow_model_dir): os.makedirs(tensorflow_model_dir) # download and extract pretrained tensorflow BERT model download_file_name = 'tensorflow_model.zip' urllib.request.urlretrieve(pretrained_model_url, filename=download_file_name) print('Extracting pretrained model...') with zipfile.ZipFile(download_file_name, 'r') as z: 
z.extractall(tensorflow_model_dir) # make destination path if not os.path.exists(args.model_dump_path): os.makedirs(args.model_dump_path) files = ['bert_model.ckpt.meta', 'bert_model.ckpt.index', 'bert_model.ckpt.data-00000-of-00001', 'bert_config.json', 'vocab.txt'] for file in files: shutil.copy(os.path.join(tensorflow_model_dir, args.bert_model_name, file), os.path.join(args.model_dump_path, file)) print('Start to download GLUE dataset...\n') urllib.request.urlretrieve( 'https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py', filename='download_glue_data.py') if os.system('python download_glue_data.py --data_dir {0} --tasks all'.format(args.glue_data_path)) != 0: sys.exit(1)
3,112
1,237
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-02-21 15:15 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('pdata_app', '0034_auto_20180221_1158'), ] operations = [ migrations.AddField( model_name='observationdataset', name='cached_directories', field=models.CharField(blank=True, max_length=200, null=True, verbose_name=b'Directory'), ), migrations.AddField( model_name='observationdataset', name='cached_end_time', field=models.DateTimeField(blank=True, null=True, verbose_name=b'End Time'), ), migrations.AddField( model_name='observationdataset', name='cached_num_files', field=models.IntegerField(blank=True, null=True, verbose_name=b'# Data Files'), ), migrations.AddField( model_name='observationdataset', name='cached_start_time', field=models.DateTimeField(blank=True, null=True, verbose_name=b'Start Time'), ), migrations.AddField( model_name='observationdataset', name='cached_variables', field=models.CharField(blank=True, max_length=500, null=True, verbose_name=b'Variables'), ), ]
1,386
427
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cv2
import numpy as np


class ImageReader():
    """Decode, resize, crop and normalize encoded images for ImageNet-style inference."""

    def __init__(self):
        # Channel-wise mean/std applied to pixels scaled to [0, 1] (RGB order).
        self.image_mean = [0.485, 0.456, 0.406]
        self.image_std = [0.229, 0.224, 0.225]
        # Output layout: (channels, height, width).
        self.image_shape = [3, 224, 224]
        # Short side is resized to this before cropping.
        self.resize_short_size = 256
        self.interpolation = None

    def resize_short(self, img, target_size, interpolation=None):
        """Resize ``img`` so its shorter side equals ``target_size``.

        Args:
            img: HWC image array.
            target_size: desired length of the shorter side.
            interpolation: optional cv2 interpolation flag.
        Returns:
            The resized image.
        """
        percent = float(target_size) / min(img.shape[0], img.shape[1])
        resized_width = int(round(img.shape[1] * percent))
        resized_height = int(round(img.shape[0] * percent))
        if interpolation:
            resized = cv2.resize(
                img, (resized_width, resized_height),
                interpolation=interpolation)
        else:
            resized = cv2.resize(img, (resized_width, resized_height))
        return resized

    def crop_image(self, img, target_size, center):
        """Crop a square ``target_size`` patch from ``img``.

        Args:
            img: HWC image array.
            target_size: side length of the square crop.
            center: True for a center crop, otherwise a random crop.
        Returns:
            The cropped image.
        """
        height, width = img.shape[:2]
        size = target_size
        # FIX: idiomatic truth test instead of ``center == True``.
        if center:
            w_start = (width - size) // 2
            h_start = (height - size) // 2
        else:
            w_start = np.random.randint(0, width - size + 1)
            h_start = np.random.randint(0, height - size + 1)
        w_end = w_start + size
        h_end = h_start + size
        img = img[h_start:h_end, w_start:w_end, :]
        return img

    def process_image(self, sample):
        """Decode raw image bytes, resize + center-crop, and normalize.

        Args:
            sample: encoded image bytes (e.g. JPEG).
        Returns:
            float32 CHW array normalized by mean/std, or None if decoding fails.
        """
        mean = self.image_mean
        std = self.image_std
        crop_size = self.image_shape[1]
        # FIX: np.fromstring is deprecated for binary input (and removed in
        # newer NumPy); np.frombuffer is the supported, copy-free equivalent.
        data = np.frombuffer(sample, dtype=np.uint8)
        img = cv2.imdecode(data, cv2.IMREAD_COLOR)
        if img is None:
            print("img is None, pass it.")
            return None
        if crop_size > 0:
            target_size = self.resize_short_size
            img = self.resize_short(
                img, target_size, interpolation=self.interpolation)
            img = self.crop_image(img, target_size=crop_size, center=True)
        img = img[:, :, ::-1]  # OpenCV decodes BGR; convert to RGB.
        img = img.astype('float32').transpose((2, 0, 1)) / 255
        img_mean = np.array(mean).reshape((3, 1, 1))
        img_std = np.array(std).reshape((3, 1, 1))
        img -= img_mean
        img /= img_std
        return img
3,226
1,030
#!/usr/bin/env python
"""Certbot deploy hook: upload freshly renewed certificates to Cloudflare.

Reads RENEWED_DOMAINS from the environment, resolves each domain's certbot
renewal config to find Cloudflare API credentials, and uploads the new
certificate/private key as a custom certificate on the matching zone.
"""
import logging
import sys
import CloudFlare
import os
import re
from os import path
from certbot.plugins import dns_common

__author__ = "Endrigo Antonini"
__copyright__ = "Copyright 2020, Endrigo Antonini"
__license__ = "Apache License 2.0"
__version__ = "1.0"
__maintainer__ = "Endrigo Antonini"
__email__ = "eantonini@eidoscode.com"
__status__ = "Production"

logger = logging.getLogger(__name__)

DEFAULT_CERT_FOLDER = "/etc/letsencrypt/live"
CERTBOT_CONF_DIR = "/etc/letsencrypt/renewal"
# Per-domain cache of parsed certbot renewal config files.
PROPERTIES = {}


def read_file(filename):
    """
    Read a file from disk and return all the content

    :param str filename: File name of the file that is going to read.
    :raises Exception: if the file doesn't exists
    """
    if not path.isfile(filename):
        raise Exception("File {} doesn't exists!".format(filename))
    with open(filename) as f:
        return f.read()


def read_certificate(filename):
    """Read a PEM file and collapse real newlines into literal ``\\n`` sequences."""
    return re.sub(r'\r?\n', r'\n', read_file(filename))


def read_properties_file(file):
    """Parse a ``key = value`` style file into a dict (whitespace stripped)."""
    myvars = {}
    if not path.isfile(file):
        raise Exception("Config file {} doesn't exists!".format(file))
    with open(file) as myfile:
        for line in myfile:
            name, var = line.partition("=")[::2]
            myvars[name.strip()] = var.strip()
    return myvars


def read_domain_properties(domain):
    """Return the parsed certbot renewal config for *domain*, caching results."""
    global PROPERTIES
    if domain in PROPERTIES:
        return PROPERTIES[domain]
    config_file = "{}/{}.conf".format(CERTBOT_CONF_DIR, domain)
    myvars = read_properties_file(config_file)
    PROPERTIES[domain] = myvars
    return myvars


def connect_cloudflare(domain):
    """Build a CloudFlare client from the API token referenced by *domain*'s renewal config."""
    print("Connection to Cloudflare of domain {}".format(domain))
    properties = read_domain_properties(domain)
    if "dns_cloudflare_credentials" not in properties:
        # FIX: the original message named the wrong property
        # (dns_cloudflare_api_token) and formatted an always-None cred_file.
        raise Exception("Renewal config of domain {} doesn't have property "
                        "dns_cloudflare_credentials on it.".format(domain))
    cred_file = properties["dns_cloudflare_credentials"]
    props = read_properties_file(cred_file)
    if "dns_cloudflare_api_token" not in props:
        raise Exception("File {} doesn't have property dns_cloudflare_api_token on it.".format(cred_file))
    api_key = props["dns_cloudflare_api_token"]
    return CloudFlare.CloudFlare(token=api_key)


def find_zone_id(cf, domain):
    """Find the Cloudflare zone id for *domain* by trying base-domain guesses.

    :raises Exception: when credentials are bad or no zone matches.
    """
    zone_name_guesses = dns_common.base_domain_name_guesses(domain)
    zones = []  # type: List[Dict[str, Any]]
    code = msg = None
    for zone_name in zone_name_guesses:
        params = {'name': zone_name, 'per_page': 1}
        try:
            zones = cf.zones.get(params=params)  # zones | pylint: disable=no-member
        except CloudFlare.exceptions.CloudFlareAPIError as e:
            code = int(e)
            msg = str(e)
            hint = None
            # Map well-known Cloudflare API error codes to actionable hints.
            if code == 6003:
                hint = ('Did you copy your entire API token/key? To use Cloudflare tokens, '
                        'you\'ll need the python package cloudflare>=2.3.1.{}'
                        .format(' This certbot is running cloudflare ' + str(CloudFlare.__version__)
                                if hasattr(CloudFlare, '__version__') else ''))
            elif code == 9103:
                hint = 'Did you enter the correct email address and Global key?'
            elif code == 9109:
                hint = 'Did you enter a valid Cloudflare Token?'
            if hint:
                raise Exception('Error determining zone_id: {0} {1}. Please confirm '
                                'that you have supplied valid Cloudflare API credentials. ({2})'
                                .format(code, msg, hint))
            else:
                # FIX: %d cannot format an exception object; log the extracted
                # code and message instead.
                logger.debug('Unrecognised CloudFlareAPIError while finding zone_id: %d %s. '
                             'Continuing with next zone guess...', code, msg)
        if zones:
            zone_id = zones[0]['id']
            logger.debug('Found zone_id of %s for %s using name %s', zone_id, domain, zone_name)
            return zone_id
    # FIX: the original formatted *domain* into both placeholders; report the
    # zone names that were actually tried.
    raise Exception('Unable to determine zone_id for {0} using zone names: {1}. '
                    'Please confirm that the domain name has been entered correctly '
                    'and is already associated with the supplied Cloudflare account.{2}'
                    .format(domain, zone_name_guesses,
                            ' The error from Cloudflare was:'
                            ' {0} {1}'.format(code, msg) if code is not None else ''))


def upload_certificate(domain):
    """Upload *domain*'s live fullchain + private key as a Cloudflare custom certificate."""
    cf = connect_cloudflare(domain)
    private_key = read_certificate("{}/{}/privkey.pem".format(DEFAULT_CERT_FOLDER, domain))
    fullchain = read_certificate("{}/{}/fullchain.pem".format(DEFAULT_CERT_FOLDER, domain))
    zone_id = find_zone_id(cf, domain)
    logger.debug("Cloudflare Zone id {} of domain {} ".format(zone_id, domain))
    data = {'certificate': fullchain,
            'private_key': private_key,
            'bundle_method': 'ubiquitous'}
    print("Going to deploy certificate.")
    try:
        cf.zones.custom_certificates.post(zone_id, data=data)
        print("Deployed.")  # FIX: typo "Depoyed."
    except CloudFlare.exceptions.CloudFlareAPIError as e:
        code = int(e)
        msg = str(e)
        # Code 1228: the identical certificate is already installed -- benign.
        if code == 1228:
            print("Cert already deployed.")
        else:
            logger.error(code)
            logger.error(msg)
            raise e
    return


def main():
    """Entry point: deploy every domain listed in $RENEWED_DOMAINS."""
    domains_str = os.environ['RENEWED_DOMAINS']
    domains_lst = domains_str.split()
    for domain in domains_lst:
        print("")
        print("Start domain {} checking".format(domain))
        # Walk up base-domain guesses until we find a renewal config file.
        zone_name_guesses = dns_common.base_domain_name_guesses(domain)
        zone_domain = None
        for temp_zone_domain in zone_name_guesses:
            temp_config_file = "{}/{}.conf".format(CERTBOT_CONF_DIR, temp_zone_domain)
            logger.debug("Checking zone {} -- {}".format(temp_zone_domain, temp_config_file))
            if path.isfile(temp_config_file):
                zone_domain = temp_zone_domain
                break
        if zone_domain is None:
            raise Exception("It wasn't possible to continue. There is no config file for domain {}.".format(domain))
        upload_certificate(zone_domain)


if __name__ == '__main__':
    main()
5,867
1,901
from datetime import datetime

from marshmallow import Schema, EXCLUDE
import marshmallow.fields as ms_fields


class LocationLatSchema(Schema):
    """Schema for a latitude-only location report tied to a user."""

    user_id = ms_fields.Str(required=True)
    # FIX: pass the callable, not ``datetime.now()`` -- the original evaluated
    # it once at class-definition time, so every record defaulted to the
    # process start timestamp. Marshmallow invokes callable defaults per use.
    user_timestamp = ms_fields.DateTime(default=datetime.now)
    location_id = ms_fields.Str(default="")
    latitude = ms_fields.Float(default=0.0)
    departure = ms_fields.Bool(default=False)
    accuracy = ms_fields.Float(default=0.0)

    class Meta:
        # Silently drop unknown keys instead of raising a ValidationError.
        unknown = EXCLUDE
476
155
# Till now only Python 3.10 can run match statement def check_point(point): match point: case (0, 0): print("Origin") case (0, y): print(f"Y = {y}") case (x, 0) print(f"X = {x}") case (x, y): print(f"X = {x}, Y = {y}") case _: raise ValueError("Not a point") x = 1 y = 2 point = (x, y) check_point(point)
411
149
# python3 # -*- coding: utf-8 -*- # @Author : lina # @Time : 2018/4/22 21:17 """ code function: define all parameters. """ matched_file_name = "../data/gcn_res.txt" wordvec_path = '../data/word2vec.model' incremental_path = "../data/incremental_res.txt"
272
123
from django.shortcuts import render from django.http import HttpResponseRedirect from django.core.urlresolvers import reverse def root(request): """ Newsletter > Root """ return render(request, 'newsletter/newsletter_root.jade')
246
68
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License" # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import paddle.fluid as fluid from paddle.fluid.param_attr import ParamAttr from .search_space_base import SearchSpaceBase from .base_layer import conv_bn_layer from .search_space_registry import SEARCHSPACE from .utils import compute_downsample_num, check_points, get_random_tokens __all__ = ["MobileNetV1BlockSpace", "MobileNetV2BlockSpace"] @SEARCHSPACE.register class MobileNetV2BlockSpace(SearchSpaceBase): def __init__(self, input_size, output_size, block_num, block_mask=None, scale=1.0): super(MobileNetV2BlockSpace, self).__init__(input_size, output_size, block_num, block_mask) if self.block_mask == None: # use input_size and output_size to compute self.downsample_num self.downsample_num = compute_downsample_num(self.input_size, self.output_size) if self.block_num != None: assert self.downsample_num <= self.block_num, 'downsample numeber must be LESS THAN OR EQUAL TO block_num, but NOW: downsample numeber is {}, block_num is {}'.format( self.downsample_num, self.block_num) # self.filter_num means channel number self.filter_num = np.array([ 3, 4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 144, 160, 192, 224, 256, 320, 384, 512 ]) # 20 # self.k_size means kernel size self.k_size = np.array([3, 5]) #2 # self.multiply means expansion_factor of each 
_inverted_residual_unit self.multiply = np.array([1, 2, 3, 4, 5, 6]) #6 # self.repeat means repeat_num _inverted_residual_unit in each _invresi_blocks self.repeat = np.array([1, 2, 3, 4, 5, 6]) #6 self.scale = scale def init_tokens(self): return get_random_tokens(self.range_table()) def range_table(self): range_table_base = [] if self.block_mask != None: range_table_length = len(self.block_mask) else: range_table_length = self.block_num for i in range(range_table_length): range_table_base.append(len(self.multiply)) range_table_base.append(len(self.filter_num)) range_table_base.append(len(self.repeat)) range_table_base.append(len(self.k_size)) return range_table_base def token2arch(self, tokens=None): """ return mobilenetv2 net_arch function """ if tokens == None: tokens = self.init_tokens() self.bottleneck_params_list = [] if self.block_mask != None: for i in range(len(self.block_mask)): self.bottleneck_params_list.append( (self.multiply[tokens[i * 4]], self.filter_num[tokens[i * 4 + 1]], self.repeat[tokens[i * 4 + 2]], 2 if self.block_mask[i] == 1 else 1, self.k_size[tokens[i * 4 + 3]])) else: repeat_num = int(self.block_num / self.downsample_num) num_minus = self.block_num % self.downsample_num ### if block_num > downsample_num, add stride=1 block at last (block_num-downsample_num) layers for i in range(self.downsample_num): self.bottleneck_params_list.append( (self.multiply[tokens[i * 4]], self.filter_num[tokens[i * 4 + 1]], self.repeat[tokens[i * 4 + 2]], 2, self.k_size[tokens[i * 4 + 3]])) ### if block_num / downsample_num > 1, add (block_num / downsample_num) times stride=1 block for k in range(repeat_num - 1): kk = k * self.downsample_num + i self.bottleneck_params_list.append( (self.multiply[tokens[kk * 4]], self.filter_num[tokens[kk * 4 + 1]], self.repeat[tokens[kk * 4 + 2]], 1, self.k_size[tokens[kk * 4 + 3]])) if self.downsample_num - i <= num_minus: j = self.downsample_num * (repeat_num - 1) + i self.bottleneck_params_list.append( 
(self.multiply[tokens[j * 4]], self.filter_num[tokens[j * 4 + 1]], self.repeat[tokens[j * 4 + 2]], 1, self.k_size[tokens[j * 4 + 3]])) if self.downsample_num == 0 and self.block_num != 0: for i in range(len(self.block_num)): self.bottleneck_params_list.append( (self.multiply[tokens[i * 4]], self.filter_num[tokens[i * 4 + 1]], self.repeat[tokens[i * 4 + 2]], 1, self.k_size[tokens[i * 4 + 3]])) def net_arch(input, return_mid_layer=False, return_block=None): # all padding is 'SAME' in the conv2d, can compute the actual padding automatic. # bottleneck sequences in_c = int(32 * self.scale) mid_layer = dict() layer_count = 0 depthwise_conv = None for i, layer_setting in enumerate(self.bottleneck_params_list): t, c, n, s, k = layer_setting if s == 2: layer_count += 1 if check_points((layer_count - 1), return_block): mid_layer[layer_count - 1] = depthwise_conv input, depthwise_conv = self._invresi_blocks( input=input, in_c=in_c, t=t, c=int(c * self.scale), n=n, s=s, k=int(k), name='mobilenetv2_' + str(i + 1)) in_c = int(c * self.scale) if check_points(layer_count, return_block): mid_layer[layer_count] = depthwise_conv if return_mid_layer: return input, mid_layer else: return input, return net_arch def _shortcut(self, input, data_residual): """Build shortcut layer. Args: input(Variable): input. data_residual(Variable): residual layer. Returns: Variable, layer output. """ return fluid.layers.elementwise_add(input, data_residual) def _inverted_residual_unit(self, input, num_in_filter, num_filters, ifshortcut, stride, filter_size, expansion_factor, reduction_ratio=4, name=None): """Build inverted residual unit. Args: input(Variable), input. num_in_filter(int), number of in filters. num_filters(int), number of filters. ifshortcut(bool), whether using shortcut. stride(int), stride. filter_size(int), filter size. padding(str|int|list), padding. expansion_factor(float), expansion factor. name(str), name. Returns: Variable, layers output. 
""" num_expfilter = int(round(num_in_filter * expansion_factor)) channel_expand = conv_bn_layer( input=input, num_filters=num_expfilter, filter_size=1, stride=1, padding='SAME', num_groups=1, act='relu6', name=name + '_expand') bottleneck_conv = conv_bn_layer( input=channel_expand, num_filters=num_expfilter, filter_size=filter_size, stride=stride, padding='SAME', num_groups=num_expfilter, act='relu6', name=name + '_dwise', use_cudnn=False) depthwise_output = bottleneck_conv linear_out = conv_bn_layer( input=bottleneck_conv, num_filters=num_filters, filter_size=1, stride=1, padding='SAME', num_groups=1, act=None, name=name + '_linear') out = linear_out if ifshortcut: out = self._shortcut(input=input, data_residual=out) return out, depthwise_output def _invresi_blocks(self, input, in_c, t, c, n, s, k, name=None): """Build inverted residual blocks. Args: input: Variable, input. in_c: int, number of in filters. t: float, expansion factor. c: int, number of filters. n: int, number of layers. s: int, stride. k: int, filter size. name: str, name. Returns: Variable, layers output. 
""" first_block, depthwise_output = self._inverted_residual_unit( input=input, num_in_filter=in_c, num_filters=c, ifshortcut=False, stride=s, filter_size=k, expansion_factor=t, name=name + '_1') last_residual_block = first_block last_c = c for i in range(1, n): last_residual_block, depthwise_output = self._inverted_residual_unit( input=last_residual_block, num_in_filter=last_c, num_filters=c, ifshortcut=True, stride=1, filter_size=k, expansion_factor=t, name=name + '_' + str(i + 1)) return last_residual_block, depthwise_output @SEARCHSPACE.register class MobileNetV1BlockSpace(SearchSpaceBase): def __init__(self, input_size, output_size, block_num, block_mask=None, scale=1.0): super(MobileNetV1BlockSpace, self).__init__(input_size, output_size, block_num, block_mask) if self.block_mask == None: # use input_size and output_size to compute self.downsample_num self.downsample_num = compute_downsample_num(self.input_size, self.output_size) if self.block_num != None: assert self.downsample_num <= self.block_num, 'downsample numeber must be LESS THAN OR EQUAL TO block_num, but NOW: downsample numeber is {}, block_num is {}'.format( self.downsample_num, self.block_num) # self.filter_num means channel number self.filter_num = np.array([ 3, 4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 144, 160, 192, 224, 256, 320, 384, 512, 576, 640, 768, 1024, 1048 ]) self.k_size = np.array([3, 5]) self.scale = scale def init_tokens(self): return get_random_tokens(self.range_table()) def range_table(self): range_table_base = [] if self.block_mask != None: for i in range(len(self.block_mask)): range_table_base.append(len(self.filter_num)) range_table_base.append(len(self.filter_num)) range_table_base.append(len(self.k_size)) else: for i in range(self.block_num): range_table_base.append(len(self.filter_num)) range_table_base.append(len(self.filter_num)) range_table_base.append(len(self.k_size)) return range_table_base def token2arch(self, tokens=None): if tokens == None: tokens = 
self.init_tokens() self.bottleneck_params_list = [] if self.block_mask != None: for i in range(len(self.block_mask)): self.bottleneck_params_list.append( (self.filter_num[tokens[i * 3]], self.filter_num[tokens[i * 3 + 1]], 2 if self.block_mask[i] == 1 else 1, self.k_size[tokens[i * 3 + 2]])) else: repeat_num = int(self.block_num / self.downsample_num) num_minus = self.block_num % self.downsample_num for i in range(self.downsample_num): ### if block_num > downsample_num, add stride=1 block at last (block_num-downsample_num) layers self.bottleneck_params_list.append( (self.filter_num[tokens[i * 3]], self.filter_num[tokens[i * 3 + 1]], 2, self.k_size[tokens[i * 3 + 2]])) ### if block_num / downsample_num > 1, add (block_num / downsample_num) times stride=1 block for k in range(repeat_num - 1): kk = k * self.downsample_num + i self.bottleneck_params_list.append( (self.filter_num[tokens[kk * 3]], self.filter_num[tokens[kk * 3 + 1]], 1, self.k_size[tokens[kk * 3 + 2]])) if self.downsample_num - i <= num_minus: j = self.downsample_num * (repeat_num - 1) + i self.bottleneck_params_list.append( (self.filter_num[tokens[j * 3]], self.filter_num[tokens[j * 3 + 1]], 1, self.k_size[tokens[j * 3 + 2]])) if self.downsample_num == 0 and self.block_num != 0: for i in range(len(self.block_num)): self.bottleneck_params_list.append( (self.filter_num[tokens[i * 3]], self.filter_num[tokens[i * 3 + 1]], 1, self.k_size[tokens[i * 3 + 2]])) def net_arch(input, return_mid_layer=False, return_block=None): mid_layer = dict() layer_count = 0 for i, layer_setting in enumerate(self.bottleneck_params_list): filter_num1, filter_num2, stride, kernel_size = layer_setting if stride == 2: layer_count += 1 if check_points((layer_count - 1), return_block): mid_layer[layer_count - 1] = input input = self._depthwise_separable( input=input, num_filters1=filter_num1, num_filters2=filter_num2, stride=stride, scale=self.scale, kernel_size=int(kernel_size), name='mobilenetv1_{}'.format(str(i + 1))) if 
return_mid_layer: return input, mid_layer else: return input, return net_arch def _depthwise_separable(self, input, num_filters1, num_filters2, stride, scale, kernel_size, name=None): num_groups = input.shape[1] s_oc = int(num_filters1 * scale) if s_oc > num_groups: output_channel = s_oc - (s_oc % num_groups) else: output_channel = num_groups depthwise_conv = conv_bn_layer( input=input, filter_size=kernel_size, num_filters=output_channel, stride=stride, num_groups=num_groups, use_cudnn=False, name=name + '_dw') pointwise_conv = conv_bn_layer( input=depthwise_conv, filter_size=1, num_filters=int(num_filters2 * scale), stride=1, name=name + '_sep') return pointwise_conv
16,826
5,108
"""Middle High German phonology tools """ from typing import List from cltk.phonology.gmh.transcription import Transcriber from cltk.phonology.syllabify import Syllabifier __author__ = ["Clément Besnier <clem@clementbesnier.fr>"] class MiddleHighGermanTranscription: """ Middle High German Transcriber """ def __init__(self): self.transcriber = Transcriber() def transcribe(self, word): """ >>> MiddleHighGermanTranscription().transcribe("Brynhild") 'Brynχɪld̥' :param word: word to transcribe :return: transcribed word """ return self.transcriber.transcribe(word, with_squared_brackets=False) def __repr__(self): return f"<MiddleHighGermanTranscription>" def __call__(self, word): return self.transcribe(word) class MiddleHighGermanSyllabifier: """ Middle High German syllabifier based on sonority phoneme hierarchy for MHG. Source: Resonances in Middle High German: New Methodologies in Prosody, Christopher Leo Hench, 2017 """ def __init__(self): self.syllabifier = Syllabifier(language="gmh") def syllabify(self, word: str) -> List[str]: """ >>> MiddleHighGermanSyllabifier().syllabify("Gunther") ['Gunt', 'her'] :param word: word to syllabify :return: syllabified word """ return self.syllabifier.syllabify(word, mode="MOP") def __repr__(self): return f"<MiddleHighGermanSyllabifier>" def __call__(self, word): return self.syllabify(word)
1,580
507
import json from html import unescape from bs4 import BeautifulSoup from baiduspider.core._spider import BaseSpider from baiduspider.errors import ParseError class Parser(BaseSpider): def __init__(self) -> None: super().__init__() def parse_web(self, content: str) -> dict: """解析百度网页搜索的页面源代码 Args: content (str): 已经转换为UTF-8编码的百度网页搜索HTML源码 Returns: dict: 解析后的结果 """ soup = BeautifulSoup(content, 'html.parser') if soup.find('div', id='content_left') is None: raise ParseError('Invalid HTML content.') # 尝试获取搜索结果总数 try: num = int(str(soup.find('span', class_='nums_text').text).strip( '百度为您找到相关结果约').strip('个').replace(',', '')) except: num = 0 # 查找运算窗口 calc = soup.find('div', class_='op_new_cal_screen') # 定义预结果(运算以及相关搜索) pre_results = [] # 预处理相关搜索 try: _related = soup.find('div', id='rs').find('table').find_all('th') except: _related = [] related = [] # 预处理新闻 news = soup.find('div', class_='result-op', tpl='sp_realtime_bigpic5', srcid='19') # 确认是否有新闻块 try: news_title = self._format( news.find('h3', class_='t').find('a').text) except: news_title = None news_detail = [] else: news_rows = news.findAll('div', class_='c-row') news_detail = [] prev_row = None for row in news_rows: try: row_title = self._format(row.find('a').text) except AttributeError: prev_row['des'] = self._format(row.text) continue row_time = self._format( row.find('span', class_='c-color-gray2').text) row_author = self._format( row.find('span', class_='c-color-gray').text) row_url = self._format(row.find('a')['href']) news_detail.append({ 'title': row_title, 'time': row_time, 'author': row_author, 'url': row_url, 'des': None }) prev_row = news_detail[-1] # 预处理短视频 video = soup.find('div', class_='op-short-video-pc') if video: video_rows = video.findAll('div', class_='c-row') video_results = [] for row in video_rows: row_res = [] videos = row.findAll('div', class_='c-span6') for v in videos: v_link = v.find('a') v_title = v_link['title'] v_url = self._format(v_link['href']) v_img = v_link.find('img')['src'] 
v_len = self._format( v.find('div', class_='op-short-video-pc-duration-wrap').text) v_from = self._format( v.find('div', class_='op-short-video-pc-clamp1').text) row_res.append({ 'title': v_title, 'url': v_url, 'cover': v_img, 'length': v_len, 'origin': v_from }) video_results += row_res else: video_results = [] # 一个一个append相关搜索 for _ in _related: if _.text: related.append(_.text) # 预处理百科 baike = soup.find('div', class_='c-container', tpl='bk_polysemy') if baike: b_title = self._format(baike.find('h3').text) b_url = baike.find('a')['href'] b_des = self._format(baike.find( 'div', class_='c-span-last').find('p').text) try: b_cover = baike.find( 'div', class_='c-span6').find('img')['src'] b_cover_type = 'image' except (TypeError, AttributeError): try: b_cover = baike.find( 'video', class_='op-bk-polysemy-video')['data-src'] b_cover_type = 'video' except TypeError: b_cover = None b_cover_type = None baike = { 'title': b_title, 'url': b_url, 'des': b_des, 'cover': b_cover, 'cover-type': b_cover_type } # 加载搜索结果总数 if num != 0: pre_results.append(dict(type='total', result=num)) # 加载运算 if calc: pre_results.append(dict(type='calc', process=str(calc.find('p', class_='op_new_val_screen_process').find( 'span').text), result=str(calc.find('p', class_='op_new_val_screen_result').find('span').text))) # 加载相关搜索 if related: pre_results.append(dict(type='related', results=related)) # 加载资讯 if news_detail: pre_results.append(dict(type='news', results=news_detail)) # 加载短视频 if video_results: pre_results.append(dict(type='video', results=video_results)) # 加载百科 if baike: pre_results.append(dict(type='baike', result=baike)) # 预处理源码 error = False try: soup = BeautifulSoup(content, 'html.parser') # 错误处理 except IndexError: error = True finally: if error: raise ParseError( 'Failed to generate BeautifulSoup object for the given source code content.') results = soup.findAll('div', class_='result') res = [] for result in results: soup = BeautifulSoup(self._minify(str(result)), 'html.parser') # 链接 
href = soup.find('a').get('href').strip() # 标题 title = self._format(str(soup.find('a').text)) # 时间 try: time = self._format(soup.findAll( 'div', class_='c-abstract')[0].find('span', class_='newTimeFactor_before_abs').text) except (AttributeError, IndexError): time = None try: # 简介 des = soup.find_all('div', class_='c-abstract')[0].text soup = BeautifulSoup(str(result), 'html.parser') des = self._format(des).lstrip(str(time)).strip() except IndexError: try: des = des.replace('mn', '') except (UnboundLocalError, AttributeError): des = None if time: time = time.split('-')[0].strip() # 因为百度的链接是加密的了,所以需要一个一个去访问 # 由于性能原因,分析链接部分暂略 # if href is not None: # try: # # 由于性能原因,这里设置1秒超时 # r = requests.get(href, timeout=1) # href = r.url # except: # # 获取网页失败,默认换回原加密链接 # href = href # # 分析链接 # if href: # parse = urlparse(href) # domain = parse.netloc # prepath = parse.path.split('/') # path = [] # for loc in prepath: # if loc != '': # path.append(loc) # else: # domain = None # path = None try: is_not_special = result['tpl'] not in [ 'short_video_pc', 'sp_realtime_bigpic5', 'bk_polysemy'] except KeyError: is_not_special = False if is_not_special: # 确保不是特殊类型的结果 # 获取可见的域名 try: domain = result.find('div', class_='c-row').find('div', class_='c-span-last').find( 'div', class_='se_st_footer').find('a', class_='c-showurl').text except Exception as error: try: domain = result.find( 'div', class_='c-row').find('div', class_='c-span-last').find('p', class_='op-bk-polysemy-move').find('span', class_='c-showurl').text except Exception as error: try: domain = result.find( 'div', class_='se_st_footer').find('a', class_='c-showurl').text except: domain = None if domain: domain = domain.replace(' ', '') else: domain = None # 加入结果 if title and href and is_not_special: res.append({ 'title': title, 'des': des, 'origin': domain, 'url': href, 'time': time, 'type': 'result'}) soup = BeautifulSoup(content, 'html.parser') try: soup = BeautifulSoup(str(soup.findAll('div', id='page') [0]), 'html.parser') # 
分页 pages_ = soup.findAll('span', class_='pc') except IndexError: pages_ = [] pages = [] for _ in pages_: pages.append(int(_.text)) # 如果搜索结果仅有一页时,百度不会显示底部导航栏 # 所以这里直接设置成1,如果不设会报错`TypeError` if not pages: pages = [1] # 设置最终结果 result = pre_results result.extend(res) return { 'results': result, # 最大页数 'pages': max(pages) } def parse_pic(self, content: str) -> dict: """解析百度图片搜索的页面源代码 Args: content (str): 已经转换为UTF-8编码的百度图片搜索HTML源码 Returns: dict: 解析后的结果 """ # 从JavaScript中加载数据 # 因为JavaScript很像JSON(JavaScript Object Notation),所以直接用json加载就行了 # 还有要预处理一下,把函数和无用的括号过滤掉 error = None try: data = json.loads(content.split('flip.setData(\'imgData\', ')[1].split( 'flip.setData(')[0].split(']);')[0].replace(');', '').replace('<\\/strong>', '</strong>').replace('\\\'', '\'')) except Exception as err: error = err if type(err) in [IndexError, AttributeError]: raise ParseError('Invalid HTML content.') finally: if error: raise ParseError(str(error)) results = [] for _ in data['data'][:-1]: if _: # 标题 title = str(_['fromPageTitle']).encode('utf-8').decode('utf-8') # 去除标题里的HTML title = unescape(self._remove_html(title)) # 链接 url = _['objURL'] # 来源域名 host = _['fromURLHost'] # 生成结果 result = { 'title': title, 'url': url, 'host': host } results.append(result) # 加入结果 # 获取分页 bs = BeautifulSoup(content, 'html.parser') pages_ = bs.find('div', id='page').findAll('span', class_='pc') pages = [] for _ in pages_: pages.append(int(_.text)) return { 'results': results, # 取最大页码 'pages': max(pages) }
11,680
3,660
import socket from requests.adapters import HTTPAdapter from requests.compat import urlparse, unquote try: from requests.packages.urllib3.connection import HTTPConnection from requests.packages.urllib3.connectionpool import HTTPConnectionPool except ImportError: from urllib3.connection import HTTPConnection from urllib3.connectionpool import HTTPConnectionPool # The following was adapted from some code from docker-py # https://github.com/docker/docker-py/blob/master/docker/unixconn/unixconn.py class UnixHTTPConnection(HTTPConnection): def __init__(self, unix_socket_url, timeout=60): """Create an HTTP connection to a unix domain socket :param unix_socket_url: A URL with a scheme of 'http+unix' and the netloc is a percent-encoded path to a unix domain socket. E.g.: 'http+unix://%2Ftmp%2Fprofilesvc.sock/status/pid' """ HTTPConnection.__init__(self, 'localhost', timeout=timeout) self.unix_socket_url = unix_socket_url self.timeout = timeout def connect(self): sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.settimeout(self.timeout) socket_path = unquote(urlparse(self.unix_socket_url).netloc) sock.connect(socket_path) self.sock = sock class UnixHTTPConnectionPool(HTTPConnectionPool): def __init__(self, socket_path, timeout=60): HTTPConnectionPool.__init__(self, 'localhost', timeout=timeout) self.socket_path = socket_path self.timeout = timeout def _new_conn(self): return UnixHTTPConnection(self.socket_path, self.timeout) class UnixAdapter(HTTPAdapter): def __init__(self, timeout=60): super(UnixAdapter, self).__init__() self.timeout = timeout def get_connection(self, socket_path, proxies=None): proxies = proxies or {} proxy = proxies.get(urlparse(socket_path.lower()).scheme) if proxy: raise ValueError('%s does not support specifying proxies' % self.__class__.__name__) return UnixHTTPConnectionPool(socket_path, self.timeout)
2,142
611
from django.test import TestCase
from jarvis.resume.utils.extractor import get_text
from jarvis.resume.utils.parser_helper import get_urls, get_url_response, url_categories, get_github_username, get_stackoverflow_userid, get_stackoverflow_username, get_name, get_id_from_linkedin_url, get_email
from unidecode import unidecode

# Sample resume used as the fixture for every test below.
path_to_test_data = 'resume/tests/test_data/1.pdf'

# URLs expected to be extracted from the sample resume ...
urls = ['https://github.com/imnithin',
        'http://imnithin.github.io',
        'https://gist.github.com/imnithin',
        'http://stackoverflow.com/users/2231236/nithin',
        'https://www.linkedin.com/in/imnithink']

# ... their expected categorization ...
categories = {'blog': ['http://imnithin.github.io'],
              'coding': [],
              'contributions': ['https://github.com/imnithin',
                                'https://gist.github.com/imnithin'],
              'forums': ['http://stackoverflow.com/users/2231236/nithin'],
              'others': [],
              'social': ['https://www.linkedin.com/in/imnithink']}

# ... and the expected flattened response entries.
url_response = [{'name': 'https://github.com/imnithin', 'type': 'contributions'},
                {'name': 'https://gist.github.com/imnithin', 'type': 'contributions'},
                {'name': 'https://www.linkedin.com/in/imnithink', 'type': 'social'},
                {'name': 'http://imnithin.github.io', 'type': 'blog'},
                {'name': 'http://stackoverflow.com/users/2231236/nithin', 'type': 'forums'}]


class ParserHelperUtilsTest(TestCase):
    """Unit tests for Parser Helper Functions"""

    def setUp(self):
        # Extract the raw resume text once per test; every helper parses it.
        self.text = get_text(path_to_test_data)

    def test_get_name(self):
        """Test User Name Obtained from jarvis.resume"""
        name = 'nithin'
        self.assertEqual(get_name(self.text)[0], name)

    def test_github_username(self):
        """Test GitHub Username"""
        github_user_name = 'imnithin'
        self.assertEqual(get_github_username(self.text), github_user_name)

    def test_stackoverflow_user_id(self):
        """Test StackOverflow user id"""
        stackoverflow_user_id = '2231236'
        self.assertEqual(get_stackoverflow_userid(self.text), stackoverflow_user_id)

    def test_stackoverflow_user_name(self):
        """Test StackOverflow User Name"""
        stackoverflow_user_name = 'nithin'
        self.assertEqual(get_stackoverflow_username(self.text), stackoverflow_user_name)

    def test_get_urls(self):
        """Test the full ordered URL list extracted from the resume."""
        self.assertEqual(get_urls(self.text), urls)

    def test_url_categories(self):
        """Test URL categorization.

        BUG FIX: the original compared the return values of ``list.sort()``,
        which are always ``None`` — the assertion could never fail.  Compare
        the actual sorted value lists instead.
        """
        self.assertEqual(sorted(url_categories(urls).values()),
                         sorted(categories.values()))

    def test_get_url_response(self):
        """Test the flattened URL response.

        BUG FIX: the original called ``.sort()`` on lists of dicts — that
        returns ``None`` (vacuous assert) and ordering dicts raises
        ``TypeError`` on Python 3.  Sort both sides by 'name' instead.
        """
        by_name = lambda entry: entry['name']
        self.assertEqual(sorted(get_url_response(categories), key=by_name),
                         sorted(url_response, key=by_name))

    def test_get_id_from_linkedin_url(self):
        """Test the LinkedIn id parsed out of the resume text."""
        linkedin_id = 'imnithink'
        self.assertEqual(unidecode(get_id_from_linkedin_url(self.text)).strip(), linkedin_id)

    def test_get_email(self):
        """Test the first e-mail address extracted from the resume."""
        email = 'nithinkool14@gmail.com'
        self.assertEqual(get_email(self.text)[0], email)
3,021
1,005
from __future__ import print_function from __future__ import division from sklearn.utils import check_random_state from sklearn import preprocessing as prep from utils.data import load_data, show_data_splits, shape_data from utils.evaluation import evaluate from utils.profiles import select_model, show_design, train, fit, compute_scores import theano import lasagne as lg import numpy as np import argparse import os ''' Hybrid music playlist continuation based on a song-to-playlist classifier. We learn a classifier that takes song features as inputs and predicts the playlists songs belong to. Once it is learned, such classifier can be used to populate a matrix of song-playlist scores describing how well a song and a playlist fit together. Thus, a playlist can be extended by selecting the songs with highest score. This approach is "hybrid" in the usual sense in the recommender systems literature, i.e., it combines content (given by the song features) and cf information (given by playlists examples). As it is, this approach only works on the so-called weak generalization setting. That is, the model is trained on the same playlists that will be extended. 
''' if __name__ == '__main__': parser = argparse.ArgumentParser(description='Hybrid music playlist continuation based on a song-to-playlist classifier.') parser.add_argument('--model', type=str, help='path to the model specification file', metavar='') parser.add_argument('--dataset', type=str, help='path to the playlists dataset directory', metavar='') parser.add_argument('--msd', type=str, help='path to the MSD directory', metavar='') parser.add_argument('--train', action='store_true', help='train the song-to-playist classifier with monitoring') parser.add_argument('--fit', action='store_true', help='fit the song-to-playlist classifier') parser.add_argument('--test', action='store_true', help='evaluate the playlist continuations') parser.add_argument('--ci', action='store_true', help='compute confidence intervals if True') parser.add_argument('--song_occ', type=int, help='test on songs observed song_occ times during training', nargs='+', metavar='') parser.add_argument('--metrics_file', type=str, help='file name to save metrics', metavar='') parser.add_argument('--seed', type=int, help='set random behavior', metavar='') args = parser.parse_args() # set random behavior rng = check_random_state(args.seed) lg.random.set_rng(rng) # set model configuration model = select_model(args.model) # prepare output directory data_name = os.path.basename(os.path.normpath(args.dataset)) out_dir = os.path.join('params', 'profiles', model.name + '_' + data_name + '_weak') if not os.path.exists(out_dir): os.makedirs(out_dir) # load data: playlists, splits, features and artist info data = load_data(args.dataset, args.msd, model) playlists_coo, split_weak, _, features, song2artist = data # playlists_coo are the playlists stored in coordinate format playlists_idx, songs_idx, _, idx2song = playlists_coo # each playlist is split into a "query" of ~80% of the songs (train_idx + # valid_idx) and a "continuation" of ~20% of the songs (test_idx) train_idx, valid_idx, test_idx = split_weak # 
define splits for this experiment # train model on the training queries # validate model on the validation queries # fit the model on the full queries # extend all the playlists, using all queries and continuations train_idx = train_idx valid_idx = valid_idx fit_idx = np.hstack((train_idx, valid_idx)) query_idx = fit_idx cont_idx = test_idx # provide data information show_data_splits(playlists_idx, songs_idx, idx2song, song2artist, train_idx, valid_idx, fit_idx, query_idx, cont_idx) # provide model information print('\nNetwork:') show_design(model) if args.train: # # train the hybrid model while validating on withheld playlists # # prepare input song features and playlist targets at training X_train, Y_train = shape_data( playlists_idx, songs_idx, idx2song, features, mode='train', subset=train_idx ) # prepare input song features and playlist targets at validation X_valid, Y_valid = shape_data( playlists_idx, songs_idx, idx2song, features, mode='test', subset=valid_idx ) # preprocess input features if required # use the training song features to standardize the validation data if model.standardize: scaler = prep.RobustScaler() X_train = scaler.fit_transform(X_train) X_valid = scaler.transform(X_valid) if model.normalize: X_train = prep.normalize(X_train, norm=model.normalize) X_valid = prep.normalize(X_valid, norm=model.normalize) # train the classifier train( model=model, train_input=X_train.astype(theano.config.floatX), train_target=Y_train.astype(np.int8), valid_input=X_valid.astype(theano.config.floatX), valid_target=Y_valid.astype(np.int8), out_dir=out_dir, random_state=rng ) if args.fit: # # fit the hybrid model # # prepare input song features and playlist targets at training X_fit, Y_fit = shape_data( playlists_idx, songs_idx, idx2song, features, mode='train', subset=fit_idx ) # preprocess input features if required if model.standardize: X_fit = prep.robust_scale(X_fit) if model.normalize: X_fit = prep.normalize(X_fit, norm=model.normalize) # fit the 
classifier fit( model=model, fit_input=X_fit.astype(theano.config.floatX), fit_target=Y_fit.astype(np.int8), out_dir=out_dir, random_state=rng ) if args.test: # # extend the playlists in the query split and evaluate the # continuations by comparing them to actual withheld continuations # # prepare input song features and playlist targets at test X_cont, Y_cont = shape_data( playlists_idx, songs_idx, idx2song, features, mode='test', subset=cont_idx ) # preprocess input features if required # use the training song features to standardize the test data if model.standardize: X_fit, _ = shape_data( playlists_idx, songs_idx, idx2song, features, mode='train', subset=fit_idx ) scaler = prep.RobustScaler() scaler.fit(X_fit) X_cont = scaler.transform(X_cont) if model.normalize: X_cont = prep.normalize(X_cont, norm=model.normalize) # songs in the "query" playlists need to be masked to make sure that # they are not recommended as continuations _, Y_query = shape_data( playlists_idx, songs_idx, idx2song, features, mode='test', subset=query_idx ) # get number of song occurrences when fitting for cold-start analysis # Y_fit = Y_query train_occ = np.asarray(Y_query.sum(axis=1)).flatten() # compute the song-playlist scores cont_output = compute_scores( model=model, params_dir=out_dir, cont_input=X_cont.astype(theano.config.floatX), cont_target=Y_cont.astype(np.int8) ) # evaluate the continuations evaluate( scores=[cont_output.T], targets=[Y_cont.T.tocsr()], queries=[Y_query.T.tocsr()], train_occ=[train_occ], k_list=[10, 30, 100], ci=args.ci, song_occ=args.song_occ, metrics_file=args.metrics_file )
8,032
2,346
from django.contrib import admin from django.db import models from easy_select2.widgets import Select2Multiple from news.models import Entry class EntryAdmin(admin.ModelAdmin): list_display = ('title', 'pub_date', 'author') readonly_fields = ('slug',) exclude = ('author',) formfield_overrides = { models.ManyToManyField: {'widget': Select2Multiple()} } def save_model(self, request, obj, form, change): if not change: obj.author = request.user obj.save() admin.site.register(Entry, EntryAdmin)
560
165
"""Tkinter GUI client for a Microsoft Presidio Analyzer gRPC service.

Lets the user pick text files, sends them to the analyzer service via
``analyzer_client``, shows progress, and renders the per-file JSON results.
Settings windows manage the server address, analyzer options, deny lists
and custom regex recognizers, all stored in module-level dictionaries.
"""

import analyzer_client as analyzer
from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
from tkinter import ttk
import json
import os
from pathlib import Path

# gRPC server endpoint (editable from the "Server settings" pane).
IP_ADDRESS = "localhost"
PORT = "8061"
ENGINE_CURR_OPTIONS = {}
# Options forwarded to the analyzer with every request; values are kept as
# strings because they are sent over the wire as-is.
ANALYZE_CURR_OPTIONS = {'language': 'en',
                        'entities': None,
                        'correlation_id': None,
                        'score_threshold': "0.1",
                        'return_decision_process': "0"}
# Parallel lists of deny-list entries; 'length' tracks how many are stored.
DENY_LIST = {'supported_entities': [],
             'valuesList': [],
             'length': 0}
# Parallel lists describing custom regex recognizers; 'length' as above.
REGEX_LIST = {'entities': [],
              'names_pattern': [],
              'patterns': [],
              'scores': [],
              'context_words': [],
              'length': 0}


class Frames(object):
    """Main application window and all of its child dialogs."""

    def __init__(self, root):
        """Build the main window: title banner plus Start/Settings buttons."""
        self.root = root
        self.root.title('Presidio Analyzer gRPC Client')
        self.root.geometry('650x260')
        self.root.configure(bg="#0B0C10")
        self.root.resizable(0, 0)
        # Title
        frameTitle = Frame(self.root, width = 650, height = 60, bg="#0B0C10")
        frameTitle.grid(row = 0, columnspan = 2)
        Label(frameTitle, text="Microsoft Presidio Analyzer", font=("Helvetica", 17, "bold"), bg="#0B0C10", fg="#C5C6C7", anchor = CENTER).pack(ipady = 20)
        # Settings
        frameBtnSettings = Frame(self.root, bg="#0B0C10")
        frameBtnSettings.grid(row = 2, columnspan = 2)
        settingsButton = Button(frameBtnSettings, text="Settings", font=("Helvetica", 14), bg="#0B0C10", fg="#C5C6C7", command = self.settings).pack(pady = 10, ipadx= 33, ipady = 3)
        # Start analyzer
        frameBtnAnalyze = Frame(self.root, width = 650, height = 1, bg="#0B0C10")
        frameBtnAnalyze.grid(row = 1, columnspan = 2)
        analyzeBtn = Button(frameTitle, text="Start analyzer", font=("Helvetica", 14), bg="#0B0C10", fg="#C5C6C7", command = self.startAnalyzer).pack(pady = 22, ipadx= 10, ipady = 3)

    def startAnalyzer(self):
        """Pick .txt files, stream them to the analyzer, then show results.

        Configures the client from the module-level option dictionaries,
        shows a determinate progress bar, and aborts on connection failure
        (sendRequestAnalyze returning -2).
        """
        dir_path = os.path.dirname(os.path.realpath(__file__))
        path = Path(dir_path)
        # Input files are expected under <project root>/files next to this script.
        self.root.filenames = filedialog.askopenfilenames(initialdir= str(path.parent.absolute()) + "/files", title="Select A File", filetypes=(("txt files", "*.txt"),("all files", "*.*")))
        if self.root.filenames:
            clientAnalyzer = analyzer.ClientEntity(IP_ADDRESS, PORT)
            # send options if setted
            for elem in ANALYZE_CURR_OPTIONS:
                clientAnalyzer.setupOptions(elem, ANALYZE_CURR_OPTIONS[elem], "ANALYZE_OPTIONS")
            if DENY_LIST['length'] > 0:
                clientAnalyzer.setupDenyList(DENY_LIST['supported_entities'], DENY_LIST['valuesList'])
            if REGEX_LIST['length'] > 0:
                # NOTE(review): only the first regex entity/context is sent
                # even when several are configured — confirm intended.
                patterns = analyzer.createPatternInfo(1, REGEX_LIST['names_pattern'], REGEX_LIST['patterns'], REGEX_LIST['scores'])
                clientAnalyzer.setupRegex(REGEX_LIST['entities'][0], patterns, REGEX_LIST['context_words'][0])
            # Modal-ish progress window while requests are in flight.
            progressWindow = Toplevel()
            progressWindow.title("Analyzer Status")
            progressWindow.geometry("330x80")
            progressWindow.configure(bg="white")
            self.root.update_idletasks()
            Label(progressWindow, text="Analyzer process is starting..it may take a while!", font=("Helvetica", 10), bg="white", fg="black").pack(side=TOP, padx = 15, pady = 7)
            progressBar = ttk.Progressbar(progressWindow, orient=HORIZONTAL, length=200, mode="determinate")
            progressBar.pack(side=TOP, pady = 14)
            self.root.update_idletasks()
            filenameList = []
            for path in self.root.filenames:
                # NOTE(review): split(".") breaks on names with extra dots.
                filename, ext = os.path.basename(path).split(".")
                filenameList.append(filename)
                res = clientAnalyzer.sendRequestAnalyze(os.path.basename(filename))
                if res == -2:
                    # Server unreachable: stop processing remaining files.
                    progressWindow.destroy()
                    messagebox.showerror("gRPC Server Error", "Cannot connect to the server! Check your server settings")
                    break
                if progressBar['value'] < 100:
                    progressBar['value'] += (100/len(self.root.filenames))
                    self.root.update_idletasks()
            if int(progressBar['value']) == 100:
                messagebox.showinfo(parent=progressWindow, message='Analyzer process completed!')
                progressWindow.destroy()
            if res != -2:
                clientAnalyzer.closeConnection()
                self.readResults(filenameList)

    def readResults(self, filenameList):
        """Open the results window: file list on the left, detail pane right."""
        self.result = Toplevel()
        self.result.title("Presidio Analyzer gRPC - RESULTS")
        self.result.geometry("850x450")
        self.result.configure(bg="#0B0C10")
        self.result.resizable(0, 0)
        ## List filename-results.txt
        frameList = Frame(self.result, width = 150, height = 30)
        frameList.pack(side=LEFT, padx=13)
        # Scrollbar
        resultsScrollbar = Scrollbar(frameList, orient=VERTICAL)
        listbox_widget = Listbox(frameList, yscrollcommand=resultsScrollbar.set, height = 20, font=("Courier", 12), bg="#1F2833", fg="#C5C6C7")
        # configure scrollbar
        resultsScrollbar.config(command=listbox_widget.yview)
        resultsScrollbar.pack(side=RIGHT, fill=Y)
        ## END LIST
        ## Frame that will contain results
        frameResults = Frame(self.result, width = 680, bg="#0B0C10")
        frameResults.pack(side=RIGHT, pady = 15, padx = 10)
        self.text_widget = Text(frameResults, font=("Courier", 13), spacing1=3, bg="#1F2833", fg="#C5C6C7")
        self.text_widget.pack(pady = 10, padx= 15)
        ## END FRAME
        for filename in filenameList:
            listbox_widget.insert(END, filename)
        # Selecting a filename loads its results into the detail pane.
        listbox_widget.bind('<<ListboxSelect>>', self.clickEvent)
        listbox_widget.pack()

    def clickEvent(self, e):
        """Render the JSON-lines results of the selected file.

        Reads the original text from files/<name>.txt and the matching
        analyzer-results/<name>-results.txt, then prints each detected
        entity with its source span highlighted by offset.
        """
        dir_path = os.path.dirname(os.path.realpath(__file__))
        path = Path(dir_path)
        currSelection = e.widget.curselection()
        filename = e.widget.get(currSelection)
        #print(filename)
        with open(str(path.parent.absolute()) + "/files/" + filename + ".txt", "r") as originalFile:
            originalText = originalFile.read()
        with open(str(path.parent.absolute()) + "/analyzer-results/" + filename + "-results.txt", "r") as resultsFile:
            # Text widget is kept disabled except while rewriting it.
            self.text_widget.configure(state='normal')
            self.text_widget.delete("1.0", END)
            for line in resultsFile:
                # One JSON object per line: start/end offsets into the
                # original text plus entity metadata.
                resultStr = json.loads(line)
                #print(resultStr)
                start = resultStr['start']
                end = resultStr['end']
                self.text_widget.insert(END, f"FOUND WORD: {originalText[start:end]}\n\n")
                self.text_widget.insert(END, f"ENTITY TYPE: {resultStr['entity_type']}\nSTART: {resultStr['start']}\nEND: {resultStr['end']}\nSCORE: {resultStr['score']}")
                self.text_widget.insert(END, "\n-------------------------------------------------\n")
            self.text_widget.configure(state='disabled')

    def settings(self):
        """Open the settings window with its category list.

        NOTE(review): this rebinds ``self.settings`` (the method name) to a
        Toplevel on first call; the button keeps the original bound method,
        but any later ``self.settings()`` call would fail — consider renaming.
        """
        self.settings = Toplevel()
        self.settings.title("Presidio Analyzer gRPC - Settings")
        self.settings.geometry("790x430")
        self.settings.configure(bg="#0B0C10")
        self.settings.resizable(0, 0)
        ## List of options
        frameList = Frame(self.settings, width = 100, height = 30)
        frameList.pack(side=LEFT, padx=8, pady=10)
        listbox_widget = Listbox(frameList, height = 20, font=("Courier", 12), bg="#1F2833", fg="#C5C6C7")
        ## Container options
        self.frameOptions = Frame(self.settings, bg="#0B0C10")
        self.frameOptions.pack(side=RIGHT, pady = 15, padx = 10, expand = True)
        listbox_widget.insert(0, "Server settings")
        listbox_widget.insert(1, "PII Recognition")
        listbox_widget.insert(2, "Analyzer Options")
        listbox_widget.bind('<<ListboxSelect>>', self.clickEventOption)
        listbox_widget.pack()

    def clickEventOption(self, e):
        """Rebuild the right-hand pane for the selected settings category."""
        currSelection = e.widget.curselection()
        optionName = e.widget.get(currSelection)
        # Clear whatever the previous category rendered.
        for widget in self.frameOptions.winfo_children():
            widget.destroy()
        if optionName == "Server settings":
            Label(self.frameOptions, text = "SERVER IP: " + IP_ADDRESS + " | SERVER PORT: " + str(PORT), font=("courier", 10), bg="#0B0C10", fg="#C5C6C7").pack(side=TOP)
            Label(self.frameOptions, text = "Server IP", font=("helvetica", 15), bg="#0B0C10", fg="#C5C6C7").pack(side=TOP, pady = 10)
            self.server_ip = Entry(self.frameOptions, font=("helvetica", 13), justify=CENTER, bd=3)
            self.server_ip.pack(anchor=S, pady = 5, padx = 20, ipady = 2)
            Label(self.frameOptions, text = "Server Port", font=("helvetica", 15), bg="#0B0C10", fg="#C5C6C7").pack(side=TOP, pady = 10)
            self.server_port = Entry(self.frameOptions, font=("helvetica", 13), justify=CENTER, bd=3)
            self.server_port.pack(anchor=S, pady = 5, padx = 20, ipady = 2)
            Button(self.frameOptions, text = "Save", font=("helvetica", 12), bg="#0B0C10", fg="#C5C6C7", command=self.setupServer).pack(side=TOP, ipadx = 10, pady = 10)
            # Pre-fill the entries with the current endpoint.
            if IP_ADDRESS != "null" and PORT != "null":
                self.server_ip.insert(0, IP_ADDRESS)
                self.server_port.insert(0, PORT)
        elif optionName == "Analyzer Options":
            # Two columns: option names on the left, value widgets right.
            frameNameOptions = Frame(self.frameOptions, width = 650, height = 60, bg="#0B0C10")
            frameNameOptions.grid(row = 0, column = 0, padx = 12)
            frameValues = Frame(self.frameOptions, width = 650, height = 60, bg="#0B0C10")
            frameValues.grid(row = 0, column = 1)
            Label(frameNameOptions, text = "LANGUAGE", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 0, column = 0, pady = 5)
            self.language = Entry(frameValues, font=("helvetica", 13), bd=3)
            self.language.grid(row = 0, column = 0, pady = 5)
            Label(frameNameOptions, text = "ENTITIES", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 1, column = 0, pady = 5)
            self.entities = Entry(frameValues, font=("helvetica", 13), bd=3)
            self.entities.grid(row = 1, column = 0, pady = 5)
            Label(frameNameOptions, text = "CORRELATION ID", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 2, column = 0, pady = 5)
            self.corr_id = Entry(frameValues, font=("helvetica", 13), bd=3)
            self.corr_id.grid(row = 2, column = 0, pady = 5)
            Label(frameNameOptions, text = "SCORE THRESHOLD", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 3, column = 0, pady = 5)
            self.score = Entry(frameValues, font=("helvetica", 13), bd=3)
            self.score.grid(row = 3, column = 0, pady = 5)
            # Radio pair backed by an IntVar seeded from the saved option.
            self.decision_process = IntVar(None, int(ANALYZE_CURR_OPTIONS['return_decision_process']))
            Label(frameNameOptions, text = "RETURN DECISION PROCESS", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 4, column = 0, pady = 5)
            Radiobutton(frameValues, text="YES", font=("helvetica", 10), variable=self.decision_process, value=1).grid(row=4, sticky=W, pady = 5)
            Radiobutton(frameValues, text="NO", font=("helvetica", 10), variable=self.decision_process, value=0).grid(row=4, sticky=E, pady = 5)
            Button(self.frameOptions, text = "Save", font=("helvetica", 12), bg="#0B0C10", fg="#C5C6C7", command=self.saveAnalyzeConfig).grid(row = 5, columnspan = 2, ipadx = 10, pady = 20)
            # load the current config
            self.language.insert(0, ANALYZE_CURR_OPTIONS['language'])
            if ANALYZE_CURR_OPTIONS['entities'] != None:
                self.entities.insert(0, ANALYZE_CURR_OPTIONS['entities'])
            if ANALYZE_CURR_OPTIONS['correlation_id'] != None:
                self.corr_id.insert(0, ANALYZE_CURR_OPTIONS['correlation_id'])
            self.score.insert(0, ANALYZE_CURR_OPTIONS['score_threshold'])
        elif optionName == "PII Recognition":
            frameMenu = Frame(self.frameOptions, bg="#0B0C10")
            frameMenu.grid(row = 0, column = 0, padx = 12)
            self.frameInsertOption = Frame(self.frameOptions, width = 300, height = 150, bg="#0B0C10")
            self.frameInsertOption.grid(row = 0, column = 1, padx = 12)
            # menu options
            self.value_inside = StringVar()
            # Set the default value of the variable
            self.value_inside.set("Select an option")
            # Dropdown chooses between regex- and deny-list-based recognizers.
            recognition_menu = OptionMenu(frameMenu, self.value_inside, "Select an option", *("Regex", "Deny List"), command=self.optionChanged)
            recognition_menu.pack()
            self.frameCurr = Frame(self.frameOptions, width = 520, height = 100, bg="#0B0C10")
            self.frameCurr.grid(row = 1, columnspan = 2, pady = 7)

    def setupServer(self):
        """Persist the server IP/port entered in the settings pane."""
        global IP_ADDRESS, PORT
        IP_ADDRESS = self.server_ip.get()
        PORT = self.server_port.get()
        messagebox.showinfo(parent=self.settings, title = "Save", message=f"Server options saved succefully!")

    def saveAnalyzeConfig(self):
        """Validate and store the analyzer options into ANALYZE_CURR_OPTIONS."""
        # Only English is accepted by this client.
        if self.language.get() != "en":
            messagebox.showerror("Setup Error", "Only English language is supported!")
        else:
            ANALYZE_CURR_OPTIONS['language'] = self.language.get()
            # Empty or "none" entity filter means "all entities".
            if self.entities.get() == "" or str(self.entities.get()).lower() == "none":
                ANALYZE_CURR_OPTIONS['entities'] = None
            else:
                ANALYZE_CURR_OPTIONS['entities'] = self.entities.get()
            if self.corr_id.get() == "":
                ANALYZE_CURR_OPTIONS['correlation_id'] = None
            else:
                ANALYZE_CURR_OPTIONS['correlation_id'] = self.corr_id.get()
            ANALYZE_CURR_OPTIONS['score_threshold'] = self.score.get()
            ANALYZE_CURR_OPTIONS['return_decision_process'] = str(self.decision_process.get())
            print(ANALYZE_CURR_OPTIONS)
            messagebox.showinfo(parent=self.settings, title = "Save", message=f"Options saved succefully!")

    def optionChanged(self, e):
        """Rebuild the PII-recognition form for the chosen recognizer type."""
        # Drop the widgets of the previously selected recognizer type.
        for widget in self.frameInsertOption.winfo_children():
            widget.destroy()
        for widget in self.frameCurr.winfo_children():
            widget.destroy()
        if self.value_inside.get() == "Deny List":
            Label(self.frameInsertOption, text = "ENTITY", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 0, column = 0, pady = 5, padx = 5)
            self.entity = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
            self.entity.grid(row = 0, column = 1, pady = 5)
            Label(self.frameInsertOption, text = "VALUES LIST", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 1, column = 0, pady = 5, padx = 5)
            self.values = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
            self.values.grid(row = 1, column = 1, pady = 5)
            Button(self.frameInsertOption, text = "Save", font=("helvetica", 12), bg="#0B0C10", fg="#C5C6C7", command=self.setupDenyList).grid(row=3, column = 0, ipadx = 10, pady = 20)
            Button(self.frameInsertOption, text = "Reset", font=("helvetica", 12), bg="#0B0C10", fg="#C5C6C7", command=self.clearDenyConfig).grid(row=3, column = 1, ipadx = 10, pady = 20)
            # Print current deny lists
            self.deny_widget = Text(self.frameCurr, font=("helvetica", 13), width = 60, height = 10, spacing1=3, bg="#1F2833", fg="#C5C6C7")
            self.deny_widget.grid(row = 0, column = 0)
            for i in range(DENY_LIST['length']):
                self.deny_widget.insert(END, f"{DENY_LIST['supported_entities'][i]} - {DENY_LIST['valuesList'][i]}\n")
            self.deny_widget.configure(state='disabled')
        elif self.value_inside.get() == "Regex":
            Label(self.frameInsertOption, text = "ENTITY", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 0, column = 0, pady = 5, padx = 5)
            self.entity_regex = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
            self.entity_regex.grid(row = 0, column = 1, pady = 5)
            Label(self.frameInsertOption, text = "NAME PATTERN", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 1, column = 0, pady = 5, padx = 5)
            self.name_pattern = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
            self.name_pattern.grid(row = 1, column = 1, pady = 5)
            Label(self.frameInsertOption, text = "REGEX", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 2, column = 0, pady = 5, padx = 5)
            self.regex = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
            self.regex.grid(row = 2, column = 1, pady = 5)
            Label(self.frameInsertOption, text = "SCORE", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 3, column = 0, pady = 5, padx = 5)
            self.score_regex = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
            self.score_regex.grid(row = 3, column = 1, pady = 5)
            Label(self.frameInsertOption, text = "CONTEXT WORD", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 4, column = 0, pady = 5, padx = 5)
            self.context = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
            self.context.grid(row = 4, column = 1, pady = 5)
            Button(self.frameInsertOption, text = "Save", font=("helvetica", 12), bg="#0B0C10", fg="#C5C6C7", command=self.setupRegexList).grid(row=5, column = 0, ipadx = 10, pady = 10)
            Button(self.frameInsertOption, text = "Reset", font=("helvetica", 12), bg="#0B0C10", fg="#C5C6C7", command=self.clearRegexConfig).grid(row=5, column = 1, ipadx = 10, pady = 10)
            self.regex_widget = Text(self.frameCurr, font=("helvetica", 13), width = 60, height = 6, spacing1=3, bg="#1F2833", fg="#C5C6C7")
            self.regex_widget.grid(row = 0, column = 0)
            # print current regex patterns
            for i in range(REGEX_LIST['length']):
                self.regex_widget.insert(END, f"{REGEX_LIST['entities'][i]} - {REGEX_LIST['names_pattern'][i]} - {REGEX_LIST['patterns'][i]} - {REGEX_LIST['scores'][i]} - {REGEX_LIST['context_words'][i]}\n")
            self.regex_widget.configure(state='disabled')

    def setupDenyList(self):
        """Append the entered entity/value pair to the global deny list."""
        # Minimal validation: both fields need more than two characters.
        if len(self.entity.get()) > 2 and len(self.values.get()) > 2:
            DENY_LIST['supported_entities'].append(self.entity.get())
            DENY_LIST['valuesList'].append(self.values.get())
            DENY_LIST['length'] += 1
            self.deny_widget.configure(state='normal')
            self.deny_widget.insert(END, f"{self.entity.get()} - {self.values.get()}\n")
            self.deny_widget.configure(state='disabled')
            messagebox.showinfo(parent=self.settings, title = "Save", message=f"Deny list for {self.entity.get()} saved!")
        else:
            messagebox.showerror(parent=self.settings, title ="Error", message="Compile all the fields!")
        #print(DENY_LIST)

    def clearDenyConfig(self):
        """Reset the global deny-list store after user confirmation."""
        answer = messagebox.askyesno(parent=self.settings, title = None, message="Do you want to reset deny list configuration?")
        if answer:
            DENY_LIST['supported_entities'] = []
            DENY_LIST['valuesList'] = []
            DENY_LIST['length'] = 0
            self.deny_widget.configure(state='normal')
            self.deny_widget.delete("1.0", END)
            self.deny_widget.configure(state='disabled')

    def setupRegexList(self):
        """Append the entered regex recognizer to the global regex list."""
        # Only the entity name is validated; other fields are taken as-is.
        if len(self.entity_regex.get()) > 2:
            REGEX_LIST['entities'].append(self.entity_regex.get())
            REGEX_LIST['names_pattern'].append(self.name_pattern.get())
            REGEX_LIST['patterns'].append(self.regex.get())
            REGEX_LIST['scores'].append(self.score_regex.get())
            REGEX_LIST['context_words'].append(self.context.get())
            REGEX_LIST['length'] += 1
            self.regex_widget.configure(state='normal')
            self.regex_widget.insert(END, f"{self.entity_regex.get()} - {self.name_pattern.get()} - {self.regex.get()} - {self.score_regex.get()} - {self.context.get()}\n")
            self.regex_widget.configure(state='disabled')
            messagebox.showinfo(parent=self.settings, title = "Save", message=f"Regex for {self.entity_regex.get()} saved!")
        else:
            messagebox.showerror(parent=self.settings, title ="Error", message="Compile all the fields!")
        #print(REGEX_LIST)

    def clearRegexConfig(self):
        """Reset the global regex-recognizer store after user confirmation."""
        answer = messagebox.askyesno(parent=self.settings, title = None, message="Do you want to reset regex configuration?")
        if answer:
            REGEX_LIST['entities'] = []
            REGEX_LIST['names_pattern'] = []
            REGEX_LIST['patterns'] = []
            REGEX_LIST['scores'] = []
            REGEX_LIST['context_words'] = []
            REGEX_LIST['length'] = 0
            self.regex_widget.configure(state='normal')
            self.regex_widget.delete("1.0", END)
            self.regex_widget.configure(state='disabled')


# Application entry point: build the main window and enter the Tk loop.
root = Tk()
app = Frames(root)
root.mainloop()
21,999
7,806
# mods 1 import random print(random.randint(1,10))
53
25