max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
application/core/generators/cut_and_match.py | opencbsoft/kids-worksheet-generator | 0 | 6623351 | import string
import random
from core.utils import Generator
class Main(Generator):
name = 'Decupeaza si potriveste'
years = [3, 4, 5]
directions = 'Decupeaza si lipseste literele si cifrele in zonele dedicate lor.'
template = 'generators/cut_and_match.html'
content_height = 1050
def generate_data(self):
if self.extra:
max_number = int(self.extra)
else:
max_number = 10
possible = [i for i in string.ascii_letters]
possible += [str(i) for i in range(1, max_number + 1)]
results = []
for i in range(self.count):
row = random.sample(range(1, max_number + 1), 5) # Select a minimum of 5 numbers and 5 letters before
row += random.sample(string.ascii_letters, 5) # Also add 5 minimum letters
while len(row) != 20:
symbol = random.choice(possible)
if symbol not in row:
row.append(symbol)
random.shuffle(row)
results.append(row)
self.data = results
return results
def get_context_data(self, iteration):
context = super(Main, self).get_context_data(iteration)
context['items'] = random.choice(context['items'])
return context
| import string
import random
from core.utils import Generator
class Main(Generator):
name = 'Decupeaza si potriveste'
years = [3, 4, 5]
directions = 'Decupeaza si lipseste literele si cifrele in zonele dedicate lor.'
template = 'generators/cut_and_match.html'
content_height = 1050
def generate_data(self):
if self.extra:
max_number = int(self.extra)
else:
max_number = 10
possible = [i for i in string.ascii_letters]
possible += [str(i) for i in range(1, max_number + 1)]
results = []
for i in range(self.count):
row = random.sample(range(1, max_number + 1), 5) # Select a minimum of 5 numbers and 5 letters before
row += random.sample(string.ascii_letters, 5) # Also add 5 minimum letters
while len(row) != 20:
symbol = random.choice(possible)
if symbol not in row:
row.append(symbol)
random.shuffle(row)
results.append(row)
self.data = results
return results
def get_context_data(self, iteration):
context = super(Main, self).get_context_data(iteration)
context['items'] = random.choice(context['items'])
return context
| en | 0.641488 | # Select a minimum of 5 numbers and 5 letters before # Also add 5 minimum letters | 3.483956 | 3 |
Dataset/Leetcode/train/112/214.py | kkcookies99/UAST | 0 | 6623352 | class Solution:
def XXX(self, root: Optional[TreeNode], targetSum: int) -> bool:
if not root:
return False
res = False
def dfs(node, pre):
nonlocal res
if not node:
return
node.val += pre
pre = node.val
if node.left == node.right == None and pre == targetSum:
res = True
return
dfs(node.left, pre)
dfs(node.right, pre)
dfs(root, 0)
return res
| class Solution:
def XXX(self, root: Optional[TreeNode], targetSum: int) -> bool:
if not root:
return False
res = False
def dfs(node, pre):
nonlocal res
if not node:
return
node.val += pre
pre = node.val
if node.left == node.right == None and pre == targetSum:
res = True
return
dfs(node.left, pre)
dfs(node.right, pre)
dfs(root, 0)
return res
| none | 1 | 3.186179 | 3 | |
kot/debug/__init__.py | dragon-hex/kot-two-project | 0 | 6623353 | <gh_stars>0
from .debug import kotDebug | from .debug import kotDebug | none | 1 | 1.034956 | 1 | |
Tools/migrate3d.py | SanTelva/Crystal3D | 0 | 6623354 | import sys
sys.path += ['..']
from draw2.nboio import *
from draw2.nbolib import *
from draw2._legacy.igorio import *
if len(sys.argv) < 3:
print('Usage:', sys.argv[0], 'input.igor3d output.nbo3d')
__import__('_sitebuiltins').Quitter('','')(1) # exit(1)
angleA, angleB, angleG, a, b, c, primitiveCell = myInput(sys.argv[1])
nodes, bonds = igorToNBO((angleA, angleB, angleG, a, b, c, primitiveCell))
nboOutput(sys.argv[2], (angleA, angleB, angleG), a, b, c, nodes, bonds)
| import sys
sys.path += ['..']
from draw2.nboio import *
from draw2.nbolib import *
from draw2._legacy.igorio import *
if len(sys.argv) < 3:
print('Usage:', sys.argv[0], 'input.igor3d output.nbo3d')
__import__('_sitebuiltins').Quitter('','')(1) # exit(1)
angleA, angleB, angleG, a, b, c, primitiveCell = myInput(sys.argv[1])
nodes, bonds = igorToNBO((angleA, angleB, angleG, a, b, c, primitiveCell))
nboOutput(sys.argv[2], (angleA, angleB, angleG), a, b, c, nodes, bonds)
| none | 1 | 2.370426 | 2 | |
search/main.py | Necrophote/demotvcrawl | 0 | 6623355 | <filename>search/main.py
from app import app
from db_setup import init_db, db_session
from forms import MusicSearchForm
from flask import flash, render_template, request, redirect
from models import Product
from tables import Results
init_db()
@app.route('/', methods=['GET', 'POST'])
def index():
search = MusicSearchForm(request.form)
if request.method == 'POST':
return search_results(search)
return render_template('index.html', form=search)
@app.route('/results')
def search_results(search):
results = []
search_string = search.data['search']
if search_string:
qry = db_session.query(Product).filter(Product.name.contains(search_string))
results = qry.all()
if not results:
flash('No results found!')
return redirect('/')
else:
# display results
table = Results(results)
table.border = True
return render_template('results.html', table=table)
if __name__ == '__main__':
app.run() | <filename>search/main.py
from app import app
from db_setup import init_db, db_session
from forms import MusicSearchForm
from flask import flash, render_template, request, redirect
from models import Product
from tables import Results
init_db()
@app.route('/', methods=['GET', 'POST'])
def index():
search = MusicSearchForm(request.form)
if request.method == 'POST':
return search_results(search)
return render_template('index.html', form=search)
@app.route('/results')
def search_results(search):
results = []
search_string = search.data['search']
if search_string:
qry = db_session.query(Product).filter(Product.name.contains(search_string))
results = qry.all()
if not results:
flash('No results found!')
return redirect('/')
else:
# display results
table = Results(results)
table.border = True
return render_template('results.html', table=table)
if __name__ == '__main__':
app.run() | en | 0.322625 | # display results | 2.374731 | 2 |
Server/TCP/example_client.py | listenzcc/SocketServerInPython | 0 | 6623356 | # File: example_client.py
# Aim: Define example of client connection
import socket
import threading
from . import CONFIG, tools
CONFIG.logger.debug('Define components in TCP package')
class ExampleClient(object):
def __init__(self, IP, port):
# Initialize and setup client
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# Connet to IP:port
client.connect((IP, port))
name = client.getsockname()
# Report and set attributes
CONFIG.logger.info(
f'Client {name} is connected to server at {IP}:{port}')
self.client = client
self.name = name
def listen(self):
# Listen to the server
CONFIG.logger.info(f'Client {self.name} starts listening')
while True:
# Wait until new message is received
income = self.client.recv(tools.buffer_size)
CONFIG.logger.info(f'Received {income} from server')
def start(self):
# Start client connection to server
thread = threading.Thread(
target=self.listen, name='TCP connection client')
thread.setDaemon(True)
thread.start()
CONFIG.logger.info(f'Client starts listening')
# Say hello to server
self.send(f'Hello from client {self.name}')
def send(self, message):
# Send [message] to server
message = tools.encode(message)
self.client.sendall(message)
CONFIG.logger.debug(f'Sent {message} to server')
| # File: example_client.py
# Aim: Define example of client connection
import socket
import threading
from . import CONFIG, tools
CONFIG.logger.debug('Define components in TCP package')
class ExampleClient(object):
def __init__(self, IP, port):
# Initialize and setup client
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# Connet to IP:port
client.connect((IP, port))
name = client.getsockname()
# Report and set attributes
CONFIG.logger.info(
f'Client {name} is connected to server at {IP}:{port}')
self.client = client
self.name = name
def listen(self):
# Listen to the server
CONFIG.logger.info(f'Client {self.name} starts listening')
while True:
# Wait until new message is received
income = self.client.recv(tools.buffer_size)
CONFIG.logger.info(f'Received {income} from server')
def start(self):
# Start client connection to server
thread = threading.Thread(
target=self.listen, name='TCP connection client')
thread.setDaemon(True)
thread.start()
CONFIG.logger.info(f'Client starts listening')
# Say hello to server
self.send(f'Hello from client {self.name}')
def send(self, message):
# Send [message] to server
message = tools.encode(message)
self.client.sendall(message)
CONFIG.logger.debug(f'Sent {message} to server')
| en | 0.845473 | # File: example_client.py # Aim: Define example of client connection # Initialize and setup client # Connet to IP:port # Report and set attributes # Listen to the server # Wait until new message is received # Start client connection to server # Say hello to server # Send [message] to server | 3.214319 | 3 |
CodeGenerator.py | rdbv/cisol | 55 | 6623357 | <filename>CodeGenerator.py
from capstone import *
from capstone.x86 import *
from Instructions import *
# Basic code..
codeC = '''
#include "environment.h"
void func() {
%s}\n
int main() {
}
'''
''' Just load bytes in desired range '''
def loadCode(filename, start, stop):
f = open(filename, 'rb')
code = f.read()
return code[start:stop]
class CodeGenerator:
def __init__(self, name, start, stop, flags):
self.code = loadCode(name, start, stop)
self.cCode = ''
# Instruction lookup
self.cinstr = {
'mov' : mov,
'movzx' : movzx,
'movsx' : movzx,
'cdqe' : cdqe,
'sub' : sub,
'add' : add,
'inc' : inc,
'dec' : dec,
'cmp' : cmp,
'jmp' : jmp,
'jne' : jne,
'je' : je,
'jnb' : jnb,
'jb' : jb,
'jbe' : jbe
}
self.jumps = ['jmp', 'je', 'jne', 'jz', 'jnz', 'jnb', 'jb', 'jbe']
self.usedFlags = flags
# Init capstone
self.cs = Cs(CS_ARCH_X86, CS_MODE_64)
self.cs.detail = True
self.instructions = [x for x in self.cs.disasm(self.code, 0)]
self.jumpPlaces = {}
self.checkJumps()
# go go go
self.generate()
''' Just fill C template '''
def getAsC(self):
return codeC % self.cCode
''' Every jump must have place to jump,
here we check, that every jump has place,
if code is self-modyifing or jumps in middle of instruction
then this method will fail
'''
def checkJumps(self):
for instr in self.instructions:
# Is it jump?
if instr.mnemonic in self.jumps:
# Yes, so get address
addr = int(instr.operands[0].imm)
found = False
# Check existence of target instruction
for _instr in self.instructions:
if _instr.address == addr:
found = True
self.jumpPlaces[addr] = True
break
if not found:
print("Jump to nonexisting instr (Or jump in instr middle)...\nQuitting...")
exit(0)
''' Go over each instruction in range '''
def generate(self):
for inst in self.instructions:
# If we will jump to this instruction
# Add label for goto
if inst.address in self.jumpPlaces:
self.emit('_%x:' % (inst.address), indent=0)
# Operands is list of ops, 0, 1, 2 (may more) ops
ops = [x for x in inst.operands]
args = [self.buildOperand(inst, x) for x in ops]
#print('%x: %s %s -> %s' % (inst.address, inst.mnemonic, inst.op_str, '') )
# check is available
if inst.mnemonic not in self.cinstr:
print("Instruction not found...\nQuitting...")
exit(0)
# process instruction
self.cinstr[ inst.mnemonic ](*args, inst, self)
'''
Create Operand
'''
def buildOperand(self, instr, operand):
type = operand.type
# eg. regs.eax
if type == X86_OP_REG:
return Operand(type, instr.reg_name(operand.reg), operand.size)
# eg. rax + rbx * 1 - 0x13
if type == X86_OP_MEM:
baseReg = instr.reg_name(operand.mem.base)
displacement = operand.mem.disp
index, scale = (operand.mem.index, operand.mem.scale)
out = baseReg
if index:
out += '+' + instr.reg_name(index) + '*' + str(scale)
if displacement:
if displacement < 0: out += '-'
if displacement > 0: out += '+'
out += str(abs(displacement))
return Operand(type, out, operand.size)
# eg. 0x10
if type == X86_OP_IMM:
return Operand(type, str(operand.imm), operand.size)
raise "Unknown type..."
''' Spaces FTW '''
def emit(self, data, flags = '', actions = '', comment = '', indent = 1):
self.cCode += ' ' * indent
self.cCode += data + '; '
# Append comment
if len(comment):
self.cCode += '// ' + comment + '\n'
# Check is flag used, and append
if len(flags):
for (id, flag) in flags:
if id in self.usedFlags:
self.cCode += (' ' * indent) + ' ' + flag + ';\n'
# Add actions, executed after setting flags
if len(actions):
for action in actions:
self.cCode += (' ' * indent) + ' ' + action + ';\n'
if len(comment) == 0 and len(flags) == 0:
self.cCode += '\n'
| <filename>CodeGenerator.py
from capstone import *
from capstone.x86 import *
from Instructions import *
# Basic code..
codeC = '''
#include "environment.h"
void func() {
%s}\n
int main() {
}
'''
''' Just load bytes in desired range '''
def loadCode(filename, start, stop):
f = open(filename, 'rb')
code = f.read()
return code[start:stop]
class CodeGenerator:
def __init__(self, name, start, stop, flags):
self.code = loadCode(name, start, stop)
self.cCode = ''
# Instruction lookup
self.cinstr = {
'mov' : mov,
'movzx' : movzx,
'movsx' : movzx,
'cdqe' : cdqe,
'sub' : sub,
'add' : add,
'inc' : inc,
'dec' : dec,
'cmp' : cmp,
'jmp' : jmp,
'jne' : jne,
'je' : je,
'jnb' : jnb,
'jb' : jb,
'jbe' : jbe
}
self.jumps = ['jmp', 'je', 'jne', 'jz', 'jnz', 'jnb', 'jb', 'jbe']
self.usedFlags = flags
# Init capstone
self.cs = Cs(CS_ARCH_X86, CS_MODE_64)
self.cs.detail = True
self.instructions = [x for x in self.cs.disasm(self.code, 0)]
self.jumpPlaces = {}
self.checkJumps()
# go go go
self.generate()
''' Just fill C template '''
def getAsC(self):
return codeC % self.cCode
''' Every jump must have place to jump,
here we check, that every jump has place,
if code is self-modyifing or jumps in middle of instruction
then this method will fail
'''
def checkJumps(self):
for instr in self.instructions:
# Is it jump?
if instr.mnemonic in self.jumps:
# Yes, so get address
addr = int(instr.operands[0].imm)
found = False
# Check existence of target instruction
for _instr in self.instructions:
if _instr.address == addr:
found = True
self.jumpPlaces[addr] = True
break
if not found:
print("Jump to nonexisting instr (Or jump in instr middle)...\nQuitting...")
exit(0)
''' Go over each instruction in range '''
def generate(self):
for inst in self.instructions:
# If we will jump to this instruction
# Add label for goto
if inst.address in self.jumpPlaces:
self.emit('_%x:' % (inst.address), indent=0)
# Operands is list of ops, 0, 1, 2 (may more) ops
ops = [x for x in inst.operands]
args = [self.buildOperand(inst, x) for x in ops]
#print('%x: %s %s -> %s' % (inst.address, inst.mnemonic, inst.op_str, '') )
# check is available
if inst.mnemonic not in self.cinstr:
print("Instruction not found...\nQuitting...")
exit(0)
# process instruction
self.cinstr[ inst.mnemonic ](*args, inst, self)
'''
Create Operand
'''
def buildOperand(self, instr, operand):
type = operand.type
# eg. regs.eax
if type == X86_OP_REG:
return Operand(type, instr.reg_name(operand.reg), operand.size)
# eg. rax + rbx * 1 - 0x13
if type == X86_OP_MEM:
baseReg = instr.reg_name(operand.mem.base)
displacement = operand.mem.disp
index, scale = (operand.mem.index, operand.mem.scale)
out = baseReg
if index:
out += '+' + instr.reg_name(index) + '*' + str(scale)
if displacement:
if displacement < 0: out += '-'
if displacement > 0: out += '+'
out += str(abs(displacement))
return Operand(type, out, operand.size)
# eg. 0x10
if type == X86_OP_IMM:
return Operand(type, str(operand.imm), operand.size)
raise "Unknown type..."
''' Spaces FTW '''
def emit(self, data, flags = '', actions = '', comment = '', indent = 1):
self.cCode += ' ' * indent
self.cCode += data + '; '
# Append comment
if len(comment):
self.cCode += '// ' + comment + '\n'
# Check is flag used, and append
if len(flags):
for (id, flag) in flags:
if id in self.usedFlags:
self.cCode += (' ' * indent) + ' ' + flag + ';\n'
# Add actions, executed after setting flags
if len(actions):
for action in actions:
self.cCode += (' ' * indent) + ' ' + action + ';\n'
if len(comment) == 0 and len(flags) == 0:
self.cCode += '\n'
| en | 0.709669 | # Basic code.. #include "environment.h" void func() { %s}\n int main() { } Just load bytes in desired range # Instruction lookup # Init capstone # go go go Just fill C template Every jump must have place to jump, here we check, that every jump has place, if code is self-modyifing or jumps in middle of instruction then this method will fail # Is it jump? # Yes, so get address # Check existence of target instruction Go over each instruction in range # If we will jump to this instruction # Add label for goto # Operands is list of ops, 0, 1, 2 (may more) ops #print('%x: %s %s -> %s' % (inst.address, inst.mnemonic, inst.op_str, '') ) # check is available # process instruction Create Operand # eg. regs.eax # eg. rax + rbx * 1 - 0x13 # eg. 0x10 Spaces FTW # Append comment # Check is flag used, and append # Add actions, executed after setting flags | 3.019857 | 3 |
ble_client/utilities.py | bnlerner/ble_client | 0 | 6623358 | """
This module offers some utilities, in a way they are work in both Python 2 and 3
"""
from pybleno import Characteristic
import array
import sys
import traceback
import binascii
import logging
from struct import unpack
import queue as queue
log = logging.getLogger(__name__)
queue = queue # just to use it
class BleCharacteristic(Characteristic):
def __init__(self, uuid):
Characteristic.__init__(self, {
'uuid': uuid,
'properties': ['read', 'write', 'notify'],
'value': None
})
self._value = array.array('B', [0] * 0)
self._updateValueCallback = None
def onReadRequest(self, offset, callback):
print('EchoCharacteristic - %s - onReadRequest: value = %s' % (self['uuid'], [hex(c) for c in self._value]))
callback(Characteristic.RESULT_SUCCESS, self._value[offset:])
def onWriteRequest(self, data, offset, withoutResponse, callback):
self._value = data
print('EchoCharacteristic - %s - onWriteRequest: value = %s' % (self['uuid'], [hex(c) for c in self._value]))
if self._updateValueCallback:
print('EchoCharacteristic - onWriteRequest: notifying')
self._updateValueCallback(self._value)
callback(Characteristic.RESULT_SUCCESS)
def onSubscribe(self, maxValueSize, updateValueCallback):
print('EchoCharacteristic - onSubscribe')
self._updateValueCallback = updateValueCallback
def onUnsubscribe(self):
print('EchoCharacteristic - onUnsubscribe')
self._updateValueCallback = None
def check_unpack(seq, index, pattern, size):
"""Check that we got size bytes, if so, unpack using pattern"""
data = seq[index: index + size]
assert len(data) == size, "Unexpected data len %d, expected %d" % (len(data), size)
return unpack(pattern, data)[0]
def usbyte(seq, index):
return check_unpack(seq, index, "<B", 1)
def ushort(seq, index):
return check_unpack(seq, index, "<H", 2)
def usint(seq, index):
return check_unpack(seq, index, "<I", 4)
def str2hex(data): # we need it for python 2+3 compatibility
# if sys.version_info[0] == 3:
# data = bytes(data, 'ascii')
if not isinstance(data, (bytes, bytearray)):
data = bytes(data, "ascii")
hexed = binascii.hexlify(data)
return hexed
| """
This module offers some utilities, in a way they are work in both Python 2 and 3
"""
from pybleno import Characteristic
import array
import sys
import traceback
import binascii
import logging
from struct import unpack
import queue as queue
log = logging.getLogger(__name__)
queue = queue # just to use it
class BleCharacteristic(Characteristic):
def __init__(self, uuid):
Characteristic.__init__(self, {
'uuid': uuid,
'properties': ['read', 'write', 'notify'],
'value': None
})
self._value = array.array('B', [0] * 0)
self._updateValueCallback = None
def onReadRequest(self, offset, callback):
print('EchoCharacteristic - %s - onReadRequest: value = %s' % (self['uuid'], [hex(c) for c in self._value]))
callback(Characteristic.RESULT_SUCCESS, self._value[offset:])
def onWriteRequest(self, data, offset, withoutResponse, callback):
self._value = data
print('EchoCharacteristic - %s - onWriteRequest: value = %s' % (self['uuid'], [hex(c) for c in self._value]))
if self._updateValueCallback:
print('EchoCharacteristic - onWriteRequest: notifying')
self._updateValueCallback(self._value)
callback(Characteristic.RESULT_SUCCESS)
def onSubscribe(self, maxValueSize, updateValueCallback):
print('EchoCharacteristic - onSubscribe')
self._updateValueCallback = updateValueCallback
def onUnsubscribe(self):
print('EchoCharacteristic - onUnsubscribe')
self._updateValueCallback = None
def check_unpack(seq, index, pattern, size):
"""Check that we got size bytes, if so, unpack using pattern"""
data = seq[index: index + size]
assert len(data) == size, "Unexpected data len %d, expected %d" % (len(data), size)
return unpack(pattern, data)[0]
def usbyte(seq, index):
return check_unpack(seq, index, "<B", 1)
def ushort(seq, index):
return check_unpack(seq, index, "<H", 2)
def usint(seq, index):
return check_unpack(seq, index, "<I", 4)
def str2hex(data): # we need it for python 2+3 compatibility
# if sys.version_info[0] == 3:
# data = bytes(data, 'ascii')
if not isinstance(data, (bytes, bytearray)):
data = bytes(data, "ascii")
hexed = binascii.hexlify(data)
return hexed
| en | 0.788593 | This module offers some utilities, in a way they are work in both Python 2 and 3 # just to use it Check that we got size bytes, if so, unpack using pattern # we need it for python 2+3 compatibility # if sys.version_info[0] == 3: # data = bytes(data, 'ascii') | 2.429798 | 2 |
conda_hooks/env_store.py | f-koehler/conda-hooks | 1 | 6623359 | <reponame>f-koehler/conda-hooks
from __future__ import annotations
import argparse
import logging
import os
from pathlib import Path
from typing import Sequence
from .environment import ENV_DEFAULT_PATHS, EnvironmentFile
from .errors import CondaHookError, EnvFileNotFoundError, NoEnvFileError, NotAFileError
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
LOGGER = logging.getLogger(__name__)
def get_argument_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description=(
"Check if one or multiple conda environment files"
" are up-to-date with the installed packages."
),
)
parser.add_argument(
"-g",
"--glob",
type=str,
action="append",
default=[],
help=(
"Globbing pattern used to find environment files"
" (can be specified multiple times)."
),
)
parser.add_argument(
"files",
type=Path,
nargs="*",
default=[],
help="Paths to environment files.",
)
return parser
def get_env_files(args: argparse.Namespace) -> list[Path]:
files: list[Path] = []
LOGGER.error(args.glob)
for glob in args.glob:
files += list(Path.cwd().glob(glob))
for file in args.files:
if not file.exists():
raise EnvFileNotFoundError(file)
if not file.is_file():
raise NotAFileError(file)
files.append(file)
if not files:
files = [file for file in ENV_DEFAULT_PATHS if file.exists() and file.is_file()]
return [file.resolve() for file in files]
def main(argv: Sequence[str] | None = None):
try:
parser = get_argument_parser()
args = parser.parse_args(argv)
files = get_env_files(args)
if not files:
raise NoEnvFileError()
for file in files:
env = EnvironmentFile(file)
new_env = EnvironmentFile(file)
if env.exists():
for dep in env.get_installed_dependencies():
if dep not in env.dependencies:
LOGGER.error(f"found missing dependency: {dep}")
new_env.dependencies.append(dep)
new_env.dependencies.sort()
if new_env != env:
LOGGER.error("environment changed!")
new_env.write()
else:
LOGGER.info("environment did not change.")
except CondaHookError as e:
LOGGER.error(f"conda-hooks error: {e}")
if __name__ == "__main__":
main()
| from __future__ import annotations
import argparse
import logging
import os
from pathlib import Path
from typing import Sequence
from .environment import ENV_DEFAULT_PATHS, EnvironmentFile
from .errors import CondaHookError, EnvFileNotFoundError, NoEnvFileError, NotAFileError
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
LOGGER = logging.getLogger(__name__)
def get_argument_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description=(
"Check if one or multiple conda environment files"
" are up-to-date with the installed packages."
),
)
parser.add_argument(
"-g",
"--glob",
type=str,
action="append",
default=[],
help=(
"Globbing pattern used to find environment files"
" (can be specified multiple times)."
),
)
parser.add_argument(
"files",
type=Path,
nargs="*",
default=[],
help="Paths to environment files.",
)
return parser
def get_env_files(args: argparse.Namespace) -> list[Path]:
files: list[Path] = []
LOGGER.error(args.glob)
for glob in args.glob:
files += list(Path.cwd().glob(glob))
for file in args.files:
if not file.exists():
raise EnvFileNotFoundError(file)
if not file.is_file():
raise NotAFileError(file)
files.append(file)
if not files:
files = [file for file in ENV_DEFAULT_PATHS if file.exists() and file.is_file()]
return [file.resolve() for file in files]
def main(argv: Sequence[str] | None = None):
try:
parser = get_argument_parser()
args = parser.parse_args(argv)
files = get_env_files(args)
if not files:
raise NoEnvFileError()
for file in files:
env = EnvironmentFile(file)
new_env = EnvironmentFile(file)
if env.exists():
for dep in env.get_installed_dependencies():
if dep not in env.dependencies:
LOGGER.error(f"found missing dependency: {dep}")
new_env.dependencies.append(dep)
new_env.dependencies.sort()
if new_env != env:
LOGGER.error("environment changed!")
new_env.write()
else:
LOGGER.info("environment did not change.")
except CondaHookError as e:
LOGGER.error(f"conda-hooks error: {e}")
if __name__ == "__main__":
main() | none | 1 | 2.429463 | 2 | |
Test/FunctionalTests/FsmEditorTestScripts/ExpectedFailureNoSuccessMessage.py | jethac/ATF | 821 | 6623360 | <gh_stars>100-1000
#Copyright (c) 2014 Sony Computer Entertainment America LLC. See License.txt.
import sys
sys.path.append("./CommonTestScripts")
import Test
Test.Equal(1, 1)
Test.Equal(2, 2)
#Intentionally commented, we want this script to fail
#print Test.SUCCESS | #Copyright (c) 2014 Sony Computer Entertainment America LLC. See License.txt.
import sys
sys.path.append("./CommonTestScripts")
import Test
Test.Equal(1, 1)
Test.Equal(2, 2)
#Intentionally commented, we want this script to fail
#print Test.SUCCESS | en | 0.661123 | #Copyright (c) 2014 Sony Computer Entertainment America LLC. See License.txt. #Intentionally commented, we want this script to fail #print Test.SUCCESS | 1.363355 | 1 |
Python/files/sortsvnls.py | ebouaziz/miscripts | 0 | 6623361 | <reponame>ebouaziz/miscripts
#!/usr/bin/env python2.7
# Deal with various SVN date output to sort SVN ls by date
import re
import sys
from datetime import timedelta
from time import localtime, mktime, strptime, time
svncre = re.compile(r'\s*(?P<rev>\d+)\s+(?P<author>\w+)\s+'
r'(?:(?P<mdate>\w{3}\s\d{2}\s\d{2}:\d{2})|'
r'(?P<ydate>\w{3}\s\d{2}\s\d{4}))\s+'
r'(?P<path>.*)$')
entries = [svncre.match(l) for l in sys.stdin]
items = [mo.groupdict() for mo in entries if mo]
def totime(x):
if x['mdate']:
t = strptime("%d %s" % (localtime().tm_year, x['mdate']),
"%Y %b %d %H:%M")
elif x['ydate']:
t = strptime(x['ydate'], "%b %d %Y")
else:
raise ValueError('Invalid date')
return t
now = time()
author = ''
authors = set()
for item in sorted(items, key=lambda x: (x['author'], mktime(totime(x)))):
delta = timedelta(seconds=now-mktime(totime(item)))
if delta.days > 30:
if author != item['author']:
print ''
author = item['author']
authors.add(author)
print '%s:' % author
print ' {: <8} {: >12} days'.format(item['path'], delta.days)
print ''
print ', '.join(["<EMAIL>" % a for a in sorted(authors)])
| #!/usr/bin/env python2.7
# Deal with various SVN date output to sort SVN ls by date
import re
import sys
from datetime import timedelta
from time import localtime, mktime, strptime, time
svncre = re.compile(r'\s*(?P<rev>\d+)\s+(?P<author>\w+)\s+'
r'(?:(?P<mdate>\w{3}\s\d{2}\s\d{2}:\d{2})|'
r'(?P<ydate>\w{3}\s\d{2}\s\d{4}))\s+'
r'(?P<path>.*)$')
entries = [svncre.match(l) for l in sys.stdin]
items = [mo.groupdict() for mo in entries if mo]
def totime(x):
if x['mdate']:
t = strptime("%d %s" % (localtime().tm_year, x['mdate']),
"%Y %b %d %H:%M")
elif x['ydate']:
t = strptime(x['ydate'], "%b %d %Y")
else:
raise ValueError('Invalid date')
return t
now = time()
author = ''
authors = set()
for item in sorted(items, key=lambda x: (x['author'], mktime(totime(x)))):
delta = timedelta(seconds=now-mktime(totime(item)))
if delta.days > 30:
if author != item['author']:
print ''
author = item['author']
authors.add(author)
print '%s:' % author
print ' {: <8} {: >12} days'.format(item['path'], delta.days)
print ''
print ', '.join(["<EMAIL>" % a for a in sorted(authors)]) | en | 0.759126 | #!/usr/bin/env python2.7 # Deal with various SVN date output to sort SVN ls by date | 2.91333 | 3 |
kaggleQuora.py | Hyagoro/Quora_Question_Pairs | 0 | 6623362 | <reponame>Hyagoro/Quora_Question_Pairs
import gensim
import csv
from pyemd import emd
import numpy as np
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn import preprocessing
from itertools import islice
from sklearn.decomposition import PCA
import time
# merge with toto.py to continue the training
# model = gensim.models.Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
# model = gensim.models.Word2Vec \
# .load_word2vec_format('/home/steve/Downloads/GoogleNews-vectors-negative300.bin.gz', binary=True)
#
# toto = model.most_similar(positive=['woman', 'king'], negative=['man'], topn=1)
# print(toto)
#
#
# def myfunc1(w2v_model, input_word="apple"):
# print(w2v_model.similar_by_word(input_word))
#
#
# for word in ["apple", "man"]:
# myfunc1(model, word)
def timing(f):
def wrap(*args):
time1 = time.time()
ret = f(*args)
time2 = time.time()
print('%s function took %0.3f ms' % (f.__name__, (time2-time1)*1000.0))
return ret
return wrap
def wmd(v_1, v_2, D_):
v_1 = v_1.astype(np.double)
v_2 = v_2.astype(np.double)
v_1 /= v_1.sum()
v_2 /= v_2.sum()
D_ = D_.astype(np.double)
D_ /= D_.max() # just for comparison purposes
print("d(doc_1, doc_2) = {:.2f}".format(emd(v_1, v_2, D_)))
# --- Kaggle Quora duplicate-question pipeline -------------------------------
# Reads the train CSV, trains/loads a Doc2Vec model, builds per-pair feature
# vectors, then trains and scores an MLP classifier on the duplicate label.
X_text = []            # TaggedDocuments used to train Doc2Vec
y = []                 # labels, one per question pair
documents_couple = []  # raw (question1, question2) text pairs
print("Read CSV file")
with open('/home/steve/Documents/KaggleQuora/train.csv', 'r') as csvfile:
    spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
    # islice(..., 1, None) skips the header row
    for row in islice(spamreader, 1, None):
        # assumes Kaggle Quora layout: row[3]/row[4] are the two question
        # texts and row[5] the is_duplicate label -- TODO confirm
        documents_couple.append([row[3], row[4]])
        # NOTE(review): gensim's TaggedDocument expects a *list* of tags;
        # passing the bare string row[5] may tag per-character -- verify.
        X_text.append(gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(row[3]), row[5]))
        X_text.append(gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(row[4]), row[5]))
        y.append(row[5])
model = gensim.models.doc2vec.Doc2Vec(size=100, window=5, min_count=5, workers=4)
# Best-effort cache: try to load a previously trained model; on any failure,
# train from scratch and save it for next time.
try:
    print("Load W2V")
    model = gensim.models.doc2vec.Doc2Vec.load("/home/steve/Documents/KaggleQuora/w2v")
except Exception as e:
    print(e)
    print("Train W2V")
    model.build_vocab(X_text)
    model.train(X_text)
    model.save("/home/steve/Documents/KaggleQuora/w2v")
print("Infer W2V vectors")
X = []
for document in documents_couple:
    # feature vector for a pair = concatenation of both questions' embeddings
    l1 = model.infer_vector(document[0])
    l2 = model.infer_vector(document[1])
    tmp = []
    tmp.extend(l1)
    tmp.extend(l2)
    X.append(tmp)
print("Normalize data")
min_max_scaler = preprocessing.MinMaxScaler()
X = min_max_scaler.fit_transform(X)
print("PCA")
pca = PCA(n_components=200)  # reduce the 200-dim concatenated features
X = pca.fit_transform(X)
print("Split dataset")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=10)
print("Train MLPClassifier")
classifier = MLPClassifier(solver='lbfgs', alpha=1e-7, hidden_layer_sizes=(100,), random_state=1)
@timing
def train_classifier(classifier_, X_train_, y_train_):
    # wrapped in @timing purely to report how long the fit takes
    classifier_.fit(X_train_, y_train_)
train_classifier(classifier, X_train, y_train)
# classifier.fit(X_train, y_train)
print("Predict")
y_pred = classifier.predict(X_test)
print("Scores")
print(classification_report(y_test, y_pred))
| import gensim
import csv
from pyemd import emd
import numpy as np
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn import preprocessing
from itertools import islice
from sklearn.decomposition import PCA
import time
# merge with toto.py to continue the training
# model = gensim.models.Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
# model = gensim.models.Word2Vec \
# .load_word2vec_format('/home/steve/Downloads/GoogleNews-vectors-negative300.bin.gz', binary=True)
#
# toto = model.most_similar(positive=['woman', 'king'], negative=['man'], topn=1)
# print(toto)
#
#
# def myfunc1(w2v_model, input_word="apple"):
# print(w2v_model.similar_by_word(input_word))
#
#
# for word in ["apple", "man"]:
# myfunc1(model, word)
def timing(f):
def wrap(*args):
time1 = time.time()
ret = f(*args)
time2 = time.time()
print('%s function took %0.3f ms' % (f.__name__, (time2-time1)*1000.0))
return ret
return wrap
def wmd(v_1, v_2, D_):
v_1 = v_1.astype(np.double)
v_2 = v_2.astype(np.double)
v_1 /= v_1.sum()
v_2 /= v_2.sum()
D_ = D_.astype(np.double)
D_ /= D_.max() # just for comparison purposes
print("d(doc_1, doc_2) = {:.2f}".format(emd(v_1, v_2, D_)))
X_text = []
y = []
documents_couple = []
print("Read CSV file")
with open('/home/steve/Documents/KaggleQuora/train.csv', 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in islice(spamreader, 1, None):
documents_couple.append([row[3], row[4]])
X_text.append(gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(row[3]), row[5]))
X_text.append(gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(row[4]), row[5]))
y.append(row[5])
model = gensim.models.doc2vec.Doc2Vec(size=100, window=5, min_count=5, workers=4)
try:
print("Load W2V")
model = gensim.models.doc2vec.Doc2Vec.load("/home/steve/Documents/KaggleQuora/w2v")
except Exception as e:
print(e)
print("Train W2V")
model.build_vocab(X_text)
model.train(X_text)
model.save("/home/steve/Documents/KaggleQuora/w2v")
print("Infer W2V vectors")
X = []
for document in documents_couple:
l1 = model.infer_vector(document[0])
l2 = model.infer_vector(document[1])
tmp = []
tmp.extend(l1)
tmp.extend(l2)
X.append(tmp)
print("Normalize data")
min_max_scaler = preprocessing.MinMaxScaler()
X = min_max_scaler.fit_transform(X)
print("PCA")
pca = PCA(n_components=200)
X = pca.fit_transform(X)
print("Split dataset")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=10)
print("Train MLPClassifier")
classifier = MLPClassifier(solver='lbfgs', alpha=1e-7, hidden_layer_sizes=(100,), random_state=1)
@timing
def train_classifier(classifier_, X_train_, y_train_):
classifier_.fit(X_train_, y_train_)
train_classifier(classifier, X_train, y_train)
# classifier.fit(X_train, y_train)
print("Predict")
y_pred = classifier.predict(X_test)
print("Scores")
print(classification_report(y_test, y_pred)) | en | 0.480115 | # merge with toto.py to continue the training # model = gensim.models.Word2Vec(sentences, size=100, window=5, min_count=5, workers=4) # model = gensim.models.Word2Vec \ # .load_word2vec_format('/home/steve/Downloads/GoogleNews-vectors-negative300.bin.gz', binary=True) # # toto = model.most_similar(positive=['woman', 'king'], negative=['man'], topn=1) # print(toto) # # # def myfunc1(w2v_model, input_word="apple"): # print(w2v_model.similar_by_word(input_word)) # # # for word in ["apple", "man"]: # myfunc1(model, word) # just for comparison purposes # classifier.fit(X_train, y_train) | 2.559861 | 3 |
tkinter/__canvas__/canvas-scrollbar/main.py | whitmans-max/python-examples | 140 | 6623363 | <reponame>whitmans-max/python-examples
import tkinter as tk
# Demo: a vertically scrollable Canvas holding two embedded frames.
# date: 2019.05.01
# author: Bartłomiej 'furas' Burek
# You have to add something to canvas to use scrollbar.
# You have to use `scrollregion=` after you put items in canvas
# or you can use `after` to do it after tkinter shows window.
#def resize():
#    canvas.configure(scrollregion=canvas.bbox("all"))
root = tk.Tk()
frame1 = tk.Frame(root, width=900, height=800)
frame1.pack(expand=True, fill='both')
canvas = tk.Canvas(frame1, width=900, height= 900)
canvas.pack(side='left', fill='both', expand=True)
# scrollbar drives the canvas's vertical view...
vsb = tk.Scrollbar(frame1, orient='vertical')
vsb.pack(fill='y', side='right', expand=False)
vsb.configure(command=canvas.yview)
# ...and two colored frames give the canvas content taller than the window
item_1 = tk.Frame(canvas, bg='red', width=500, height=500)
canvas.create_window(0, 0, window=item_1, anchor='nw')
item_2 = tk.Frame(canvas, bg='green', width=500, height=500)
canvas.create_window(500, 500, window=item_2, anchor='nw')
# scrollregion must be set AFTER the items exist, so bbox covers them
canvas.configure(yscrollcommand=vsb.set, scrollregion=canvas.bbox("all"))
#root.after(100, resize)
root.mainloop()
| import tkinter as tk
# date: 2019.05.01
# author: Bartłomiej 'furas' Burek
# You have to add something to canvas to use scrollbar.
# You have to use `scrollregion=` after you put items in canvas
# or you can use `after` to do it after tkinter shows window.
#def resize():
# canvas.configure(scrollregion=canvas.bbox("all"))
root = tk.Tk()
frame1 = tk.Frame(root, width=900, height=800)
frame1.pack(expand=True, fill='both')
canvas = tk.Canvas(frame1, width=900, height= 900)
canvas.pack(side='left', fill='both', expand=True)
vsb = tk.Scrollbar(frame1, orient='vertical')
vsb.pack(fill='y', side='right', expand=False)
vsb.configure(command=canvas.yview)
item_1 = tk.Frame(canvas, bg='red', width=500, height=500)
canvas.create_window(0, 0, window=item_1, anchor='nw')
item_2 = tk.Frame(canvas, bg='green', width=500, height=500)
canvas.create_window(500, 500, window=item_2, anchor='nw')
canvas.configure(yscrollcommand=vsb.set, scrollregion=canvas.bbox("all"))
#root.after(100, resize)
root.mainloop() | en | 0.703731 | # date: 2019.05.01 # author: Bartłomiej 'furas' Burek # You have to add something to canvas to use scrollbar. # You have to use `scrollregion=` after you put items in canvas # or you can use `after` to do it after tkinter shows window. #def resize(): # canvas.configure(scrollregion=canvas.bbox("all")) #root.after(100, resize) | 4.052551 | 4 |
djangopypi2/apps/pypi_metadata/models.py | MediaMath/djangopypi2 | 12 | 6623364 | <reponame>MediaMath/djangopypi2
from django.db import models
from django.utils.translation import ugettext_lazy as _
def ClassifierSerializer(o):
    """Serialize a Classifier instance to its name string; pass anything else through."""
    return o.name if isinstance(o, Classifier) else o
class Classifier(models.Model):
    """A classifier string; the text itself serves as the primary key."""
    name = models.CharField(max_length=255, primary_key=True)
    class Meta:
        verbose_name = _(u"classifier")
        verbose_name_plural = _(u"classifiers")
        ordering = ('name',)
    def __unicode__(self):
        # display the raw classifier text
        return self.name
class PythonVersion(models.Model):
    """A Python major.minor version pair; each pair stored at most once."""
    major = models.IntegerField()
    minor = models.IntegerField()
    class Meta:
        ordering = ('major', 'minor')
        verbose_name = _(u'python version')
        verbose_name_plural = _(u'python versions')
        unique_together = ('major', 'minor')
    def __unicode__(self):
        # rendered as "major.minor", e.g. "2.7"
        return '%s.%s' % (self.major, self.minor)
class PlatformName(models.Model):
    """Lookup table mapping a short platform key to a display name."""
    key = models.CharField(max_length=32, primary_key=True)
    name = models.CharField(max_length=32)
    class Meta:
        verbose_name = _(u'platform name')
        verbose_name_plural = _(u'platform names')
        ordering = ('name', )
    def __unicode__(self):
        return self.name
class Architecture(models.Model):
    """Lookup table mapping an architecture key to a display name."""
    key = models.CharField(max_length=16, primary_key=True)
    name = models.CharField(max_length=64)
    class Meta:
        verbose_name = _(u'architecture')
        verbose_name_plural = _(u'architectures')
        ordering = ('name', )
    def __unicode__(self):
        return self.name
class DistributionType(models.Model):
    """Lookup table mapping a distribution-type key to a display name."""
    key = models.CharField(max_length=32, primary_key=True)
    name = models.CharField(max_length=64)
    class Meta:
        verbose_name = _(u'distribution type')
        verbose_name_plural = _(u'distribution types')
        ordering = ('name', )
    def __unicode__(self):
        return self.name
| from django.db import models
from django.utils.translation import ugettext_lazy as _
def ClassifierSerializer(o):
if isinstance(o, Classifier):
return o.name
return o
class Classifier(models.Model):
name = models.CharField(max_length=255, primary_key=True)
class Meta:
verbose_name = _(u"classifier")
verbose_name_plural = _(u"classifiers")
ordering = ('name',)
def __unicode__(self):
return self.name
class PythonVersion(models.Model):
major = models.IntegerField()
minor = models.IntegerField()
class Meta:
ordering = ('major', 'minor')
verbose_name = _(u'python version')
verbose_name_plural = _(u'python versions')
unique_together = ('major', 'minor')
def __unicode__(self):
return '%s.%s' % (self.major, self.minor)
class PlatformName(models.Model):
key = models.CharField(max_length=32, primary_key=True)
name = models.CharField(max_length=32)
class Meta:
verbose_name = _(u'platform name')
verbose_name_plural = _(u'platform names')
ordering = ('name', )
def __unicode__(self):
return self.name
class Architecture(models.Model):
key = models.CharField(max_length=16, primary_key=True)
name = models.CharField(max_length=64)
class Meta:
verbose_name = _(u'architecture')
verbose_name_plural = _(u'architectures')
ordering = ('name', )
def __unicode__(self):
return self.name
class DistributionType(models.Model):
key = models.CharField(max_length=32, primary_key=True)
name = models.CharField(max_length=64)
class Meta:
verbose_name = _(u'distribution type')
verbose_name_plural = _(u'distribution types')
ordering = ('name', )
def __unicode__(self):
return self.name | none | 1 | 2.259209 | 2 | |
tools_plot/plt_training_surv.py | rbn42/LearningToDrive | 1 | 6623365 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Greeter.
Usage:
launcher.py <path>
Options:
-h --help Show this screen.
"""
from docopt import docopt
arguments = docopt(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.transforms as mtransforms
import matplotlib.text as mtext
import glob
# Parse the log: each line containing 'True' counts as a success (1),
# each line containing 'False' as a failure (0).
l = []
with open(arguments['<path>']) as file:  # fix: close the file when done
    for s in file:
        if 'True' in s:
            l.append(1)
        elif 'False' in s:
            l.append(0)
lt = np.asarray(l)
lf = 1 - lt  # complement (failure indicator); kept for interactive inspection
# fix: the window length must be an int -- np.ones(50.0) is rejected by modern NumPy
n = 50
# moving average of the success rate over an n-sample window (valid region only)
lt = np.convolve(lt, np.ones(n), 'valid') / n
plt.plot(lt)
plt.show()
| #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Greeter.
Usage:
launcher.py <path>
Options:
-h --help Show this screen.
"""
from docopt import docopt
arguments = docopt(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.transforms as mtransforms
import matplotlib.text as mtext
import glob
file=open(arguments['<path>'])
l=[]
for s in file:
if 'True' in s:
l.append(1)
elif 'False' in s:
l.append(0)
lt=np.asarray(l)
lf=1-lt
n=50.0
lt=np.convolve(lt,np.ones(n),'valid')/n
plt.plot(lt)
plt.show()
| en | 0.409984 | #!/usr/bin/env python # -*- coding: UTF-8 -*- Greeter. Usage: launcher.py <path> Options: -h --help Show this screen. | 2.867101 | 3 |
soundsep/core/stft/lattice.py | theunissenlab/soundsep2 | 0 | 6623366 | import warnings
from dataclasses import dataclass
from typing import List, Optional
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view
from soundsep.core.utils import ceil_div
@dataclass
class Bound:
    """A half-open integer interval [start, stop) with scalar shift/scale arithmetic."""
    start: int
    stop: int
    def __sub__(self, other: int):
        # shifting left by `other` is the same as adding its negation
        return self + (-other)
    def __add__(self, other: int):
        shifted = (self.start + other, self.stop + other)
        return Bound(*shifted)
    def __mul__(self, other: int):
        return Bound(*(edge * other for edge in (self.start, self.stop)))
    def __floordiv__(self, other: int):
        return Bound(*(edge // other for edge in (self.start, self.stop)))
    def __eq__(self, other: 'Bound'):
        return (self.start, self.stop) == (other.start, other.stop)
    def __contains__(self, idx: int):
        # start is inclusive, stop exclusive
        return self.start <= idx < self.stop
@dataclass
class Lattice:
    """The infinite set of integers congruent to `offset` modulo `step`."""
    offset: int
    step: int
    def __hash__(self):
        # Bug fix: this previously returned the bare tuple (offset, step) built
        # from unbound names -- a NameError at call time, and not an int anyway.
        return hash((self.offset, self.step))
    def __eq__(self, other: 'Lattice'):
        return self.offset == other.offset and self.step == other.step
    def __mul__(self, other: int):
        return self.scale_up(other)
    def __floordiv__(self, other: int):
        return self.scale_down(other)
    def __iter__(self):
        # an unbounded lattice cannot be enumerated safely
        raise NotImplementedError("Cannot iterate over infinite lattice. Use BoundedLattice instead")
    def scale_up(self, scale_factor: int):
        """Scales the Lattice up relative to zero"""
        return Lattice(offset=self.offset * scale_factor, step=self.step * scale_factor)
    def scale_down(self, scale_factor: int):
        """Scale the Lattice down relative to zero

        If scale_factor does not divide evenly into step and/or offset, does
        floor division but shows a warning.
        """
        if self.step % scale_factor or self.offset % scale_factor:
            warnings.warn("Scaling down a lattice by an uneven multiple of step or offset")
        return Lattice(offset=self.offset // scale_factor, step=self.step // scale_factor)
    def __contains__(self, idx: int):
        # idx lies on the lattice iff it is offset plus an integer multiple of step
        return (idx - self.offset) % self.step == 0
    def with_bound(self, bound: 'Bound') -> 'BoundedLattice':
        """Return this lattice restricted to the given Bound."""
        return BoundedLattice(self.offset, self.step, bound)
@dataclass
class BoundedLattice(Lattice):
    """A Lattice restricted to the half-open window given by `bound`."""
    # TODO: enforce that the bound's starting index lies on the lattice
    bound: Bound
    def __len__(self):
        # number of lattice points falling inside the bound
        return ceil_div(self.bound.stop - ceil(max(self.bound.start, self.offset), self), self.step)
    def __iter__(self):
        for i in overlapping_range(self.bound.start, self.bound.stop, self):
            yield i
    def __eq__(self, other: 'BoundedLattice'):
        return super().__eq__(other) and self.bound == other.bound
    def __getitem__(self, idx: 'Union[int, slice]'):
        if isinstance(idx, int):
            return ceil(self.bound.start + idx * self.step, self)
        elif isinstance(idx, slice):
            # Bug fix: this previously indexed with the builtin `slice` type
            # (`list(self)[slice]`), which always raised TypeError.
            return list(self)[idx]
    def to_position(self, idx: int):
        """Map a index in StftIndex coordinates to a integer index [0, len(self))
        """
        if (idx - self.offset) % self.step:
            raise ValueError("Given index does not lie on the lattice: {} {}".format(idx, self))
        return (idx - self[0]) // self.step
    def scale_up(self, scale_factor: int):
        """Scales the Lattice up relative to zero"""
        return BoundedLattice(
            offset=self.offset * scale_factor,
            step=self.step * scale_factor,
            bound=self.bound * scale_factor,
        )
    def scale_down(self, scale_factor: int):
        """Scale the Lattice down relative to zero

        If scale_factor does not divide evenly into step and/or offset, does
        floor division but shows a warning.
        """
        if self.step % scale_factor or self.offset % scale_factor:
            warnings.warn("Scaling down a lattice by an uneven multiple of step or offset")
        return BoundedLattice(
            offset=self.offset // scale_factor,
            step=self.step // scale_factor,
            bound=self.bound // scale_factor,
        )
    def to_slice(self, relative_to: int = 0) -> slice:
        """Slice over this lattice's points within its own bound."""
        return slice(*overlapping_slice(self.bound.start, self.bound.stop, self, relative_to))
    def without_bound(self) -> 'Lattice':
        """Drop the bound, returning the underlying infinite Lattice."""
        return Lattice(offset=self.offset, step=self.step)
    def __contains__(self, idx: int):
        # must lie on the lattice AND inside the bound
        return super().__contains__(idx) and self.bound.__contains__(idx)
def floor(idx: int, lattice: Lattice):
    """Largest lattice point (offset + k*step) that is <= idx."""
    whole_steps = (idx - lattice.offset) // lattice.step
    return lattice.offset + whole_steps * lattice.step
def ceil(idx: int, lattice: Lattice):
    """Smallest lattice point (offset + k*step) that is >= idx."""
    whole_steps = ceil_div(idx - lattice.offset, lattice.step)
    return lattice.offset + whole_steps * lattice.step
def overlapping_range(i0: int, i1: int, lattice: Lattice, relative_to: Optional[int] = 0):
    """Range over the lattice points overlapping [i0, i1), shifted by -relative_to."""
    return range(*overlapping_slice(i0, i1, lattice, relative_to=relative_to))
def overlapping_slice(i0: int, i1: int, lattice: Lattice, relative_to: Optional[int] = 0):
    """Gets the coordinates where a lattice overlaps the range [i0, i1)

    Returns the slice values (start, stop, step) where start and stop are in the
    global coordinates of i0
    """
    if isinstance(lattice, BoundedLattice):
        # clamp the window to the lattice's own bound before snapping up to
        # the nearest lattice point
        start = ceil(max(i0, lattice.bound.start), lattice)
        # max(start, ...) guarantees stop never precedes start (empty slice)
        stop = ceil(max(start, min(i1, lattice.bound.stop)), lattice)
    else:
        start = ceil(i0, lattice)
        stop = ceil(i1, lattice)
    return start - relative_to, stop - relative_to, lattice.step
def test_flood(i0: int, i1: int, layers: List[Lattice]):
    """Test function to fill a range at given indices

    Create an array with each layer of a lattice filling in its index in its own spots
    """
    output = np.zeros((i1 - i0))
    for i, layer in enumerate(layers):
        # layer i paints value i+1 on its lattice points; later layers overwrite
        slice_ = slice(*overlapping_slice(i0, i1, layer, relative_to=i0))
        output[slice_] = i + 1
        # yields the cumulative fill after each layer is applied
        yield output
| import warnings
from dataclasses import dataclass
from typing import List, Optional
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view
from soundsep.core.utils import ceil_div
@dataclass
class Bound:
start: int
stop: int
def __sub__(self, other: int):
return Bound(self.start - other, self.stop - other)
def __add__(self, other: int):
return Bound(self.start + other, self.stop + other)
def __mul__(self, other: int):
return Bound(self.start * other, self.stop * other)
def __floordiv__(self, other: int):
return Bound(self.start // other, self.stop // other)
def __eq__(self, other: 'Bound'):
return self.start == other.start and self.stop == other.stop
def __contains__(self, idx: int):
return self.start <= idx < self.stop
@dataclass
class Lattice:
offset: int
step: int
def __hash__(self):
return (offset, step)
def __eq__(self, other: 'Lattice'):
return self.offset == other.offset and self.step == other.step
def __mul__(self, other: int):
return self.scale_up(other)
def __floordiv__(self, other: int):
return self.scale_down(other)
def __iter__(self):
raise NotImplementedError("Cannot iterate over infinite lattice. Use BoundedLattice instead")
# next_ = offset
# while True:
# yield next_
# next_ += step
def scale_up(self, scale_factor: int):
"""Scales the Lattice up relative to zero"""
return Lattice(offset=self.offset * scale_factor, step=self.step * scale_factor)
def scale_down(self, scale_factor: int):
"""Scale the Lattice down relative to zero
If scale_factor does not divide evenly into step and/or offset, does
floor division but shows a warning.
"""
if self.step % scale_factor or self.offset % scale_factor:
warnings.warn("Scaling down a lattice by an uneven multiple of step or offset")
return Lattice(offset=self.offset // scale_factor, step=self.step // scale_factor)
def __contains__(self, idx: int):
return (idx - self.offset) % self.step == 0
def with_bound(self, bound: 'Bound') -> 'BoundedLattice':
return BoundedLattice(self.offset, self.step, bound)
@dataclass
class BoundedLattice(Lattice):
# TODO: enforce that the bound's starting inde
bound: Bound
def __len__(self):
return ceil_div(self.bound.stop - ceil(max(self.bound.start, self.offset), self), self.step)
def __iter__(self):
for i in overlapping_range(self.bound.start, self.bound.stop, self):
yield i
def __eq__(self, other: 'BoundedLattice'):
return super().__eq__(other) and self.bound == other.bound
def __getitem__(self, idx: 'Union[int, slice]'):
if isinstance(idx, int):
return ceil(self.bound.start + idx * self.step, self)
elif isinstance(idx, slice):
return list(self)[slice]
def to_position(self, idx: int):
"""Map a index in StftIndex coordinates to a integer index [0, len(self))
"""
if (idx - self.offset) % self.step:
raise ValueError("Given index does not lie on the lattice: {} {}".format(idx, self))
return (idx - self[0]) // self.step
def scale_up(self, scale_factor: int):
"""Scales the Lattice up relative to zero"""
return BoundedLattice(
offset=self.offset * scale_factor,
step=self.step * scale_factor,
bound=self.bound * scale_factor,
)
def scale_down(self, scale_factor: int):
"""Scale the Lattice down relative to zero
If scale_factor does not divide evenly into step and/or offset, does
floor division but shows a warning.
"""
if self.step % scale_factor or self.offset % scale_factor:
warnings.warn("Scaling down a lattice by an uneven multiple of step or offset")
return BoundedLattice(
offset=self.offset // scale_factor,
step=self.step // scale_factor,
bound=self.bound // scale_factor,
)
def to_slice(self, relative_to: int = 0) -> slice:
return slice(*overlapping_slice(self.bound.start, self.bound.stop, self, relative_to))
def without_bound(self) -> 'Lattice':
return Lattice(offset=self.offset, step=self.step)
def __contains__(self, idx: int):
return super().__contains__(idx) and self.bound.__contains__(idx)
def floor(idx: int, lattice: Lattice):
"""
Returns floor of the given index when projected onto a lattice defined by (offset, step)
"""
return lattice.offset + lattice.step * ((idx - lattice.offset) // lattice.step)
def ceil(idx: int, lattice: Lattice):
"""
Returns ceil of the given index when projected onto a lattice defined by (offset, step)
"""
return lattice.offset + lattice.step * ceil_div(idx - lattice.offset, lattice.step)
def overlapping_range(i0: int, i1: int, lattice: Lattice, relative_to: Optional[int] = 0):
"""
Returns the indicies where the lattice overlaps the range i0, i1
"""
start, stop, step = overlapping_slice(i0, i1, lattice, relative_to=relative_to)
return range(start, stop, step)
def overlapping_slice(i0: int, i1: int, lattice: Lattice, relative_to: Optional[int] = 0):
"""Gets the coordinates where a lattice overlaps the range [i0, i1)
Returns the slice values (start, stop, step) where start and stop are in the
global coordinates of i0
"""
if isinstance(lattice, BoundedLattice):
start = ceil(max(i0, lattice.bound.start), lattice)
stop = ceil(max(start, min(i1, lattice.bound.stop)), lattice)
else:
start = ceil(i0, lattice)
stop = ceil(i1, lattice)
return start - relative_to, stop - relative_to, lattice.step
def test_flood(i0: int, i1: int, layers: List[Lattice]):
"""Test function to fill a range at given indices
Create an array with each layer of a lattice filling in its index in its own spots
"""
output = np.zeros((i1 - i0))
for i, layer in enumerate(layers):
slice_ = slice(*overlapping_slice(i0, i1, layer, relative_to=i0))
output[slice_] = i + 1
yield output
| en | 0.797967 | # next_ = offset # while True: # yield next_ # next_ += step Scales the Lattice up relative to zero Scale the Lattice down relative to zero If scale_factor does not divide evenly into step and/or offset, does floor division but shows a warning. # TODO: enforce that the bound's starting inde Map a index in StftIndex coordinates to a integer index [0, len(self)) Scales the Lattice up relative to zero Scale the Lattice down relative to zero If scale_factor does not divide evenly into step and/or offset, does floor division but shows a warning. Returns floor of the given index when projected onto a lattice defined by (offset, step) Returns ceil of the given index when projected onto a lattice defined by (offset, step) Returns the indicies where the lattice overlaps the range i0, i1 Gets the coordinates where a lattice overlaps the range [i0, i1) Returns the slice values (start, stop, step) where start and stop are in the global coordinates of i0 Test function to fill a range at given indices Create an array with each layer of a lattice filling in its index in its own spots | 2.737004 | 3 |
project08/CodeWriter.py | keystrega55/nand2tetris | 0 | 6623367 | from pathlib import Path
from typing import List
class CodeWriter:
    """Translates parsed VM commands into Hack assembly, appending to an .asm file."""
    def __init__(self, asm_file, vm_file) -> None:
        # asm_file: path of the output .asm file; vm_file: path of the input .vm file
        self.in_file = vm_file
        self.in_file_name = Path(vm_file).stem
        self.out_file = asm_file
        self.out_file_name = Path(asm_file).stem
        # counters for Boolean comparisons (make each EQ/GT/LT label unique)
        self.eq_count = 0
        self.gt_count = 0
        self.lt_count = 0
        self.call_count = 0  # makes each call's return label unique
        self.function_name = None # for function labels
        self.addresses = self.address_dict()
    # load M[address] to D
    def write_push(self, segment: str, index: int) -> None:
        """Emit assembly for `push segment index`: copy the value onto the stack."""
        self.write_line(f'// push {segment} {index}')
        self.resolve_address(segment, index)
        if segment == 'constant': # check
            self.write_line('D=A')  # a constant IS the address value itself
        else:
            self.write_line('D=M')  # otherwise read the resolved memory cell
        self.push_D_to_stack()
    # load D to M[address]
    def write_pop(self, segment: str, index: int) -> None:
        """Emit assembly for `pop segment index`: move top of stack into memory."""
        self.write_line(f'// pop {segment} {index}')
        self.resolve_address(segment, index)
        self.write_lines(
            [
                'D=A',
                '@R13', # store resolved address in R13
                'M=D'
            ]
        )
        self.pop_stack_to_D()
        self.write_lines(
            [
                '@R13',
                'A=M',
                'M=D'
            ]
        )
    def resolve_address(self, segment: str, index: int) -> None:
        """Emit assembly leaving A (and for base segments A=D+A) at the target cell.

        Raises ValueError for an unknown segment name.
        """
        address = self.addresses.get(segment)
        if segment == 'constant':
            self.write_line(f'@{str(index)}')
        elif segment == 'static':
            # statics become per-file assembly symbols: FileName.index
            self.write_line(f'@{self.in_file_name}.{str(index)}')
        elif segment in ['pointer', 'temp']:
            # fixed-base segments: direct register R(base + index)
            self.write_line(f'@R{str(address + index)}') # address type is int
        elif segment in ['local', 'argument', 'this', 'that']:
            # indirect segments: target = M[base register] + index
            self.write_lines(
                [
                    f'@{address}',
                    'D=M',
                    f'@{str(index)}',
                    'A=D+A' # D is segment base
                ]
            )
        else:
            self.raise_unknown_error(segment)
    def write_arithmetic(self, operation: str) -> None:
        """Emit assembly for an arithmetic/logical VM command.

        Binary ops pop one operand into D first; after decrement_sp, M holds the
        other operand (or the single operand for unary neg/not).
        """
        self.write_line(f'// {operation}')
        if operation not in ['neg', 'not']: # binary operators
            self.pop_stack_to_D()
        self.decrement_sp()
        self.set_A_to_sp()
        if operation == 'add': # arithmetic operators
            self.write_line('M=M+D')
        elif operation == 'sub':
            self.write_line('M=M-D')
        elif operation == 'and':
            self.write_line('M=M&D')
        elif operation == 'or':
            self.write_line('M=M|D')
        elif operation == 'neg':
            self.write_line('M=-M')
        elif operation == 'not':
            self.write_line('M=!M')
        elif operation == 'eq': # Boolean operators
            self.write_eq()
        elif operation == 'gt':
            self.write_gt()
        elif operation == 'lt':
            self.write_lt()
        else:
            self.raise_unknown_error(operation)
        self.increment_sp()  # result was written in place of the first operand
def write_eq(self) -> None:
self.write_lines(
[
'D=M-D',
f'@EQ.{self.eq_count}',
'D;JEQ'
]
)
self.set_A_to_sp()
self.write_lines(
[
'M=0', # False
f'@ENDEQ.{self.eq_count}',
'0;JMP',
f'(EQ.{self.eq_count})'
]
)
self.set_A_to_sp()
self.write_lines(
[
'M=-1', # True
f'(ENDEQ.{self.eq_count})'
]
)
self.eq_count += 1
def write_gt(self) -> None:
self.write_lines(
[
'D=M-D',
f'@GT.{self.gt_count}',
'D;JGT'
]
)
self.set_A_to_sp()
self.write_lines(
[
'M=0', # False
f'@ENDGT.{self.gt_count}',
'0;JMP',
f'(GT.{self.gt_count})'
]
)
self.set_A_to_sp()
self.write_lines(
[
'M=-1', # True
f'(ENDGT.{self.gt_count})'
]
)
self.gt_count += 1
def write_lt(self) -> None:
self.write_lines(
[
'D=M-D',
f'@LT.{self.lt_count}',
'D;JLT'
]
)
self.set_A_to_sp()
self.write_lines(
[
'M=0', # False
f'@ENDLT.{self.lt_count}',
'0;JMP',
f'(LT.{self.lt_count})'
]
)
self.set_A_to_sp()
self.write_lines(
[
'M=-1', # True
f'(ENDLT.{self.lt_count})'
]
)
self.lt_count += 1
    def create_label(self, label: str, function_type: str = None) -> str:
        """Build the assembly form of a VM label.

        'if'/'goto' produce an @-reference; anything else a (definition).
        NOTE(review): labels are scoped by the input file name, not by
        self.function_name -- the VM spec scopes labels per function; confirm
        this is intended before multi-function files with repeated labels.
        """
        asm_label = f'{self.in_file_name}${label}'
        if function_type in ['if', 'goto']:
            return f'@{asm_label}'
        else:
            return f'({asm_label})'
    def write_label(self, label: str) -> None:
        """Emit assembly for `label X`: define the jump target."""
        self.write_line(f'// label {label}')
        self.write_line(f'{self.create_label(label)}')
    def write_goto(self, label: str) -> None: # check
        """Emit assembly for `goto X`: unconditional jump."""
        self.write_line(f'// goto {label}')
        self.write_lines(
            [
                f"{self.create_label(label, 'goto')}",
                '0;JMP'
            ]
        )
    def write_if(self, label: str) -> None: # check
        """Emit assembly for `if-goto X`: jump when popped value is non-zero."""
        self.write_line(f'// if-goto {label}')
        self.pop_stack_to_D()
        self.write_lines(
            [
                f"{self.create_label(label, 'if')}",
                'D;JNE'
            ]
        )
    def write_function(self, function_name: str, num_locals: int) -> None: # check
        """Emit assembly for `function f k`: entry label plus k zeroed locals."""
        self.write_line(f'// function {function_name} {num_locals}')
        self.write_line(f'({function_name})')
        for i in range(num_locals): # push constant 0, num_locals times
            self.write_line('D=0')
            self.push_D_to_stack()
        # remember which function we are inside (for label generation)
        self.function_name = function_name
    def write_return(self) -> None: # debug if needed
        """Emit assembly for `return`: restore the caller's frame and jump back.

        Uses R13 as scratch for the frame base and R14 for the return address.
        """
        FRAME = 'R13'
        RET_ADDR = 'R14'
        self.write_line(f'// return')
        # FRAME = LCL
        self.write_lines(
            [
                '@LCL',
                'D=M',
                f'@{FRAME}',
                'M=D'
            ]
        )
        # RET = *(FRAME - 5) -- saved BEFORE *ARG=pop() clobbers it when nArgs==0
        self.write_lines(
            [
                f'@{FRAME}', # debug
                'D=M', # debug - save start of frame
                '@5',
                'D=D-A', # adjust address
                'A=D', # prepare to load value at address
                'D=M', # store value
                f'@{RET_ADDR}',
                'M=D' # save value
            ]
        )
        # *ARG = pop()  -- place the return value where the caller expects it
        self.pop_stack_to_D()
        self.write_lines(
            [
                '@ARG',
                'A=M',
                'M=D'
            ]
        )
        # SP = ARG + 1
        self.write_lines(
            [
                '@ARG',
                'D=M',
                '@SP',
                'M=D+1'
            ]
        )
        # THAT = *(FRAME - 1)
        # THIS = *(FRAME - 2)
        # ARG = *(FRAME - 3)
        # LCL = *(FRAME - 4)
        offset = 1
        for address in ['@THAT', '@THIS', '@ARG', '@LCL']:
            self.write_lines(
                [
                    f'@{FRAME}',
                    'D=M', # save start of frame
                    f'@{str(offset)}',
                    'D=D-A', # adjust address
                    'A=D', # prepare to load value at address
                    'D=M', # store value
                    f'{address}',
                    'M=D' # save value
                ]
            )
            offset += 1
        # goto RET_ADDR
        self.write_lines(
            [
                f'@{RET_ADDR}',
                'A=M',
                '0;JMP'
            ]
        )
    def write_call(self, function_name: str, num_args: int) -> None: # check
        """Emit assembly for `call f n`: save the caller frame and jump to f."""
        # unique return label
        RET_ADDR = f'{function_name}$Ret.{self.call_count}'
        self.write_line(f'// call {function_name} {num_args}')
        # push return address
        self.write_lines(
            [
                f'@{RET_ADDR}',
                'D=A'
            ]
        )
        self.push_D_to_stack()
        # push LCL, ARG, THIS, THAT
        for address in ['@LCL', '@ARG', '@THIS', '@THAT']:
            self.write_lines(
                [
                    f'{address}',
                    'D=M'
                ]
            )
            self.push_D_to_stack()
        # LCL = SP  (note: this leaves D == SP, which the next block relies on)
        self.write_lines(
            [
                '@SP',
                'D=M',
                '@LCL',
                'M=D'
            ]
        )
        # ARG = SP - 5 - nArgs  (D still holds SP from the block above)
        self.write_lines(
            [
                # '@SP', # debug - remove if needed
                # 'D=M', # debug - remove if needed
                f'@{str(num_args + 5)}',
                'D=D-A',
                '@ARG',
                'M=D'
            ]
        )
        # goto f
        self.write_lines(
            [
                f'@{function_name}',
                '0;JMP'
            ]
        )
        self.write_line(f'({RET_ADDR})') # (return_address)
        self.call_count += 1
    def write_bootstrap(self) -> None:
        """Emit the bootstrap code: SP = 256, then call Sys.init."""
        self.write_lines(
            [
                '@256',
                'D=A',
                '@SP',
                'M=D'
            ]
        )
        self.write_call('Sys.init', 0)
    def address_dict(self) -> dict:
        """Map VM segment names to their base register symbol or base address."""
        return {
            'local': 'LCL', # base address R1
            'argument': 'ARG', # R2
            'this': 'THIS', # R3
            'that': 'THAT', # R4
            'pointer': 3, # R3, R4
            'temp': 5, # R5 - R12 (R13 - R15 are free)
            'static': 16, # base addresses 16 - 255
        }
    def write_line(self, line: str) -> None:
        """Append one line of assembly to the output file."""
        with open(self.out_file, 'a') as f:
            f.write(f'{line}\n')
    def write_lines(self, lines: List[str]) -> None:
        """Append several lines of assembly to the output file."""
        with open(self.out_file, 'a') as f:
            for line in lines:
                f.write(f'{line}\n')
    def push_D_to_stack(self) -> None:
        # Push D value to top of stack, increment @SP
        self.set_A_to_sp()
        self.write_line('M=D') # Push D to stack
        self.increment_sp()
    def pop_stack_to_D(self) -> None:
        # Decrement @SP, pop top of stack to D
        self.decrement_sp()
        self.write_lines(
            [
                'A=M',
                'D=M'
            ]
        )
        # self.write_lines( # more efficient
        #     [
        #         '@SP',
        #         'AM=M-1',
        #         'D=M'
        #     ]
        # )
    def increment_sp(self) -> None:
        # SP = SP + 1
        self.write_lines(['@SP',
                          'M=M+1'])
    def decrement_sp(self) -> None:
        # SP = SP - 1
        self.write_lines(['@SP',
                          'M=M-1'])
    def set_A_to_sp(self) -> None:
        self.write_lines(['@SP', # Get current stack pointer (SP)
                          'A=M']) # Set address to current SP
    def raise_unknown_error(self, argument: str) -> None:
        # always raises; never actually returns
        raise ValueError(f'{argument} is an invalid argument.')
| from pathlib import Path
from typing import List
class CodeWriter:
    """Translate parsed Hack VM commands into Hack assembly (nand2tetris 7/8).

    Each ``write_*`` method appends assembly text to ``self.out_file``.
    Conventions used throughout: D is the data register, A the address
    register, M the RAM word addressed by A, @SP the stack pointer, and
    R13-R15 are general-purpose scratch registers.
    """
    def __init__(self, asm_file, vm_file) -> None:
        self.in_file = vm_file
        self.in_file_name = Path(vm_file).stem  # namespaces statics and labels
        self.out_file = asm_file
        self.out_file_name = Path(asm_file).stem
        # counters for Boolean comparisons (make each jump label unique)
        self.eq_count = 0
        self.gt_count = 0
        self.lt_count = 0
        self.call_count = 0  # makes each call's return label unique
        self.function_name = None # for function labels
        self.addresses = self.address_dict()  # segment name -> base symbol/int
    # load M[address] to D
    def write_push(self, segment: str, index: str) -> None:
        """Emit code pushing segment[index] (or a constant) onto the stack."""
        self.write_line(f'// push {segment} {index}')
        self.resolve_address(segment, index)
        if segment == 'constant': # the resolved "address" IS the literal value
            self.write_line('D=A')
        else:
            self.write_line('D=M')
        self.push_D_to_stack()
    # load D to M[address]
    def write_pop(self, segment: str, index: str) -> None:
        """Emit code popping the stack top into segment[index]."""
        self.write_line(f'// pop {segment} {index}')
        self.resolve_address(segment, index)
        self.write_lines(
            [
                'D=A',
                '@R13', # store resolved address in R13
                'M=D'
            ]
        )
        self.pop_stack_to_D()
        self.write_lines(
            [
                '@R13',  # recall the saved target address
                'A=M',
                'M=D'    # write the popped value there
            ]
        )
    def resolve_address(self, segment: str, index: int) -> None:
        """Emit code leaving the target address of segment[index] in A.

        NOTE(review): ``index`` must be an int for pointer/temp (it is
        added to the numeric base); the other branches only format it —
        confirm the caller converts consistently.
        """
        address = self.addresses.get(segment)
        if segment == 'constant':
            self.write_line(f'@{str(index)}')
        elif segment == 'static':
            # statics are file-scoped symbols: <FileName>.<index>
            self.write_line(f'@{self.in_file_name}.{str(index)}')
        elif segment in ['pointer', 'temp']:
            self.write_line(f'@R{str(address + index)}') # address type is int
        elif segment in ['local', 'argument', 'this', 'that']:
            self.write_lines(
                [
                    f'@{address}',
                    'D=M',
                    f'@{str(index)}',
                    'A=D+A' # D is segment base
                ]
            )
        else:
            self.raise_unknown_error(segment)
    def write_arithmetic(self, operation: str) -> None:
        """Emit code for an arithmetic/logical/comparison VM command.

        Binary operators first pop y into D, then operate in place on x
        (the new stack top); unary operators operate in place. SP is
        re-incremented at the end so the result stays on the stack.
        """
        self.write_line(f'// {operation}')
        if operation not in ['neg', 'not']: # binary operators: pop y into D
            self.pop_stack_to_D()
        self.decrement_sp()
        self.set_A_to_sp()  # M now addresses operand x
        if operation == 'add': # arithmetic operators
            self.write_line('M=M+D')
        elif operation == 'sub':
            self.write_line('M=M-D')
        elif operation == 'and':
            self.write_line('M=M&D')
        elif operation == 'or':
            self.write_line('M=M|D')
        elif operation == 'neg':
            self.write_line('M=-M')
        elif operation == 'not':
            self.write_line('M=!M')
        elif operation == 'eq': # Boolean operators
            self.write_eq()
        elif operation == 'gt':
            self.write_gt()
        elif operation == 'lt':
            self.write_lt()
        else:
            self.raise_unknown_error(operation)
        self.increment_sp()
    def write_eq(self) -> None:
        """Emit eq: stack top := -1 (true) if x == y else 0, via D = x - y."""
        self.write_lines(
            [
                'D=M-D',
                f'@EQ.{self.eq_count}',
                'D;JEQ'
            ]
        )
        self.set_A_to_sp()
        self.write_lines(
            [
                'M=0', # False
                f'@ENDEQ.{self.eq_count}',
                '0;JMP',
                f'(EQ.{self.eq_count})'
            ]
        )
        self.set_A_to_sp()
        self.write_lines(
            [
                'M=-1', # True
                f'(ENDEQ.{self.eq_count})'
            ]
        )
        self.eq_count += 1
    def write_gt(self) -> None:
        """Emit gt: stack top := -1 (true) if x > y else 0, via D = x - y."""
        self.write_lines(
            [
                'D=M-D',
                f'@GT.{self.gt_count}',
                'D;JGT'
            ]
        )
        self.set_A_to_sp()
        self.write_lines(
            [
                'M=0', # False
                f'@ENDGT.{self.gt_count}',
                '0;JMP',
                f'(GT.{self.gt_count})'
            ]
        )
        self.set_A_to_sp()
        self.write_lines(
            [
                'M=-1', # True
                f'(ENDGT.{self.gt_count})'
            ]
        )
        self.gt_count += 1
    def write_lt(self) -> None:
        """Emit lt: stack top := -1 (true) if x < y else 0, via D = x - y."""
        self.write_lines(
            [
                'D=M-D',
                f'@LT.{self.lt_count}',
                'D;JLT'
            ]
        )
        self.set_A_to_sp()
        self.write_lines(
            [
                'M=0', # False
                f'@ENDLT.{self.lt_count}',
                '0;JMP',
                f'(LT.{self.lt_count})'
            ]
        )
        self.set_A_to_sp()
        self.write_lines(
            [
                'M=-1', # True
                f'(ENDLT.{self.lt_count})'
            ]
        )
        self.lt_count += 1
    def create_label(self, label: str, function_type: str = None) -> str:
        """Return the assembly form of a VM label.

        'if'/'goto' produce a jump target (@label); anything else produces
        a label declaration ((label)).
        NOTE(review): labels are namespaced by the input file name, not by
        self.function_name (stored in write_function but never read) — the
        VM spec scopes labels per function; confirm this is intended.
        """
        asm_label = f'{self.in_file_name}${label}'
        if function_type in ['if', 'goto']:
            return f'@{asm_label}'
        else:
            return f'({asm_label})'
    def write_label(self, label: str) -> None:
        """Emit a label declaration for the VM 'label' command."""
        self.write_line(f'// label {label}')
        self.write_line(f'{self.create_label(label)}')
    def write_goto(self, label: str) -> None: # unconditional jump
        """Emit an unconditional jump to *label*."""
        self.write_line(f'// goto {label}')
        self.write_lines(
            [
                f"{self.create_label(label, 'goto')}",
                '0;JMP'
            ]
        )
    def write_if(self, label: str) -> None: # conditional jump
        """Emit 'if-goto': pop the stack and jump when the value is non-zero."""
        self.write_line(f'// if-goto {label}')
        self.pop_stack_to_D()
        self.write_lines(
            [
                f"{self.create_label(label, 'if')}",
                'D;JNE'
            ]
        )
    def write_function(self, function_name: str, num_locals: int) -> None:
        """Emit a function entry: declare its label, zero num_locals slots."""
        self.write_line(f'// function {function_name} {num_locals}')
        self.write_line(f'({function_name})')
        for i in range(num_locals): # initialize each local slot to 0
            self.write_line('D=0')
            self.push_D_to_stack()
        self.function_name = function_name  # remembered for label scoping
    def write_return(self) -> None:
        """Emit the VM 'return' sequence (standard calling-convention
        frame teardown)."""
        FRAME = 'R13'     # scratch: start of the caller's saved frame
        RET_ADDR = 'R14'  # scratch: caller's return address
        self.write_line(f'// return')
        # FRAME = LCL
        self.write_lines(
            [
                '@LCL',
                'D=M',
                f'@{FRAME}',
                'M=D'
            ]
        )
        # RET = *(FRAME - 5)
        # Saved BEFORE *ARG is overwritten: for a 0-argument call the
        # return address lives at *ARG and would otherwise be clobbered.
        self.write_lines(
            [
                f'@{FRAME}',
                'D=M', # save start of frame
                '@5',
                'D=D-A', # adjust address
                'A=D', # prepare to load value at address
                'D=M', # store value
                f'@{RET_ADDR}',
                'M=D' # save value
            ]
        )
        # *ARG = pop() -- place the return value where the caller expects it
        self.pop_stack_to_D()
        self.write_lines(
            [
                '@ARG',
                'A=M',
                'M=D'
            ]
        )
        # SP = ARG + 1
        self.write_lines(
            [
                '@ARG',
                'D=M',
                '@SP',
                'M=D+1'
            ]
        )
        # Restore the caller's saved segment pointers:
        # THAT = *(FRAME - 1)
        # THIS = *(FRAME - 2)
        # ARG = *(FRAME - 3)
        # LCL = *(FRAME - 4)
        offset = 1
        for address in ['@THAT', '@THIS', '@ARG', '@LCL']:
            self.write_lines(
                [
                    f'@{FRAME}',
                    'D=M', # save start of frame
                    f'@{str(offset)}',
                    'D=D-A', # adjust address
                    'A=D', # prepare to load value at address
                    'D=M', # store value
                    f'{address}',
                    'M=D' # save value
                ]
            )
            offset += 1
        # goto RET_ADDR
        self.write_lines(
            [
                f'@{RET_ADDR}',
                'A=M',
                '0;JMP'
            ]
        )
    def write_call(self, function_name: str, num_args: int) -> None:
        """Emit the VM 'call' sequence: save the caller's frame, reposition
        ARG/LCL, and jump to the callee."""
        # unique return label
        RET_ADDR = f'{function_name}$Ret.{self.call_count}'
        self.write_line(f'// call {function_name} {num_args}')
        # push return address
        self.write_lines(
            [
                f'@{RET_ADDR}',
                'D=A'
            ]
        )
        self.push_D_to_stack()
        # push LCL, ARG, THIS, THAT
        for address in ['@LCL', '@ARG', '@THIS', '@THAT']:
            self.write_lines(
                [
                    f'{address}',
                    'D=M'
                ]
            )
            self.push_D_to_stack()
        # LCL = SP (also leaves SP's value in D, relied upon just below)
        self.write_lines(
            [
                '@SP',
                'D=M',
                '@LCL',
                'M=D'
            ]
        )
        # ARG = SP - 5 - nArgs
        # NOTE: depends on D still holding SP from the block above; the
        # commented-out reload is the self-contained alternative.
        self.write_lines(
            [
                # '@SP', # reload of SP, redundant while D still holds it
                # 'D=M',
                f'@{str(num_args + 5)}',
                'D=D-A',
                '@ARG',
                'M=D'
            ]
        )
        # goto f
        self.write_lines(
            [
                f'@{function_name}',
                '0;JMP'
            ]
        )
        self.write_line(f'({RET_ADDR})') # (return_address)
        self.call_count += 1
    def write_bootstrap(self) -> None:
        """Emit bootstrap code: SP = 256, then call Sys.init."""
        self.write_lines(
            [
                '@256',
                'D=A',
                '@SP',
                'M=D'
            ]
        )
        self.write_call('Sys.init', 0)
    def address_dict(self) -> dict:
        """Map VM segment names to base symbols (str) or base addresses (int)."""
        return {
            'local': 'LCL', # base address R1
            'argument': 'ARG', # R2
            'this': 'THIS', # R3
            'that': 'THAT', # R4
            'pointer': 3, # R3, R4
            'temp': 5, # R5 - R12 (R13 - R15 are free)
            'static': 16, # base addresses 16 - 255
        }
    def write_line(self, line: str) -> None:
        """Append *line* plus a newline to the output .asm file."""
        with open(self.out_file, 'a') as f:
            f.write(f'{line}\n')
    def write_lines(self, lines: List[str]) -> None:
        """Append each string in *lines* to the output file, one per line."""
        with open(self.out_file, 'a') as f:
            for line in lines:
                f.write(f'{line}\n')
    def push_D_to_stack(self) -> None:
        """Emit assembly pushing the D register onto the stack."""
        # Push D value to top of stack, increment @SP
        self.set_A_to_sp()
        self.write_line('M=D') # Push D to stack
        self.increment_sp()
    def pop_stack_to_D(self) -> None:
        """Emit assembly popping the top of the stack into the D register."""
        # Decrement @SP, pop top of stack to D
        self.decrement_sp()
        self.write_lines(
            [
                'A=M',  # A <- new SP (address of the old stack top)
                'D=M'   # D <- popped value
            ]
        )
        # Equivalent single-instruction variant, kept for reference:
        # self.write_lines( # more efficient
        # [
        # '@SP',
        # 'AM=M-1',
        # 'D=M'
        # ]
        # )
    def increment_sp(self) -> None:
        """Emit assembly advancing the stack pointer: @SP / M=M+1."""
        self.write_lines(['@SP',
                          'M=M+1'])
    def decrement_sp(self) -> None:
        """Emit assembly retreating the stack pointer: @SP / M=M-1."""
        self.write_lines(['@SP',
                          'M=M-1'])
    def set_A_to_sp(self) -> None:
        """Emit assembly pointing A at the word addressed by @SP."""
        self.write_lines(['@SP', # Get current stack pointer (SP)
                          'A=M']) # Set address to current SP
    def raise_unknown_error(self, argument: str) -> ValueError:
        """Report an unrecognized segment/operation; always raises ValueError."""
        raise ValueError(f'{argument} is an invalid argument.')
| en | 0.682558 | # counters for Boolean comparisons # for function labels # load M[address] to D # check # load D to M[address] # store resolved address in R13 # address type is int # D is segment base # binary operators # arithmetic operators # Boolean operators # False # True # False # True # False # True # check # check # check # push constant 0 i times # debug if needed # FRAME = LCL # RET = *(FRAME - 5) # debug # debug - save start of frame # adjust address # prepare to load value at address # store value # save value # *ARG = pop() # SP = ARG + 1 # THAT = *(FRAME - 1) # THIS = *(FRAME - 2) # ARG = *(FRAME - 3) # LCL = *(FRAME - 4) # save start of frame # adjust address # prepare to load value at address # store value # save value # goto RET_ADDR # check # unique return label # push return address # push LCL, ARG, THIS, THAT # LCL = SP # ARG = SP - 5 - nArgs # '@SP', # debug - remove if needed # 'D=M', # debug - remove if needed # goto f # (return_address) # base address R1 # R2 # R3 # R4 # R3, R4 # R5 - R12 (R13 - R15 are free) # base addresses 16 - 255 # Push D value to top of stack, increment @SP # Push D to stack # Decrement @SP, pop top of stack to D # self.write_lines( # more efficient # [ # '@SP', # 'AM=M-1', # 'D=M' # ] # ) # Get current stack pointer (SP) # Set address to current SP | 2.950986 | 3 |
Menu.py | ruiting-chen/Escape_Room | 0 | 6623368 | <filename>Menu.py
# Date created: 19/04/10
# Date last modified: 19/04/12
# Description: Menu of Escape Room Game
# Ask for an action from user.
print("You can take 4 actions in this game:")
print("\tSearch\n\tUse\n\tTake\n\tTansform")
ask = input("\nWhat action do you want to take?").lower().strip()
if ask == "search":
print("Search!")
elif ask == "use":
print("Use!")
elif ask == "take":
print("Take!")
elif ask == "transform":
print("Transform!")
else:
print("Invalid action!")
| <filename>Menu.py
# Date created: 19/04/10
# Date last modified: 19/04/12
# Description: Menu of Escape Room Game
# Ask for an action from user.
print("You can take 4 actions in this game:")
print("\tSearch\n\tUse\n\tTake\n\tTansform")
ask = input("\nWhat action do you want to take?").lower().strip()
if ask == "search":
print("Search!")
elif ask == "use":
print("Use!")
elif ask == "take":
print("Take!")
elif ask == "transform":
print("Transform!")
else:
print("Invalid action!")
| en | 0.798805 | # Date created: 19/04/10 # Date last modified: 19/04/12 # Description: Menu of Escape Room Game # Ask for an action from user. | 3.646667 | 4 |
tests/settings.py | NyanKiyoshi/django-positions | 0 | 6623369 | SECRET_KEY = 'you_saw_nothing.'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = [
'django_positions_2.apps.DjangoPositions2Config',
'tests'
]
| SECRET_KEY = 'you_saw_nothing.'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = [
'django_positions_2.apps.DjangoPositions2Config',
'tests'
]
| none | 1 | 1.223713 | 1 | |
ptera/tools.py | mila-iqia/ptera | 6 | 6623370 | class Range:
def __init__(self, start=0, end=None, modulo=None):
self.start = start
self.end = end
self.modulo = modulo
def __call__(self, value):
if self.start is not None and value < self.start:
return False
if self.end is not None and value >= self.end:
return False
if self.modulo is not None:
return (
(value - (self.start or 0)) + self.modulo
) % self.modulo == 0
return True
def every(modulo=None, start=0, end=None):
return Range(modulo=modulo, start=start, end=end)
def between(start, end, modulo=None):
return Range(modulo=modulo, start=start, end=end)
def lt(end):
return lambda x: x < end
def gt(start):
return lambda x: x > start
def lte(end):
return lambda x: x <= end
def gte(start):
return lambda x: x >= start
class throttle:
def __init__(self, period):
self.period = period
self.current = None
self.trigger = None
def __call__(self, value):
if self.current is None:
self.current = value
self.trigger = self.current + self.period
if value == self.current:
return True
elif value >= self.trigger:
self.current = value
self.trigger += self.period
return True
else:
return False
# def __call__(self, value):
# if self.trigger is None:
# self.trigger = value
# if value == self.trigger:
# return True
# elif value > self.trigger:
# self.trigger += self.period
# return self(value)
# else:
# return False
| class Range:
def __init__(self, start=0, end=None, modulo=None):
self.start = start
self.end = end
self.modulo = modulo
def __call__(self, value):
if self.start is not None and value < self.start:
return False
if self.end is not None and value >= self.end:
return False
if self.modulo is not None:
return (
(value - (self.start or 0)) + self.modulo
) % self.modulo == 0
return True
def every(modulo=None, start=0, end=None):
return Range(modulo=modulo, start=start, end=end)
def between(start, end, modulo=None):
return Range(modulo=modulo, start=start, end=end)
def lt(end):
return lambda x: x < end
def gt(start):
return lambda x: x > start
def lte(end):
return lambda x: x <= end
def gte(start):
return lambda x: x >= start
class throttle:
def __init__(self, period):
self.period = period
self.current = None
self.trigger = None
def __call__(self, value):
if self.current is None:
self.current = value
self.trigger = self.current + self.period
if value == self.current:
return True
elif value >= self.trigger:
self.current = value
self.trigger += self.period
return True
else:
return False
# def __call__(self, value):
# if self.trigger is None:
# self.trigger = value
# if value == self.trigger:
# return True
# elif value > self.trigger:
# self.trigger += self.period
# return self(value)
# else:
# return False
| en | 0.212422 | # def __call__(self, value): # if self.trigger is None: # self.trigger = value # if value == self.trigger: # return True # elif value > self.trigger: # self.trigger += self.period # return self(value) # else: # return False | 3.222095 | 3 |
tests/jupyter/test_jupyter.py | thomashopkins32/LEAP | 53 | 6623371 | <reponame>thomashopkins32/LEAP
import glob
import os
import pathlib
from nbconvert import NotebookExporter
from nbconvert.preprocessors import ExecutePreprocessor
import nbformat
import pytest
from traitlets.config import Config
def run_notebook(path, timeout=120):
"""
Execute a Jupyter Notebook and return any errors that it produces.
:param path: path to the .ipynb file to execute
:param int timeout: number of seconds to let the notebook run before we throw an exception by default.
:return: a tuple (nb, errors) containing the parsed Notebook object and a list of errors, respectively
"""
# We'll use a NotebookExporter from the nbconvert package to load the notebook and re-export it to a temporary file.
# First we want to configure it to execute the notebook before writing it:
c = Config()
c.NotebookExporter.preprocessors = ['nbconvert.preprocessors.ExecutePreprocessor']
c.ExecutePreprocessor.timeout = timeout
c.ExecutePreprocessor.kernel_name = 'LEAP_venv' # We assume a kernel named "LEAP_venv" that lives in our venv
exp = NotebookExporter(config=c)
# Load the notebook
with open(path, 'r') as nb_file:
body, resources = exp.from_file(nb_file)
# Parse the notebook string into a notebook object
nb = nbformat.reads(body, nbformat.current_nbformat)
errors = [output for cell in nb.cells if "outputs" in cell
for output in cell["outputs"]
if output.output_type == "error"]
return nb, errors
def is_hidden_path(path):
"""Return true if the file or any of its ancesor directories begins with '.'."""
head, tail = os.path.split(path)
if tail.startswith('.'):
return True
elif head == '' or tail == '':
return False
else:
return is_hidden_path(head)
notebooks = pathlib.Path(__file__, '..', '../..', 'examples').resolve().rglob('*.ipynb')
notebooks = [ p for p in notebooks if not is_hidden_path(p) ]
# We give Jupyter tests a separate marker, because they can only run if the 'LEAP_venv' kernel is configured propertly by the user
@pytest.mark.jupyter
@pytest.mark.parametrize('path', notebooks)
def test_notebook(path):
"""Ensure that all of the notebooks in the examples directory run without errors."""
nb, errors = run_notebook(path)
# No errors is success
assert errors == []
| import glob
import os
import pathlib
from nbconvert import NotebookExporter
from nbconvert.preprocessors import ExecutePreprocessor
import nbformat
import pytest
from traitlets.config import Config
def run_notebook(path, timeout=120):
"""
Execute a Jupyter Notebook and return any errors that it produces.
:param path: path to the .ipynb file to execute
:param int timeout: number of seconds to let the notebook run before we throw an exception by default.
:return: a tuple (nb, errors) containing the parsed Notebook object and a list of errors, respectively
"""
# We'll use a NotebookExporter from the nbconvert package to load the notebook and re-export it to a temporary file.
# First we want to configure it to execute the notebook before writing it:
c = Config()
c.NotebookExporter.preprocessors = ['nbconvert.preprocessors.ExecutePreprocessor']
c.ExecutePreprocessor.timeout = timeout
c.ExecutePreprocessor.kernel_name = 'LEAP_venv' # We assume a kernel named "LEAP_venv" that lives in our venv
exp = NotebookExporter(config=c)
# Load the notebook
with open(path, 'r') as nb_file:
body, resources = exp.from_file(nb_file)
# Parse the notebook string into a notebook object
nb = nbformat.reads(body, nbformat.current_nbformat)
errors = [output for cell in nb.cells if "outputs" in cell
for output in cell["outputs"]
if output.output_type == "error"]
return nb, errors
def is_hidden_path(path):
"""Return true if the file or any of its ancesor directories begins with '.'."""
head, tail = os.path.split(path)
if tail.startswith('.'):
return True
elif head == '' or tail == '':
return False
else:
return is_hidden_path(head)
notebooks = pathlib.Path(__file__, '..', '../..', 'examples').resolve().rglob('*.ipynb')
notebooks = [ p for p in notebooks if not is_hidden_path(p) ]
# We give Jupyter tests a separate marker, because they can only run if the 'LEAP_venv' kernel is configured propertly by the user
@pytest.mark.jupyter
@pytest.mark.parametrize('path', notebooks)
def test_notebook(path):
"""Ensure that all of the notebooks in the examples directory run without errors."""
nb, errors = run_notebook(path)
# No errors is success
assert errors == [] | en | 0.8366 | Execute a Jupyter Notebook and return any errors that it produces. :param path: path to the .ipynb file to execute :param int timeout: number of seconds to let the notebook run before we throw an exception by default. :return: a tuple (nb, errors) containing the parsed Notebook object and a list of errors, respectively # We'll use a NotebookExporter from the nbconvert package to load the notebook and re-export it to a temporary file. # First we want to configure it to execute the notebook before writing it: # We assume a kernel named "LEAP_venv" that lives in our venv # Load the notebook # Parse the notebook string into a notebook object Return true if the file or any of its ancesor directories begins with '.'. # We give Jupyter tests a separate marker, because they can only run if the 'LEAP_venv' kernel is configured propertly by the user Ensure that all of the notebooks in the examples directory run without errors. # No errors is success | 2.553058 | 3 |
database/bifrost.py | aasensio/graphnet_rt | 0 | 6623372 | from lightweaver.fal import Falc82
from lightweaver.rh_atoms import H_6_atom, H_6_CRD_atom, H_3_atom, C_atom, O_atom, OI_ord_atom, Si_atom, Al_atom, CaII_atom, Fe_atom, FeI_atom, He_9_atom, He_atom, He_large_atom, MgII_atom, N_atom, Na_atom, S_atom
import lightweaver as lw
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
def iterate_ctx_crd(ctx, Nscatter=10, NmaxIter=500):
for i in range(NmaxIter):
dJ = ctx.formal_sol_gamma_matrices(verbose=True)
if i < Nscatter:
continue
delta = ctx.stat_equil(printUpdate=True)
if dJ < 3e-3 and delta < 1e-3:
print(i)
print('----------')
return
def synth_spectrum(atmos, depthData=False, Nthreads=1, conserveCharge=False, allactive=True):
atmos.quadrature(5)
aSet = lw.RadiativeSet([H_6_atom(),
C_atom(),
OI_ord_atom(), Si_atom(), Al_atom(),
CaII_atom(),
Fe_atom(),
He_9_atom(),
MgII_atom(), N_atom(), Na_atom(), S_atom()
])
if (allactive):
aSet.set_active('H', 'Ca')
else:
aSet.set_active('Ca')
spect = aSet.compute_wavelength_grid()
eqPops = aSet.compute_eq_pops(atmos)
ctx = lw.Context(atmos, spect, eqPops, ngOptions=lw.utils.NgOptions(0,0,0), Nthreads=Nthreads, conserveCharge=conserveCharge)
if depthData:
ctx.depthData.fill = True
iterate_ctx_crd(ctx)
eqPops.update_lte_atoms_Hmin_pops(atmos)
ctx.formal_sol_gamma_matrices()
return ctx
atmosRef = Falc82()
# ctxRef = synth_spectrum(atmosRef, depthData=True, conserveCharge=True)
fmodel = fits.open('/net/drogon/scratch1/aasensio/3dcubes/Enhanced_network_385_tau_from_RH_01_tau8.fits')
x, y = 50, 20
bifrost = fmodel[0].data[:, :, x, y].astype('<f8')
# tau, T, Pe, vmicro, B, vlos, theta, azimuth, z, Pgas, rho_gas
tau500 = np.ascontiguousarray(10.0**bifrost[0, ::-1])
T = np.ascontiguousarray(bifrost[1, ::-1])
vlos = np.ascontiguousarray(bifrost[5, ::-1]) / 100.0 # m/s
vturb = np.ascontiguousarray(bifrost[3, ::-1]) / 100.0 # m/s
Pe = np.ascontiguousarray(bifrost[2, ::-1])
Ne = Pe / (1.381e-16 * T) * 1e6 # m-3
atmos = lw.Atmosphere.make_1d(scale=lw.ScaleType.Tau500, depthScale=tau500, temperature=T, vlos=vlos, vturb=vturb, ne = Ne, verbose=True)
ctx = synth_spectrum(atmos, depthData=True, conserveCharge=False, allactive=True)
ctx2 = synth_spectrum(atmos, depthData=True, conserveCharge=False, allactive=False)
# cmass_max = 1.8
# cmass_min = -4.0
# n = 82
# cmass = np.linspace(cmass_min, cmass_max, n)
# f = int.interp1d(np.log10(atmosRef.cmass), atmosRef.temperature, bounds_error=False, fill_value=(atmosRef.temperature[0], atmosRef.temperature[-1]))
# Tnew = f(cmass)
# f = int.interp1d(np.log10(atmosRef.cmass), atmosRef.vlos, bounds_error=False, fill_value=(atmosRef.vlos[0], atmosRef.vlos[-1]))
# vlosnew = f(cmass)
# f = int.interp1d(np.log10(atmosRef.cmass), atmosRef.vturb, bounds_error=False, fill_value=(atmosRef.vturb[0], atmosRef.vturb[-1]))
# vturbnew = f(cmass)
# atmos = lw.Atmosphere.make_1d(scale=lw.ScaleType.ColumnMass, depthScale=10**cmass, temperature=Tnew, vlos=vlosnew, vturb=vturbnew, verbose=True)
# ctx = synth_spectrum(atmos, depthData=True, conserveCharge=True)
plt.ion()
# plt.plot(ctx.spect.wavelength, (ctxFast.spect.I[:, -1] - ctx.spect.I[:, -1]) / ctxFast.spect.I[:, -1])
plt.plot(ctx.spect.wavelength, ctx.spect.I[:, -1])
plt.plot(ctx2.spect.wavelength, ctx2.spect.I[:, -1])
# plt.plot(ctx.spect.wavelength, ctxPyTau.spect.I[:, -1])
# plt.plot(ctx.spect.wavelength, ctxRef.spect.I[:, -1], '--')
plt.xlim(853.9444, 854.9444)
| from lightweaver.fal import Falc82
from lightweaver.rh_atoms import H_6_atom, H_6_CRD_atom, H_3_atom, C_atom, O_atom, OI_ord_atom, Si_atom, Al_atom, CaII_atom, Fe_atom, FeI_atom, He_9_atom, He_atom, He_large_atom, MgII_atom, N_atom, Na_atom, S_atom
import lightweaver as lw
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
def iterate_ctx_crd(ctx, Nscatter=10, NmaxIter=500):
for i in range(NmaxIter):
dJ = ctx.formal_sol_gamma_matrices(verbose=True)
if i < Nscatter:
continue
delta = ctx.stat_equil(printUpdate=True)
if dJ < 3e-3 and delta < 1e-3:
print(i)
print('----------')
return
def synth_spectrum(atmos, depthData=False, Nthreads=1, conserveCharge=False, allactive=True):
atmos.quadrature(5)
aSet = lw.RadiativeSet([H_6_atom(),
C_atom(),
OI_ord_atom(), Si_atom(), Al_atom(),
CaII_atom(),
Fe_atom(),
He_9_atom(),
MgII_atom(), N_atom(), Na_atom(), S_atom()
])
if (allactive):
aSet.set_active('H', 'Ca')
else:
aSet.set_active('Ca')
spect = aSet.compute_wavelength_grid()
eqPops = aSet.compute_eq_pops(atmos)
ctx = lw.Context(atmos, spect, eqPops, ngOptions=lw.utils.NgOptions(0,0,0), Nthreads=Nthreads, conserveCharge=conserveCharge)
if depthData:
ctx.depthData.fill = True
iterate_ctx_crd(ctx)
eqPops.update_lte_atoms_Hmin_pops(atmos)
ctx.formal_sol_gamma_matrices()
return ctx
atmosRef = Falc82()
# ctxRef = synth_spectrum(atmosRef, depthData=True, conserveCharge=True)
fmodel = fits.open('/net/drogon/scratch1/aasensio/3dcubes/Enhanced_network_385_tau_from_RH_01_tau8.fits')
x, y = 50, 20
bifrost = fmodel[0].data[:, :, x, y].astype('<f8')
# tau, T, Pe, vmicro, B, vlos, theta, azimuth, z, Pgas, rho_gas
tau500 = np.ascontiguousarray(10.0**bifrost[0, ::-1])
T = np.ascontiguousarray(bifrost[1, ::-1])
vlos = np.ascontiguousarray(bifrost[5, ::-1]) / 100.0 # m/s
vturb = np.ascontiguousarray(bifrost[3, ::-1]) / 100.0 # m/s
Pe = np.ascontiguousarray(bifrost[2, ::-1])
Ne = Pe / (1.381e-16 * T) * 1e6 # m-3
atmos = lw.Atmosphere.make_1d(scale=lw.ScaleType.Tau500, depthScale=tau500, temperature=T, vlos=vlos, vturb=vturb, ne = Ne, verbose=True)
ctx = synth_spectrum(atmos, depthData=True, conserveCharge=False, allactive=True)
ctx2 = synth_spectrum(atmos, depthData=True, conserveCharge=False, allactive=False)
# cmass_max = 1.8
# cmass_min = -4.0
# n = 82
# cmass = np.linspace(cmass_min, cmass_max, n)
# f = int.interp1d(np.log10(atmosRef.cmass), atmosRef.temperature, bounds_error=False, fill_value=(atmosRef.temperature[0], atmosRef.temperature[-1]))
# Tnew = f(cmass)
# f = int.interp1d(np.log10(atmosRef.cmass), atmosRef.vlos, bounds_error=False, fill_value=(atmosRef.vlos[0], atmosRef.vlos[-1]))
# vlosnew = f(cmass)
# f = int.interp1d(np.log10(atmosRef.cmass), atmosRef.vturb, bounds_error=False, fill_value=(atmosRef.vturb[0], atmosRef.vturb[-1]))
# vturbnew = f(cmass)
# atmos = lw.Atmosphere.make_1d(scale=lw.ScaleType.ColumnMass, depthScale=10**cmass, temperature=Tnew, vlos=vlosnew, vturb=vturbnew, verbose=True)
# ctx = synth_spectrum(atmos, depthData=True, conserveCharge=True)
plt.ion()
# plt.plot(ctx.spect.wavelength, (ctxFast.spect.I[:, -1] - ctx.spect.I[:, -1]) / ctxFast.spect.I[:, -1])
plt.plot(ctx.spect.wavelength, ctx.spect.I[:, -1])
plt.plot(ctx2.spect.wavelength, ctx2.spect.I[:, -1])
# plt.plot(ctx.spect.wavelength, ctxPyTau.spect.I[:, -1])
# plt.plot(ctx.spect.wavelength, ctxRef.spect.I[:, -1], '--')
plt.xlim(853.9444, 854.9444)
| en | 0.340779 | # ctxRef = synth_spectrum(atmosRef, depthData=True, conserveCharge=True) # tau, T, Pe, vmicro, B, vlos, theta, azimuth, z, Pgas, rho_gas # m/s # m/s # m-3 # cmass_max = 1.8 # cmass_min = -4.0 # n = 82 # cmass = np.linspace(cmass_min, cmass_max, n) # f = int.interp1d(np.log10(atmosRef.cmass), atmosRef.temperature, bounds_error=False, fill_value=(atmosRef.temperature[0], atmosRef.temperature[-1])) # Tnew = f(cmass) # f = int.interp1d(np.log10(atmosRef.cmass), atmosRef.vlos, bounds_error=False, fill_value=(atmosRef.vlos[0], atmosRef.vlos[-1])) # vlosnew = f(cmass) # f = int.interp1d(np.log10(atmosRef.cmass), atmosRef.vturb, bounds_error=False, fill_value=(atmosRef.vturb[0], atmosRef.vturb[-1])) # vturbnew = f(cmass) # atmos = lw.Atmosphere.make_1d(scale=lw.ScaleType.ColumnMass, depthScale=10**cmass, temperature=Tnew, vlos=vlosnew, vturb=vturbnew, verbose=True) # ctx = synth_spectrum(atmos, depthData=True, conserveCharge=True) # plt.plot(ctx.spect.wavelength, (ctxFast.spect.I[:, -1] - ctx.spect.I[:, -1]) / ctxFast.spect.I[:, -1]) # plt.plot(ctx.spect.wavelength, ctxPyTau.spect.I[:, -1]) # plt.plot(ctx.spect.wavelength, ctxRef.spect.I[:, -1], '--') | 1.864111 | 2 |
01.py | hendrikjeb/Euler | 1 | 6623373 | # -*- coding: utf-8 -*-
#Problem 1 - Multiples of 3 and 5
#If we list all the natural numbers below 10 that are multiples of 3 or 5,
#we get 3, 5, 6 and 9. The sum of these multiples is 23.
#Find the sum of all the multiples of 3 or 5 below 1000.
lijst = []
z = 0
for x in xrange(1000):
if x % 3 == 0 or x % 5 == 0:
lijst.append(x)
z += x
for x in lijst:
print x,
print '\n\n', z | # -*- coding: utf-8 -*-
#Problem 1 - Multiples of 3 and 5
#If we list all the natural numbers below 10 that are multiples of 3 or 5,
#we get 3, 5, 6 and 9. The sum of these multiples is 23.
#Find the sum of all the multiples of 3 or 5 below 1000.
lijst = []
z = 0
for x in xrange(1000):
if x % 3 == 0 or x % 5 == 0:
lijst.append(x)
z += x
for x in lijst:
print x,
print '\n\n', z | en | 0.751412 | # -*- coding: utf-8 -*- #Problem 1 - Multiples of 3 and 5 #If we list all the natural numbers below 10 that are multiples of 3 or 5, #we get 3, 5, 6 and 9. The sum of these multiples is 23. #Find the sum of all the multiples of 3 or 5 below 1000. | 4.072109 | 4 |
examples/bram_example.py | Verkhovskaya/PyDL | 5 | 6623374 | <reponame>Verkhovskaya/PyDL
from pywire import *
mem = BRAM(8, 2, True, True)
bram_address = Signal(1, io="in", port="P51")
bram_write_data = Signal(8, io="in", port=["P35", "P33", "P30", "P27", "P24", "P22", "P17", "P15"])
bram_write_en = Signal(1, io="in", port="P41")
bram_read = Signal(8, io="out", port=["P134", "P133", "P132", "P131", "P127", "P126", "P124", "P123"])
def identity(x):
return x
mem.a_address.drive(identity, bram_address)
mem.a_data_in.drive(identity, bram_write_data)
mem.a_write_en.drive(identity, bram_write_en)
bram_read.drive(identity, mem.a_data_out)
rename_signals(globals())
print(generate(name="bram_example"))
#print(timing(globals(), 50, 'P56', vendor="Xilinx")) | from pywire import *
mem = BRAM(8, 2, True, True)
bram_address = Signal(1, io="in", port="P51")
bram_write_data = Signal(8, io="in", port=["P35", "P33", "P30", "P27", "P24", "P22", "P17", "P15"])
bram_write_en = Signal(1, io="in", port="P41")
bram_read = Signal(8, io="out", port=["P134", "P133", "P132", "P131", "P127", "P126", "P124", "P123"])
def identity(x):
return x
mem.a_address.drive(identity, bram_address)
mem.a_data_in.drive(identity, bram_write_data)
mem.a_write_en.drive(identity, bram_write_en)
bram_read.drive(identity, mem.a_data_out)
rename_signals(globals())
print(generate(name="bram_example"))
#print(timing(globals(), 50, 'P56', vendor="Xilinx")) | en | 0.139836 | #print(timing(globals(), 50, 'P56', vendor="Xilinx")) | 2.249498 | 2 |
lambda_function.py | samjeffcoat/selenium-practice-scraping | 0 | 6623375 | import smtplib
import os
import json
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
YOUTUBE_TRENDING_URL = 'https://www.youtube.com/feed/trending'
def get_driver():
options = Options()
options.binary_location = '/opt/headless-chromium'
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--single-process')
options.add_argument('--disable-dev-shm-usage')
driver = webdriver.Chrome('/opt/chromedriver',chrome_options=options)
return driver
def get_videos(driver):
VIDEO_DIV_TAG = 'ytd-video-renderer'
driver.get(YOUTUBE_TRENDING_URL)
videos = driver.find_elements(By.TAG_NAME, VIDEO_DIV_TAG)
return videos
def parse_video(video):
title_tag = video.find_element(By.ID, 'video-title')
title = title_tag.text
url = title_tag.get_attribute('href')
thumbnail_tag = video.find_element(By.TAG_NAME, 'img')
thumbnail_url = thumbnail_tag.get_attribute('src')
channel_div = video.find_element(By.CLASS_NAME, 'ytd-channel-name')
channel_name = channel_div.text
description = video.find_element(By.ID, 'description-text').text
return {
'title':title,
'url' : url,
'thumbnail_url': thumbnail_url,
'channel': channel_name,
'description' : description
}
def send_email(body):
try:
server_ssl = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server_ssl.ehlo()
SENDER_EMAIL = '<EMAIL>'
RECEIVER_EMAIL = '<EMAIL>'
SENDER_PASSWORD = os.environ['GMAIL_PASSWORD']
subject = 'Youtube Trending Videos'
email_text = f"""
From:{SENDER_EMAIL}
To: {RECEIVER_EMAIL}
Subject: {subject}
{body}
"""
server_ssl.login(SENDER_EMAIL, SENDER_PASSWORD )
server_ssl.sendmail(SENDER_EMAIL, RECEIVER_EMAIL, email_text)
server_ssl.close()
except:
print('Something went wrong.....')
def lambda_handler(event, context):
#create the browser
driver = get_driver()
#get the videos
videos = get_videos(driver)
#parse the cideos
videos_data = [parse_video(video) for video in videos[:10]]
# send the data over email
body = json.dumps(videos_data)
send_email(body)
driver.close();
driver.quit();
response = {
"statusCode": 200,
"body": body
}
return response
| import smtplib
import os
import json
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
YOUTUBE_TRENDING_URL = 'https://www.youtube.com/feed/trending'
def get_driver():
options = Options()
options.binary_location = '/opt/headless-chromium'
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--single-process')
options.add_argument('--disable-dev-shm-usage')
driver = webdriver.Chrome('/opt/chromedriver',chrome_options=options)
return driver
def get_videos(driver):
VIDEO_DIV_TAG = 'ytd-video-renderer'
driver.get(YOUTUBE_TRENDING_URL)
videos = driver.find_elements(By.TAG_NAME, VIDEO_DIV_TAG)
return videos
def parse_video(video):
title_tag = video.find_element(By.ID, 'video-title')
title = title_tag.text
url = title_tag.get_attribute('href')
thumbnail_tag = video.find_element(By.TAG_NAME, 'img')
thumbnail_url = thumbnail_tag.get_attribute('src')
channel_div = video.find_element(By.CLASS_NAME, 'ytd-channel-name')
channel_name = channel_div.text
description = video.find_element(By.ID, 'description-text').text
return {
'title':title,
'url' : url,
'thumbnail_url': thumbnail_url,
'channel': channel_name,
'description' : description
}
def send_email(body):
    """Email *body* (the scraped videos as JSON text) via Gmail SMTP.

    The sender password is read from the GMAIL_PASSWORD environment
    variable. Failures are reported to stdout instead of being raised so
    a transient SMTP problem does not crash the Lambda handler.

    Args:
        body: message payload (JSON string of the scraped videos).
    """
    SENDER_EMAIL = '<EMAIL>'
    RECEIVER_EMAIL = '<EMAIL>'
    subject = 'Youtube Trending Videos'
    try:
        SENDER_PASSWORD = os.environ['GMAIL_PASSWORD']
        # BUG FIX: per RFC 5322 the headers must be CRLF-terminated and
        # separated from the body by one blank line. The original
        # indented triple-quoted f-string never terminated the header
        # block, so the whole message was treated as body text.
        email_text = (
            f"From: {SENDER_EMAIL}\r\n"
            f"To: {RECEIVER_EMAIL}\r\n"
            f"Subject: {subject}\r\n"
            f"\r\n"
            f"{body}"
        )
        # SMTP_SSL is a context manager: the connection is closed even
        # when login/sendmail raises (the original leaked it on error).
        with smtplib.SMTP_SSL('smtp.gmail.com', 465) as server_ssl:
            server_ssl.ehlo()
            server_ssl.login(SENDER_EMAIL, SENDER_PASSWORD)
            server_ssl.sendmail(SENDER_EMAIL, RECEIVER_EMAIL, email_text)
    except (KeyError, OSError, smtplib.SMTPException) as exc:
        # Narrowed from a bare ``except`` that hid the actual error.
        print('Something went wrong.....', exc)
def lambda_handler(event, context):
    """AWS Lambda entry point: scrape YouTube trending videos, email them.

    Args:
        event: Lambda invocation payload (unused).
        context: Lambda runtime context (unused).

    Returns:
        dict with ``statusCode`` 200 and the JSON ``body`` that was emailed.
    """
    # Create the headless browser.
    driver = get_driver()
    try:
        # Get the videos and parse the first ten of them.
        videos = get_videos(driver)
        videos_data = [parse_video(video) for video in videos[:10]]
    finally:
        # BUG FIX: always release the browser, even when scraping raises,
        # so the Lambda container does not leak chromedriver processes.
        # (quit() already closes every window; the extra close() was
        # redundant.)
        driver.quit()
    # Send the data over email.
    body = json.dumps(videos_data)
    send_email(body)
    return {
        "statusCode": 200,
        "body": body,
    }
| en | 0.310436 | From:{SENDER_EMAIL} To: {RECEIVER_EMAIL} Subject: {subject} {body} #create the browser #get the videos #parse the cideos # send the data over email | 2.765909 | 3 |
util/gather_training_data.py | wallywangka/pybot | 1 | 6623376 | <filename>util/gather_training_data.py<gh_stars>1-10
import os, sys
from util.core.client import Client
import numpy as np
import cv2
import mss
import argparse
import threading
import util.debug_ssd as db
def get_game_screen(sct4, client_box):
    """Grab the client-window region and return it without its alpha channel.

    Args:
        sct4: an ``mss`` screen-capture instance (anything with ``grab``).
        client_box: the monitor/region to capture, passed through to ``grab``.

    Returns:
        numpy array of shape (H, W, C-1); the last channel (alpha in the
        raw BGRA capture) is dropped.
    """
    raw_capture = sct4.grab(client_box)
    frame = np.array(raw_capture)
    return frame[:, :, :-1]
from pykeyboard import PyKeyboardEvent
class MonitorSuper(PyKeyboardEvent):
    """Keyboard listener that saves labelled game screenshots.

    Pressing ``w`` captures the current client window to
    ``data_path/<label>_NNNN.png`` (optionally writing an annotation XML
    via ``db.generate_xml``); pressing ``q`` exits the program.

    Relies on module-level globals assigned in ``__main__``: ``last_id``,
    ``data_path``, ``label`` and ``args``.
    """
    def __init__(self):
        PyKeyboardEvent.__init__(self)
        self.sct4 = mss.mss()    # screen grabber
        self.client = Client()   # game client window geometry
        # Class-level counter so numbering continues from the files
        # already present on disk.
        PyKeyboardEvent.i = last_id
    def tap(self, keycode, character, press):
        """Handle one key event (pykeyboard callback)."""
        if character == 'w':
            if press:
                PyKeyboardEvent.i += 1
                stem = label + "_%s" % format(PyKeyboardEvent.i, '04d')
                filename = os.path.join(data_path, stem + ".png")
                if args.generate_annotations:
                    db.generate_xml(filename=stem, img_path=filename)
                print('map_saved', str(PyKeyboardEvent.i))
                print(filename)
                cv2.imwrite(filename, get_game_screen(self.sct4, self.client.box))
        # BUG FIX: this quit check was nested inside the 'w' branch, so
        # it could never run (character cannot equal both 'w' and 'q').
        if character == 'q':
            sys.exit()
if __name__ == "__main__":
    label = 'astrals'
    data_path = os.path.join(os.path.dirname(__file__), "training_data/astrals/JPEGImages/")

    def _str2bool(value):
        """Parse a command-line boolean explicitly."""
        return str(value).lower() in ('1', 'true', 'yes', 'y')

    parser = argparse.ArgumentParser()
    parser.add_argument('--label', type=str, default=label)
    parser.add_argument('--dir_path', type=str, default=data_path)
    # BUG FIX: argparse ``type=bool`` converts every non-empty string to
    # True, so '--debug_ssd False' still enabled debugging. Use an
    # explicit string-to-bool parser instead.
    parser.add_argument('--debug_ssd', type=_str2bool, default=True)
    parser.add_argument('--generate_annotations', type=_str2bool, default=True)
    args = parser.parse_args()
    label = args.label
    data_path = args.dir_path

    # Continue numbering from the highest existing file id for this label.
    existing_ids = [
        int(f.split('_')[-1].split('.')[0])
        for f in os.listdir(data_path)
        if label == f.split('_')[0]
    ]
    last_id = max(existing_ids) if existing_ids else 0

    mon = MonitorSuper()
    if args.debug_ssd:
        # Run the keyboard listener and the SSD debug view concurrently.
        t = threading.Thread(target=mon.run)
        m = threading.Thread(target=db.debug_thread)
        t.start()
        m.start()
    else:
        mon.run()
| <filename>util/gather_training_data.py<gh_stars>1-10
import os, sys
from util.core.client import Client
import numpy as np
import cv2
import mss
import argparse
import threading
import util.debug_ssd as db
def get_game_screen(sct4, client_box):
    """Grab the client-window region and return it without its alpha channel.

    Args:
        sct4: an ``mss`` screen-capture instance (anything with ``grab``).
        client_box: the monitor/region to capture, passed through to ``grab``.

    Returns:
        numpy array of shape (H, W, C-1); the last channel (alpha in the
        raw BGRA capture) is dropped.
    """
    raw_capture = sct4.grab(client_box)
    frame = np.array(raw_capture)
    return frame[:, :, :-1]
from pykeyboard import PyKeyboardEvent
class MonitorSuper(PyKeyboardEvent):
    """Keyboard listener that saves labelled game screenshots.

    Pressing ``w`` captures the current client window to
    ``data_path/<label>_NNNN.png`` (optionally writing an annotation XML
    via ``db.generate_xml``); pressing ``q`` exits the program.

    Relies on module-level globals assigned in ``__main__``: ``last_id``,
    ``data_path``, ``label`` and ``args``.
    """
    def __init__(self):
        PyKeyboardEvent.__init__(self)
        self.sct4 = mss.mss()    # screen grabber
        self.client = Client()   # game client window geometry
        # Class-level counter so numbering continues from the files
        # already present on disk.
        PyKeyboardEvent.i = last_id
    def tap(self, keycode, character, press):
        """Handle one key event (pykeyboard callback)."""
        if character == 'w':
            if press:
                PyKeyboardEvent.i += 1
                stem = label + "_%s" % format(PyKeyboardEvent.i, '04d')
                filename = os.path.join(data_path, stem + ".png")
                if args.generate_annotations:
                    db.generate_xml(filename=stem, img_path=filename)
                print('map_saved', str(PyKeyboardEvent.i))
                print(filename)
                cv2.imwrite(filename, get_game_screen(self.sct4, self.client.box))
        # BUG FIX: this quit check was nested inside the 'w' branch, so
        # it could never run (character cannot equal both 'w' and 'q').
        if character == 'q':
            sys.exit()
if __name__ == "__main__":
    label = 'astrals'
    data_path = os.path.join(os.path.dirname(__file__), "training_data/astrals/JPEGImages/")

    def _str2bool(value):
        """Parse a command-line boolean explicitly."""
        return str(value).lower() in ('1', 'true', 'yes', 'y')

    parser = argparse.ArgumentParser()
    parser.add_argument('--label', type=str, default=label)
    parser.add_argument('--dir_path', type=str, default=data_path)
    # BUG FIX: argparse ``type=bool`` converts every non-empty string to
    # True, so '--debug_ssd False' still enabled debugging. Use an
    # explicit string-to-bool parser instead.
    parser.add_argument('--debug_ssd', type=_str2bool, default=True)
    parser.add_argument('--generate_annotations', type=_str2bool, default=True)
    args = parser.parse_args()
    label = args.label
    data_path = args.dir_path

    # Continue numbering from the highest existing file id for this label.
    existing_ids = [
        int(f.split('_')[-1].split('.')[0])
        for f in os.listdir(data_path)
        if label == f.split('_')[0]
    ]
    last_id = max(existing_ids) if existing_ids else 0

    mon = MonitorSuper()
    if args.debug_ssd:
        # Run the keyboard listener and the SSD debug view concurrently.
        t = threading.Thread(target=mon.run)
        m = threading.Thread(target=db.debug_thread)
        t.start()
        m.start()
    else:
        mon.run()
| none | 1 | 2.371397 | 2 | |
deeppixel/cam/basecam.py | r0cketr1kky/DeepPixel | 0 | 6623377 | import os
import copy
import numpy as np
from PIL import Image, ImageFilter
import matplotlib.cm as mpl_color_map
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
import torchvision.models
def find_resnet_layer(arch, target_layer_name):
    """Resolve a ResNet (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision ResNet model.
        target_layer_name (str): hierarchical name such as 'conv1',
            'layer1', 'layer1_basicblock0_relu',
            'layer1_bottleneck0_downsample_0', 'avgpool' or 'fc'.
            ``None`` defaults to 'layer4'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.

    Raises:
        ValueError: if the stage number is not 1-4.
    """
    if target_layer_name is None:
        target_layer_name = 'layer4'
    if 'layer' not in target_layer_name:
        # Flat names ('conv1', 'avgpool', 'fc', ...) are direct children.
        return arch._modules[target_layer_name]
    parts = target_layer_name.split('_')
    stage = int(parts[0].lstrip('layer'))
    if stage not in (1, 2, 3, 4):
        raise ValueError('unknown layer : {}'.format(target_layer_name))
    target_layer = getattr(arch, 'layer{}'.format(stage))
    if len(parts) >= 2:
        # 'bottleneck3' / 'basicblock3' -> block index 3 within the stage.
        block_index = int(parts[1].lower().lstrip('bottleneck').lstrip('basicblock'))
        target_layer = target_layer[block_index]
    if len(parts) >= 3:
        target_layer = target_layer._modules[parts[2]]
    if len(parts) == 4:
        target_layer = target_layer._modules[parts[3]]
    return target_layer
def find_densenet_layer(arch, target_layer_name):
    """Resolve a DenseNet (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision DenseNet model.
        target_layer_name (str): underscore-separated hierarchical name,
            e.g. 'features', 'features_transition1_norm',
            'features_denseblock2_denselayer12_norm1', 'classifier'.
            ``None`` defaults to 'features'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features'
    # Walk the module tree one underscore-separated component at a time,
    # up to four levels deep.
    parts = target_layer_name.split('_')
    target_layer = arch._modules[parts[0]]
    for part in parts[1:3]:
        target_layer = target_layer._modules[part]
    if len(parts) == 4:
        target_layer = target_layer._modules[parts[3]]
    return target_layer
def find_vgg_layer(arch, target_layer_name):
    """Resolve a VGG feature (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision VGG model.
        target_layer_name (str): 'features' or 'features_<idx>' (e.g.
            'features_42'). ``None`` defaults to 'features'. Only the
            ``features`` stack is ever resolved here; the component after
            the underscore indexes into it.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features'
    parts = target_layer_name.split('_')
    # The whole feature extractor is the default target ...
    target_layer = arch.features
    # ... unless an explicit index was given ('features_42' -> features[42]).
    if len(parts) == 2:
        target_layer = target_layer[int(parts[1])]
    return target_layer
def find_alexnet_layer(arch, target_layer_name):
    """Resolve an AlexNet feature (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision AlexNet model.
        target_layer_name (str): 'features' or 'features_<idx>' (e.g.
            'features_0'). ``None`` defaults to 'features_29'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features_29'
    parts = target_layer_name.split('_')
    # The whole feature extractor is the default target ...
    target_layer = arch.features
    # ... unless an explicit index was given ('features_0' -> features[0]).
    if len(parts) == 2:
        target_layer = target_layer[int(parts[1])]
    return target_layer
def find_squeezenet_layer(arch, target_layer_name):
    """Resolve a SqueezeNet (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision SqueezeNet model.
        target_layer_name (str): underscore-separated name, e.g.
            'features_12', 'features_12_expand3x3',
            'features_12_expand3x3_activation'. ``None`` defaults to
            'features'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features'
    parts = target_layer_name.split('_')
    layer = arch._modules[parts[0]]
    if len(parts) >= 2:
        layer = layer._modules[parts[1]]
    if len(parts) in (3, 4):
        # The third and fourth components name a single child whose key
        # may itself contain an underscore (e.g. 'expand3x3_activation').
        layer = layer._modules['_'.join(parts[2:4])]
    return layer
def find_googlenet_layer(arch, target_layer_name):
    """Resolve a GoogLeNet (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision GoogLeNet model.
        target_layer_name (str): underscore-separated name, e.g.
            'inception5b'. ``None`` defaults to 'features'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features'
    parts = target_layer_name.split('_')
    layer = arch._modules[parts[0]]
    if len(parts) >= 2:
        layer = layer._modules[parts[1]]
    if len(parts) in (3, 4):
        # The third and fourth components name a single child whose key
        # may itself contain an underscore.
        layer = layer._modules['_'.join(parts[2:4])]
    return layer
def find_mobilenet_layer(arch, target_layer_name):
    """Resolve a MobileNet (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision MobileNet model.
        target_layer_name (str): underscore-separated name, e.g.
            'features'. ``None`` defaults to 'features'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features'
    parts = target_layer_name.split('_')
    layer = arch._modules[parts[0]]
    if len(parts) >= 2:
        layer = layer._modules[parts[1]]
    if len(parts) in (3, 4):
        # The third and fourth components name a single child whose key
        # may itself contain an underscore.
        layer = layer._modules['_'.join(parts[2:4])]
    return layer
def find_shufflenet_layer(arch, target_layer_name):
    """Resolve a ShuffleNet (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision ShuffleNet model.
        target_layer_name (str): underscore-separated name, e.g.
            'conv5'. ``None`` defaults to 'features'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features'
    parts = target_layer_name.split('_')
    layer = arch._modules[parts[0]]
    if len(parts) >= 2:
        layer = layer._modules[parts[1]]
    if len(parts) in (3, 4):
        # The third and fourth components name a single child whose key
        # may itself contain an underscore.
        layer = layer._modules['_'.join(parts[2:4])]
    return layer
def find_layer(arch, target_layer_name):
    """Locate a top-level layer of a self-defined architecture for CAM.

    : Args:
        - **arch - **: Self-defined architecture whose children live in
          ``arch._modules``.
        - **target_layer_name - ** (str): Name of a direct child module.

    : Return:
        - **target_layer - **: Found layer. This layer will be hooked to
          get forward/backward pass information.

    Raises:
        Exception: if ``target_layer_name`` is not a child of ``arch``.
    """
    # BUG FIX: the original tested ``target_layer_name.split('_')`` (a
    # list) for membership in the dict keys, which can never be true, so
    # every call raised "Invalid target layer name.". Test the name itself.
    if target_layer_name not in arch._modules:
        raise Exception("Invalid target layer name.")
    target_layer = arch._modules[target_layer_name]
    return target_layer
'''
Part of code borrows from https://github.com/1Konny/gradcam_plus_plus-pytorch
'''
import torch
class BaseCAM(object):
    """ Base class for Class activation mapping.

    Registers forward/backward hooks on the target layer so subclasses
    can read its activations and gradients when computing a saliency map.

    : Args
        - **model_dict -** : Dict. Has format as dict(type='vgg', arch=torchvision.models.vgg16(pretrained=True),
        layer_name='features',input_size=(224, 224)).
    """
    def __init__(self, model_dict):
        model_type = model_dict['type']
        layer_name = model_dict['layer_name']
        self.model_arch = model_dict['arch']
        # Inference only: CAM never updates the model weights.
        self.model_arch.eval()
        #if torch.cuda.is_available():
        #    self.model_arch.cuda()
        # Hook outputs are stashed here under the key 'value'.
        self.gradients = dict()
        self.activations = dict()
        def backward_hook(module, grad_input, grad_output):
            # Capture the gradient flowing out of the target layer.
            #if torch.cuda.is_available():
            #    self.gradients['value'] = grad_output[0].cuda()
            self.gradients['value'] = grad_output[0]
            return None
        def forward_hook(module, input, output):
            # Capture the target layer's forward activations.
            #if torch.cuda.is_available():
            #    self.activations['value'] = output.cuda()
            self.activations['value'] = output
            return None
        # Dispatch on the architecture family to resolve the layer name.
        if 'vgg' in model_type.lower():
            self.target_layer = find_vgg_layer(self.model_arch, layer_name)
        elif 'resnet' in model_type.lower():
            self.target_layer = find_resnet_layer(self.model_arch, layer_name)
        elif 'densenet' in model_type.lower():
            self.target_layer = find_densenet_layer(self.model_arch, layer_name)
        elif 'alexnet' in model_type.lower():
            self.target_layer = find_alexnet_layer(self.model_arch, layer_name)
        elif 'squeezenet' in model_type.lower():
            self.target_layer = find_squeezenet_layer(self.model_arch, layer_name)
        elif 'googlenet' in model_type.lower():
            self.target_layer = find_googlenet_layer(self.model_arch, layer_name)
        elif 'shufflenet' in model_type.lower():
            self.target_layer = find_shufflenet_layer(self.model_arch, layer_name)
        elif 'mobilenet' in model_type.lower():
            self.target_layer = find_mobilenet_layer(self.model_arch, layer_name)
        else:
            self.target_layer = find_layer(self.model_arch, layer_name)
        # NOTE(review): register_backward_hook is deprecated in recent
        # PyTorch in favour of register_full_backward_hook -- confirm the
        # pinned torch version before changing.
        self.target_layer.register_forward_hook(forward_hook)
        self.target_layer.register_backward_hook(backward_hook)
    def forward(self, input, class_idx=None, retain_graph=False):
        # Subclasses implement the actual CAM computation.
        return None
    def __call__(self, input, class_idx=None, retain_graph=False):
        return self.forward(input, class_idx, retain_graph)
| import os
import copy
import numpy as np
from PIL import Image, ImageFilter
import matplotlib.cm as mpl_color_map
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
import torchvision.models
def find_resnet_layer(arch, target_layer_name):
    """Resolve a ResNet (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision ResNet model.
        target_layer_name (str): hierarchical name such as 'conv1',
            'layer1', 'layer1_basicblock0_relu',
            'layer1_bottleneck0_downsample_0', 'avgpool' or 'fc'.
            ``None`` defaults to 'layer4'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.

    Raises:
        ValueError: if the stage number is not 1-4.
    """
    if target_layer_name is None:
        target_layer_name = 'layer4'
    if 'layer' not in target_layer_name:
        # Flat names ('conv1', 'avgpool', 'fc', ...) are direct children.
        return arch._modules[target_layer_name]
    parts = target_layer_name.split('_')
    stage = int(parts[0].lstrip('layer'))
    if stage not in (1, 2, 3, 4):
        raise ValueError('unknown layer : {}'.format(target_layer_name))
    target_layer = getattr(arch, 'layer{}'.format(stage))
    if len(parts) >= 2:
        # 'bottleneck3' / 'basicblock3' -> block index 3 within the stage.
        block_index = int(parts[1].lower().lstrip('bottleneck').lstrip('basicblock'))
        target_layer = target_layer[block_index]
    if len(parts) >= 3:
        target_layer = target_layer._modules[parts[2]]
    if len(parts) == 4:
        target_layer = target_layer._modules[parts[3]]
    return target_layer
def find_densenet_layer(arch, target_layer_name):
    """Resolve a DenseNet (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision DenseNet model.
        target_layer_name (str): underscore-separated hierarchical name,
            e.g. 'features', 'features_transition1_norm',
            'features_denseblock2_denselayer12_norm1', 'classifier'.
            ``None`` defaults to 'features'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features'
    # Walk the module tree one underscore-separated component at a time,
    # up to four levels deep.
    parts = target_layer_name.split('_')
    target_layer = arch._modules[parts[0]]
    for part in parts[1:3]:
        target_layer = target_layer._modules[part]
    if len(parts) == 4:
        target_layer = target_layer._modules[parts[3]]
    return target_layer
def find_vgg_layer(arch, target_layer_name):
    """Resolve a VGG feature (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision VGG model.
        target_layer_name (str): 'features' or 'features_<idx>' (e.g.
            'features_42'). ``None`` defaults to 'features'. Only the
            ``features`` stack is ever resolved here; the component after
            the underscore indexes into it.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features'
    parts = target_layer_name.split('_')
    # The whole feature extractor is the default target ...
    target_layer = arch.features
    # ... unless an explicit index was given ('features_42' -> features[42]).
    if len(parts) == 2:
        target_layer = target_layer[int(parts[1])]
    return target_layer
def find_alexnet_layer(arch, target_layer_name):
    """Resolve an AlexNet feature (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision AlexNet model.
        target_layer_name (str): 'features' or 'features_<idx>' (e.g.
            'features_0'). ``None`` defaults to 'features_29'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features_29'
    parts = target_layer_name.split('_')
    # The whole feature extractor is the default target ...
    target_layer = arch.features
    # ... unless an explicit index was given ('features_0' -> features[0]).
    if len(parts) == 2:
        target_layer = target_layer[int(parts[1])]
    return target_layer
def find_squeezenet_layer(arch, target_layer_name):
    """Resolve a SqueezeNet (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision SqueezeNet model.
        target_layer_name (str): underscore-separated name, e.g.
            'features_12', 'features_12_expand3x3',
            'features_12_expand3x3_activation'. ``None`` defaults to
            'features'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features'
    parts = target_layer_name.split('_')
    layer = arch._modules[parts[0]]
    if len(parts) >= 2:
        layer = layer._modules[parts[1]]
    if len(parts) in (3, 4):
        # The third and fourth components name a single child whose key
        # may itself contain an underscore (e.g. 'expand3x3_activation').
        layer = layer._modules['_'.join(parts[2:4])]
    return layer
def find_googlenet_layer(arch, target_layer_name):
    """Resolve a GoogLeNet (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision GoogLeNet model.
        target_layer_name (str): underscore-separated name, e.g.
            'inception5b'. ``None`` defaults to 'features'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features'
    parts = target_layer_name.split('_')
    layer = arch._modules[parts[0]]
    if len(parts) >= 2:
        layer = layer._modules[parts[1]]
    if len(parts) in (3, 4):
        # The third and fourth components name a single child whose key
        # may itself contain an underscore.
        layer = layer._modules['_'.join(parts[2:4])]
    return layer
def find_mobilenet_layer(arch, target_layer_name):
    """Resolve a MobileNet (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision MobileNet model.
        target_layer_name (str): underscore-separated name, e.g.
            'features'. ``None`` defaults to 'features'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features'
    parts = target_layer_name.split('_')
    layer = arch._modules[parts[0]]
    if len(parts) >= 2:
        layer = layer._modules[parts[1]]
    if len(parts) in (3, 4):
        # The third and fourth components name a single child whose key
        # may itself contain an underscore.
        layer = layer._modules['_'.join(parts[2:4])]
    return layer
def find_shufflenet_layer(arch, target_layer_name):
    """Resolve a ShuffleNet (sub)module for GradCAM / GradCAM++ hooking.

    Args:
        arch: a torchvision ShuffleNet model.
        target_layer_name (str): underscore-separated name, e.g.
            'conv5'. ``None`` defaults to 'features'.

    Returns:
        The resolved layer; the caller attaches forward/backward hooks to it.
    """
    if target_layer_name is None:
        target_layer_name = 'features'
    parts = target_layer_name.split('_')
    layer = arch._modules[parts[0]]
    if len(parts) >= 2:
        layer = layer._modules[parts[1]]
    if len(parts) in (3, 4):
        # The third and fourth components name a single child whose key
        # may itself contain an underscore.
        layer = layer._modules['_'.join(parts[2:4])]
    return layer
def find_layer(arch, target_layer_name):
    """Locate a top-level layer of a self-defined architecture for CAM.

    : Args:
        - **arch - **: Self-defined architecture whose children live in
          ``arch._modules``.
        - **target_layer_name - ** (str): Name of a direct child module.

    : Return:
        - **target_layer - **: Found layer. This layer will be hooked to
          get forward/backward pass information.

    Raises:
        Exception: if ``target_layer_name`` is not a child of ``arch``.
    """
    # BUG FIX: the original tested ``target_layer_name.split('_')`` (a
    # list) for membership in the dict keys, which can never be true, so
    # every call raised "Invalid target layer name.". Test the name itself.
    if target_layer_name not in arch._modules:
        raise Exception("Invalid target layer name.")
    target_layer = arch._modules[target_layer_name]
    return target_layer
'''
Part of code borrows from https://github.com/1Konny/gradcam_plus_plus-pytorch
'''
import torch
class BaseCAM(object):
    """ Base class for Class activation mapping.

    Registers forward/backward hooks on the target layer so subclasses
    can read its activations and gradients when computing a saliency map.

    : Args
        - **model_dict -** : Dict. Has format as dict(type='vgg', arch=torchvision.models.vgg16(pretrained=True),
        layer_name='features',input_size=(224, 224)).
    """
    def __init__(self, model_dict):
        model_type = model_dict['type']
        layer_name = model_dict['layer_name']
        self.model_arch = model_dict['arch']
        # Inference only: CAM never updates the model weights.
        self.model_arch.eval()
        #if torch.cuda.is_available():
        #    self.model_arch.cuda()
        # Hook outputs are stashed here under the key 'value'.
        self.gradients = dict()
        self.activations = dict()
        def backward_hook(module, grad_input, grad_output):
            # Capture the gradient flowing out of the target layer.
            #if torch.cuda.is_available():
            #    self.gradients['value'] = grad_output[0].cuda()
            self.gradients['value'] = grad_output[0]
            return None
        def forward_hook(module, input, output):
            # Capture the target layer's forward activations.
            #if torch.cuda.is_available():
            #    self.activations['value'] = output.cuda()
            self.activations['value'] = output
            return None
        # Dispatch on the architecture family to resolve the layer name.
        if 'vgg' in model_type.lower():
            self.target_layer = find_vgg_layer(self.model_arch, layer_name)
        elif 'resnet' in model_type.lower():
            self.target_layer = find_resnet_layer(self.model_arch, layer_name)
        elif 'densenet' in model_type.lower():
            self.target_layer = find_densenet_layer(self.model_arch, layer_name)
        elif 'alexnet' in model_type.lower():
            self.target_layer = find_alexnet_layer(self.model_arch, layer_name)
        elif 'squeezenet' in model_type.lower():
            self.target_layer = find_squeezenet_layer(self.model_arch, layer_name)
        elif 'googlenet' in model_type.lower():
            self.target_layer = find_googlenet_layer(self.model_arch, layer_name)
        elif 'shufflenet' in model_type.lower():
            self.target_layer = find_shufflenet_layer(self.model_arch, layer_name)
        elif 'mobilenet' in model_type.lower():
            self.target_layer = find_mobilenet_layer(self.model_arch, layer_name)
        else:
            self.target_layer = find_layer(self.model_arch, layer_name)
        # NOTE(review): register_backward_hook is deprecated in recent
        # PyTorch in favour of register_full_backward_hook -- confirm the
        # pinned torch version before changing.
        self.target_layer.register_forward_hook(forward_hook)
        self.target_layer.register_backward_hook(backward_hook)
    def forward(self, input, class_idx=None, retain_graph=False):
        # Subclasses implement the actual CAM computation.
        return None
    def __call__(self, input, class_idx=None, retain_graph=False):
        return self.forward(input, class_idx, retain_graph)
| en | 0.663923 | Find resnet layer to calculate GradCAM and GradCAM++ Args: arch: default torchvision densenet models target_layer_name (str): the name of layer with its hierarchical information. please refer to usages below. target_layer_name = 'conv1' target_layer_name = 'layer1' target_layer_name = 'layer1_basicblock0' target_layer_name = 'layer1_basicblock0_relu' target_layer_name = 'layer1_bottleneck0' target_layer_name = 'layer1_bottleneck0_conv1' target_layer_name = 'layer1_bottleneck0_downsample' target_layer_name = 'layer1_bottleneck0_downsample_0' target_layer_name = 'avgpool' target_layer_name = 'fc' Return: target_layer: found layer. this layer will be hooked to get forward/backward pass information. Find densenet layer to calculate GradCAM and GradCAM++ Args: arch: default torchvision densenet models target_layer_name (str): the name of layer with its hierarchical information. please refer to usages below. target_layer_name = 'features' target_layer_name = 'features_transition1' target_layer_name = 'features_transition1_norm' target_layer_name = 'features_denseblock2_denselayer12' target_layer_name = 'features_denseblock2_denselayer12_norm1' target_layer_name = 'features_denseblock2_denselayer12_norm1' target_layer_name = 'classifier' Return: target_layer: found layer. this layer will be hooked to get forward/backward pass information. Find vgg layer to calculate GradCAM and GradCAM++ Args: arch: default torchvision densenet models target_layer_name (str): the name of layer with its hierarchical information. please refer to usages below. target_layer_name = 'features' target_layer_name = 'features_42' target_layer_name = 'classifier' target_layer_name = 'classifier_0' Return: target_layer: found layer. this layer will be hooked to get forward/backward pass information. 
Find alexnet layer to calculate GradCAM and GradCAM++ saliency_map2 = torch.unsqueeze(activations[:, i, :, :], 1) Args: arch: default torchvision densenet models target_layer_name (str): the name of layer with its hierarchical information. please refer to usages below. target_layer_name = 'features' target_layer_name = 'features_0' target_layer_name = 'classifier' target_layer_name = 'classifier_0' Return: target_layer: found layer. this layer will be hooked to get forward/backward pass information. Find squeezenet layer to calculate GradCAM and GradCAM++ Args: - **arch - **: default torchvision densenet models - **target_layer_name (str) - **: the name of layer with its hierarchical information. please refer to usages below. target_layer_name = 'features_12' target_layer_name = 'features_12_expand3x3' target_layer_name = 'features_12_expand3x3_activation' Return: target_layer: found layer. this layer will be hooked to get forward/backward pass information. Find squeezenet layer to calculate GradCAM and GradCAM++ Args: - **arch - **: default torchvision googlenet models - **target_layer_name (str) - **: the name of layer with its hierarchical information. please refer to usages below. target_layer_name = 'inception5b' Return: target_layer: found layer. this layer will be hooked to get forward/backward pass information. Find mobilenet layer to calculate GradCAM and GradCAM++ Args: - **arch - **: default torchvision googlenet models - **target_layer_name (str) - **: the name of layer with its hierarchical information. please refer to usages below. target_layer_name = 'features' Return: target_layer: found layer. this layer will be hooked to get forward/backward pass information. Find mobilenet layer to calculate GradCAM and GradCAM++ Args: - **arch - **: default torchvision googlenet models - **target_layer_name (str) - **: the name of layer with its hierarchical information. please refer to usages below. target_layer_name = 'conv5' Return: target_layer: found layer. 
this layer will be hooked to get forward/backward pass information. Find target layer to calculate CAM. : Args: - **arch - **: Self-defined architecture. - **target_layer_name - ** (str): Name of target class. : Return: - **target_layer - **: Found layer. This layer will be hooked to get forward/backward pass information. Part of code borrows from https://github.com/1Konny/gradcam_plus_plus-pytorch Base class for Class activation mapping. : Args - **model_dict -** : Dict. Has format as dict(type='vgg', arch=torchvision.models.vgg16(pretrained=True), layer_name='features',input_size=(224, 224)). #if torch.cuda.is_available(): # self.model_arch.cuda() #if torch.cuda.is_available(): # self.gradients['value'] = grad_output[0].cuda() #if torch.cuda.is_available(): # self.activations['value'] = output.cuda() | 2.416429 | 2 |
auto_repair_saas/test_settings.py | wangonya/auto-repair-saas | 6 | 6623378 | <filename>auto_repair_saas/test_settings.py
from .settings import *
SENDGRID_SANDBOX_MODE_IN_DEBUG = True
MIDDLEWARE.remove(
'auto_repair_saas.apps.utils.middleware.CurrentUserMiddleware'
)
if os.environ.get('GITHUB_WORKFLOW'):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'github_actions',
'USER': 'postgres',
'PASSWORD': '<PASSWORD>',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
| <filename>auto_repair_saas/test_settings.py
from .settings import *
SENDGRID_SANDBOX_MODE_IN_DEBUG = True
MIDDLEWARE.remove(
'auto_repair_saas.apps.utils.middleware.CurrentUserMiddleware'
)
if os.environ.get('GITHUB_WORKFLOW'):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'github_actions',
'USER': 'postgres',
'PASSWORD': '<PASSWORD>',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
| none | 1 | 1.334785 | 1 | |
tssv/cli.py | Redmar-van-den-Berg/tssv | 1 | 6623379 | from argparse import ArgumentParser, FileType, RawDescriptionHelpFormatter
from sys import stdout
from xopen import xopen
from . import usage, version
from .tssv import tssv
def main():
    """Command-line entry point: parse arguments and run tssv().

    Peeks at the first line of INPUT to decide between FASTA ('>' header)
    and FASTQ, then reopens the file and forwards all parsed options to
    tssv() as keyword arguments.
    """
    parser = ArgumentParser(
        description=usage[0], epilog=usage[1],
        formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument(
        'input_handle', metavar='INPUT',
        help='a FASTA/FASTQ file')
    parser.add_argument(
        'library_handle', metavar='LIBRARY', type=FileType('r'),
        help='library of flanking sequences')
    parser.add_argument(
        '-m', dest='threshold', type=float, default=0.08,
        help='mismatches per nucleotide (default=%(default)s)')
    parser.add_argument(
        '-M', dest='mismatches', type=int,
        help='fixed number of mismatches, overrides -m (default=%(default)s)')
    parser.add_argument(
        '-n', dest='indel_score', type=int, default=1,
        help='insertions and deletions are penalised this number of times '
             'more heavily than mismatches (default=%(default)s)')
    parser.add_argument(
        '-r', dest='report_handle', type=FileType('w'), default=stdout,
        help='name of the report file')
    parser.add_argument(
        '-j', dest='json_report', action='store_true', default=False,
        help='use json format for the output file')
    parser.add_argument('-d', dest='path', type=str, help='output directory')
    parser.add_argument(
        '-a', dest='minimum', type=int, default=0,
        help='minimum count per allele (default=%(default)s)')
    parser.add_argument(
        '-s', dest='method_sse', action='store_true',
        help='if specified, use SSE2 alignment implementation')
    parser.add_argument('-v', action='version', version=version(parser.prog))
    args = parser.parse_args()

    # Have a little look in the input file to determine the file format.
    # xopen transparently handles compressed input as well.
    with xopen(args.input_handle, 'r') as fin:
        if next(fin).startswith('>'):
            args.file_format = 'fasta'
        else:
            args.file_format = 'fastq'

    # Now that we know the file format, we can open the file again and
    # have access to the full file content from the start.
    args.input_handle = xopen(args.input_handle)

    try:
        # Forward every parsed option; 'func'/'subcommand' are filtered in
        # case a subcommand-style parser ever adds them.
        tssv(**{k: v for k, v in vars(args).items()
                if k not in ('func', 'subcommand')})
    except OSError as error:
        parser.error(error)


if __name__ == '__main__':
    main()
| from argparse import ArgumentParser, FileType, RawDescriptionHelpFormatter
from sys import stdout
from xopen import xopen
from . import usage, version
from .tssv import tssv
def main():
"""Main entry point."""
parser = ArgumentParser(
description=usage[0], epilog=usage[1],
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument(
'input_handle', metavar='INPUT',
help='a FASTA/FASTQ file')
parser.add_argument(
'library_handle', metavar='LIBRARY', type=FileType('r'),
help='library of flanking sequences')
parser.add_argument(
'-m', dest='threshold', type=float, default=0.08,
help='mismatches per nucleotide (default=%(default)s)')
parser.add_argument(
'-M', dest='mismatches', type=int,
help='fixed number of mismatches, overrides -m (default=%(default)s)')
parser.add_argument(
'-n', dest='indel_score', type=int, default=1,
help='insertions and deletions are penalised this number of times '
'more heavily than mismatches (default=%(default)s)')
parser.add_argument(
'-r', dest='report_handle', type=FileType('w'), default=stdout,
help='name of the report file')
parser.add_argument(
'-j', dest='json_report', action='store_true', default=False,
help='use json format for the output file')
parser.add_argument('-d', dest='path', type=str, help='output directory')
parser.add_argument(
'-a', dest='minimum', type=int, default=0,
help='minimum count per allele (default=%(default)s)')
parser.add_argument(
'-s', dest='method_sse', action='store_true',
help='if specified, use SSE2 alignment implementation')
parser.add_argument('-v', action='version', version=version(parser.prog))
args = parser.parse_args()
# Have a little look in the input file to determine the file format.
with xopen(args.input_handle, 'r') as fin:
if next(fin).startswith('>'):
args.file_format = 'fasta'
else:
args.file_format = 'fastq'
# Now that we we know the file format, we can open the file again and
# have access to the full file content.
args.input_handle = xopen(args.input_handle)
try:
tssv(**{k: v for k, v in vars(args).items()
if k not in ('func', 'subcommand')})
except OSError as error:
parser.error(error)
if __name__ == '__main__':
main()
| en | 0.917572 | Main entry point. # Have a little look in the input file to determine the file format. # Now that we we know the file format, we can open the file again and # have access to the full file content. | 2.686529 | 3 |
python/rpdb_client.py | AlexandreZani/vim_rpdb | 3 | 6623380 | <filename>python/rpdb_client.py
import json_socket
import socket
import vim
class RpdbClient(object):
    """Client for a remote rpdb debugger: talks JSON over a TCP socket and
    mirrors the debugger's current frame in the vim window."""

    def __init__(self):
        self.socket_family = socket.AF_INET
        self.socket_addr = ('localhost', 59000)
        self.conn = None    # raw socket; None while disconnected
        self.jsock = None   # json_socket.JsonSocket wrapping self.conn
        self.cur_file = None
        self.cur_line = None

    def connect(self):
        """Open the connection and process the server's first message."""
        self.conn = socket.create_connection(self.socket_addr)
        self.jsock = json_socket.JsonSocket(self.conn)
        self.process_msg()

    def cleanup(self):
        """Close the connection if one is open; safe to call repeatedly."""
        # PEP 8: compare against None with `is not`, not `!=`.
        if self.conn is not None:
            self.conn.close()
            # Drop the references so a second cleanup() is a no-op and a
            # stale, closed socket cannot be used by accident.
            self.conn = None
            self.jsock = None

    def go_to_cur_frame(self):
        """Jump the editor to the debugger's current file/line, if known."""
        if self.cur_file is None or self.cur_line is None:
            return
        go_to_loc(self.cur_file, self.cur_line)

    def set_cur_frame(self, fn, line_no):
        """Remember the debugger's current position and jump to it."""
        self.cur_file = fn
        self.cur_line = line_no
        self.go_to_cur_frame()

    def send_msg(self):
        # NOTE(review): forwards no payload, while every other caller passes
        # a message dict to JsonSocket.send_msg -- this looks like dead or
        # broken code. Kept unchanged for interface compatibility.
        self.jsock.send_msg()

    def do_next(self):
        """Send a 'next' (step over) command and handle the reply."""
        self.jsock.send_msg({
            'command': 'next'})
        self.process_msg()

    def do_continue(self):
        """Send a 'continue' command and handle the reply."""
        self.jsock.send_msg({
            'command': 'continue'})
        self.process_msg()

    def do_step(self):
        """Send a 'step' (step into) command and handle the reply."""
        self.jsock.send_msg({
            'command': 'step'})
        self.process_msg()

    def process_msg(self):
        """Read one server message; a closed socket ends the session."""
        msg = self.jsock.recv_msg()
        if msg is None:
            end_debug_session()
            return
        if msg['type'] == 'current_frame':
            self.set_cur_frame(msg['file'], msg['line_no'])
# Module-level singleton: the one live debugger connection (or None).
RPDB_CLIENT = None


def start_debug_session():
    """Tear down any existing session, then connect a fresh client."""
    global RPDB_CLIENT
    if RPDB_CLIENT is not None:
        RPDB_CLIENT.cleanup()
        RPDB_CLIENT = None
    RPDB_CLIENT = RpdbClient()
    RPDB_CLIENT.connect()


def end_debug_session():
    """Close the current session, if any, and tell the user."""
    global RPDB_CLIENT
    if RPDB_CLIENT is not None:
        RPDB_CLIENT.cleanup()
        RPDB_CLIENT = None
    # Python 2 print statement -- left untouched.
    print "Debug session ended."


# Thin module-level wrappers so the vim plugin can invoke plain functions.
def do_next():
    RPDB_CLIENT.do_next()


def do_step():
    RPDB_CLIENT.do_step()


def do_continue():
    RPDB_CLIENT.do_continue()


def go_to_cur_frame():
    RPDB_CLIENT.go_to_cur_frame()


def go_to_loc(fn, line_no):
    # ':e <file>' opens the file, then ':<line>' moves the cursor there.
    vim.command('e ' + fn)
    vim.command(str(line_no))
| <filename>python/rpdb_client.py
import json_socket
import socket
import vim
class RpdbClient(object):
def __init__(self):
self.socket_family = socket.AF_INET
self.socket_addr = ('localhost', 59000)
self.conn = None
self.jsock = None
self.cur_file = None
self.cur_line = None
def connect(self):
self.conn = socket.create_connection(self.socket_addr)
self.jsock = json_socket.JsonSocket(self.conn)
self.process_msg()
def cleanup(self):
if self.conn != None:
self.conn.close()
def go_to_cur_frame(self):
if self.cur_file is None or self.cur_line is None:
return
go_to_loc(self.cur_file, self.cur_line)
def set_cur_frame(self, fn, line_no):
self.cur_file = fn
self.cur_line = line_no
self.go_to_cur_frame()
def send_msg(self):
self.jsock.send_msg()
def do_next(self):
self.jsock.send_msg({
'command': 'next'})
self.process_msg()
def do_continue(self):
self.jsock.send_msg({
'command': 'continue'})
self.process_msg()
def do_step(self):
self.jsock.send_msg({
'command': 'step'})
self.process_msg()
def process_msg(self):
msg = self.jsock.recv_msg()
if msg is None:
end_debug_session()
return
if msg['type'] == 'current_frame':
self.set_cur_frame(msg['file'], msg['line_no'])
RPDB_CLIENT = None
def start_debug_session():
global RPDB_CLIENT
if RPDB_CLIENT is not None:
RPDB_CLIENT.cleanup()
RPDB_CLIENT = None
RPDB_CLIENT = RpdbClient()
RPDB_CLIENT.connect()
def end_debug_session():
global RPDB_CLIENT
if RPDB_CLIENT is not None:
RPDB_CLIENT.cleanup()
RPDB_CLIENT = None
print "Debug session ended."
def do_next():
RPDB_CLIENT.do_next()
def do_step():
RPDB_CLIENT.do_step()
def do_continue():
RPDB_CLIENT.do_continue()
def go_to_cur_frame():
RPDB_CLIENT.go_to_cur_frame()
def go_to_loc(fn, line_no):
vim.command('e ' + fn)
vim.command(str(line_no))
| none | 1 | 2.269921 | 2 | |
Coding/Competitive_Coding/CodeForces/0 - 1300/A_Tram.py | Phantom586/My_Codes | 0 | 6623381 | <filename>Coding/Competitive_Coding/CodeForces/0 - 1300/A_Tram.py
# Linear Kingdom has exactly one tram line. It has n stops, numbered from 1 to n in the order of tram's movement.
# At the i-th stop ai passengers exit the tram, while bi passengers enter it. The tram is empty before it arrives at
# the first stop. Also, when the tram arrives at the last stop, all passengers exit so that it becomes empty.
# Your task is to calculate the tram's minimum capacity such that the number of people inside the tram at any time
# never exceeds this capacity. Note that at each stop all exiting passengers exit before any entering passenger
# enters the tram.
# Input
# The first line contains a single number n (2 ≤ n ≤ 1000) — the number of the tram's stops.
# Then n lines follow, each contains two integers ai and bi (0 ≤ ai, bi ≤ 1000) — the number of passengers that exits
# the tram at the i-th stop, and the number of passengers that enter the tram at the i-th stop.
# The stops are given from the first to the last stop in the order of tram's movement.
# The number of people who exit at a given stop does not exceed the total number of people in the tram immediately
# before it arrives at the stop. More formally, . This particularly means that a1 = 0.
# At the last stop, all the passengers exit the tram and it becomes empty. More formally, .
# No passenger will enter the train at the last stop. That is, bn = 0.
# Output
# print a single integer denoting the minimum possible capacity of the tram (0 is allowed).
# Examples
# input
# 4
# 0 3
# 2 5
# 4 2
# 4 0
# output
# 6
# Note
# For the first example, a capacity of 6 is sufficient:
# At the first stop, the number of passengers inside the tram before arriving is 0. Then, 3 passengers enter the tram,
# and the number of passengers inside the tram becomes 3.
# At the second stop, 2 passengers exit the tram (1 passenger remains inside). Then, 5 passengers enter the tram.
# There are 6 passengers inside the tram now.
# At the third stop, 4 passengers exit the tram (2 passengers remain inside). Then, 2 passengers enter the tram.
# There are 4 passengers inside the tram now.
# Finally, all the remaining passengers inside the tram exit the tram at the last stop. There are no passenger
# inside the tram now, which is in line with the constraints.
# Since the number of passengers inside the tram never exceeds 6, a capacity of 6 is sufficient. Furthermore it is not
# possible for the tram to have a capacity less than 6. Hence, 6 is the correct answer.
# Read the number of stops, then one "exits entries" pair per stop.
n = int(input())
stops = [list(map(int, input().split())) for _ in range(n)]

# The tram leaves the first stop carrying exactly the passengers that
# boarded there (the statement guarantees a1 = 0, so nobody exits).
onboard = stops[0][1]
peak = onboard

# Apply each later stop's exits/entries and track the running maximum --
# that maximum is the minimum capacity the tram needs.
for exits, entries in stops[1:]:
    onboard += entries - exits
    peak = max(peak, onboard)

print(peak)
| <filename>Coding/Competitive_Coding/CodeForces/0 - 1300/A_Tram.py
# Linear Kingdom has exactly one tram line. It has n stops, numbered from 1 to n in the order of tram's movement.
# At the i-th stop ai passengers exit the tram, while bi passengers enter it. The tram is empty before it arrives at
# the first stop. Also, when the tram arrives at the last stop, all passengers exit so that it becomes empty.
# Your task is to calculate the tram's minimum capacity such that the number of people inside the tram at any time
# never exceeds this capacity. Note that at each stop all exiting passengers exit before any entering passenger
# enters the tram.
# Input
# The first line contains a single number n (2 ≤ n ≤ 1000) — the number of the tram's stops.
# Then n lines follow, each contains two integers ai and bi (0 ≤ ai, bi ≤ 1000) — the number of passengers that exits
# the tram at the i-th stop, and the number of passengers that enter the tram at the i-th stop.
# The stops are given from the first to the last stop in the order of tram's movement.
# The number of people who exit at a given stop does not exceed the total number of people in the tram immediately
# before it arrives at the stop. More formally, . This particularly means that a1 = 0.
# At the last stop, all the passengers exit the tram and it becomes empty. More formally, .
# No passenger will enter the train at the last stop. That is, bn = 0.
# Output
# print a single integer denoting the minimum possible capacity of the tram (0 is allowed).
# Examples
# input
# 4
# 0 3
# 2 5
# 4 2
# 4 0
# output
# 6
# Note
# For the first example, a capacity of 6 is sufficient:
# At the first stop, the number of passengers inside the tram before arriving is 0. Then, 3 passengers enter the tram,
# and the number of passengers inside the tram becomes 3.
# At the second stop, 2 passengers exit the tram (1 passenger remains inside). Then, 5 passengers enter the tram.
# There are 6 passengers inside the tram now.
# At the third stop, 4 passengers exit the tram (2 passengers remain inside). Then, 2 passengers enter the tram.
# There are 4 passengers inside the tram now.
# Finally, all the remaining passengers inside the tram exit the tram at the last stop. There are no passenger
# inside the tram now, which is in line with the constraints.
# Since the number of passengers inside the tram never exceeds 6, a capacity of 6 is sufficient. Furthermore it is not
# possible for the tram to have a capacity less than 6. Hence, 6 is the correct answer.
n = int(input())
stations = []
for _ in range(n):
stations.append(list(map(int, input().split())))
current_passengers = [stations[0][1]]
for i in range(1, n):
current_passengers.append(current_passengers[i-1] - stations[i][0] + stations[i][1])
print(max(current_passengers))
| en | 0.916934 | # Linear Kingdom has exactly one tram line. It has n stops, numbered from 1 to n in the order of tram's movement. # At the i-th stop ai passengers exit the tram, while bi passengers enter it. The tram is empty before it arrives at # the first stop. Also, when the tram arrives at the last stop, all passengers exit so that it becomes empty. # Your task is to calculate the tram's minimum capacity such that the number of people inside the tram at any time # never exceeds this capacity. Note that at each stop all exiting passengers exit before any entering passenger # enters the tram. # Input # The first line contains a single number n (2 ≤ n ≤ 1000) — the number of the tram's stops. # Then n lines follow, each contains two integers ai and bi (0 ≤ ai, bi ≤ 1000) — the number of passengers that exits # the tram at the i-th stop, and the number of passengers that enter the tram at the i-th stop. # The stops are given from the first to the last stop in the order of tram's movement. # The number of people who exit at a given stop does not exceed the total number of people in the tram immediately # before it arrives at the stop. More formally, . This particularly means that a1 = 0. # At the last stop, all the passengers exit the tram and it becomes empty. More formally, . # No passenger will enter the train at the last stop. That is, bn = 0. # Output # print a single integer denoting the minimum possible capacity of the tram (0 is allowed). # Examples # input # 4 # 0 3 # 2 5 # 4 2 # 4 0 # output # 6 # Note # For the first example, a capacity of 6 is sufficient: # At the first stop, the number of passengers inside the tram before arriving is 0. Then, 3 passengers enter the tram, # and the number of passengers inside the tram becomes 3. # At the second stop, 2 passengers exit the tram (1 passenger remains inside). Then, 5 passengers enter the tram. # There are 6 passengers inside the tram now. 
# At the third stop, 4 passengers exit the tram (2 passengers remain inside). Then, 2 passengers enter the tram. # There are 4 passengers inside the tram now. # Finally, all the remaining passengers inside the tram exit the tram at the last stop. There are no passenger # inside the tram now, which is in line with the constraints. # Since the number of passengers inside the tram never exceeds 6, a capacity of 6 is sufficient. Furthermore it is not # possible for the tram to have a capacity less than 6. Hence, 6 is the correct answer. | 3.963816 | 4 |
client/views/group_settings.py | omerk2511/dropbox | 4 | 6623382 | <reponame>omerk2511/dropbox<filename>client/views/group_settings.py<gh_stars>1-10
from Tkinter import *
from common import Codes
from ..handlers.data import Data
from ..controllers import GroupController
class GroupSettings(Frame):
    def __init__(self, parent):
        """Build the static settings UI: title, rename row, update button,
        and the (initially empty) container for per-member rows."""
        Frame.__init__(self, parent)
        self.parent = parent
        # Widget registry so the other methods can find/update widgets.
        self.elements = {}

        title_frame = Frame(self)
        title_frame.pack(expand=True, fill=BOTH, padx=70, pady=(30, 20))
        self.elements['title'] = Label(title_frame, text='Settings',
            fg='#003399', font=('Arial', 28))
        self.elements['title'].pack(side=TOP)

        # Row with the "Group Name" label and its entry field.
        group_name_frame = Frame(self)
        group_name_frame.pack(expand=True, fill=BOTH, padx=70)
        self.elements['group_name_label'] = Label(group_name_frame, text='Group Name: ',
            font=('Arial', 18))
        self.elements['group_name_label'].pack(side=LEFT, padx=6)
        self.elements['group_name_entry'] = Entry(group_name_frame, font=('Arial', 18))
        self.elements['group_name_entry'].pack(side=LEFT, padx=6, expand=True, fill=X)

        buttons_frame = Frame(self)
        buttons_frame.pack(expand=True, fill=BOTH, padx=70, pady=(10, 0))
        self.elements['update_group_name_button'] = Button(buttons_frame, text='UPDATE',
            bg='#003399', activebackground='#002266', fg='#ffffff', font=('Arial', 16, 'bold'),
            activeforeground='#ffffff', command=self.update_group_name)
        self.elements['update_group_name_button'].pack(side=TOP, expand=True, fill=X, padx=6)

        # Container that initialize() repopulates with one frame per member.
        self.elements['group_users_frame'] = Frame(self)
        self.elements['group_users_frame'].pack(expand=True, fill=BOTH, padx=70, pady=40)
        self.elements['group_user_frames'] = []
    def initialize(self):
        """(Re)build the member list from the current group's data."""
        self.current_group_data = Data().get_current_group()

        # Remove the rows belonging to the previously shown group.
        for group_user_frame in self.elements['group_user_frames']:
            group_user_frame.pack_forget()
        self.elements['group_user_frames'] = []

        # Re-pack the container so it is laid out after the other widgets.
        self.elements['group_users_frame'].pack_forget()
        self.elements['group_users_frame'].pack(expand=True, fill=BOTH, padx=70, pady=40)

        users = self.current_group_data['users']
        for user in users:
            user_frame = Frame(self.elements['group_users_frame'], bg='gray')
            user_frame.pack(side=TOP, expand=False, fill=X, pady=10)
            user_label = Label(user_frame, font=('Arial', 18), bg='gray',
                text='%s (%s)' % (user['username'], user['full_name']))
            user_label.pack(side=LEFT, padx=20, pady=10)
            # No transfer/kick buttons on the signed-in user's own row.
            if user['id'] != Data().get_user_data()['id']:
                transfer_ownership_button = Button(user_frame, text='Transfer Ownership',
                    font=('Arial', 16), bg='#004d00', fg='#ffffff', activebackground='#006600',
                    activeforeground='#ffffff', command=self.generate_transfer_ownership(user['id']))
                transfer_ownership_button.pack(side=RIGHT, padx=20, pady=10)
                kick_user_button = Button(user_frame, text='Kick User',
                    font=('Arial', 16), bg='#990000', fg='#ffffff', activebackground='#b30000',
                    activeforeground='#ffffff', command=self.generate_kick_user(user['id']))
                kick_user_button.pack(side=RIGHT, pady=10)
            self.elements['group_user_frames'].append(user_frame)
def generate_transfer_ownership(self, user_id):
return lambda: self.transfer_ownership(user_id)
def generate_kick_user(self, user_id):
return lambda: self.kick_user(user_id)
def transfer_ownership(self, user_id):
response = GroupController.update_group(self.current_group_data['id'],
Data().get_token(), owner=user_id)
if response.code == Codes.SUCCESS:
self.parent.display_info('The group ownership has been transferred successfully!')
self.parent.return_frame()
else:
self.parent.display_error(response.payload['message'])
def kick_user(self, user_id):
response = GroupController.kick_group_user(self.current_group_data['id'],
user_id, Data().get_token())
if response.code == Codes.SUCCESS:
self.parent.display_info('The user has been kicked successfully!')
self.initialize()
else:
self.parent.display_error(response.payload['message'])
def update_group_name(self):
group_name = self.elements['group_name_entry'].get()
self.elements['group_name_entry'].delete(0, END)
if group_name:
response = GroupController.update_group(self.current_group_data['id'],
Data().get_token(), name=group_name)
if response.code == Codes.SUCCESS:
self.parent.display_info('The group name has been updated successfully!')
else:
self.parent.display_error(response.payload['message'])
else:
self.parent.display_error('You have to enter a group name.') | from Tkinter import *
from common import Codes
from ..handlers.data import Data
from ..controllers import GroupController
class GroupSettings(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
self.elements = {}
title_frame = Frame(self)
title_frame.pack(expand=True, fill=BOTH, padx=70, pady=(30, 20))
self.elements['title'] = Label(title_frame, text='Settings',
fg='#003399', font=('Arial', 28))
self.elements['title'].pack(side=TOP)
group_name_frame = Frame(self)
group_name_frame.pack(expand=True, fill=BOTH, padx=70)
self.elements['group_name_label'] = Label(group_name_frame, text='Group Name: ',
font=('Arial', 18))
self.elements['group_name_label'].pack(side=LEFT, padx=6)
self.elements['group_name_entry'] = Entry(group_name_frame, font=('Arial', 18))
self.elements['group_name_entry'].pack(side=LEFT, padx=6, expand=True, fill=X)
buttons_frame = Frame(self)
buttons_frame.pack(expand=True, fill=BOTH, padx=70, pady=(10, 0))
self.elements['update_group_name_button'] = Button(buttons_frame, text='UPDATE',
bg='#003399', activebackground='#002266', fg='#ffffff', font=('Arial', 16, 'bold'),
activeforeground='#ffffff', command=self.update_group_name)
self.elements['update_group_name_button'].pack(side=TOP, expand=True, fill=X, padx=6)
self.elements['group_users_frame'] = Frame(self)
self.elements['group_users_frame'].pack(expand=True, fill=BOTH, padx=70, pady=40)
self.elements['group_user_frames'] = []
def initialize(self):
self.current_group_data = Data().get_current_group()
for group_user_frame in self.elements['group_user_frames']:
group_user_frame.pack_forget()
self.elements['group_user_frames'] = []
self.elements['group_users_frame'].pack_forget()
self.elements['group_users_frame'].pack(expand=True, fill=BOTH, padx=70, pady=40)
users = self.current_group_data['users']
for user in users:
user_frame = Frame(self.elements['group_users_frame'], bg='gray')
user_frame.pack(side=TOP, expand=False, fill=X, pady=10)
user_label = Label(user_frame, font=('Arial', 18), bg='gray',
text='%s (%s)' % (user['username'], user['full_name']))
user_label.pack(side=LEFT, padx=20, pady=10)
if user['id'] != Data().get_user_data()['id']:
transfer_ownership_button = Button(user_frame, text='Transfer Ownership',
font=('Arial', 16), bg='#004d00', fg='#ffffff', activebackground='#006600',
activeforeground='#ffffff', command=self.generate_transfer_ownership(user['id']))
transfer_ownership_button.pack(side=RIGHT, padx=20, pady=10)
kick_user_button = Button(user_frame, text='Kick User',
font=('Arial', 16), bg='#990000', fg='#ffffff', activebackground='#b30000',
activeforeground='#ffffff', command=self.generate_kick_user(user['id']))
kick_user_button.pack(side=RIGHT, pady=10)
self.elements['group_user_frames'].append(user_frame)
def generate_transfer_ownership(self, user_id):
return lambda: self.transfer_ownership(user_id)
def generate_kick_user(self, user_id):
return lambda: self.kick_user(user_id)
def transfer_ownership(self, user_id):
response = GroupController.update_group(self.current_group_data['id'],
Data().get_token(), owner=user_id)
if response.code == Codes.SUCCESS:
self.parent.display_info('The group ownership has been transferred successfully!')
self.parent.return_frame()
else:
self.parent.display_error(response.payload['message'])
def kick_user(self, user_id):
response = GroupController.kick_group_user(self.current_group_data['id'],
user_id, Data().get_token())
if response.code == Codes.SUCCESS:
self.parent.display_info('The user has been kicked successfully!')
self.initialize()
else:
self.parent.display_error(response.payload['message'])
def update_group_name(self):
group_name = self.elements['group_name_entry'].get()
self.elements['group_name_entry'].delete(0, END)
if group_name:
response = GroupController.update_group(self.current_group_data['id'],
Data().get_token(), name=group_name)
if response.code == Codes.SUCCESS:
self.parent.display_info('The group name has been updated successfully!')
else:
self.parent.display_error(response.payload['message'])
else:
self.parent.display_error('You have to enter a group name.') | none | 1 | 2.789903 | 3 | |
003/day-3-1-exercise/main.py | barondandi/AppBrewery | 0 | 6623383 | <reponame>barondandi/AppBrewery<filename>003/day-3-1-exercise/main.py
# 🚨 Don't change the code below 👇
number = int(input("Which number do you want to check? "))
# 🚨 Don't change the code above 👆
#Write your code below this line 👇

# A number is even exactly when it divides by 2 with no remainder.
print("This is an even number." if number % 2 == 0 else "This is an odd number.")
''' SOLUTION
#If the number can be divided by 2 with 0 remainder.
if number % 2 == 0:
print("This is an even number.")
#Otherwise (number cannot be divided by 2 with 0 remainder).
else:
print("This is an odd number.")
''' | # 🚨 Don't change the code below 👇
number = int(input("Which number do you want to check? "))
# 🚨 Don't change the code above 👆
#Write your code below this line 👇
remainder = number % 2
if remainder == 0:
print("This is an even number.")
else:
print("This is an odd number.")
''' SOLUTION
#If the number can be divided by 2 with 0 remainder.
if number % 2 == 0:
print("This is an even number.")
#Otherwise (number cannot be divided by 2 with 0 remainder).
else:
print("This is an odd number.")
''' | en | 0.837319 | # 🚨 Don't change the code below 👇 # 🚨 Don't change the code above 👆 #Write your code below this line 👇 SOLUTION #If the number can be divided by 2 with 0 remainder. if number % 2 == 0: print("This is an even number.") #Otherwise (number cannot be divided by 2 with 0 remainder). else: print("This is an odd number.") | 4.421074 | 4 |
locale/languages/fetch-trans.py | glv2/peercoin.net | 0 | 6623384 | #GHETTO TRANSLATION FETCHER - Too lazy to figure out how the transifex client works
lang = input("Input the language shorthand: ")

# Every Transifex resource shares the same URL shape; only the slug differs.
_RESOURCE_SLUGS = [
    "developers-page",
    "exchanges-page",
    "footer",
    "frequently-asked-questions-page",
    "global-variables",
    "header",
    "homepage",
    "investors-page",
    "merchants-page",
    "mining-page",
    "minting-guide",
    "mining-guidephp",
    "minting-page",
    "newcomers-page",
    "payment-integration-guide",
    "wallet-download-page",
    "whitepaper-page",
    "wallets-page",
]
resources = [
    "https://www.transifex.com/projects/p/website-ppc/resource/"
    + slug + "/l/" + lang + "/download/for_use/"
    for slug in _RESOURCE_SLUGS
]

import requests

# Start the PHP output with an array declaration for this language, then
# append each downloaded resource; join once at the end instead of +=.
chunks = ["<?php\n$lang['" + lang + "'] = array();\n"]
cookies = dict(sessionid='')
for resource in resources:
    response = requests.get(resource, cookies=cookies)
    response.encoding = "utf-8"
    chunks.append(response.text + "\n")

# The exported PHP addresses the 'en' array; retarget it to this language.
final = "".join(chunks).replace("'en'", "'" + lang + "'")

with open(lang + '.php', 'w') as f:
    f.write(final)
print("Success!")
lang = input("Input the language shorthand: ")
resources = [
"https://www.transifex.com/projects/p/website-ppc/resource/developers-page/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/exchanges-page/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/footer/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/frequently-asked-questions-page/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/global-variables/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/header/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/homepage/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/investors-page/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/merchants-page/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/mining-page/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/minting-guide/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/mining-guidephp/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/minting-page/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/newcomers-page/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/payment-integration-guide/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/wallet-download-page/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/whitepaper-page/l/" + lang + "/download/for_use/",
"https://www.transifex.com/projects/p/website-ppc/resource/wallets-page/l/" + lang + "/download/for_use/"
]
import requests
final = "<?php\n$lang['"+ lang +"'] = array();\n"
cookies = dict(sessionid='')
for resource in resources:
o = requests.get(resource, cookies=cookies)
o.encoding = "utf-8"
final += o.text + "\n"
final = final.replace("'en'", "'"+lang+"'")
with open(lang+'.php', 'w') as f:
f.write(final)
print("Success!") | en | 0.77917 | #GHETTO TRANSLATION FETCHER - Too lazy to figure out how the transifex client works | 2.318869 | 2 |
lion/update.py | bikeshedder/lion | 2 | 6623385 | from collections import namedtuple
# Records one field's transition as an (old, new) value pair.
Change = namedtuple('Change', ['old', 'new'])


def update_object(obj, data, fields):
    '''
    Copy values from `data` onto `obj`, restricted to the names listed in
    `fields`. Returns `{field_name: Change(old_value, new_value)}` for every
    field whose value actually changed.
    '''
    changes = {}
    for name in fields:
        if name not in data:
            continue
        # FIXME smart conversion of data types
        incoming = data[name]
        current = getattr(obj, name)
        if current != incoming:
            changes[name] = Change(current, incoming)
        # The attribute is (re)assigned even when the value is unchanged,
        # matching the original behaviour.
        setattr(obj, name, incoming)
    return changes
| from collections import namedtuple
Change = namedtuple('Change', ['old', 'new'])
def update_object(obj, data, fields):
'''
Update an object by using the values from `data` limited by
the fields in `fields`. Returns a dictionary of the format
`{field_name: (old_value, new_value)}`.
'''
modified = {}
for field in fields:
if field in data:
# FIXME smart conversion of data types
new_value = data[field]
old_value = getattr(obj, field)
if old_value != new_value:
modified[field] = Change(old_value, new_value)
setattr(obj, field, new_value)
return modified
| en | 0.603023 | Update an object by using the values from `data` limited by the fields in `fields`. Returns a dictionary of the format `{field_name: (old_value, new_value)}`. # FIXME smart conversion of data types | 3.762452 | 4 |
FADiff/fad/__init__.py | liuxiaoxuan97/FADiff | 0 | 6623386 | <gh_stars>0
from FADiff.fad.Gradients import Scal
from FADiff.fad.Matrices import Vect | from FADiff.fad.Gradients import Scal
from FADiff.fad.Matrices import Vect | none | 1 | 0.962504 | 1 | |
Python3/0743-Network-Delay-Time/soln.py | wyaadarsh/LeetCode-Solutions | 5 | 6623387 | class Solution:
def networkDelayTime(self, times, N, K):
"""
:type times: List[List[int]]
:type N: int
:type K: int
:rtype: int
"""
network = collections.defaultdict(dict)
for u, v, w in times:
network[u][v] = w
visit_time = {K : 0}
queue = [(0, K)]
while queue:
time, node = heapq.heappop(queue)
for nei, t in network[node].items():
if nei not in visit_time or visit_time[nei] > time + t:
visit_time[nei] = time + t
heapq.heappush(queue, (time + t, nei))
return max(visit_time.values()) if len(visit_time) == N else -1 | class Solution:
def networkDelayTime(self, times, N, K):
"""
:type times: List[List[int]]
:type N: int
:type K: int
:rtype: int
"""
network = collections.defaultdict(dict)
for u, v, w in times:
network[u][v] = w
visit_time = {K : 0}
queue = [(0, K)]
while queue:
time, node = heapq.heappop(queue)
for nei, t in network[node].items():
if nei not in visit_time or visit_time[nei] > time + t:
visit_time[nei] = time + t
heapq.heappush(queue, (time + t, nei))
return max(visit_time.values()) if len(visit_time) == N else -1 | en | 0.318328 | :type times: List[List[int]] :type N: int :type K: int :rtype: int | 3.177164 | 3 |
cmdb-compliance/libs/aws/com_nat.py | zjj1002/aws-cloud-cmdb-system | 0 | 6623388 | <gh_stars>0
import boto3
from libs.web_logs import ins_log
class ComplianceNatGateWayApi():
    """Compliance check reporting NAT gateways that are not usable.

    Any gateway whose state is not ``available`` (pending / failed /
    deleting / deleted) is collected into ``self.nat_list``.
    """

    def __init__(self, session):
        # Accumulates one dict per non-available NAT gateway.
        self.nat_list = []
        # EC2 client derived from the caller's boto3 session.
        self.nat_client = session.client('ec2')

    def get_nat_gate_ways_response(self):
        """Call DescribeNatGateways and return ``(response, error)``."""
        response_data, err = {}, None
        try:
            response_data = self.nat_client.describe_nat_gateways()
        except Exception as exc:
            err = exc
        return response_data, err

    def get_unused_nat_list(self):
        """Collect gateways whose state is not ``available``.

        Returns the accumulated list, or ``False`` when the API call
        failed (the error is logged).
        """
        response_data, err = self.get_nat_gate_ways_response()
        if err:
            ins_log.read_log('error', '获取失败:{}'.format(err))
            return False
        # Possible states: pending | failed | available | deleting | deleted
        for gateway in response_data["NatGateways"]:
            if gateway["State"] == "available":
                continue
            self.nat_list.append({
                "is_use": False,
                "natgatewayid": gateway["NatGatewayId"],
                "state": gateway["State"],
                "subnetId": gateway["SubnetId"],
                "vpcid": gateway["VpcId"],
                "createTime": gateway["CreateTime"],
            })
        return self.nat_list

    def main(self):
        """Entry point: return the list of non-available gateways."""
        return self.get_unused_nat_list()

    def test_auth(self):
        """Probe call used to verify credentials and permissions."""
        return self.nat_client.describe_nat_gateways()
| import boto3
from libs.web_logs import ins_log
class ComplianceNatGateWayApi():
def __init__(self, session):
self.nat_list = []
# 获取ec2的client
self.nat_client = session.client('ec2')
def get_nat_gate_ways_response(self):
"""
获取返回值
:return:
"""
response_data = {}
err = None
try:
response_data = self.nat_client.describe_nat_gateways()
except Exception as e:
err = e
return response_data, err
def get_unused_nat_list(self):
"""
获取返回值
:return:
"""
response_data, err = self.get_nat_gate_ways_response()
if err:
ins_log.read_log('error', '获取失败:{}'.format(err))
return False
# pending | failed | available | deleting | deleted
for each in response_data["NatGateways"]:
nat_dict ={}
if each["State"] != "available":
nat_dict["is_use"] = False
nat_dict["natgatewayid"] = each["NatGatewayId"]
nat_dict["state"] = each["State"]
nat_dict["subnetId"] = each["SubnetId"]
nat_dict["vpcid"] = each["VpcId"]
nat_dict["createTime"] = each["CreateTime"]
self.nat_list.append(nat_dict)
return self.nat_list
def main(self):
result = self.get_unused_nat_list()
return result
def test_auth(self):
"""
测试接口权限等信息是否异常
:return:
"""
response_data = self.nat_client.describe_nat_gateways()
return response_data | en | 0.604843 | # 获取ec2的client 获取返回值 :return: 获取返回值 :return: # pending | failed | available | deleting | deleted 测试接口权限等信息是否异常 :return: | 2.115448 | 2 |
classes/Player.py | WordsetterFak/Discord.py-Gaming-Bot | 10 | 6623389 | <gh_stars>1-10
class Player:
    """A Discord user known to the bot."""

    # Class-level registry of all players that are currently in a game.
    occupied_players: list[int] = []

    def __init__(self, discord_id: int):
        # Discord account identifier for this player.
        self.discord_id: int = discord_id
| class Player:
occupied_players: list[int] = [] # all players, currently in a game
def __init__(self, discord_id: int):
self.discord_id: int = discord_id | en | 0.989317 | # all players, currently in a game | 2.588274 | 3 |
pydnameth/routines/common.py | AaronBlare/pydnameth | 0 | 6623390 | <gh_stars>0
import plotly.graph_objs as go
import numpy as np
import pandas
def categorize_data(data):
    """Coerce *data* to a float numpy array, factorizing non-numeric values.

    Numeric input is cast to float directly; anything that cannot be cast
    (e.g. string labels) is replaced by its sorted integer category codes.
    """
    if np.can_cast(data, float):
        return data.astype(float)
    codes = pandas.factorize(data, sort=True)[0]
    return np.array(codes, dtype=float)
def is_categorical(data):
    """Heuristically decide whether *data* holds categorical values.

    Data counts as categorical when it cannot be cast to float (e.g.
    string labels) or when it has fewer than 10 distinct values.
    """
    if not np.can_cast(np.asarray(data), float):
        return True
    return len(set(data)) < 10
def is_float(value):
    """Return True when *value* can be converted to ``float``.

    Accepts anything ``float()`` accepts (numeric strings, numbers).
    Fix: inputs of an unconvertible *type* (e.g. ``None``, lists) raise
    ``TypeError``, which the original ``ValueError``-only handler let
    escape; both now return False instead of crashing.
    """
    try:
        float(value)
    except (TypeError, ValueError):
        return False
    return True
def find_nearest_id(array, value):
    """Index of the element of *array* closest to *value*.

    Ties resolve to the lowest index, as with ``argmin``.
    """
    distances = np.abs(np.asarray(array) - value)
    return distances.argmin()
def dict_slice(origin_dict, id):
    """Build a new dict keeping, for every key, only the element at *id*.

    Each surviving value is wrapped in a single-element list.
    """
    return {key: [values[id]] for key, values in origin_dict.items()}
def normalize_to_0_1(values):
    """Linearly rescale *values* so the minimum maps to 0 and the maximum to 1.

    NOTE: a constant input yields a zero range and hence a division by zero.
    """
    lo, hi = min(values), max(values)
    return (np.asarray(values) - lo) / (hi - lo)
def get_axis(title):
    """Plotly axis configuration shared by the figures in this module."""
    title_font = dict(family='Arial', size=33, color='black')
    tick_font = dict(family='Arial', size=30, color='black')
    return dict(
        title=title,
        showgrid=True,
        linewidth=2,
        linecolor='black',
        showline=True,
        gridcolor='gray',
        mirror=True,
        ticks='outside',
        titlefont=title_font,
        showticklabels=True,
        tickangle=0,
        tickfont=tick_font,
        exponentformat='e',
        showexponent='all',
    )
def get_legend():
    """Horizontal legend placed above the plot area."""
    return dict(
        font=dict(family='Arial', size=33),
        orientation="h",
        x=0.25,
        y=1.2,
    )
def get_margin():
    """Figure margins used across the module's plots."""
    margins = dict(l=80, r=20, b=80, t=10, pad=0)
    return go.layout.Margin(**margins)
def process_names(config):
    """String form of the config's observables, renaming 'gender' to 'sex'."""
    label = str(config.attributes.observables)
    return label.replace('gender', 'sex') if 'gender' in label else label
def get_names(config, plot_params):
    """Legend label for *config*, optionally truncated to ``legend_size`` parts.

    The processed name is split on ``')_'``; when it has several parts,
    only the first ``legend_size`` of them are kept, re-joined and closed
    with ``')'``.  Otherwise the full processed name is returned.
    """
    if 'legend_size' not in plot_params:
        return process_names(config)
    size = plot_params['legend_size']
    parts = process_names(config).split(')_')
    if len(parts) > 1:
        return ')_'.join(parts[:size]) + ')'
    # Single part: nothing to truncate (both original fallback branches
    # returned the processed name unchanged).
    return process_names(config)
def update_parent_dict_with_children(parent_metrics_keys, item, config_parent, config_child):
    """Copy the child's metric values for *item* into the parent's metrics.

    For every metric key of the child not yet listed in
    *parent_metrics_keys*, the value at the item's index is appended to the
    parent's metric list and the key is recorded as seen.  Both
    *parent_metrics_keys* and *config_parent.metrics* are mutated in place.
    """
    row = config_child.advanced_dict[item]
    for metric, values in config_child.advanced_data.items():
        if metric in parent_metrics_keys:
            continue
        config_parent.metrics[metric].append(values[row])
        parent_metrics_keys.append(metric)
| import plotly.graph_objs as go
import numpy as np
import pandas
def categorize_data(data):
can_cast = np.can_cast(data, float)
if can_cast:
data = data.astype(float)
else:
data = pandas.factorize(data, sort=True)[0]
data = np.array(data, dtype=float)
return data
def is_categorical(data):
is_can_cast_to_float = np.can_cast(np.asarray(data), float)
if not is_can_cast_to_float:
return True
len_set = len(set(data))
if len_set < 10:
return True
else:
return False
def is_float(value):
try:
float(value)
return True
except ValueError:
return False
def find_nearest_id(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def dict_slice(origin_dict, id):
new_dict = {}
for key, value in origin_dict.items():
new_dict[key] = [value[id]]
return new_dict
def normalize_to_0_1(values):
max_val = max(values)
min_val = min(values)
shift_val = (max_val - min_val)
values_normed = (np.asarray(values) - min_val) / shift_val
return values_normed
def get_axis(title):
axis = dict(
title=title,
showgrid=True,
linewidth=2,
linecolor='black',
showline=True,
gridcolor='gray',
mirror=True,
ticks='outside',
titlefont=dict(
family='Arial',
size=33,
color='black'
),
showticklabels=True,
tickangle=0,
tickfont=dict(
family='Arial',
size=30,
color='black'
),
exponentformat='e',
showexponent='all'
)
return axis
def get_legend():
legend = dict(
font=dict(
family='Arial',
size=33,
),
orientation="h",
x=0.25,
y=1.2,
)
return legend
def get_margin():
margin = go.layout.Margin(
l=80,
r=20,
b=80,
t=10,
pad=0
)
return margin
def process_names(config):
name = str(config.attributes.observables)
if 'gender' in name:
name = name.replace('gender', 'sex')
return name
def get_names(config, plot_params):
if 'legend_size' in plot_params:
legend_size = plot_params['legend_size']
parts = process_names(config).split(')_')
if len(parts) > 1:
name = ')_'.join(parts[0:legend_size]) + ')'
elif legend_size > len(parts):
name = process_names(config)
else:
name = process_names(config)
else:
name = process_names(config)
return name
def update_parent_dict_with_children(parent_metrics_keys, item, config_parent, config_child):
item_id = config_child.advanced_dict[item]
for key in config_child.advanced_data:
if key not in parent_metrics_keys:
advanced_data = config_child.advanced_data[key][item_id]
config_parent.metrics[key].append(advanced_data)
parent_metrics_keys.append(key) | none | 1 | 2.557193 | 3 | |
visualize_volumetric_data_on__multi_view_surf.py | ngohgia/multi_view_brain_vol | 0 | 6623391 | <gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
from nilearn import surface
from nilearn.plotting import plot_surf_stat_map
from matplotlib.colors import Normalize, LinearSegmentedColormap
from matplotlib.cm import ScalarMappable, get_cmap
from matplotlib.colorbar import make_axes
def visualize_volumetric_data_on__multi_view_surf(vol_img, fsaverage=None, threshold=None, vmin=None, vmax=None, title=None, cmap='cold_hot_r'):
    """ Visualizing volumetric data on fsaverage surfaces for the
    lateral, medial, dorsal and ventral view of both hemispheres
    Parameters
    ----------
    vol_img: brain image in the volumetric space, such as from
        the output of nilearn.image.load_img
    fsaverage: fsaverage surface mesh, either fsaverage5 mesh (10242 nodes),
        or fsaverage mesh (163842 nodes), output from
        nilearn.datasets.fetch_surf_fsaverage()
    threshold : a number or None, default is None.
        If None is given, the image is not thresholded.
        If a number is given, it is used to threshold the image, values
        below the threshold (in absolute value) are plotted as transparent.
    vmin, vmax: lower / upper bound to plot surf_data values
        If None , the values will be set to min/max of the data
    title: str, optional
        Figure title.
    cmap: str, name of the colormap used for visualizing, default is 'cold_hot_r'
    """
    # Project the volumetric image onto both hemisphere pial meshes.
    lh_mesh = surface.load_surf_mesh(fsaverage.pial_left)
    rh_mesh = surface.load_surf_mesh(fsaverage.pial_right)
    lh_surf = surface.vol_to_surf(vol_img, fsaverage.pial_left)
    rh_surf = surface.vol_to_surf(vol_img, fsaverage.pial_right)
    if vmin is None:
        vmin = np.min((np.min(lh_surf), np.min(rh_surf)))
    if vmax is None:
        vmax = np.max((np.max(lh_surf), np.max(rh_surf)))
    # Combined mesh for the whole-brain (dorsal/ventral) views: right
    # hemisphere face indices are shifted past the left vertex range.
    mesh = [np.vstack((lh_mesh[0], rh_mesh[0])),
            np.vstack((lh_mesh[1], rh_mesh[1] + lh_mesh[1].max(axis=0) + 1))]
    # (Fix: removed an unused local `modes` list that duplicated the view
    # names used below.)
    hemis = ['left', 'right']
    surf = {'left': lh_mesh, 'right': rh_mesh, 'both': mesh}
    data = {'left': lh_surf, 'right': rh_surf,
            'both': np.hstack((lh_surf, rh_surf))}
    # Sulcal depth maps provide the anatomical background shading.
    lh_sulc = surface.load_surf_data(fsaverage.sulc_left)
    rh_sulc = surface.load_surf_data(fsaverage.sulc_right)
    bg_map = {'left': lh_sulc, 'right': rh_sulc,
              'both': np.hstack((lh_sulc, rh_sulc))}
    # Symmetric color range around zero.
    abs_max = np.max((np.abs(vmin), np.abs(vmax)))
    fig, axes = plt.subplots(nrows=2, ncols=3,
                             figsize=(20, 10),  # not the best fix
                             subplot_kw={'projection': '3d'}, dpi=80)
    # Columns 0-1: lateral/medial views of each hemisphere separately.
    for index_view, view in enumerate(['lateral', 'medial']):
        for index_hemi, hemi in enumerate(hemis):
            plot_surf_stat_map(surf[hemi], data[hemi],
                               view=view,
                               hemi=hemi,
                               bg_map=bg_map[hemi],
                               axes=axes[index_view, index_hemi],
                               colorbar=False,
                               threshold=threshold,
                               symmetric_cbar=False,
                               vmax=abs_max,
                               cmap=cmap,
                               darkness=0)
            axes[index_view, index_hemi].margins(0)
    # Column 2: dorsal/ventral views of the combined mesh.
    for i_m, view in enumerate(['dorsal', 'ventral']):
        plot_surf_stat_map(surf['both'],
                           data['both'],
                           view=view,
                           hemi='left',
                           bg_map=bg_map['both'],
                           axes=axes[i_m, 2],
                           colorbar=False,
                           threshold=threshold,
                           symmetric_cbar=False,
                           vmax=abs_max,
                           cmap=cmap,
                           darkness=0)
        axes[i_m, 2].margins(0)
    # Build one shared colorbar from a proxy mappable.
    true_cmap = get_cmap(cmap)
    norm = Normalize(vmin=-abs_max, vmax=abs_max)
    nb_ticks = 5
    ticks = np.linspace(-abs_max, abs_max, nb_ticks)
    bounds = np.linspace(-abs_max, abs_max, true_cmap.N)
    if threshold is not None:
        # NOTE(review): iterating reversed(range(N)) flips the colormap for
        # the colorbar when a threshold is used, while the surface plots use
        # the unreversed cmap — confirm this asymmetry is intended.
        cmaplist = [true_cmap(i) for i in reversed(range(true_cmap.N))]
        # set colors to grey for absolute values < threshold
        istart = int(norm(-threshold, clip=True) * (true_cmap.N - 1))
        istop = int(norm(threshold, clip=True) * (true_cmap.N - 1))
        for i in range(istart, istop):
            cmaplist[i] = (0.5, 0.5, 0.5, 1.)
        true_cmap = LinearSegmentedColormap.from_list(
            'Custom cmap', cmaplist, true_cmap.N)
    proxy_mappable = ScalarMappable(cmap=true_cmap, norm=norm)
    proxy_mappable.set_array(np.array([-abs_max, abs_max]))
    # make_axes also returns colorbar kwargs, which are not needed here
    # (fix: bound to `_` instead of an unused `kw` local).
    cbar_ax, _ = make_axes(axes, location='bottom', fraction=.05,
                           shrink=.3, pad=0.5)
    cbar = fig.colorbar(
        proxy_mappable, cax=cbar_ax, ticks=ticks,
        boundaries=bounds, spacing='proportional',
        format='%.2g', orientation='horizontal')
    cbar.ax.tick_params(labelsize=16)
    if title is not None:
        fig.suptitle(title, y=0.9, fontsize=20)
    # Negative spacing tightens the gaps between the 3D axes.
    fig.subplots_adjust(wspace=-0.28, hspace=-0.1)
| import numpy as np
import matplotlib.pyplot as plt
from nilearn import surface
from nilearn.plotting import plot_surf_stat_map
from matplotlib.colors import Normalize, LinearSegmentedColormap
from matplotlib.cm import ScalarMappable, get_cmap
from matplotlib.colorbar import make_axes
def visualize_volumetric_data_on__multi_view_surf(vol_img, fsaverage=None, threshold=None, vmin=None, vmax=None, title=None, cmap='cold_hot_r'):
""" Visualizing volumetric data on fsaverage surfaces for the
lateral, medial, dorsal and ventral view of both hemispheres
Parameters
----------
vol_img: brain image in the volumetric space, such as from
the output of nilearn.image.load_img
fsaverage: fsaverage surface mesh, either fsaverage5 mesh (10242 nodes),
or fsaverage mesh (163842 nodes), output from
nilearn.datasets.fetch_surf_fsaverage()
threshold : a number or None, default is None.
If None is given, the image is not thresholded.
If a number is given, it is used to threshold the image, values
below the threshold (in absolute value) are plotted as transparent.
vmin, vmax: lower / upper bound to plot surf_data values
If None , the values will be set to min/max of the data
title: str, optional
Figure title.
cmap: str, name of the colormap used for visualizing, default is 'cold_hot_r'
"""
lh_mesh = surface.load_surf_mesh(fsaverage.pial_left)
rh_mesh = surface.load_surf_mesh(fsaverage.pial_right)
lh_surf = surface.vol_to_surf(vol_img, fsaverage.pial_left)
rh_surf = surface.vol_to_surf(vol_img, fsaverage.pial_right)
if vmin is None:
vmin = np.min((np.min(lh_surf), np.min(rh_surf)))
if vmax is None:
vmax = np.max((np.max(lh_surf), np.max(rh_surf)))
mesh = [np.vstack((lh_mesh[0], rh_mesh[0])),
np.vstack((lh_mesh[1], rh_mesh[1] + lh_mesh[1].max(axis=0) + 1))]
modes = ['lateral', 'medial', 'dorsal', 'ventral']
hemis = ['left', 'right']
surf = {'left': lh_mesh, 'right': rh_mesh, 'both': mesh}
data = {'left': lh_surf, 'right': rh_surf,
'both': np.hstack((lh_surf, rh_surf))}
lh_sulc = surface.load_surf_data(fsaverage.sulc_left)
rh_sulc = surface.load_surf_data(fsaverage.sulc_right)
bg_map = {'left': lh_sulc, 'right': rh_sulc,
'both': np.hstack((lh_sulc, rh_sulc))}
abs_max = np.max((np.abs(vmin), np.abs(vmax)))
fig, axes = plt.subplots(nrows=2, ncols=3,
figsize=(20, 10), # not the best fix
subplot_kw={'projection': '3d'}, dpi=80)
for index_view, view in enumerate(['lateral', 'medial']):
for index_hemi, hemi in enumerate(hemis):
plot_surf_stat_map(surf[hemi], data[hemi],
view=view,
hemi=hemi,
bg_map=bg_map[hemi],
axes=axes[index_view, index_hemi],
colorbar=False,
threshold=threshold,
symmetric_cbar=False,
vmax=abs_max,
cmap=cmap,
darkness=0)
axes[index_view, index_hemi].margins(0)
for i_m, view in enumerate(['dorsal', 'ventral']):
plot_surf_stat_map(surf['both'],
data['both'],
view=view,
hemi='left',
bg_map=bg_map['both'],
axes=axes[i_m, 2],
colorbar=False,
threshold=threshold,
symmetric_cbar=False,
vmax=abs_max,
cmap=cmap,
darkness=0)
axes[i_m, 2].margins(0)
true_cmap = get_cmap(cmap)
norm = Normalize(vmin=-abs_max, vmax=abs_max)
nb_ticks = 5
ticks = np.linspace(-abs_max, abs_max, nb_ticks)
bounds = np.linspace(-abs_max, abs_max, true_cmap.N)
if threshold is not None:
cmaplist = [true_cmap(i) for i in reversed(range(true_cmap.N))]
# set colors to grey for absolute values < threshold
istart = int(norm(-threshold, clip=True) * (true_cmap.N - 1))
istop = int(norm(threshold, clip=True) * (true_cmap.N - 1))
for i in range(istart, istop):
cmaplist[i] = (0.5, 0.5, 0.5, 1.)
true_cmap = LinearSegmentedColormap.from_list(
'Custom cmap', cmaplist, true_cmap.N)
proxy_mappable = ScalarMappable(cmap=true_cmap, norm=norm)
proxy_mappable.set_array(np.array([-abs_max, abs_max]))
cbar_ax, kw = make_axes(axes, location='bottom', fraction=.05,
shrink=.3, pad=0.5)
cbar = fig.colorbar(
proxy_mappable, cax=cbar_ax, ticks=ticks,
boundaries=bounds, spacing='proportional',
format='%.2g', orientation='horizontal')
cbar.ax.tick_params(labelsize=16)
if title is not None:
fig.suptitle(title, y=0.9, fontsize=20)
fig.subplots_adjust(wspace=-0.28, hspace=-0.1) | en | 0.690182 | Visualizing volumetric data on fsaverage surfaces for the lateral, medial, dorsal and ventral view of both hemispheres Parameters ---------- vol_img: brain image in the volumetric space, such as from the output of nilearn.image.load_img fsaverage: fsaverage surface mesh, either fsaverage5 mesh (10242 nodes), or fsaverage mesh (163842 nodes), output from nilearn.datasets.fetch_surf_fsaverage() threshold : a number or None, default is None. If None is given, the image is not thresholded. If a number is given, it is used to threshold the image, values below the threshold (in absolute value) are plotted as transparent. vmin, vmax: lower / upper bound to plot surf_data values If None , the values will be set to min/max of the data title: str, optional Figure title. cmap: str, name of the colormap used for visualizing, default is 'cold_hot_r' # not the best fix # set colors to grey for absolute values < threshold | 2.637053 | 3 |
nasws/rnn/genotype.py | kcyu2014/nas-landmarkreg | 8 | 6623392 | """
genotype.py
This file should contain the definition of RNN and CNN genotype.
For CNN genotype, it also provides a function to transform the genotype.
""" | """
genotype.py
This file should contain the definition of RNN and CNN genotype.
For CNN genotype, it also provide a function to transform the genytpe
""" | en | 0.76839 | genotype.py This file should contain the definition of RNN and CNN genotype. For CNN genotype, it also provide a function to transform the genytpe | 1.545927 | 2 |
mnultitool/interpolate/spline.py | artus9033/MNultitool | 3 | 6623393 | <gh_stars>1-10
from typing import Tuple
import numpy as np
from ..misc.utils import isType
def firstOrderSpline(x: np.ndarray, y: np.ndarray):
    """
    Computes the coefficients of a first order (linear) spline:

    .. math::
        a_k=\\frac{y_{k+1}-y_k}{x_{k+1}-x_k}
    .. math::
        b_k=y_k-a_k*x_k

    :param x: function arguments (x axis)
    :param y: function values (y axis)
    :return: ``(a, b)`` — per-interval slopes and intercepts — or ``None``
        for invalid input (non-arrays or mismatched lengths)"""
    valid = isType(x, np.ndarray) and isType(y, np.ndarray) and len(x) == len(y)
    if not valid:
        return None
    slopes = []
    intercepts = []
    for k in range(len(x) - 1):
        slope = (y[k + 1] - y[k]) / (x[k + 1] - x[k])
        slopes.append(slope)
        intercepts.append(y[k] - slope * x[k])
    return (slopes, intercepts)
def cubicSpline(x: np.ndarray, y: np.ndarray) -> Tuple[float, float, float, float]:
    """
    Computes the coefficients of a third order (cubic) spline
    :param x: function arguments (x axis)
    :param y: function values (y axis)
    :return: the coefficients in the tuple of shape (a0, a1, a2, a3);
        ``(nan, nan, nan, nan)`` for invalid input (non-arrays or
        mismatched lengths)"""
    if not isType(x, np.ndarray) or not isType(y, np.ndarray) or len(x) != len(y):
        return np.nan, np.nan, np.nan, np.nan
    # Work arrays: h = interval widths, d = secant slope of each interval,
    # lmb/rho = weights of the neighbouring intervals, m = estimated second
    # derivatives at the knots (m[0] and the last entries stay 0).
    h = np.zeros(len(y))
    d = np.zeros(len(y))
    lmb = np.zeros(len(y))
    rho = np.zeros(len(y))
    m = np.zeros(len(y))
    # b0..b3: per-interval coefficients expressed around the local origin x[k].
    b3 = np.zeros(len(y) - 1)
    b2 = np.zeros(len(y) - 1)
    b1 = np.zeros(len(y) - 1)
    b0 = np.zeros(len(y) - 1)
    tgtLen = len(x) - 1
    for k in range(tgtLen):
        h[k] = x[k + 1] - x[k]
        d[k] = (y[k + 1] - y[k]) / h[k]
    for k in range(tgtLen):
        # h[tgtLen] is still 0 here, so the last lmb/rho use only h[k].
        lmb[k + 1] = h[k + 1] / (h[k] + h[k + 1])
        rho[k + 1] = h[k] / (h[k] + h[k + 1])
    for k in range(tgtLen - 1):
        # NOTE(review): this is a single forward sweep, not a tridiagonal
        # solve; m[k + 2] has not been written yet when read (still 0) —
        # confirm this approximation is intended.
        m[k + 1] = 3 * (d[k + 1] - d[k]) / (h[k] + h[k + 1]) - \
            m[k] * rho[k + 1] / 2 - m[k + 2] * lmb[k + 1] / 2
    for k in range(tgtLen):
        b0[k] = y[k]
        b1[k] = d[k] - h[k] * (2 * m[k] + m[k + 1]) / 6
        b2[k] = m[k] / 2
        # NOTE(review): this evaluates as (m[k+1]-m[k]) / 6 * h[k]; the
        # textbook cubic-spline coefficient is (m[k+1]-m[k]) / (6 * h[k])
        # — confirm the intended formula.
        b3[k] = (m[k + 1] - m[k]) / 6 * h[k]
    # Re-expand the local-origin coefficients into global polynomial
    # coefficients a0 + a1*x + a2*x^2 + a3*x^3 per interval.
    xCut = x[:-1]
    a3 = b3
    a2 = b2 - 3 * b3 * xCut
    a1 = b1 - 2 * b2 * xCut + 3 * b3 * xCut**2
    a0 = b0 - b1 * xCut + b2 * xCut**2 - b3 * xCut**3
    return (a0, a1, a2, a3)
| from typing import Tuple
import numpy as np
from ..misc.utils import isType
def firstOrderSpline(x: np.ndarray, y: np.ndarray):
"""
Computes the coefficients of a first order spline, according to the formulas:
.. math::
a_k=\\frac{y_{k+1}-y_k}{x_{k+1}-x_k}
.. math::
b_k=y_k-a_k*x_k
:param x: function arguments (x axis)
:param y: function values (y axis)
:return: the coefficients of the linear function in the tuple of shape (a0, a1)"""
if not isType(x, np.ndarray) or not isType(y, np.ndarray) or len(x) != len(y):
return None
a = []
b = []
for k in range(len(x) - 1):
a.append((y[k + 1] - y[k]) / (x[k + 1] - x[k]))
b.append(y[k] - a[k] * x[k])
return (a, b)
def cubicSpline(x: np.ndarray, y: np.ndarray) -> Tuple[float, float, float, float]:
"""
Computes the coefficients of a third order (cubic) spline
:param x: function arguments (x axis)
:param y: function values (y axis)
:return: the coefficients in the tuple of shape (a0, a1, a2, a3)"""
if not isType(x, np.ndarray) or not isType(y, np.ndarray) or len(x) != len(y):
return np.nan, np.nan, np.nan, np.nan
h = np.zeros(len(y))
d = np.zeros(len(y))
lmb = np.zeros(len(y))
rho = np.zeros(len(y))
m = np.zeros(len(y))
b3 = np.zeros(len(y) - 1)
b2 = np.zeros(len(y) - 1)
b1 = np.zeros(len(y) - 1)
b0 = np.zeros(len(y) - 1)
tgtLen = len(x) - 1
for k in range(tgtLen):
h[k] = x[k + 1] - x[k]
d[k] = (y[k + 1] - y[k]) / h[k]
for k in range(tgtLen):
lmb[k + 1] = h[k + 1] / (h[k] + h[k + 1])
rho[k + 1] = h[k] / (h[k] + h[k + 1])
for k in range(tgtLen - 1):
m[k + 1] = 3 * (d[k + 1] - d[k]) / (h[k] + h[k + 1]) - \
m[k] * rho[k + 1] / 2 - m[k + 2] * lmb[k + 1] / 2
for k in range(tgtLen):
b0[k] = y[k]
b1[k] = d[k] - h[k] * (2 * m[k] + m[k + 1]) / 6
b2[k] = m[k] / 2
b3[k] = (m[k + 1] - m[k]) / 6 * h[k]
xCut = x[:-1]
a3 = b3
a2 = b2 - 3 * b3 * xCut
a1 = b1 - 2 * b2 * xCut + 3 * b3 * xCut**2
a0 = b0 - b1 * xCut + b2 * xCut**2 - b3 * xCut**3
return (a0, a1, a2, a3) | en | 0.497414 | Computes the coefficients of a first order spline, according to the formulas: .. math:: a_k=\\frac{y_{k+1}-y_k}{x_{k+1}-x_k} .. math:: b_k=y_k-a_k*x_k :param x: function arguments (x axis) :param y: function values (y axis) :return: the coefficients of the linear function in the tuple of shape (a0, a1) Computes the coefficients of a third order (cubic) spline :param x: function arguments (x axis) :param y: function values (y axis) :return: the coefficients in the tuple of shape (a0, a1, a2, a3) | 3.734423 | 4 |
modeling/parse.py | vs-uulm/CoinView | 0 | 6623394 | import csv
import sys
import time
from collections import defaultdict
from tqdm import *
# Evaluation script: reads a CSV whose rows look like
# (row[0]=integer timestamp, row[1], row[2]) — presumably (time, observer,
# transaction id); confirm against the data producer.  It de-duplicates per
# (row[1], row[2]) pair and derives propagation-delay statistics per
# transaction.
start = time.time()
# Divisor applied to raw timestamp differences before printing.
# NOTE(review): dividing by 10**6 turns microsecond values into seconds,
# but time_unit claims "ms" — confirm the intended unit.
time_type = 10**6
time_unit = "ms"
# tx: transaction id -> list of observation timestamps (in file order)
# us: observer id -> number of unique transactions it reported
tx = defaultdict(list)
us = defaultdict(int)
unique = set()
with open(sys.argv[1],"r") as csvfile:
    datareader = csv.reader(csvfile, delimiter=',')
    for row in tqdm(datareader):
        # Count each (observer, transaction) pair only once.
        if (row[1], row[2]) not in unique:
            tx[row[2]].append(int(row[0]))
            us[row[1]] += 1
            unique.add((row[1], row[2]))
# counter: histogram of observation counts per transaction.
# NOTE(review): counter is filled but never written anywhere — confirm
# whether it should appear in the results file.
counter = defaultdict(int)
maxtimes = []
avgtimes = []
datasamples = []
for t in tqdm(tx.values()):
    counter[len(t)] += 1
    if len(t) > 1:
        # Spread between earliest and latest sighting, and mean delay
        # relative to the earliest sighting of the transaction.
        maxtimes.append(max(t)-min(t))
        avgtimes.append(sum([_-min(t) for _ in t])/len(t))
    if len(t) > 100:
        # Keep well-observed transactions as raw per-observation offsets.
        datasamples.append(list(map(lambda x: x-min(t), t)))
print("Writing output.")
with open(sys.argv[1][0:-4]+"-data.csv","w") as csvout:
    writer = csv.writer(csvout, delimiter=',')
    for sample in datasamples:
        # NOTE(review): drops the first offset — presumably meant to skip
        # the earliest sighting, but t is in file order, not sorted; confirm.
        writer.writerow(sample[1:])
with open(sys.argv[1][0:-4]+"-results.txt","w") as resultsout:
    resultsout.write("Statistics:\n")
    resultsout.write(str(len(us))+" different participants\n")
    resultsout.write(str(len(tx))+" different transactions\n")
    resultsout.write("")
    resultsout.write("Global max max: "+str(max(maxtimes)/time_type)+time_unit+"\n")
    resultsout.write("Global min max: "+str(min(maxtimes)/time_type)+time_unit+"\n")
    resultsout.write("Global max avg: "+str(max(avgtimes)/time_type)+time_unit+"\n")
    resultsout.write("Global min avg: "+str(min(avgtimes)/time_type)+time_unit+"\n")
    resultsout.write("Global average of max: "+str(sum(maxtimes)/len(maxtimes)/time_type)+time_unit+"\n")
    resultsout.write("Global average of avg: "+str(sum(avgtimes)/len(avgtimes)/time_type)+time_unit+"\n")
end = time.time()
print("Runtime of evaluation script: {:2.2}s".format(end-start))
import sys
import time
from collections import defaultdict
from tqdm import *
start = time.time()
time_type = 10**6
time_unit = "ms"
tx = defaultdict(list)
us = defaultdict(int)
unique = set()
with open(sys.argv[1],"r") as csvfile:
datareader = csv.reader(csvfile, delimiter=',')
for row in tqdm(datareader):
if (row[1], row[2]) not in unique:
tx[row[2]].append(int(row[0]))
us[row[1]] += 1
unique.add((row[1], row[2]))
counter = defaultdict(int)
maxtimes = []
avgtimes = []
datasamples = []
for t in tqdm(tx.values()):
counter[len(t)] += 1
if len(t) > 1:
maxtimes.append(max(t)-min(t))
avgtimes.append(sum([_-min(t) for _ in t])/len(t))
if len(t) > 100:
datasamples.append(list(map(lambda x: x-min(t), t)))
print("Writing output.")
with open(sys.argv[1][0:-4]+"-data.csv","w") as csvout:
writer = csv.writer(csvout, delimiter=',')
for sample in datasamples:
writer.writerow(sample[1:])
with open(sys.argv[1][0:-4]+"-results.txt","w") as resultsout:
resultsout.write("Statistics:\n")
resultsout.write(str(len(us))+" different participants\n")
resultsout.write(str(len(tx))+" different transactions\n")
resultsout.write("")
resultsout.write("Global max max: "+str(max(maxtimes)/time_type)+time_unit+"\n")
resultsout.write("Global min max: "+str(min(maxtimes)/time_type)+time_unit+"\n")
resultsout.write("Global max avg: "+str(max(avgtimes)/time_type)+time_unit+"\n")
resultsout.write("Global min avg: "+str(min(avgtimes)/time_type)+time_unit+"\n")
resultsout.write("Global average of max: "+str(sum(maxtimes)/len(maxtimes)/time_type)+time_unit+"\n")
resultsout.write("Global average of avg: "+str(sum(avgtimes)/len(avgtimes)/time_type)+time_unit+"\n")
end = time.time()
print("Runtime of evaluation script: {:2.2}s".format(end-start)) | none | 1 | 2.708921 | 3 | |
custom_components/airzone/innobus.py | gpulido/homeassistant-airzone | 26 | 6623395 | from enum import Enum
import logging
from typing import List, Optional
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_NONE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from .const import (
AVAILABLE_ATTRIBUTES_ZONE,
MACHINE_HVAC_MODES,
MACHINE_PRESET_MODES,
MACHINE_SUPPORT_FLAGS,
PRESET_AIR_MODE,
PRESET_FLOOR_MODE,
PRESET_SLEEP,
ZONE_FAN_MODES,
ZONE_FAN_MODES_R,
ZONE_HVAC_MODES,
ZONE_PRESET_MODES,
ZONE_SUPPORT_FLAGS,
)
_LOGGER = logging.getLogger(__name__)
class InnobusZone(ClimateEntity):
    """Home Assistant climate entity for a single Airzone (Innobus) zone."""
    def __init__(self, airzone_zone):
        """Initialize the device from the airzone library's zone object."""
        self._name = "Airzone Zone " + str(airzone_zone._zone_id)
        _LOGGER.info("Airzone configure zone " + self._name)
        self._airzone_zone = airzone_zone
        self._available_attributes = AVAILABLE_ATTRIBUTES_ZONE
        # Pre-seed every known attribute with None until the first update().
        self._state_attrs = {}
        self._state_attrs.update(
            {attribute: None for attribute in self._available_attributes})
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._state_attrs
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def supported_features(self):
        """Return the list of supported features."""
        return ZONE_SUPPORT_FLAGS
    @property
    def temperature_unit(self):
        """Return the unit of measurement that is used."""
        return TEMP_CELSIUS
    def turn_on(self):
        """Turn on."""
        self._airzone_zone.turn_on()
    def turn_off(self):
        """Turn off."""
        self._airzone_zone.turn_off()
    @property
    def hvac_mode(self) -> str:
        """Return hvac operation ie. heat, cool mode.
        Need to be one of HVAC_MODE_*.

        Mapping: tacto + automatic -> AUTO; tacto alone -> HEAT_COOL;
        anything else -> OFF.
        """
        tacto_on = bool(self._airzone_zone.is_tacto_on())
        auto_on = bool(self._airzone_zone.is_automatic_mode())
        if tacto_on and auto_on:
            return HVAC_MODE_AUTO
        elif tacto_on and not auto_on:
            return HVAC_MODE_HEAT_COOL
        else:
            return HVAC_MODE_OFF
    @property
    def hvac_modes(self) -> List[str]:
        """Return the list of available hvac operation modes.
        Need to be a subset of HVAC_MODES.
        """
        return ZONE_HVAC_MODES
    def set_hvac_mode(self, hvac_mode: str) -> None:
        """Set new target hvac mode."""
        if hvac_mode == HVAC_MODE_OFF:
            self._airzone_zone.turnoff_tacto()
        elif hvac_mode == HVAC_MODE_HEAT_COOL:
            # retrieve_zone_state() between the two writes — presumably
            # refreshes the cached device state before re-enabling tacto;
            # confirm against the airzone library.
            self._airzone_zone.turnoff_automatic_mode()
            self._airzone_zone.retrieve_zone_state()
            self._airzone_zone.turnon_tacto()
        elif hvac_mode == HVAC_MODE_AUTO:
            self._airzone_zone.turnon_automatic_mode()
            self._airzone_zone.retrieve_zone_state()
            self._airzone_zone.turnon_tacto()
    @property
    def hvac_action(self) -> Optional[str]:
        """Return the current running hvac operation.

        Floor heating takes precedence; air demand is HEAT only when the
        machine runs in HOT_AIR, otherwise COOL; STOP maps to OFF.
        """
        op_mode = self._airzone_zone._machine.operation_mode.name
        if self._airzone_zone.is_floor_active():
            return CURRENT_HVAC_HEAT
        if self._airzone_zone.is_requesting_air():
            if op_mode == 'HOT_AIR':
                return CURRENT_HVAC_HEAT
            return CURRENT_HVAC_COOL
        if op_mode == 'STOP':
            return CURRENT_HVAC_OFF
        return CURRENT_HVAC_IDLE
    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._airzone_zone.local_temperature
    @property
    def target_temperature(self):
        """Return the temperature setpoint of the zone."""
        return self._airzone_zone.signal_temperature_value
    def set_temperature(self, **kwargs):
        """Set new target temperature.

        The value is rounded to one decimal before being sent to the zone.
        """
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return None
        self._airzone_zone.set_signal_temperature_value(round(float(temperature), 1))
    @property
    def preset_mode(self) -> Optional[str]:
        """Return the current preset mode, e.g., home, away, temp.
        Requires SUPPORT_PRESET_MODE.
        """
        if self._airzone_zone.is_sleep_on():
            return PRESET_SLEEP
        return PRESET_NONE
    @property
    def preset_modes(self) -> Optional[List[str]]:
        """Return a list of available preset modes.
        Requires SUPPORT_PRESET_MODE.
        """
        return ZONE_PRESET_MODES
    def set_preset_mode(self, preset_mode: str) -> None:
        """Set new preset mode (any non-NONE preset enables sleep)."""
        if preset_mode == PRESET_NONE:
            self._airzone_zone.turnoff_sleep()
        else:
            self._airzone_zone.turnon_sleep()
    @property
    def fan_mode(self) -> Optional[str]:
        """Return the fan setting.

        Translates the device speed enum name to the HA fan-mode string.
        """
        fan_mode = self._airzone_zone.get_speed_selection().name
        return ZONE_FAN_MODES_R[fan_mode]
    @property
    def fan_modes(self) -> Optional[List[str]]:
        """Return the list of available fan modes.
        """
        return list(ZONE_FAN_MODES.keys())
    def set_fan_mode(self, fan_mode: str) -> None:
        """Set new target fan mode."""
        self._airzone_zone.set_speed_selection(ZONE_FAN_MODES[fan_mode])
    @property
    def min_temp(self):
        """Minimum settable temperature, as reported by the zone."""
        return self._airzone_zone.min_temp
    @property
    def max_temp(self):
        """Maximum settable temperature, as reported by the zone."""
        return self._airzone_zone.max_temp
    @property
    def unique_id(self):
        """Unique id of the underlying zone."""
        return self._airzone_zone.unique_id
    def update(self):
        """Refresh device state and rebuild the exported state attributes."""
        self._airzone_zone.retrieve_zone_state()
        self._state_attrs.update(
            {key: self._extract_value_from_attribute(self._airzone_zone, value) for
             key, value in self._available_attributes.items()})
        _LOGGER.debug(str(self._airzone_zone))
    @staticmethod
    def _extract_value_from_attribute(state, attribute):
        """Call the named zero-argument method on *state*, unwrapping Enums."""
        func = getattr(state, attribute)
        value = func()
        if isinstance(value, Enum):
            return value.value
        return value
class InnobusMachine(ClimateEntity):
"""Representation of a Innobus Machine."""
def __init__(self, airzone_machine):
"""Initialize the device."""
self._name = "Airzone Machine " + str(airzone_machine._machineId)
_LOGGER.info("Airzone configure machine " + self._name)
self._airzone_machine = airzone_machine
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def supported_features(self):
"""Return the list of supported features."""
return MACHINE_SUPPORT_FLAGS
@property
def temperature_unit(self):
"""Return the unit of measurement that is used."""
return TEMP_CELSIUS
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
from airzone.innobus import OperationMode
current_op = self._airzone_machine.operation_mode
if current_op in [OperationMode.HOT, OperationMode.HOT_AIR, OperationMode.HOTPLUS]:
return HVAC_MODE_HEAT
if current_op == OperationMode.COLD:
return HVAC_MODE_COOL
if current_op == OperationMode.AIR:
return HVAC_MODE_FAN_ONLY
return HVAC_MODE_OFF
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return MACHINE_HVAC_MODES
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode == HVAC_MODE_OFF:
self._airzone_machine.operation_mode = 'STOP'
return
if hvac_mode == HVAC_MODE_COOL:
self._airzone_machine.operation_mode = 'COLD'
return
if hvac_mode == HVAC_MODE_FAN_ONLY:
self._airzone_machine.operation_mode = 'AIR'
return
if hvac_mode == HVAC_MODE_HEAT:
if self.is_aux_heat:
self._airzone_machine.operation_mode = 'HOTPLUS'
return
if self.preset_mode == PRESET_AIR_MODE:
self._airzone_machine.operation_mode = 'HOT_AIR'
return
if self.preset_mode == PRESET_FLOOR_MODE:
self._airzone_machine.operation_mode = 'HOT'
return
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode, e.g., home, away, temp.
Requires SUPPORT_PRESET_MODE.
"""
current_op = self._airzone_machine.operation_mode.name
if current_op == 'HOT_AIR':
return PRESET_AIR_MODE
else:
return PRESET_FLOOR_MODE
@property
def preset_modes(self) -> Optional[List[str]]:
"""Return a list of available preset modes.
Requires SUPPORT_PRESET_MODE.
"""
return MACHINE_PRESET_MODES
def set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if self.hvac_mode == HVAC_MODE_HEAT:
if preset_mode == PRESET_FLOOR_MODE:
self._airzone_machine.operation_mode = 'HOT'
return
if preset_mode == PRESET_AIR_MODE:
self._airzone_machine.operation_mode = 'HOT_AIR'
return
def turn_aux_heat_on(self) -> None:
"""Turn auxiliary heater on."""
self._airzone_machine.operation_mode = 'HOTPLUS'
def turn_aux_heat_off(self) -> None:
"""Turn auxiliary heater on."""
if self.preset_mode == PRESET_AIR_MODE:
self._airzone_machine.operation_mode = 'AIR'
elif self.preset_mode == PRESET_FLOOR_MODE:
self._airzone_machine.operation_mode = 'HOT'
@property
def is_aux_heat(self) -> Optional[bool]:
"""Return true if aux heater.
Requires SUPPORT_AUX_HEAT.
"""
return self._airzone_machine.operation_mode.name == 'HOTPLUS'
@property
def unique_id(self):
return self._airzone_machine.unique_id
async def async_update(self):
self._airzone_machine._retrieve_machine_state()
_LOGGER.debug(str(self._airzone_machine))
| from enum import Enum
import logging
from typing import List, Optional
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_NONE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from .const import (
AVAILABLE_ATTRIBUTES_ZONE,
MACHINE_HVAC_MODES,
MACHINE_PRESET_MODES,
MACHINE_SUPPORT_FLAGS,
PRESET_AIR_MODE,
PRESET_FLOOR_MODE,
PRESET_SLEEP,
ZONE_FAN_MODES,
ZONE_FAN_MODES_R,
ZONE_HVAC_MODES,
ZONE_PRESET_MODES,
ZONE_SUPPORT_FLAGS,
)
_LOGGER = logging.getLogger(__name__)
class InnobusZone(ClimateEntity):
"""Representation of a Innobus Zone."""
def __init__(self, airzone_zone):
"""Initialize the device."""
self._name = "Airzone Zone " + str(airzone_zone._zone_id)
_LOGGER.info("Airzone configure zone " + self._name)
self._airzone_zone = airzone_zone
self._available_attributes = AVAILABLE_ATTRIBUTES_ZONE
self._state_attrs = {}
self._state_attrs.update(
{attribute: None for attribute in self._available_attributes})
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._state_attrs
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def supported_features(self):
"""Return the list of supported features."""
return ZONE_SUPPORT_FLAGS
@property
def temperature_unit(self):
"""Return the unit of measurement that is used."""
return TEMP_CELSIUS
def turn_on(self):
"""Turn on."""
self._airzone_zone.turn_on()
def turn_off(self):
"""Turn off."""
self._airzone_zone.turn_off()
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
tacto_on = bool(self._airzone_zone.is_tacto_on())
auto_on = bool(self._airzone_zone.is_automatic_mode())
if tacto_on and auto_on:
return HVAC_MODE_AUTO
elif tacto_on and not auto_on:
return HVAC_MODE_HEAT_COOL
else:
return HVAC_MODE_OFF
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return ZONE_HVAC_MODES
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode == HVAC_MODE_OFF:
self._airzone_zone.turnoff_tacto()
elif hvac_mode == HVAC_MODE_HEAT_COOL:
self._airzone_zone.turnoff_automatic_mode()
self._airzone_zone.retrieve_zone_state()
self._airzone_zone.turnon_tacto()
elif hvac_mode == HVAC_MODE_AUTO:
self._airzone_zone.turnon_automatic_mode()
self._airzone_zone.retrieve_zone_state()
self._airzone_zone.turnon_tacto()
@property
def hvac_action(self) -> Optional[str]:
"""Return the current running hvac operation."""
op_mode = self._airzone_zone._machine.operation_mode.name
if self._airzone_zone.is_floor_active():
return CURRENT_HVAC_HEAT
if self._airzone_zone.is_requesting_air():
if op_mode == 'HOT_AIR':
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_COOL
if op_mode == 'STOP':
return CURRENT_HVAC_OFF
return CURRENT_HVAC_IDLE
@property
def current_temperature(self):
"""Return the current temperature."""
return self._airzone_zone.local_temperature
@property
def target_temperature(self):
return self._airzone_zone.signal_temperature_value
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return None
self._airzone_zone.set_signal_temperature_value(round(float(temperature), 1))
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode, e.g., home, away, temp.
Requires SUPPORT_PRESET_MODE.
"""
if self._airzone_zone.is_sleep_on():
return PRESET_SLEEP
return PRESET_NONE
@property
def preset_modes(self) -> Optional[List[str]]:
"""Return a list of available preset modes.
Requires SUPPORT_PRESET_MODE.
"""
return ZONE_PRESET_MODES
def set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if preset_mode == PRESET_NONE:
self._airzone_zone.turnoff_sleep()
else:
self._airzone_zone.turnon_sleep()
@property
def fan_mode(self) -> Optional[str]:
"""Return the fan setting.
"""
fan_mode = self._airzone_zone.get_speed_selection().name
return ZONE_FAN_MODES_R[fan_mode]
@property
def fan_modes(self) -> Optional[List[str]]:
"""Return the list of available fan modes.
"""
return list(ZONE_FAN_MODES.keys())
def set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
self._airzone_zone.set_speed_selection(ZONE_FAN_MODES[fan_mode])
@property
def min_temp(self):
return self._airzone_zone.min_temp
@property
def max_temp(self):
return self._airzone_zone.max_temp
@property
def unique_id(self):
return self._airzone_zone.unique_id
def update(self):
self._airzone_zone.retrieve_zone_state()
self._state_attrs.update(
{key: self._extract_value_from_attribute(self._airzone_zone, value) for
key, value in self._available_attributes.items()})
_LOGGER.debug(str(self._airzone_zone))
@staticmethod
def _extract_value_from_attribute(state, attribute):
func = getattr(state, attribute)
value = func()
if isinstance(value, Enum):
return value.value
return value
class InnobusMachine(ClimateEntity):
"""Representation of a Innobus Machine."""
def __init__(self, airzone_machine):
"""Initialize the device."""
self._name = "Airzone Machine " + str(airzone_machine._machineId)
_LOGGER.info("Airzone configure machine " + self._name)
self._airzone_machine = airzone_machine
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def supported_features(self):
"""Return the list of supported features."""
return MACHINE_SUPPORT_FLAGS
@property
def temperature_unit(self):
"""Return the unit of measurement that is used."""
return TEMP_CELSIUS
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
from airzone.innobus import OperationMode
current_op = self._airzone_machine.operation_mode
if current_op in [OperationMode.HOT, OperationMode.HOT_AIR, OperationMode.HOTPLUS]:
return HVAC_MODE_HEAT
if current_op == OperationMode.COLD:
return HVAC_MODE_COOL
if current_op == OperationMode.AIR:
return HVAC_MODE_FAN_ONLY
return HVAC_MODE_OFF
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return MACHINE_HVAC_MODES
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode == HVAC_MODE_OFF:
self._airzone_machine.operation_mode = 'STOP'
return
if hvac_mode == HVAC_MODE_COOL:
self._airzone_machine.operation_mode = 'COLD'
return
if hvac_mode == HVAC_MODE_FAN_ONLY:
self._airzone_machine.operation_mode = 'AIR'
return
if hvac_mode == HVAC_MODE_HEAT:
if self.is_aux_heat:
self._airzone_machine.operation_mode = 'HOTPLUS'
return
if self.preset_mode == PRESET_AIR_MODE:
self._airzone_machine.operation_mode = 'HOT_AIR'
return
if self.preset_mode == PRESET_FLOOR_MODE:
self._airzone_machine.operation_mode = 'HOT'
return
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode, e.g., home, away, temp.
Requires SUPPORT_PRESET_MODE.
"""
current_op = self._airzone_machine.operation_mode.name
if current_op == 'HOT_AIR':
return PRESET_AIR_MODE
else:
return PRESET_FLOOR_MODE
@property
def preset_modes(self) -> Optional[List[str]]:
"""Return a list of available preset modes.
Requires SUPPORT_PRESET_MODE.
"""
return MACHINE_PRESET_MODES
def set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if self.hvac_mode == HVAC_MODE_HEAT:
if preset_mode == PRESET_FLOOR_MODE:
self._airzone_machine.operation_mode = 'HOT'
return
if preset_mode == PRESET_AIR_MODE:
self._airzone_machine.operation_mode = 'HOT_AIR'
return
def turn_aux_heat_on(self) -> None:
"""Turn auxiliary heater on."""
self._airzone_machine.operation_mode = 'HOTPLUS'
def turn_aux_heat_off(self) -> None:
"""Turn auxiliary heater on."""
if self.preset_mode == PRESET_AIR_MODE:
self._airzone_machine.operation_mode = 'AIR'
elif self.preset_mode == PRESET_FLOOR_MODE:
self._airzone_machine.operation_mode = 'HOT'
@property
def is_aux_heat(self) -> Optional[bool]:
"""Return true if aux heater.
Requires SUPPORT_AUX_HEAT.
"""
return self._airzone_machine.operation_mode.name == 'HOTPLUS'
@property
def unique_id(self):
return self._airzone_machine.unique_id
async def async_update(self):
self._airzone_machine._retrieve_machine_state()
_LOGGER.debug(str(self._airzone_machine))
| en | 0.631097 | Representation of a Innobus Zone. Initialize the device. Return the state attributes. Return the name of the sensor. Return the list of supported features. Return the unit of measurement that is used. Turn on. Turn off. Return hvac operation ie. heat, cool mode. Need to be one of HVAC_MODE_*. Return the list of available hvac operation modes. Need to be a subset of HVAC_MODES. Set new target hvac mode. Return the current running hvac operation. Return the current temperature. Set new target temperature. Return the current preset mode, e.g., home, away, temp. Requires SUPPORT_PRESET_MODE. Return a list of available preset modes. Requires SUPPORT_PRESET_MODE. Set new preset mode. Return the fan setting. Return the list of available fan modes. Set new target fan mode. Representation of a Innobus Machine. Initialize the device. Return the name of the sensor. Return the list of supported features. Return the unit of measurement that is used. Return hvac operation ie. heat, cool mode. Need to be one of HVAC_MODE_*. Return the list of available hvac operation modes. Need to be a subset of HVAC_MODES. Set new target hvac mode. Return the current preset mode, e.g., home, away, temp. Requires SUPPORT_PRESET_MODE. Return a list of available preset modes. Requires SUPPORT_PRESET_MODE. Set new preset mode. Turn auxiliary heater on. Turn auxiliary heater on. Return true if aux heater. Requires SUPPORT_AUX_HEAT. | 2.557014 | 3 |
PoissonDirect/PoissonDirect.py | hpkeeler/posts | 24 | 6623396 | <reponame>hpkeeler/posts
# Author: <NAME>, 2019.
# Website: hpaulkeeler.com
# Repository: github.com/hpaulkeeler/posts
#
# This program simulates Poisson random variables based
# on the direct method of using exponential inter-arrival times.
# For more details, see the post:
# hpaulkeeler.com/simulating-poisson-random-variables-direct-method/
#
# WARNING: This program is only suitable for small Poisson parameter (mu)
# values, for example, mu<20.
# This program is intended as an illustration. Python (NumPy) has its own in-built
# function numpy.random.poisson, which is much faster. Use it -- not this function.
import numpy as np; # NumPy package for arrays, random number generation, etc
mu=5; #Poisson parameter
numbSim=10**2; #number of simulations
muVector=mu*np.ones(numbSim);
### START - Function definitions ###
#Use while loop to generate Poisson variates
def funPoissonLoop(mu):
T=0; #initialize sum of exponential variables as zero
n=-1;#initialize counting variable as negative one
while (T<1):
E=-(1/mu)*np.log(np.random.rand(1));#generate exponential random variable
T=T+E; #update sum of exponential variables
n=n+1; #update number of exponential variables
N=n;
return N;
#Use recursion to generate Poisson variates
#WARNING: Might be slow or use up too much memory
def funPoissonRecursive(mu):
T=0; #initialize sum of exponential variables as zero
n=-1; #initialize counting variable as negative one
def funStepExp(nu,S,m):
if S<1:
#run if sum of exponential variables is not high enough
#generate exponential random variable
E=(-np.log(np.random.rand(1)))/nu;
S=S+E; #update sum of exponential variables
m=m+1; #update nunber of exponential variables
#recursively call function again
[T,N]=funStepExp(nu,S,m);
else:
T=S;
N=m;
return T,N;
#run (recursive) exponential function step function
_,N=funStepExp(mu,T,n);
return N;
### END - Function definitions ###
#generate many Poisson variables
randPoissonLoop=np.array(list(map(lambda x: funPoissonLoop(x), muVector)));
randPoissonRecursive=np.array(list(map(lambda x: funPoissonRecursive(x), muVector)));
#calculate sample mean
meanPoissonLoop=np.mean(randPoissonLoop)
print('meanPoissonLoop = ',meanPoissonLoop)
meanPoissonRecursive=np.mean(randPoissonRecursive)
print('meanPoissonRecursive = ',meanPoissonRecursive)
#calculate sample variance
varPoissonLoop=np.var(randPoissonLoop)
print('varPoissonLoop = ',varPoissonLoop)
varPoissonRecursive=np.var(randPoissonRecursive)
print('varPoissonRecursive = ',varPoissonRecursive) | # Author: <NAME>, 2019.
# Website: hpaulkeeler.com
# Repository: github.com/hpaulkeeler/posts
#
# This program simulates Poisson random variables based
# on the direct method of using exponential inter-arrival times.
# For more details, see the post:
# hpaulkeeler.com/simulating-poisson-random-variables-direct-method/
#
# WARNING: This program is only suitable for small Poisson parameter (mu)
# values, for example, mu<20.
# This program is intended as an illustration. Python (NumPy) has its own in-built
# function numpy.random.poisson, which is much faster. Use it -- not this function.
import numpy as np; # NumPy package for arrays, random number generation, etc
mu=5; #Poisson parameter
numbSim=10**2; #number of simulations
muVector=mu*np.ones(numbSim);
### START - Function definitions ###
#Use while loop to generate Poisson variates
def funPoissonLoop(mu):
T=0; #initialize sum of exponential variables as zero
n=-1;#initialize counting variable as negative one
while (T<1):
E=-(1/mu)*np.log(np.random.rand(1));#generate exponential random variable
T=T+E; #update sum of exponential variables
n=n+1; #update number of exponential variables
N=n;
return N;
#Use recursion to generate Poisson variates
#WARNING: Might be slow or use up too much memory
def funPoissonRecursive(mu):
T=0; #initialize sum of exponential variables as zero
n=-1; #initialize counting variable as negative one
def funStepExp(nu,S,m):
if S<1:
#run if sum of exponential variables is not high enough
#generate exponential random variable
E=(-np.log(np.random.rand(1)))/nu;
S=S+E; #update sum of exponential variables
m=m+1; #update nunber of exponential variables
#recursively call function again
[T,N]=funStepExp(nu,S,m);
else:
T=S;
N=m;
return T,N;
#run (recursive) exponential function step function
_,N=funStepExp(mu,T,n);
return N;
### END - Function definitions ###
#generate many Poisson variables
randPoissonLoop=np.array(list(map(lambda x: funPoissonLoop(x), muVector)));
randPoissonRecursive=np.array(list(map(lambda x: funPoissonRecursive(x), muVector)));
#calculate sample mean
meanPoissonLoop=np.mean(randPoissonLoop)
print('meanPoissonLoop = ',meanPoissonLoop)
meanPoissonRecursive=np.mean(randPoissonRecursive)
print('meanPoissonRecursive = ',meanPoissonRecursive)
#calculate sample variance
varPoissonLoop=np.var(randPoissonLoop)
print('varPoissonLoop = ',varPoissonLoop)
varPoissonRecursive=np.var(randPoissonRecursive)
print('varPoissonRecursive = ',varPoissonRecursive) | en | 0.642229 | # Author: <NAME>, 2019. # Website: hpaulkeeler.com # Repository: github.com/hpaulkeeler/posts # # This program simulates Poisson random variables based # on the direct method of using exponential inter-arrival times. # For more details, see the post: # hpaulkeeler.com/simulating-poisson-random-variables-direct-method/ # # WARNING: This program is only suitable for small Poisson parameter (mu) # values, for example, mu<20. # This program is intended as an illustration. Python (NumPy) has its own in-built # function numpy.random.poisson, which is much faster. Use it -- not this function. # NumPy package for arrays, random number generation, etc #Poisson parameter #number of simulations ### START - Function definitions ### #Use while loop to generate Poisson variates #initialize sum of exponential variables as zero #initialize counting variable as negative one #generate exponential random variable #update sum of exponential variables #update number of exponential variables #Use recursion to generate Poisson variates #WARNING: Might be slow or use up too much memory #initialize sum of exponential variables as zero #initialize counting variable as negative one #run if sum of exponential variables is not high enough #generate exponential random variable #update sum of exponential variables #update nunber of exponential variables #recursively call function again #run (recursive) exponential function step function ### END - Function definitions ### #generate many Poisson variables #calculate sample mean #calculate sample variance | 3.801713 | 4 |
quora-question-pairs-data-prep.py | JobQiu/keras-quora-question-pairs | 0 | 6623397 | <gh_stars>0
# coding: utf-8
# # Quora question pairs: data preparation
# ## Import packages
# In[1]:
from __future__ import print_function
import numpy as np
import csv, json
from zipfile import ZipFile
from os.path import expanduser, exists
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.data_utils import get_file
# ## Initialize global variables
# In[2]:
KERAS_DATASETS_DIR = expanduser('~/.keras/datasets/')
QUESTION_PAIRS_FILE_URL = 'http://qim.ec.quoracdn.net/quora_duplicate_questions.tsv'
QUESTION_PAIRS_FILE = 'quora_duplicate_questions.tsv'
GLOVE_ZIP_FILE_URL = 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
GLOVE_ZIP_FILE = 'glove.840B.300d.zip'
GLOVE_FILE = 'glove.840B.300d.txt'
Q1_TRAINING_DATA_FILE = 'q1_train.npy'
Q2_TRAINING_DATA_FILE = 'q2_train.npy'
LABEL_TRAINING_DATA_FILE = 'label_train.npy'
WORD_EMBEDDING_MATRIX_FILE = 'word_embedding_matrix.npy'
NB_WORDS_DATA_FILE = 'nb_words.json'
MAX_NB_WORDS = 200000
MAX_SEQUENCE_LENGTH = 25
EMBEDDING_DIM = 300
# ## Download and extract questions pairs data
# In[3]:
if not exists(KERAS_DATASETS_DIR + QUESTION_PAIRS_FILE):
get_file(QUESTION_PAIRS_FILE, QUESTION_PAIRS_FILE_URL)
print("Processing", QUESTION_PAIRS_FILE)
question1 = []
question2 = []
is_duplicate = []
with open(KERAS_DATASETS_DIR + QUESTION_PAIRS_FILE, encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
question1.append(row['question1'])
question2.append(row['question2'])
is_duplicate.append(row['is_duplicate'])
print('Question pairs: %d' % len(question1))
# ## Build tokenized word index
# In[4]:
questions = question1 + question2
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(questions)
question1_word_sequences = tokenizer.texts_to_sequences(question1)
question2_word_sequences = tokenizer.texts_to_sequences(question2)
word_index = tokenizer.word_index
print("Words in index: %d" % len(word_index))
# ## Download and process GloVe embeddings
# In[5]:
if not exists(KERAS_DATASETS_DIR + GLOVE_ZIP_FILE):
zipfile = ZipFile(get_file(GLOVE_ZIP_FILE, GLOVE_ZIP_FILE_URL))
zipfile.extract(GLOVE_FILE, path=KERAS_DATASETS_DIR)
print("Processing", GLOVE_FILE)
embeddings_index = {}
with open(KERAS_DATASETS_DIR + GLOVE_FILE, encoding='utf-8') as f:
for line in f:
values = line.split(' ')
word = values[0]
embedding = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = embedding
print('Word embeddings: %d' % len(embeddings_index))
# ## Prepare word embedding matrix
# In[6]:
nb_words = min(MAX_NB_WORDS, len(word_index))
word_embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
for word, i in word_index.items():
if i > MAX_NB_WORDS:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
word_embedding_matrix[i] = embedding_vector
print('Null word embeddings: %d' % np.sum(np.sum(word_embedding_matrix, axis=1) == 0))
# ## Prepare training data tensors
# In[7]:
q1_data = pad_sequences(question1_word_sequences, maxlen=MAX_SEQUENCE_LENGTH)
q2_data = pad_sequences(question2_word_sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = np.array(is_duplicate, dtype=int)
print('Shape of question1 data tensor:', q1_data.shape)
print('Shape of question2 data tensor:', q2_data.shape)
print('Shape of label tensor:', labels.shape)
# ## Persist training and configuration data to files
# In[8]:
np.save(open(Q1_TRAINING_DATA_FILE, 'wb'), q1_data)
np.save(open(Q2_TRAINING_DATA_FILE, 'wb'), q2_data)
np.save(open(LABEL_TRAINING_DATA_FILE, 'wb'), labels)
np.save(open(WORD_EMBEDDING_MATRIX_FILE, 'wb'), word_embedding_matrix)
with open(NB_WORDS_DATA_FILE, 'w') as f:
json.dump({'nb_words': nb_words}, f)
| # coding: utf-8
# # Quora question pairs: data preparation
# ## Import packages
# In[1]:
from __future__ import print_function
import numpy as np
import csv, json
from zipfile import ZipFile
from os.path import expanduser, exists
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.data_utils import get_file
# ## Initialize global variables
# In[2]:
KERAS_DATASETS_DIR = expanduser('~/.keras/datasets/')
QUESTION_PAIRS_FILE_URL = 'http://qim.ec.quoracdn.net/quora_duplicate_questions.tsv'
QUESTION_PAIRS_FILE = 'quora_duplicate_questions.tsv'
GLOVE_ZIP_FILE_URL = 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
GLOVE_ZIP_FILE = 'glove.840B.300d.zip'
GLOVE_FILE = 'glove.840B.300d.txt'
Q1_TRAINING_DATA_FILE = 'q1_train.npy'
Q2_TRAINING_DATA_FILE = 'q2_train.npy'
LABEL_TRAINING_DATA_FILE = 'label_train.npy'
WORD_EMBEDDING_MATRIX_FILE = 'word_embedding_matrix.npy'
NB_WORDS_DATA_FILE = 'nb_words.json'
MAX_NB_WORDS = 200000
MAX_SEQUENCE_LENGTH = 25
EMBEDDING_DIM = 300
# ## Download and extract questions pairs data
# In[3]:
if not exists(KERAS_DATASETS_DIR + QUESTION_PAIRS_FILE):
get_file(QUESTION_PAIRS_FILE, QUESTION_PAIRS_FILE_URL)
print("Processing", QUESTION_PAIRS_FILE)
question1 = []
question2 = []
is_duplicate = []
with open(KERAS_DATASETS_DIR + QUESTION_PAIRS_FILE, encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
question1.append(row['question1'])
question2.append(row['question2'])
is_duplicate.append(row['is_duplicate'])
print('Question pairs: %d' % len(question1))
# ## Build tokenized word index
# In[4]:
questions = question1 + question2
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(questions)
question1_word_sequences = tokenizer.texts_to_sequences(question1)
question2_word_sequences = tokenizer.texts_to_sequences(question2)
word_index = tokenizer.word_index
print("Words in index: %d" % len(word_index))
# ## Download and process GloVe embeddings
# In[5]:
if not exists(KERAS_DATASETS_DIR + GLOVE_ZIP_FILE):
zipfile = ZipFile(get_file(GLOVE_ZIP_FILE, GLOVE_ZIP_FILE_URL))
zipfile.extract(GLOVE_FILE, path=KERAS_DATASETS_DIR)
print("Processing", GLOVE_FILE)
embeddings_index = {}
with open(KERAS_DATASETS_DIR + GLOVE_FILE, encoding='utf-8') as f:
for line in f:
values = line.split(' ')
word = values[0]
embedding = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = embedding
print('Word embeddings: %d' % len(embeddings_index))
# ## Prepare word embedding matrix
# In[6]:
nb_words = min(MAX_NB_WORDS, len(word_index))
word_embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
for word, i in word_index.items():
if i > MAX_NB_WORDS:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
word_embedding_matrix[i] = embedding_vector
print('Null word embeddings: %d' % np.sum(np.sum(word_embedding_matrix, axis=1) == 0))
# ## Prepare training data tensors
# In[7]:
q1_data = pad_sequences(question1_word_sequences, maxlen=MAX_SEQUENCE_LENGTH)
q2_data = pad_sequences(question2_word_sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = np.array(is_duplicate, dtype=int)
print('Shape of question1 data tensor:', q1_data.shape)
print('Shape of question2 data tensor:', q2_data.shape)
print('Shape of label tensor:', labels.shape)
# ## Persist training and configuration data to files
# In[8]:
np.save(open(Q1_TRAINING_DATA_FILE, 'wb'), q1_data)
np.save(open(Q2_TRAINING_DATA_FILE, 'wb'), q2_data)
np.save(open(LABEL_TRAINING_DATA_FILE, 'wb'), labels)
np.save(open(WORD_EMBEDDING_MATRIX_FILE, 'wb'), word_embedding_matrix)
with open(NB_WORDS_DATA_FILE, 'w') as f:
json.dump({'nb_words': nb_words}, f) | en | 0.500954 | # coding: utf-8 # # Quora question pairs: data preparation # ## Import packages # In[1]: # ## Initialize global variables # In[2]: # ## Download and extract questions pairs data # In[3]: # ## Build tokenized word index # In[4]: # ## Download and process GloVe embeddings # In[5]: # ## Prepare word embedding matrix # In[6]: # ## Prepare training data tensors # In[7]: # ## Persist training and configuration data to files # In[8]: | 2.80207 | 3 |
hospital/migrations/0013_test_center.py | horizonltd/corona | 0 | 6623398 | # Generated by Django 2.2 on 2020-03-30 10:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hospital', '0012_auto_20190502_0033'),
]
operations = [
migrations.CreateModel(
name='Test_Center',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=120)),
('address', models.CharField(max_length=120)),
('picture', models.ImageField(upload_to='center/')),
('details', models.TextField(blank=True, null=True)),
('geolocation', models.TextField(blank=True, null=True)),
],
),
]
| # Generated by Django 2.2 on 2020-03-30 10:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hospital', '0012_auto_20190502_0033'),
]
operations = [
migrations.CreateModel(
name='Test_Center',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=120)),
('address', models.CharField(max_length=120)),
('picture', models.ImageField(upload_to='center/')),
('details', models.TextField(blank=True, null=True)),
('geolocation', models.TextField(blank=True, null=True)),
],
),
]
| en | 0.716802 | # Generated by Django 2.2 on 2020-03-30 10:22 | 1.774457 | 2 |
RvtCheckVersion.py | Cyril-Pop/Python-checkRvtVersion | 2 | 6623399 | import sys
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QVBoxLayout, QLayout
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap, QFont
import os
import os.path as op
import olefile
import re
import base64
__author__ = "<NAME>"
__copyright__ = "Cyril POUPIN (c) 2022 "
__license__ = "MIT License"
__version__ = "2.1.0"
class ImageLabel(QLabel):
    """Drop-target label shown before any Revit file has been dragged in.

    Renders a centered hint inside a dashed border; once a file is dropped
    the owner replaces the hint with the file's embedded preview image.
    """

    def __init__(self):
        super().__init__()
        # Dashed-border placeholder styling for the empty drop zone.
        placeholder_style = '''
            QLabel{
                border: 4px dashed #aaa
            }
        '''
        self.setText('\n\n Drop Revit files Here \n\n')
        self.setAlignment(Qt.AlignCenter)
        self.setFont(QFont('Arial', 10))
        self.setStyleSheet(placeholder_style)

    def setPixmap(self, image):
        """Display *image*; delegates straight to QLabel.setPixmap."""
        super().setPixmap(image)
class AppDemo(QWidget):
    """Main window: accepts drag-and-dropped Revit files (.rfa/.rvt/.rte),
    reads the OLE container, and shows the embedded preview image plus the
    Revit version the file was saved with.
    """

    def __init__(self):
        super().__init__()
        self.resize(400, 600)
        self.setAcceptDrops(True)  # enables dragEnter/dragMove/drop events below
        self.setWindowTitle("Check Revit Version")
        mainLayout = QVBoxLayout()
        mainLayout.setSizeConstraint(QLayout.SetFixedSize)
        # Drop target / preview area.
        self.photoViewer = ImageLabel()
        mainLayout.addWidget(self.photoViewer)
        # Bold label that receives the detected Revit version string.
        self.LabelRvtVersion = QLabel(self)
        self.LabelRvtVersion.setEnabled(True)
        self.LabelRvtVersion.setText("Version")
        self.LabelRvtVersion.setAlignment(Qt.AlignCenter)
        self.LabelRvtVersion.setFont(QFont('Arial', 12, QFont.Bold))
        self.LabelRvtVersion.setMargin(5)
        mainLayout.addWidget(self.LabelRvtVersion)
        # Label that receives the raw BasicFileInfo details.
        self.LabelDetail = QLabel(self)
        self.LabelDetail.setEnabled(True)
        self.LabelDetail.setText("Detail")
        self.LabelDetail.setAlignment(Qt.AlignCenter)
        self.LabelDetail.setFont(QFont('Arial', 10))
        self.LabelDetail.setMargin(5)
        mainLayout.addWidget(self.LabelDetail)
        # Footer: app version + clickable link to the author's blog.
        self.Label_lnk = QLabel(self)
        self.Label_lnk.setEnabled(True)
        urlLink = "Version : "+ __version__ +" : <a href=\"https://voltadynabim.blogspot.com\">'voltadynabim.blogspot.com'</a>"
        self.Label_lnk.setText(urlLink)
        self.Label_lnk.setAlignment(Qt.AlignBottom)
        self.Label_lnk.setOpenExternalLinks(True)
        self.Label_lnk.setMargin(5)
        mainLayout.addWidget(self.Label_lnk)
        self.setLayout(mainLayout)

    def dragEnterEvent(self, event):
        """Accept the drag only if the first dropped URL is a Revit file."""
        # NOTE(review): mineDataTxt is computed but never used (looks like a
        # typo for "mimeDataTxt"); same in dragMoveEvent/dropEvent.
        mimeData = event.mimeData()
        mineDataTxt = mimeData.text()
        # NOTE(review): assumes the payload carries at least one URL — a
        # plain-text drag would raise IndexError here. TODO confirm.
        file_path = event.mimeData().urls()[0].toLocalFile()
        if file_path.endswith(('.rfa', '.rvt', '.rte')):
            event.accept()
        else:
            event.ignore()

    def dragMoveEvent(self, event):
        """Same filtering as dragEnterEvent (duplicated logic)."""
        mimeData = event.mimeData()
        mineDataTxt = mimeData.text()
        file_path = event.mimeData().urls()[0].toLocalFile()
        if file_path.endswith(('.rfa','.rvt', '.rte')):
            event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        """On drop, extract the preview image and the Revit version."""
        mimeData = event.mimeData()
        mineDataTxt = mimeData.text()
        # NOTE(review): unlike the two handlers above, this checks the MIME
        # *text* rather than the first URL's local path — verify this is
        # intentional (they can differ for multi-file drags).
        if mineDataTxt.endswith(('.rfa','.rvt', '.rte')):
            event.setDropAction(Qt.CopyAction)
            file_path = event.mimeData().urls()[0].toLocalFile()
            print(file_path)
            self.set_rvt_preview(file_path)
            self.set_rvt_file_version(file_path)
            event.accept()
        else:
            event.ignore()

    def getfileInfo(self, rvt_ole):
        """Read the "BasicFileInfo" OLE stream and extract the Revit version.

        Returns a (human-readable version string, raw decoded stream text)
        tuple. Raises AttributeError if neither regex matches the stream.
        """
        rvt_version = 'Version not found'
        # The stream is UTF-16 with unknown byte order; decode both ways.
        bfiLe = rvt_ole.openstream("BasicFileInfo")
        file_infoLe = bfiLe.read().decode("utf-16le", "ignore")
        bfiBe = rvt_ole.openstream("BasicFileInfo")
        file_infoBe = bfiBe.read().decode("utf-16be", "ignore")
        # NOTE(review): '"" in s' is True for *every* string, so the BE
        # decode is always chosen. The original condition most likely tested
        # for a specific mojibake/sentinel character that was lost when this
        # text was cleaned — verify against the upstream source.
        file_info = file_infoBe if "" in file_infoLe else file_infoLe
        print(file_info)
        # Newer files carry "Format: <year>"; older ones "<year> ... Build".
        if "Format" in file_info:
            rvt_file_version = re.search(r"Format.+?(\d{4})", file_info).group(1)
        else:
            rvt_file_version = re.search(r"(\d{4}).+Build", file_info).group(1)
        rvt_version = 'Revit version {}'.format(rvt_file_version)
        return rvt_version, file_info

    def set_rvt_file_version(self, rvt_file):
        """Detect the file's Revit version and push it into the UI labels.

        Returns the version string, or None when the file is missing or is
        not a valid OLE container.
        """
        rvt_version = None
        if op.exists(rvt_file):
            if olefile.isOleFile(rvt_file):
                self.LabelRvtVersion.setText("Processing...")
                rvt_ole = olefile.OleFileIO(rvt_file)
                # Delegate the stream parsing to getfileInfo().
                rvt_version, file_info = self.getfileInfo(rvt_ole)
                # Show only the text after the last "sharing:" marker.
                fileinfoReader = file_info.split("sharing:").pop()
                self.LabelRvtVersion.setText(rvt_version)
                self.LabelDetail.setText(fileinfoReader)
                rvt_ole.close()
                return rvt_version
            else:
                self.LabelRvtVersion.setText("Error OLE structure")
                return rvt_version
        else:
            self.LabelRvtVersion.setText("File not found")
            return rvt_version

    def set_rvt_preview(self, rvt_file):
        """Extract the embedded PNG preview and show it in the viewer.

        Returns the scaled QPixmap, or None when the file is missing,
        not OLE, or extraction fails (errors are printed, not raised).
        """
        newpreview = None
        if op.exists(rvt_file):
            if olefile.isOleFile(rvt_file):
                try:
                    # The preview lives in the "RevitPreview4.0" stream.
                    rvt_ole = olefile.OleFileIO(rvt_file)
                    bfi = rvt_ole.openstream("RevitPreview4.0")
                    readed = bfi.read()
                    # Locate the PNG magic bytes (89 50 4E 47 0D 0A 1A 0A)
                    # inside the stream and keep everything from there on.
                    # NOTE(review): find() returning -1 (no signature) is not
                    # handled and would yield a truncated/garbage image.
                    readed_hex = readed.hex()
                    pos = readed_hex.find('89504e470d0a1a0a')
                    png_hex = readed_hex[pos:]
                    data = bytes.fromhex(png_hex)
                    # Round-trip through base64 before loading the pixmap.
                    base64_encoded_data = base64.b64encode(data)
                    print(base64_encoded_data)
                    pm = QPixmap()
                    pm.loadFromData(base64.b64decode(base64_encoded_data))
                    pm = pm.scaledToWidth(180)
                    self.photoViewer.setPixmap(pm)
                    newpreview = pm
                    rvt_ole.close()
                except Exception as ex:
                    print(ex)
        return newpreview
# Application entry point (runs at import time; no __main__ guard).
app = QApplication(sys.argv)
demo = AppDemo()
demo.show()
sys.exit(app.exec_()) | import sys
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QVBoxLayout, QLayout
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap, QFont
import os
import os.path as op
import olefile
import re
import base64
__author__ = "<NAME>"
__copyright__ = "Cyril POUPIN (c) 2022 "
__license__ = "MIT License"
__version__ = "2.1.0"
class ImageLabel(QLabel):
    """Drop-target label showing a drag-and-drop hint until a pixmap is set."""

    def __init__(self):
        super().__init__()
        self.setAlignment(Qt.AlignCenter)
        self.setText('\n\n Drop Revit files Here \n\n')
        self.setFont(QFont('Arial', 10))
        # Dashed border marks the drop area.
        self.setStyleSheet('''
        QLabel{
            border: 4px dashed #aaa
        }
        ''')

    def setPixmap(self, image):
        # Plain pass-through override of QLabel.setPixmap.
        super().setPixmap(image)
class AppDemo(QWidget):
    """Main window: accepts drag-and-dropped Revit files (.rfa/.rvt/.rte),
    reads the OLE container, and shows the embedded preview image plus the
    Revit version the file was saved with.
    """

    def __init__(self):
        super().__init__()
        self.resize(400, 600)
        self.setAcceptDrops(True)  # enables dragEnter/dragMove/drop events below
        self.setWindowTitle("Check Revit Version")
        mainLayout = QVBoxLayout()
        mainLayout.setSizeConstraint(QLayout.SetFixedSize)
        # Drop target / preview area.
        self.photoViewer = ImageLabel()
        mainLayout.addWidget(self.photoViewer)
        # Bold label that receives the detected Revit version string.
        self.LabelRvtVersion = QLabel(self)
        self.LabelRvtVersion.setEnabled(True)
        self.LabelRvtVersion.setText("Version")
        self.LabelRvtVersion.setAlignment(Qt.AlignCenter)
        self.LabelRvtVersion.setFont(QFont('Arial', 12, QFont.Bold))
        self.LabelRvtVersion.setMargin(5)
        mainLayout.addWidget(self.LabelRvtVersion)
        # Label that receives the raw BasicFileInfo details.
        self.LabelDetail = QLabel(self)
        self.LabelDetail.setEnabled(True)
        self.LabelDetail.setText("Detail")
        self.LabelDetail.setAlignment(Qt.AlignCenter)
        self.LabelDetail.setFont(QFont('Arial', 10))
        self.LabelDetail.setMargin(5)
        mainLayout.addWidget(self.LabelDetail)
        # Footer: app version + clickable link to the author's blog.
        self.Label_lnk = QLabel(self)
        self.Label_lnk.setEnabled(True)
        urlLink = "Version : "+ __version__ +" : <a href=\"https://voltadynabim.blogspot.com\">'voltadynabim.blogspot.com'</a>"
        self.Label_lnk.setText(urlLink)
        self.Label_lnk.setAlignment(Qt.AlignBottom)
        self.Label_lnk.setOpenExternalLinks(True)
        self.Label_lnk.setMargin(5)
        mainLayout.addWidget(self.Label_lnk)
        self.setLayout(mainLayout)

    def dragEnterEvent(self, event):
        """Accept the drag only if the first dropped URL is a Revit file."""
        # NOTE(review): mineDataTxt is computed but never used (looks like a
        # typo for "mimeDataTxt"); same in dragMoveEvent/dropEvent.
        mimeData = event.mimeData()
        mineDataTxt = mimeData.text()
        # NOTE(review): assumes the payload carries at least one URL — a
        # plain-text drag would raise IndexError here. TODO confirm.
        file_path = event.mimeData().urls()[0].toLocalFile()
        if file_path.endswith(('.rfa', '.rvt', '.rte')):
            event.accept()
        else:
            event.ignore()

    def dragMoveEvent(self, event):
        """Same filtering as dragEnterEvent (duplicated logic)."""
        mimeData = event.mimeData()
        mineDataTxt = mimeData.text()
        file_path = event.mimeData().urls()[0].toLocalFile()
        if file_path.endswith(('.rfa','.rvt', '.rte')):
            event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        """On drop, extract the preview image and the Revit version."""
        mimeData = event.mimeData()
        mineDataTxt = mimeData.text()
        # NOTE(review): unlike the two handlers above, this checks the MIME
        # *text* rather than the first URL's local path — verify intentional.
        if mineDataTxt.endswith(('.rfa','.rvt', '.rte')):
            event.setDropAction(Qt.CopyAction)
            file_path = event.mimeData().urls()[0].toLocalFile()
            print(file_path)
            self.set_rvt_preview(file_path)
            self.set_rvt_file_version(file_path)
            event.accept()
        else:
            event.ignore()

    def getfileInfo(self, rvt_ole):
        """Read the "BasicFileInfo" OLE stream and extract the Revit version.

        Returns a (human-readable version string, raw decoded stream text)
        tuple. Raises AttributeError if neither regex matches the stream.
        """
        rvt_version = 'Version not found'
        # The stream is UTF-16 with unknown byte order; decode both ways.
        bfiLe = rvt_ole.openstream("BasicFileInfo")
        file_infoLe = bfiLe.read().decode("utf-16le", "ignore")
        bfiBe = rvt_ole.openstream("BasicFileInfo")
        file_infoBe = bfiBe.read().decode("utf-16be", "ignore")
        # NOTE(review): '"" in s' is True for *every* string, so the BE
        # decode is always chosen; the original sentinel character was
        # probably lost during text cleaning — verify against upstream.
        file_info = file_infoBe if "" in file_infoLe else file_infoLe
        print(file_info)
        # Newer files carry "Format: <year>"; older ones "<year> ... Build".
        if "Format" in file_info:
            rvt_file_version = re.search(r"Format.+?(\d{4})", file_info).group(1)
        else:
            rvt_file_version = re.search(r"(\d{4}).+Build", file_info).group(1)
        rvt_version = 'Revit version {}'.format(rvt_file_version)
        return rvt_version, file_info

    def set_rvt_file_version(self, rvt_file):
        """Detect the file's Revit version and push it into the UI labels.

        Returns the version string, or None when the file is missing or is
        not a valid OLE container.
        """
        rvt_version = None
        if op.exists(rvt_file):
            if olefile.isOleFile(rvt_file):
                self.LabelRvtVersion.setText("Processing...")
                rvt_ole = olefile.OleFileIO(rvt_file)
                # Delegate the stream parsing to getfileInfo().
                rvt_version, file_info = self.getfileInfo(rvt_ole)
                # Show only the text after the last "sharing:" marker.
                fileinfoReader = file_info.split("sharing:").pop()
                self.LabelRvtVersion.setText(rvt_version)
                self.LabelDetail.setText(fileinfoReader)
                rvt_ole.close()
                return rvt_version
            else:
                self.LabelRvtVersion.setText("Error OLE structure")
                return rvt_version
        else:
            self.LabelRvtVersion.setText("File not found")
            return rvt_version

    def set_rvt_preview(self, rvt_file):
        """Extract the embedded PNG preview and show it in the viewer.

        Returns the scaled QPixmap, or None when the file is missing,
        not OLE, or extraction fails (errors are printed, not raised).
        """
        newpreview = None
        if op.exists(rvt_file):
            if olefile.isOleFile(rvt_file):
                try:
                    # The preview lives in the "RevitPreview4.0" stream.
                    rvt_ole = olefile.OleFileIO(rvt_file)
                    bfi = rvt_ole.openstream("RevitPreview4.0")
                    readed = bfi.read()
                    # Locate the PNG magic bytes (89 50 4E 47 0D 0A 1A 0A).
                    # NOTE(review): find() returning -1 (no signature) is not
                    # handled and would yield a garbage image.
                    readed_hex = readed.hex()
                    pos = readed_hex.find('89504e470d0a1a0a')
                    png_hex = readed_hex[pos:]
                    data = bytes.fromhex(png_hex)
                    # Round-trip through base64 before loading the pixmap.
                    base64_encoded_data = base64.b64encode(data)
                    print(base64_encoded_data)
                    pm = QPixmap()
                    pm.loadFromData(base64.b64decode(base64_encoded_data))
                    pm = pm.scaledToWidth(180)
                    self.photoViewer.setPixmap(pm)
                    newpreview = pm
                    rvt_ole.close()
                except Exception as ex:
                    print(ex)
        return newpreview
# Application entry point (runs at import time; no __main__ guard).
app = QApplication(sys.argv)
demo = AppDemo()
demo.show()
sys.exit(app.exec_()) | en | 0.404637 | QLabel{
border: 4px dashed #aaa
} # # # #bfi = rvt_ole.openstream("BasicFileInfo") # internal function # Open ole file # Find png signature # encode to 64 to push in PhotoImage | 2.16983 | 2 |
test/src/lib/idol/py_mar/all/target/assembled_optional.py | lyric-com/idol | 0 | 6623400 | <reponame>lyric-com/idol
# This file was scaffold by idol_mar, but it will not be overwritten, so feel free to edit.
# This file will be regenerated if you delete it.
from ...codegen.all.target.assembled_optional import (
AllTargetAssembledOptionalSchema as AssembledOptionalSchemaCodegen,
)
class AssembledOptionalSchema(AssembledOptionalSchemaCodegen):
    """Hand-editable extension point over the idol-generated schema.

    The scaffold keeps this subclass empty; add overrides here — the file
    is not regenerated unless deleted.
    """
    pass
| # This file was scaffold by idol_mar, but it will not be overwritten, so feel free to edit.
# This file will be regenerated if you delete it.
from ...codegen.all.target.assembled_optional import (
AllTargetAssembledOptionalSchema as AssembledOptionalSchemaCodegen,
)
class AssembledOptionalSchema(AssembledOptionalSchemaCodegen):
pass | en | 0.976163 | # This file was scaffold by idol_mar, but it will not be overwritten, so feel free to edit. # This file will be regenerated if you delete it. | 1.052708 | 1 |
api/migrations/0004_product_image.py | MurungaKibaara/Ecommerce-Django-Rest-API | 0 | 6623401 | <gh_stars>0
# Generated by Django 3.0.3 on 2020-03-12 08:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an ``image`` field to the ``Product`` model (auto-generated)."""

    dependencies = [
        ('api', '0003_category_owner'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='image',
            # default='1.jpg' only backfills existing rows during migration;
            # preserve_default=False drops it from the model state afterwards.
            field=models.ImageField(default='1.jpg', upload_to='static/images/upload/'),
            preserve_default=False,
        ),
    ]
| # Generated by Django 3.0.3 on 2020-03-12 08:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an ``image`` field to the ``Product`` model (auto-generated)."""

    dependencies = [
        ('api', '0003_category_owner'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='image',
            # default='1.jpg' only backfills existing rows during migration;
            # preserve_default=False drops it from the model state afterwards.
            field=models.ImageField(default='1.jpg', upload_to='static/images/upload/'),
            preserve_default=False,
        ),
] | en | 0.676041 | # Generated by Django 3.0.3 on 2020-03-12 08:31 | 1.681515 | 2 |
stanfordnlp/server/__init__.py | loretoparisi/stanfordnlp | 2,859 | 6623402 | <filename>stanfordnlp/server/__init__.py
from stanfordnlp.protobuf import to_text
from stanfordnlp.protobuf import Document, Sentence, Token, IndexedWord, Span
from stanfordnlp.protobuf import ParseTree, DependencyGraph, CorefChain
from stanfordnlp.protobuf import Mention, NERMention, Entity, Relation, RelationTriple, Timex
from stanfordnlp.protobuf import Quote, SpeakerInfo
from stanfordnlp.protobuf import Operator, Polarity
from stanfordnlp.protobuf import SentenceFragment, TokenLocation
from stanfordnlp.protobuf import MapStringString, MapIntString
from .client import CoreNLPClient, AnnotationException, TimeoutException
from .annotator import Annotator
| <filename>stanfordnlp/server/__init__.py
from stanfordnlp.protobuf import to_text
from stanfordnlp.protobuf import Document, Sentence, Token, IndexedWord, Span
from stanfordnlp.protobuf import ParseTree, DependencyGraph, CorefChain
from stanfordnlp.protobuf import Mention, NERMention, Entity, Relation, RelationTriple, Timex
from stanfordnlp.protobuf import Quote, SpeakerInfo
from stanfordnlp.protobuf import Operator, Polarity
from stanfordnlp.protobuf import SentenceFragment, TokenLocation
from stanfordnlp.protobuf import MapStringString, MapIntString
from .client import CoreNLPClient, AnnotationException, TimeoutException
from .annotator import Annotator
| none | 1 | 1.546567 | 2 | |
metrics/__init__.py | shuvozula/eth-runner | 6 | 6623403 | <gh_stars>1-10
#!/usr/bin/env python
import threading
import time
from log.log import LOG
# Seconds to wait between metric-collection iterations.
_EPOCH_SLEEP_SECONDS = 60
# How long the miner stays switched off after a critical error.
_WAKEUP_SLEEP_SECONDS = 10 * 60  # 10 mins
class AbstractMetricsCollector(threading.Thread):
    """
    Base class for metrics collection. Encapsulates the behavior of data-collection, reporting and
    monitoring (Command pattern). Subclasses must override collect_metrics().
    """

    def __init__(self, influxdb_client, watchdog, exit_flag_event):
        """Store collaborators and initialize the worker thread.

        influxdb_client: client whose write_points() receives the metrics.
        watchdog: object providing monitor() and switch_off_miner().
        exit_flag_event: threading.Event that signals the loop to stop.
        """
        threading.Thread.__init__(self)
        # Guard against direct instantiation of the abstract base.
        if type(self) is AbstractMetricsCollector:
            raise NotImplementedError('Abstract class cannot be directly instantiated!')
        self.name = str(self)  # thread name, for logging/debugging
        self._influxdb_client = influxdb_client
        self._watchdog = watchdog
        self._exit_flag_event = exit_flag_event

    def collect_metrics(self):
        # Subclasses return the InfluxDB JSON body for one collection epoch.
        raise NotImplementedError('Needs to be overriden by derived class.')

    def run(self):
        """
        Starts the data collection, calls collect_metrics() for data and then inserts into InfluxDB and
        monitors it for abnormalities using the provided Watchdog. On any
        exception the miner is switched off for _WAKEUP_SLEEP_SECONDS.
        """
        LOG.info('Collecting %s...', self)
        try:
            while not self._exit_flag_event.is_set():
                # call the derived method for data, or a NotImplementedError is raised
                json_body = self.collect_metrics()
                # write metrics to InfluxDB
                self._influxdb_client.write_points(json_body)
                # monitor the data for any abnormalities in the AMD GPUs
                self._watchdog.monitor(json_body)
                time.sleep(_EPOCH_SLEEP_SECONDS)
            LOG.info('Exiting %s data collection...', self)
        except Exception as e:
            # Any failure pauses mining rather than crashing the process.
            LOG.exception('Suffered a critical error! Switching off miner for {} seconds\n{}'.format(
                _WAKEUP_SLEEP_SECONDS, e))
            self._watchdog.switch_off_miner(_WAKEUP_SLEEP_SECONDS)
| #!/usr/bin/env python
import threading
import time
from log.log import LOG
# Seconds to wait between metric-collection iterations.
_EPOCH_SLEEP_SECONDS = 60
# How long the miner stays switched off after a critical error.
_WAKEUP_SLEEP_SECONDS = 10 * 60  # 10 mins
class AbstractMetricsCollector(threading.Thread):
    """
    Base class for metrics collection. Encapsulates the behavior of data-collection, reporting and
    monitoring (Command pattern). Subclasses must override collect_metrics().
    """

    def __init__(self, influxdb_client, watchdog, exit_flag_event):
        """Store collaborators and initialize the worker thread.

        influxdb_client: client whose write_points() receives the metrics.
        watchdog: object providing monitor() and switch_off_miner().
        exit_flag_event: threading.Event that signals the loop to stop.
        """
        threading.Thread.__init__(self)
        # Guard against direct instantiation of the abstract base.
        if type(self) is AbstractMetricsCollector:
            raise NotImplementedError('Abstract class cannot be directly instantiated!')
        self.name = str(self)  # thread name, for logging/debugging
        self._influxdb_client = influxdb_client
        self._watchdog = watchdog
        self._exit_flag_event = exit_flag_event

    def collect_metrics(self):
        # Subclasses return the InfluxDB JSON body for one collection epoch.
        raise NotImplementedError('Needs to be overriden by derived class.')

    def run(self):
        """
        Starts the data collection, calls collect_metrics() for data and then inserts into InfluxDB and
        monitors it for abnormalities using the provided Watchdog. On any
        exception the miner is switched off for _WAKEUP_SLEEP_SECONDS.
        """
        LOG.info('Collecting %s...', self)
        try:
            while not self._exit_flag_event.is_set():
                # call the derived method for data, or a NotImplementedError is raised
                json_body = self.collect_metrics()
                # write metrics to InfluxDB
                self._influxdb_client.write_points(json_body)
                # monitor the data for any abnormalities in the AMD GPUs
                self._watchdog.monitor(json_body)
                time.sleep(_EPOCH_SLEEP_SECONDS)
            LOG.info('Exiting %s data collection...', self)
        except Exception as e:
            # Any failure pauses mining rather than crashing the process.
            LOG.exception('Suffered a critical error! Switching off miner for {} seconds\n{}'.format(
                _WAKEUP_SLEEP_SECONDS, e))
self._watchdog.switch_off_miner(_WAKEUP_SLEEP_SECONDS) | en | 0.778557 | #!/usr/bin/env python # 10 mins Base class for metrics collection. Encapsulates the behavior of data-collection, reporting and monitoring (Command pattern) Starts the data collection, calls collect_metrics() for data and then inserts into InfluxDB and monitors it for abnormalities using the provided Watchdog # call the derived methor for data, or a NotImplementedError is raised # write metrics to InfluxDB # monitor the data for any abnormalities in the AMD GPUs | 2.517893 | 3 |
freelearn_by_team_alpha/freelearn/gama/urls.py | chandankeshri1812/IEEE-Megaproject | 8 | 6623404 | <gh_stars>1-10
from django.urls import path
from . import views
# from .views import video
# URL routes for the gama app: one view per exam track / class / subject.
urlpatterns = [
    # Core pages and authentication.
    path('',views.index,name="index"),
    path('login',views.login,name ="login"),
    path('logout',views.logout,name ="logout"),
    path('signup',views.signup,name ="signup"),
    # --- NEET preparation routes (c/m/t = class, p/c/b = subject) ---
    path('neet',views.neet,name="neet"),
    path('ccneet',views.ccneet,name="ccneet"),
    path('cbneet',views.cbneet,name ="cbneet"),
    path('mpneet',views.mpneet,name ="mpneet"),
    path('mcneet',views.mcneet,name="mcneet"),
    path('mbneet',views.mbneet,name ="mbneet"),
    path('tpneet',views.tpneet,name ="tpneet"),
    path('tcneet',views.tcneet,name="tcneet"),
    path('tbneet',views.tbneet,name="tbneet"),
    # --- Board-exam routes ---
    path('boards',views.boards,name="boards"),
    path('ccboards',views.ccboards,name="ccboards"),
    path('cmboards',views.cmboards,name="cmboards"),
    path('cbboards',views.cbboards,name="cbboards"),
    path('mpboards',views.mpboards,name="mpboards"),
    path('mcboards',views.mcboards,name="mcboards"),
    path('mmboards',views.mmboards,name="mmboards"),
    path('mbboards',views.mbboards,name="mbboards"),
    path('tpboards',views.tpboards,name="tpboards"),
    path('tcboards',views.tcboards,name="tcboards"),
    path('tmboards',views.tmboards,name="tmboards"),
    path('tbboards',views.tbboards,name="tbboards"),
    # --- JEE routes ---
    path('jacp',views.jacp,name ="jacp"),
    path('jacm',views.jacm,name ="jacm"),
    path('jacc',views.jacc,name ="jacc"),
    path('jamp',views.jamp,name="jamp"),
    path('jamc',views.jamc,name ="jamc"),
    path('jamm',views.jamm,name ="jamm"),
    path('jatp',views.jatp,name ="jatp"),
    path('jatc',views.jatc,name ="jatc"),
    path('jatm',views.jatm,name ="jatm"),
    # --- User profile ---
    path('profile', views.profile, name="profile"),
] | from django.urls import path
from . import views
# from .views import video
# URL routes for the gama app: one view per exam track / class / subject.
urlpatterns = [
    # Core pages and authentication.
    path('',views.index,name="index"),
    path('login',views.login,name ="login"),
    path('logout',views.logout,name ="logout"),
    path('signup',views.signup,name ="signup"),
    # --- NEET preparation routes (c/m/t = class, p/c/b = subject) ---
    path('neet',views.neet,name="neet"),
    path('ccneet',views.ccneet,name="ccneet"),
    path('cbneet',views.cbneet,name ="cbneet"),
    path('mpneet',views.mpneet,name ="mpneet"),
    path('mcneet',views.mcneet,name="mcneet"),
    path('mbneet',views.mbneet,name ="mbneet"),
    path('tpneet',views.tpneet,name ="tpneet"),
    path('tcneet',views.tcneet,name="tcneet"),
    path('tbneet',views.tbneet,name="tbneet"),
    # --- Board-exam routes ---
    path('boards',views.boards,name="boards"),
    path('ccboards',views.ccboards,name="ccboards"),
    path('cmboards',views.cmboards,name="cmboards"),
    path('cbboards',views.cbboards,name="cbboards"),
    path('mpboards',views.mpboards,name="mpboards"),
    path('mcboards',views.mcboards,name="mcboards"),
    path('mmboards',views.mmboards,name="mmboards"),
    path('mbboards',views.mbboards,name="mbboards"),
    path('tpboards',views.tpboards,name="tpboards"),
    path('tcboards',views.tcboards,name="tcboards"),
    path('tmboards',views.tmboards,name="tmboards"),
    path('tbboards',views.tbboards,name="tbboards"),
    # --- JEE routes ---
    path('jacp',views.jacp,name ="jacp"),
    path('jacm',views.jacm,name ="jacm"),
    path('jacc',views.jacc,name ="jacc"),
    path('jamp',views.jamp,name="jamp"),
    path('jamc',views.jamc,name ="jamc"),
    path('jamm',views.jamm,name ="jamm"),
    path('jatp',views.jatp,name ="jatp"),
    path('jatc',views.jatc,name ="jatc"),
    path('jatm',views.jatm,name ="jatm"),
    # --- User profile ---
    path('profile', views.profile, name="profile"),
] | ja | 0.222359 | # from .views import video # *******>>>>NEET<<<<******** # **********>>>>>>>>BOARDS<<<<<<<<********* # *****>>>>>>>>JEE<<<<<<<******** # *****>>>>PROFILE<<<<<****** | 1.82818 | 2 |
src/train_classifier.py | canbakiskan/neuro-inspired-defense | 0 | 6623405 | <filename>src/train_classifier.py
import time
import os
from tqdm import tqdm
import numpy as np
import logging
import torch
import torch.optim as optim
import torch.backends.cudnn as cudnn
from .models.resnet import ResNet, ResNetWide
from .models.efficientnet import EfficientNet
from .models.preact_resnet import PreActResNet101
from .models.ablation.dropout_resnet import dropout_ResNet
from .train_test_functions import (
train,
test,
)
from .parameters import get_arguments
from .utils.read_datasets import cifar10, tiny_imagenet, imagenette
from .utils.namers import (
autoencoder_ckpt_namer,
autoencoder_log_namer,
classifier_ckpt_namer,
classifier_log_namer,
)
from .models.combined import Combined
from .utils.get_modules import get_autoencoder
from deepillusion.torchattacks import (
PGD,
PGD_EOT,
FGSM,
RFGSM,
PGD_EOT_normalized,
PGD_EOT_sign,
)
from deepillusion.torchdefenses import (
adversarial_epoch,
adversarial_test,
)
from .models.autoencoders import *
import sys
logger = logging.getLogger(__name__)
def main():
    """Train a classifier (optionally behind a frozen/supervised autoencoder
    front end), with standard or adversarial training, then save checkpoints.

    All configuration comes from get_arguments(); results are logged to a
    file (classifier_log_namer) and stdout. No return value.
    """
    args = get_arguments()
    if not os.path.exists(args.directory + "logs"):
        os.mkdir(args.directory + "logs")
    logging.basicConfig(
        format="[%(asctime)s] - %(message)s",
        datefmt="%Y/%m/%d %H:%M:%S",
        level=logging.INFO,
        handlers=[
            logging.FileHandler(classifier_log_namer(args)),
            logging.StreamHandler(sys.stdout),
        ],
    )
    logger.info(args)
    logger.info("\n")
    # Seed all RNGs for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    # Dataset selection.
    if args.dataset == "CIFAR10":
        train_loader, test_loader = cifar10(args)
    elif args.dataset == "Tiny-ImageNet":
        train_loader, test_loader = tiny_imagenet(args)
    elif args.dataset == "Imagenette":
        train_loader, test_loader = imagenette(args)
    else:
        raise NotImplementedError
    # Classifier architecture selection.
    if args.classifier_arch == "resnet":
        classifier = ResNet(num_outputs=args.num_classes).to(device)
    elif args.classifier_arch == "resnetwide":
        classifier = ResNetWide(num_outputs=args.num_classes).to(device)
    elif args.classifier_arch == "efficientnet":
        classifier = EfficientNet.from_name(
            "efficientnet-b0", num_classes=args.num_classes, dropout_rate=0.2).to(device)
    elif args.classifier_arch == "preact_resnet":
        classifier = PreActResNet101(num_classes=args.num_classes).to(device)
    elif args.classifier_arch == "dropout_resnet":
        classifier = dropout_ResNet(
            dropout_p=args.dropout_p, nb_filters=args.dict_nbatoms, num_outputs=args.num_classes).to(device)
    else:
        raise NotImplementedError
    # Optionally prepend an autoencoder: trainable when supervised training
    # is requested (encoder frozen), otherwise loaded and fully frozen.
    if not args.no_autoencoder:
        if args.autoencoder_train_supervised:
            autoencoder = autoencoder_dict[args.autoencoder_arch](
                args).to(device)
            for p in autoencoder.parameters():
                p.requires_grad = True
            autoencoder.encoder_no_update()
        else:
            autoencoder = get_autoencoder(args)
            for p in autoencoder.parameters():
                p.requires_grad = False
        model = Combined(autoencoder, classifier)
    else:
        model = classifier
    model.train()
    # NOTE(review): comparing a torch.device to the string "cuda" — confirm
    # this evaluates True on the targeted PyTorch version, otherwise
    # DataParallel is never enabled.
    if device == "cuda":
        model = torch.nn.DataParallel(model)
        cudnn.benchmark = True
    logger.info(model)
    logger.info("\n")
    # Which optimizer to be used for training
    if args.optimizer == "sgd":
        optimizer = optim.SGD(
            model.parameters(),
            lr=args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
        )
    elif args.optimizer == "rms":
        optimizer = optim.RMSprop(
            model.parameters(),
            lr=args.lr,
            weight_decay=args.weight_decay,
            momentum=args.momentum)
    elif args.optimizer == "adam":
        optimizer = optim.Adam(
            model.parameters(),
            lr=args.lr,
            weight_decay=args.weight_decay,
        )
    else:
        raise NotImplementedError
    # Learning-rate schedule selection.
    if args.lr_scheduler == "cyc":
        lr_steps = args.classifier_epochs * len(train_loader)
        scheduler = torch.optim.lr_scheduler.CyclicLR(
            optimizer,
            base_lr=args.lr_min,
            max_lr=args.lr_max,
            step_size_up=lr_steps / 2,
            step_size_down=lr_steps / 2,
        )
    elif args.lr_scheduler == "step":
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[50, 80],
            gamma=0.1)
    elif args.lr_scheduler == "mult":
        def lr_fun(epoch):
            # Decay by ~3.8% every third epoch.
            if epoch % 3 == 0:
                return 0.962
            else:
                return 1.0
        scheduler = torch.optim.lr_scheduler.MultiplicativeLR(
            optimizer, lr_fun)
    else:
        raise NotImplementedError
    if args.adv_training_attack:
        # Adversarial training: build the attack used to craft training
        # examples, then run deepillusion's adversarial train/test loops.
        attacks = dict(
            PGD=PGD,
            PGD_EOT=PGD_EOT,
            PGD_EOT_normalized=PGD_EOT_normalized,
            PGD_EOT_sign=PGD_EOT_sign,
            FGSM=FGSM,
            RFGSM=RFGSM,
        )
        attack_params = {
            "norm": args.adv_training_norm,
            "eps": args.adv_training_epsilon,
            "alpha": args.adv_training_alpha,
            "step_size": args.adv_training_step_size,
            "num_steps": args.adv_training_num_steps,
            "random_start": (
                args.adv_training_rand and args.adv_training_num_restarts > 1
            ),
            "num_restarts": args.adv_training_num_restarts,
            "EOT_size": args.adv_training_EOT_size,
        }
        data_params = {"x_min": 0.0, "x_max": 1.0}
        # "CWlinf" variants reuse the PGD attack with the Carlini-Wagner loss.
        if "CWlinf" in args.adv_training_attack:
            adv_training_attack = args.adv_training_attack.replace(
                "CWlinf", "PGD")
            loss_function = "carlini_wagner"
        else:
            adv_training_attack = args.adv_training_attack
            loss_function = "cross_entropy"
        adversarial_args = dict(
            attack=attacks[adv_training_attack],
            attack_args=dict(
                net=model, data_params=data_params, attack_params=attack_params
            ),
            loss_function=loss_function,
        )
        logger.info(args.adv_training_attack + " training")
        logger.info("Epoch \t Seconds \t LR \t \t Train Loss \t Train Acc")
        for epoch in tqdm(range(1, args.classifier_epochs + 1)):
            start_time = time.time()
            train_args = dict(
                model=model,
                train_loader=train_loader,
                optimizer=optimizer,
                scheduler=scheduler,
                adversarial_args=adversarial_args,
            )
            train_loss, train_acc = adversarial_epoch(**train_args)
            test_args = dict(model=model, test_loader=test_loader)
            test_loss, test_acc = adversarial_test(**test_args)
            end_time = time.time()
            # get_last_lr() is the public accessor (get_lr() is internal and
            # deprecated for external use); also matches the standard branch.
            lr = scheduler.get_last_lr()[0]
            logger.info(
                f"{epoch} \t {end_time - start_time:.0f} \t \t {lr:.4f} \t {train_loss:.4f} \t {train_acc:.4f}"
            )
            logger.info(
                f"Test \t loss: {test_loss:.4f} \t acc: {test_acc:.4f}")
    else:
        logger.info("Epoch \t Seconds \t LR \t \t Train Loss \t Train Acc")
        logger.info("Standard training")
        for epoch in tqdm(range(1, args.classifier_epochs + 1)):
            start_time = time.time()
            train_loss, train_acc = train(
                model, train_loader, optimizer, scheduler)
            test_loss, test_acc = test(model, test_loader)
            end_time = time.time()
            lr = scheduler.get_last_lr()[0]
            logger.info(
                f"{epoch} \t {end_time - start_time:.0f} \t \t {lr:.4f} \t {train_loss:.4f} \t {train_acc:.4f}"
            )
            logger.info(
                f"Test \t loss: {test_loss:.4f} \t acc: {test_acc:.4f}")
    # Save model parameters
    if args.save_checkpoint:
        if not os.path.exists(args.directory + "checkpoints/classifiers/"):
            os.makedirs(args.directory + "checkpoints/classifiers/")
        classifier_filepath = classifier_ckpt_namer(args)
        torch.save(
            classifier.state_dict(), classifier_filepath,
        )
        logger.info(f"Saved to {classifier_filepath}")
        if not args.no_autoencoder:
            if not os.path.exists(args.directory + "checkpoints/autoencoders/"):
                os.makedirs(args.directory + "checkpoints/autoencoders/")
            autoencoder_filepath = autoencoder_ckpt_namer(args)
            # Only a supervised-trained autoencoder changed; frozen ones
            # were loaded from disk and need no re-saving.
            if args.autoencoder_train_supervised:
                torch.save(
                    autoencoder.state_dict(), autoencoder_filepath,
                )
                logger.info(f"Saved to {autoencoder_filepath}")
# Run training only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| <filename>src/train_classifier.py
import time
import os
from tqdm import tqdm
import numpy as np
import logging
import torch
import torch.optim as optim
import torch.backends.cudnn as cudnn
from .models.resnet import ResNet, ResNetWide
from .models.efficientnet import EfficientNet
from .models.preact_resnet import PreActResNet101
from .models.ablation.dropout_resnet import dropout_ResNet
from .train_test_functions import (
train,
test,
)
from .parameters import get_arguments
from .utils.read_datasets import cifar10, tiny_imagenet, imagenette
from .utils.namers import (
autoencoder_ckpt_namer,
autoencoder_log_namer,
classifier_ckpt_namer,
classifier_log_namer,
)
from .models.combined import Combined
from .utils.get_modules import get_autoencoder
from deepillusion.torchattacks import (
PGD,
PGD_EOT,
FGSM,
RFGSM,
PGD_EOT_normalized,
PGD_EOT_sign,
)
from deepillusion.torchdefenses import (
adversarial_epoch,
adversarial_test,
)
from .models.autoencoders import *
import sys
logger = logging.getLogger(__name__)
def main():
    """ main function to run the experiments

    Trains a classifier (optionally behind an autoencoder front end) with
    standard or adversarial training, then saves checkpoints. All
    configuration comes from get_arguments(); no return value.
    """
    args = get_arguments()
    if not os.path.exists(args.directory + "logs"):
        os.mkdir(args.directory + "logs")
    logging.basicConfig(
        format="[%(asctime)s] - %(message)s",
        datefmt="%Y/%m/%d %H:%M:%S",
        level=logging.INFO,
        handlers=[
            logging.FileHandler(classifier_log_namer(args)),
            logging.StreamHandler(sys.stdout),
        ],
    )
    logger.info(args)
    logger.info("\n")
    # Seed all RNGs for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    # NOTE(review): x_min/x_max are unused locals (data_params below uses
    # literals).
    x_min = 0.0
    x_max = 1.0
    # L = round((32 - args.defense_patchsize) / args.defense_stride + 1)
    # Dataset selection.
    if args.dataset == "CIFAR10":
        train_loader, test_loader = cifar10(args)
    elif args.dataset == "Tiny-ImageNet":
        train_loader, test_loader = tiny_imagenet(args)
    elif args.dataset == "Imagenette":
        train_loader, test_loader = imagenette(args)
    else:
        raise NotImplementedError
    # Classifier architecture selection.
    if args.classifier_arch == "resnet":
        classifier = ResNet(num_outputs=args.num_classes).to(device)
    elif args.classifier_arch == "resnetwide":
        classifier = ResNetWide(num_outputs=args.num_classes).to(device)
    elif args.classifier_arch == "efficientnet":
        classifier = EfficientNet.from_name(
            "efficientnet-b0", num_classes=args.num_classes, dropout_rate=0.2).to(device)
    elif args.classifier_arch == "preact_resnet":
        classifier = PreActResNet101(num_classes=args.num_classes).to(device)
    elif args.classifier_arch == "dropout_resnet":
        classifier = dropout_ResNet(
            dropout_p=args.dropout_p, nb_filters=args.dict_nbatoms, num_outputs=args.num_classes).to(device)
    else:
        raise NotImplementedError
    # Optionally prepend an autoencoder: trainable (encoder frozen) when
    # supervised training is requested, otherwise loaded and fully frozen.
    if not args.no_autoencoder:
        if args.autoencoder_train_supervised:
            autoencoder = autoencoder_dict[args.autoencoder_arch](
                args).to(device)
            for p in autoencoder.parameters():
                p.requires_grad = True
            autoencoder.encoder_no_update()
        else:
            autoencoder = get_autoencoder(args)
            for p in autoencoder.parameters():
                p.requires_grad = False
        model = Combined(autoencoder, classifier)
    else:
        model = classifier
    model.train()
    # NOTE(review): comparing a torch.device to the string "cuda" — confirm
    # this is True on the targeted PyTorch version, else DataParallel is
    # never enabled.
    if device == "cuda":
        model = torch.nn.DataParallel(model)
        cudnn.benchmark = True
    logger.info(model)
    logger.info("\n")
    # Which optimizer to be used for training
    if args.optimizer == "sgd":
        optimizer = optim.SGD(
            model.parameters(),
            lr=args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
        )
    elif args.optimizer == "rms":
        optimizer = optim.RMSprop(
            model.parameters(),
            lr=args.lr,
            weight_decay=args.weight_decay,
            momentum=args.momentum)
    elif args.optimizer == "adam":
        optimizer = optim.Adam(
            model.parameters(),
            lr=args.lr,
            weight_decay=args.weight_decay,
        )
    else:
        raise NotImplementedError
    # Learning-rate schedule selection.
    if args.lr_scheduler == "cyc":
        lr_steps = args.classifier_epochs * len(train_loader)
        scheduler = torch.optim.lr_scheduler.CyclicLR(
            optimizer,
            base_lr=args.lr_min,
            max_lr=args.lr_max,
            step_size_up=lr_steps / 2,
            step_size_down=lr_steps / 2,
        )
    elif args.lr_scheduler == "step":
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[50, 80],
            gamma=0.1)
    elif args.lr_scheduler == "mult":
        def lr_fun(epoch):
            # Decay by ~3.8% every third epoch.
            if epoch % 3 == 0:
                return 0.962
            else:
                return 1.0
        scheduler = torch.optim.lr_scheduler.MultiplicativeLR(
            optimizer, lr_fun)
    else:
        raise NotImplementedError
    if args.adv_training_attack:
        # Adversarial training: craft training examples with the chosen
        # attack, then run deepillusion's adversarial train/test loops.
        attacks = dict(
            PGD=PGD,
            PGD_EOT=PGD_EOT,
            PGD_EOT_normalized=PGD_EOT_normalized,
            PGD_EOT_sign=PGD_EOT_sign,
            FGSM=FGSM,
            RFGSM=RFGSM,
        )
        attack_params = {
            "norm": args.adv_training_norm,
            "eps": args.adv_training_epsilon,
            "alpha": args.adv_training_alpha,
            "step_size": args.adv_training_step_size,
            "num_steps": args.adv_training_num_steps,
            "random_start": (
                args.adv_training_rand and args.adv_training_num_restarts > 1
            ),
            "num_restarts": args.adv_training_num_restarts,
            "EOT_size": args.adv_training_EOT_size,
        }
        data_params = {"x_min": 0.0, "x_max": 1.0}
        # "CWlinf" variants reuse the PGD attack with the Carlini-Wagner loss.
        if "CWlinf" in args.adv_training_attack:
            adv_training_attack = args.adv_training_attack.replace(
                "CWlinf", "PGD")
            loss_function = "carlini_wagner"
        else:
            adv_training_attack = args.adv_training_attack
            loss_function = "cross_entropy"
        adversarial_args = dict(
            attack=attacks[adv_training_attack],
            attack_args=dict(
                net=model, data_params=data_params, attack_params=attack_params
            ),
            loss_function=loss_function,
        )
        logger.info(args.adv_training_attack + " training")
        logger.info("Epoch \t Seconds \t LR \t \t Train Loss \t Train Acc")
        for epoch in tqdm(range(1, args.classifier_epochs + 1)):
            start_time = time.time()
            train_args = dict(
                model=model,
                train_loader=train_loader,
                optimizer=optimizer,
                scheduler=scheduler,
                adversarial_args=adversarial_args,
            )
            train_loss, train_acc = adversarial_epoch(**train_args)
            test_args = dict(model=model, test_loader=test_loader)
            test_loss, test_acc = adversarial_test(**test_args)
            end_time = time.time()
            # NOTE(review): this branch uses the deprecated get_lr() while
            # the standard branch below uses get_last_lr() — should match.
            lr = scheduler.get_lr()[0]
            logger.info(
                f"{epoch} \t {end_time - start_time:.0f} \t \t {lr:.4f} \t {train_loss:.4f} \t {train_acc:.4f}"
            )
            logger.info(
                f"Test \t loss: {test_loss:.4f} \t acc: {test_acc:.4f}")
    else:
        logger.info("Epoch \t Seconds \t LR \t \t Train Loss \t Train Acc")
        logger.info("Standard training")
        for epoch in tqdm(range(1, args.classifier_epochs + 1)):
            start_time = time.time()
            train_loss, train_acc = train(
                model, train_loader, optimizer, scheduler)
            test_loss, test_acc = test(model, test_loader)
            end_time = time.time()
            # lr = scheduler.get_lr()[0]
            lr = scheduler.get_last_lr()[0]
            logger.info(
                f"{epoch} \t {end_time - start_time:.0f} \t \t {lr:.4f} \t {train_loss:.4f} \t {train_acc:.4f}"
            )
            logger.info(
                f"Test \t loss: {test_loss:.4f} \t acc: {test_acc:.4f}")
    # Save model parameters
    if args.save_checkpoint:
        if not os.path.exists(args.directory + "checkpoints/classifiers/"):
            os.makedirs(args.directory + "checkpoints/classifiers/")
        classifier_filepath = classifier_ckpt_namer(args)
        torch.save(
            classifier.state_dict(), classifier_filepath,
        )
        logger.info(f"Saved to {classifier_filepath}")
        if not args.no_autoencoder:
            if not os.path.exists(args.directory + "checkpoints/autoencoders/"):
                os.makedirs(args.directory + "checkpoints/autoencoders/")
            autoencoder_filepath = autoencoder_ckpt_namer(args)
            # Only a supervised-trained autoencoder changed; frozen ones
            # were loaded from disk and need no re-saving.
            if args.autoencoder_train_supervised:
                torch.save(
                    autoencoder.state_dict(), autoencoder_filepath,
                )
                logger.info(f"Saved to {autoencoder_filepath}")
logger.info(f"Saved to {autoencoder_filepath}")
# Run training only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| en | 0.464519 | main function to run the experiments # L = round((32 - args.defense_patchsize) / args.defense_stride + 1) # Which optimizer to be used for training # lr = scheduler.get_lr()[0] # Save model parameters | 2.039629 | 2 |
simulation_workshop/_nbdev.py | hgzech/simulation_workshop | 0 | 6623406 | <filename>simulation_workshop/_nbdev.py
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
# Names nbdev re-exports from this generated module.
__all__ = ["index", "modules", "custom_doc_links", "git_url"]

# Maps each exported symbol to the notebook that defines it.
index = {"choose": "01_simulation.ipynb",
         "simulate_M3RescorlaWagner_v1": "01_simulation.ipynb",
         "plot_rescorla_game": "04_interactive_session.ipynb",
         "simulate_M4ChoiceKernel_v1": "04_interactive_session.ipynb",
         "plot_choice_kernel_game": "04_interactive_session.ipynb"}

# Python modules generated from the notebooks above.
modules = ["simulation.py",
           "interactive_session.py"]

doc_url = "https://hgzech.github.io/simulation_workshop/"

git_url = "https://github.com/hgzech/simulation_workshop/tree/master/"
def custom_doc_links(name):
    """nbdev hook: return a custom documentation URL for *name*; this project defines none."""
    return None
| <filename>simulation_workshop/_nbdev.py
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"choose": "01_simulation.ipynb",
"simulate_M3RescorlaWagner_v1": "01_simulation.ipynb",
"plot_rescorla_game": "04_interactive_session.ipynb",
"simulate_M4ChoiceKernel_v1": "04_interactive_session.ipynb",
"plot_choice_kernel_game": "04_interactive_session.ipynb"}
modules = ["simulation.py",
"interactive_session.py"]
doc_url = "https://hgzech.github.io/simulation_workshop/"
git_url = "https://github.com/hgzech/simulation_workshop/tree/master/"
def custom_doc_links(name): return None
| en | 0.183601 | # AUTOGENERATED BY NBDEV! DO NOT EDIT! | 1.440262 | 1 |
pyteiser/wrappers/report_csvs.py | goodarzilab/pyteiser | 6 | 6623407 | import numpy as np
import pandas as pd
import argparse
from .. import IO
from .. import matchmaker
from .. import type_conversions
def _str2bool(value):
    """Parse a command-line string into a bool.

    argparse's ``type=bool`` is a known trap: ``bool("False")`` is True,
    so ANY non-empty string parsed as True.  This converter accepts the
    usual truthy spellings and maps everything else to False.
    """
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ("1", "true", "t", "yes", "y")


def handler(raw_args=None):
    """Parse the command-line arguments for the report generation script.

    :param raw_args: optional list of argument strings; when None,
                     argparse falls back to ``sys.argv[1:]``.
    :return: parsed argument namespace with file paths and flags.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("--rna_fastafile", help="fasta file with RNA sequences", type=str)
    parser.add_argument("--rna_bin_file", help="", type=str)
    parser.add_argument("--exp_mask_file", help="file with binary expression file, pre-overlapped with "
                                                "the reference transcriptome", type=str)
    parser.add_argument("--combined_seeds_filename", help="", type=str)
    parser.add_argument("--combined_profiles_filename", help="", type=str)
    parser.add_argument("--combined_MI_pv_zscores_filename", help="", type=str)
    parser.add_argument("--combined_robustness_filename", help="", type=str)
    parser.add_argument("--out_info_table", help="output file with statistics for the discovered seeds", type=str)
    parser.add_argument("--out_matches_table", help="output file with sequences of the matches for the discovered seeds", type=str)
    # type=_str2bool instead of type=bool: with type=bool, "--indices_mode False"
    # silently evaluated to True (bool of a non-empty string).
    parser.add_argument("--indices_mode", help="compression in the index mode", type=_str2bool)

    parser.set_defaults(
        rna_fastafile='/Users/student/Documents/hani/programs/pyteiser/data/tutorial_example_files/test_seqs.fa',
        rna_bin_file='/Users/student/Documents/hani/programs/pyteiser/data/temp/inputs/rna.bin',
        exp_mask_file='/Users/student/Documents/hani/programs/pyteiser/data/temp/inputs/exp_mask.bin',
        combined_seeds_filename='/Users/student/Documents/hani/programs/pyteiser/data/temp/interm/optimized_seeds_1.bin',
        combined_profiles_filename='/Users/student/Documents/hani/programs/pyteiser/data/temp/interm/optimized_profiles_1.bin',
        combined_MI_pv_zscores_filename='/Users/student/Documents/hani/programs/pyteiser/data/temp/interm/optimized_MI_pv_zscores_1.bin',
        combined_robustness_filename='/Users/student/Documents/hani/programs/pyteiser/data/temp/interm/robustness_array_1.bin',
        out_info_table='/Users/student/Documents/hani/programs/pyteiser/data/temp/out/info.csv',
        out_matches_table='/Users/student/Documents/hani/programs/pyteiser/data/temp/out/matches.csv',
        indices_mode=True,
    )

    args = parser.parse_args(raw_args)
    return args
def read_seeds_and_characteristics(args):
    """Load the optimized seeds plus their per-seed statistics.

    Reads the four files named in ``args`` and returns, in order:
    the seeds, their binding profiles, the MI/p-value/z-score array
    (float64, one row per seed) and the robustness flags (bool).
    """
    seeds = IO.read_motif_file(args.combined_seeds_filename)
    profiles = IO.unpack_profiles_file(args.combined_profiles_filename,
                                       args.indices_mode)
    characteristics = IO.read_np_array(args.combined_MI_pv_zscores_filename,
                                       np.dtype('float64'))
    robustness = IO.read_np_array(args.combined_robustness_filename,
                                  np.dtype('bool'))
    return seeds, profiles, characteristics, robustness
def get_n_seqs_list(rna_bin_filename):
    """Read the binary RNA file and return its sequences converted to
    n-encoding, preserving the order stored in the file."""
    seqs_dict, seqs_order = IO.read_rna_bin_file(rna_bin_filename)
    ordered_w_seqs = [seqs_dict[seq_name] for seq_name in seqs_order]
    return type_conversions.w_to_n_sequences_list(ordered_w_seqs)
def make_sequences_dataframe(fasta_filename, exp_mask_file):
    """Build a one-column DataFrame of the expressed transcripts.

    The result is indexed by transcript name with a single 'sequence'
    column, keeping only the transcripts whose position is flagged in
    the expression mask file.
    """
    tr_dict_loc, seqs_order = IO.read_fasta_no_compression(fasta_filename)
    index_array, _values_array = IO.unpack_mask_file(exp_mask_file)

    # keep only the transcripts marked present in the mask, in file order
    present = {name: tr_dict_loc[name]
               for position, name in enumerate(seqs_order)
               if index_array[position]}

    seq_df = pd.DataFrame.from_dict(present, orient='index')
    return seq_df.rename({0: 'sequence'}, axis=1)
def generate_report_csv(seeds_optimized, profiles_optimized,
                        seed_charact_array, robustness_array):
    """Build the per-seed summary table, sorted by decreasing MI.

    One row per seed with its id, linear sequence/structure, MI,
    p-value, z-score, robustness flag and the number of sequences
    its profile marks as bound.
    """
    mi_values = seed_charact_array[:, 0]
    ordering = np.argsort(mi_values)[::-1]

    columns = ['seed_id', 'sequence', 'structure',
               'MI', 'pvalue', 'zscore', 'robust', 'sequences_bound']
    report_csv = pd.DataFrame(columns=columns,
                              index=np.arange(ordering.shape[0]))

    for row, seed_idx in enumerate(ordering):
        seed = seeds_optimized[seed_idx]
        row_values = {
            'seed_id': "%d" % seed_idx,
            'sequence': seed.print_linear_sequence(return_string=True),
            'structure': seed.print_linear_structure(return_string=True),
            'MI': seed_charact_array[seed_idx, 0],
            'pvalue': seed_charact_array[seed_idx, 1],
            'zscore': seed_charact_array[seed_idx, 2],
            'robust': robustness_array[seed_idx],
            'sequences_bound': profiles_optimized[seed_idx].sum(),
        }
        for column, value in row_values.items():
            report_csv.at[row, column] = value

    return report_csv
def add_matches_columns(seeds_optimized,
                        seed_charact_array,
                        n_seqs_list,
                        inp_df,
                        seq_column_name = 'sequence'):
    """Return a copy of ``inp_df`` with one extra column per seed.

    Each column "matches_seed_<i>" holds, for every row, the
    '; '-joined subsequences of that row's sequence matching seed i.
    Seeds are processed in decreasing-MI order.  Each seed's linear
    sequence and structure are echoed to stdout (side effect kept
    from the original reporting behavior).

    Fixes over the previous version: the inner row loop no longer
    shadows the outer loop variable ``index``, and the loop-invariant
    motif conversion is hoisted out of the row loop.
    """
    MI_values_array = seed_charact_array[:, 0]
    seed_indices_sorted = np.argsort(MI_values_array)[::-1]

    out_df = inp_df.copy()

    for i in seed_indices_sorted:
        current_seed = seeds_optimized[i]
        current_seed.print_linear_sequence()
        current_seed.print_linear_structure()

        # hoisted: the motif conversion does not depend on the row scanned
        n_motif = type_conversions.w_to_n_motif(current_seed)

        column_name = "matches_seed_%d" % (i)
        curr_matches_list = []

        for row_idx in range(inp_df.shape[0]):
            sequence = inp_df.iloc[row_idx][seq_column_name]
            all_instances = matchmaker.find_all_motif_instances(
                n_motif,
                n_seqs_list[row_idx],
                is_degenerate = True)
            matches = [sequence[inst: inst + current_seed.linear_length]
                       for inst in all_instances]
            curr_matches_list.append("; ".join(matches))

        out_df[column_name] = curr_matches_list

    return out_df
def main(raw_args = None):
    """Entry point: load seeds and statistics, build the info and
    matches tables and write both as tab-separated files."""
    args = handler(raw_args)

    (seeds_optimized, profiles_optimized,
     seed_charact_array, robustness_array) = read_seeds_and_characteristics(args)
    n_seqs_list = get_n_seqs_list(args.rna_bin_file)

    info_df = generate_report_csv(seeds_optimized, profiles_optimized,
                                  seed_charact_array, robustness_array)
    seq_df = make_sequences_dataframe(args.rna_fastafile, args.exp_mask_file)
    matches_df = add_matches_columns(seeds_optimized,
                                     seed_charact_array,
                                     n_seqs_list,
                                     seq_df)

    info_df.to_csv(args.out_info_table, index = False, sep = '\t')
    matches_df.to_csv(args.out_matches_table, sep='\t')
if __name__ == "__main__":
main() | import numpy as np
import pandas as pd
import argparse
from .. import IO
from .. import matchmaker
from .. import type_conversions
def handler(raw_args = None):
parser = argparse.ArgumentParser()
parser.add_argument("--rna_fastafile", help="fasta file with RNA sequences", type=str)
parser.add_argument("--rna_bin_file", help="", type=str)
parser.add_argument("--exp_mask_file", help="file with binary expression file, pre-overlapped with "
"the reference transcriptome", type=str)
parser.add_argument("--combined_seeds_filename", help="", type=str)
parser.add_argument("--combined_profiles_filename", help="", type=str)
parser.add_argument("--combined_MI_pv_zscores_filename", help="", type=str)
parser.add_argument("--combined_robustness_filename", help="", type=str)
parser.add_argument("--out_info_table", help="output file with statistics for the discovered seeds", type=str)
parser.add_argument("--out_matches_table", help="output file with sequences of the matches for the discovered seeds", type=str)
parser.add_argument("--indices_mode", help="compression in the index mode", type=bool)
parser.set_defaults(
rna_fastafile='/Users/student/Documents/hani/programs/pyteiser/data/tutorial_example_files/test_seqs.fa',
rna_bin_file='/Users/student/Documents/hani/programs/pyteiser/data/temp/inputs/rna.bin',
exp_mask_file='/Users/student/Documents/hani/programs/pyteiser/data/temp/inputs/exp_mask.bin',
combined_seeds_filename='/Users/student/Documents/hani/programs/pyteiser/data/temp/interm/optimized_seeds_1.bin',
combined_profiles_filename='/Users/student/Documents/hani/programs/pyteiser/data/temp/interm/optimized_profiles_1.bin',
combined_MI_pv_zscores_filename='/Users/student/Documents/hani/programs/pyteiser/data/temp/interm/optimized_MI_pv_zscores_1.bin',
combined_robustness_filename='/Users/student/Documents/hani/programs/pyteiser/data/temp/interm/robustness_array_1.bin',
out_info_table='/Users/student/Documents/hani/programs/pyteiser/data/temp/out/info.csv',
out_matches_table='/Users/student/Documents/hani/programs/pyteiser/data/temp/out/matches.csv',
indices_mode=True,
# combined_seeds_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/tarbp2/seed_optimized_100k_tarbp2_utrs_10k.bin',
# combined_profiles_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/tarbp2/profiles_optimized_100k_tarbp2_utrs_10k.bin',
# combined_MI_pv_zscores_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/tarbp2/seed_characteristics_optimized_100k_tarbp2_utrs_10k.bin',
# combined_robustness_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/tarbp2/robustness_optimized_100k_tarbp2_utrs_10k.bin',
#
# indices_mode=False,
# combined_seeds_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/snrnpa1/seed_optimized_100k_snrnpa1_10k.bin',
# combined_profiles_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/snrnpa1/profiles_optimized_100k_snrnpa1_10k.bin',
# combined_MI_pv_zscores_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/snrnpa1/seed_characteristics_optimized_100k_snrnpa1_10k.bin',
# combined_robustness_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/snrnpa1/robustness_optimized_100k_snrnpa1_10k.bin',
)
args = parser.parse_args(raw_args)
return args
def read_seeds_and_characteristics(args):
seeds_optimized = IO.read_motif_file(args.combined_seeds_filename)
profiles_optimized = IO.unpack_profiles_file(args.combined_profiles_filename, args.indices_mode)
seed_charact_array = IO.read_np_array(args.combined_MI_pv_zscores_filename, np.dtype('float64'))
robustness_array = IO.read_np_array(args.combined_robustness_filename, np.dtype('bool'))
return seeds_optimized, profiles_optimized, \
seed_charact_array, robustness_array
def get_n_seqs_list(rna_bin_filename):
seqs_dict, seqs_order = IO.read_rna_bin_file(rna_bin_filename)
w_seqs_list = [seqs_dict[name] for name in seqs_order]
n_seqs_list = type_conversions.w_to_n_sequences_list(w_seqs_list)
return n_seqs_list
def make_sequences_dataframe(fasta_filename, exp_mask_file):
tr_dict_loc, seqs_order = IO.read_fasta_no_compression(fasta_filename)
index_array, values_array = IO.unpack_mask_file(exp_mask_file)
present_seqs_order = [x for i, x in enumerate(seqs_order) if index_array[i]]
sub_dict_seq = {x : tr_dict_loc[x] for x in present_seqs_order}
seq_df = pd.DataFrame.from_dict(sub_dict_seq, orient = 'index')
seq_df = seq_df.rename({0 : 'sequence'}, axis = 1)
return seq_df
def generate_report_csv(seeds_optimized, profiles_optimized,
seed_charact_array, robustness_array):
MI_values_array = seed_charact_array[:, 0]
seed_indices_sorted = np.argsort(MI_values_array)[::-1]
report_csv = pd.DataFrame(columns = ['seed_id', 'sequence', 'structure',
'MI', 'pvalue', 'zscore', 'robust', 'sequences_bound'],
index = np.arange(seed_indices_sorted.shape[0]))
for index, i in enumerate(seed_indices_sorted):
report_csv.at[index, 'seed_id'] = "%d" % i
report_csv.at[index, 'sequence'] = seeds_optimized[i].print_linear_sequence(return_string = True)
report_csv.at[index, 'structure'] = seeds_optimized[i].print_linear_structure(return_string = True)
report_csv.at[index, 'MI'] = seed_charact_array[i,0]
report_csv.at[index, 'pvalue'] = seed_charact_array[i,1]
report_csv.at[index, 'zscore'] = seed_charact_array[i,2]
report_csv.at[index, 'robust'] = robustness_array[i]
report_csv.at[index, 'sequences_bound'] = profiles_optimized[i].sum()
return report_csv
def add_matches_columns(seeds_optimized,
seed_charact_array,
n_seqs_list,
inp_df,
seq_column_name = 'sequence'):
MI_values_array = seed_charact_array[:, 0]
seed_indices_sorted = np.argsort(MI_values_array)[::-1]
out_df = inp_df.copy()
for index, i in enumerate(seed_indices_sorted):
current_seed = seeds_optimized[i]
current_seed.print_linear_sequence()
current_seed.print_linear_structure()
column_name = "matches_seed_%d" % (i)
curr_matches_list = []
for index in range(inp_df.shape[0]):
sequence = inp_df.iloc[index][seq_column_name]
all_instances = matchmaker.find_all_motif_instances(
type_conversions.w_to_n_motif(current_seed),
n_seqs_list[index],
is_degenerate = True)
matches = []
for inst in all_instances:
curr_match = sequence[inst : inst + current_seed.linear_length]
matches.append(curr_match)
matches_string = "; ".join(matches)
curr_matches_list.append(matches_string)
out_df[column_name] = curr_matches_list
return out_df
def main(raw_args = None):
args = handler(raw_args)
seeds_optimized, profiles_optimized, \
seed_charact_array, robustness_array = read_seeds_and_characteristics(args)
n_seqs_list = get_n_seqs_list(args.rna_bin_file)
report_csv = generate_report_csv(seeds_optimized, profiles_optimized,
seed_charact_array, robustness_array)
seq_df = make_sequences_dataframe(args.rna_fastafile, args.exp_mask_file)
matches_df = add_matches_columns(seeds_optimized,
seed_charact_array,
n_seqs_list,
seq_df)
report_csv.to_csv(args.out_info_table, index = False, sep = '\t')
matches_df.to_csv(args.out_matches_table, sep='\t')
if __name__ == "__main__":
main() | en | 0.616994 | # combined_seeds_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/tarbp2/seed_optimized_100k_tarbp2_utrs_10k.bin', # combined_profiles_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/tarbp2/profiles_optimized_100k_tarbp2_utrs_10k.bin', # combined_MI_pv_zscores_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/tarbp2/seed_characteristics_optimized_100k_tarbp2_utrs_10k.bin', # combined_robustness_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/tarbp2/robustness_optimized_100k_tarbp2_utrs_10k.bin', # # indices_mode=False, # combined_seeds_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/snrnpa1/seed_optimized_100k_snrnpa1_10k.bin', # combined_profiles_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/snrnpa1/profiles_optimized_100k_snrnpa1_10k.bin', # combined_MI_pv_zscores_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/snrnpa1/seed_characteristics_optimized_100k_snrnpa1_10k.bin', # combined_robustness_filename='/Users/student/Documents/hani/programs/pyteiser/data/combined_optimized_seeds/snrnpa1/robustness_optimized_100k_snrnpa1_10k.bin', | 2.378228 | 2 |
fromimports.py | caleblevy/pyfun | 2 | 6623408 | """from everything available in Python 3.5.2"""
# Processing Services
from string import *
from re import *
from difflib import *
from textwrap import *
from unicodedata import *
from stringprep import *
from readline import *
from rlcompleter import *
# Binary Data Services
from struct import *
from codecs import *
# Data Types
from datetime import *
from calendar import *
from collections import *
from collections.abc import *
from heapq import *
from bisect import *
from array import *
from weakref import *
from types import *
from copy import *
from pprint import *
from reprlib import *
from enum import *
# Numeric and Mathematical Modules
from numbers import *
from math import *
from cmath import *
from decimal import *
from fractions import *
from random import *
from statistics import *
# Functional Programming Modules
from itertools import *
from functools import *
from operator import *
# File and Directory Access
from pathlib import *
from os.path import *
from fileinput import *
from stat import *
from filecmp import *
from tempfile import *
from glob import *
from fnmatch import *
from linecache import *
from shutil import *
from macpath import *
# Data Persistence
from pickle import *
from copyreg import *
from shelve import *
from marshal import *
from dbm import *
from sqlite3 import *
# Data Compression and Archiving
from zlib import *
from gzip import *
from bz2 import *
from lzma import *
from zipfile import *
from tarfile import *
# File Formats
from csv import *
from configparser import *
from netrc import *
from xdrlib import *
from plistlib import *
# Cryptographic Services
from hashlib import *
from hmac import *
from secrets import *
# Generic Operating System Services
from os import *
from io import *
from time import *
from argparse import *
from getopt import *
from logging import *
from logging.config import *
from logging.handlers import *
from getpass import *
from curses import *
from curses.textpad import *
from curses.ascii import *
from curses.panel import *
from platform import *
from errno import *
from ctypes import *
# Concurrent Execution
from threading import *
from multiprocessing import *
from concurrent.futures import *
from subprocess import *
from sched import *
from queue import *
from dummy_threading import *
from _thread import *
from _dummy_thread import *
# Interprocess Communication and Networking
from socket import *
from ssl import *
from select import *
from selectors import *
from asyncio import *
from asyncore import *
from asynchat import *
from signal import *
from mmap import *
# Internet Data Handling
from email import *
from json import *
from mailcap import *
from mailbox import *
from mimetypes import *
from base64 import *
from binhex import *
from binascii import *
from quopri import *
from uu import *
# Structured Markup Processing Tools
from html import *
from html.parser import *
from html.entities import *
# XML Processing Modules
from xml.etree.ElementTree import *
from xml.dom import *
from xml.dom.minidom import *
from xml.dom.pulldom import *
from xml.sax import *
from xml.sax.handler import *
from xml.sax.saxutils import *
from xml.sax.xmlreader import *
from xml.parsers.expat import *
# Internet Protocols and Support
from webbrowser import *
from cgi import *
from cgitb import *
from wsgiref import *
from urllib import *
from urllib.request import *
from urllib.response import *
from urllib.parse import *
from urllib.error import *
from urllib.robotparser import *
from http import *
from http.client import *
from ftplib import *
from poplib import *
from imaplib import *
from nntplib import *
from smtplib import *
from smtpd import *
from telnetlib import *
from uuid import *
from socketserver import *
from http.server import *
from http.cookies import *
from http.cookiejar import *
from xmlrpc import *
from xmlrpc.client import *
from xmlrpc.server import *
from ipaddress import *
# Multimedia Services
from audioop import *
from aifc import *
from sunau import *
from wave import *
from chunk import *
from colorsys import *
from imghdr import *
from sndhdr import *
# from ossaudiodev
# Internationalization
from gettext import *
from locale import *
# Program Frameworks
from turtle import *
from cmd import *
from shlex import *
# Graphical User Interfaces with Tk
from tkinter import *
from tkinter.ttk import *
from tkinter.tix import *
from tkinter.scrolledtext import *
# Development Tools
from typing import *
from pydoc import *
from doctest import *
from unittest import *
from unittest.mock import *
from test import *
from test.support import *
# Debugging and Profiling
from bdb import *
from faulthandler import *
from pdb import *
# The Python Profilers
from timeit import *
from trace import *
from tracemalloc import *
# Software Packaging and Distribution
from distutils import *
from ensurepip import *
from venv import *
from zipapp import *
# Python Runtime Services
from sys import *
from sysconfig import *
from builtins import *
from warnings import *
from contextlib import *
from abc import *
from atexit import *
from traceback import *
from gc import *
from inspect import *
from site import *
# from fpectl
# Custom Python Interpreters
from code import *
from codeop import *
# Importing Modules
from zipimport import *
from pkgutil import *
from modulefinder import *
from runpy import *
from importlib import *
# Python Language Services
from parser import *
from ast import *
from symtable import *
from symbol import *
from token import *
from keyword import *
from tokenize import *
from tabnanny import *
from pyclbr import *
from py_compile import *
from compileall import *
from dis import *
from pickletools import *
# Miscellaneous Tools
from formatter import *
from test import *
| """from everything available in Python 3.5.2"""
# Processing Services
from string import *
from re import *
from difflib import *
from textwrap import *
from unicodedata import *
from stringprep import *
from readline import *
from rlcompleter import *
# Binary Data Services
from struct import *
from codecs import *
# Data Types
from datetime import *
from calendar import *
from collections import *
from collections.abc import *
from heapq import *
from bisect import *
from array import *
from weakref import *
from types import *
from copy import *
from pprint import *
from reprlib import *
from enum import *
# Numeric and Mathematical Modules
from numbers import *
from math import *
from cmath import *
from decimal import *
from fractions import *
from random import *
from statistics import *
# Functional Programming Modules
from itertools import *
from functools import *
from operator import *
# File and Directory Access
from pathlib import *
from os.path import *
from fileinput import *
from stat import *
from filecmp import *
from tempfile import *
from glob import *
from fnmatch import *
from linecache import *
from shutil import *
from macpath import *
# Data Persistence
from pickle import *
from copyreg import *
from shelve import *
from marshal import *
from dbm import *
from sqlite3 import *
# Data Compression and Archiving
from zlib import *
from gzip import *
from bz2 import *
from lzma import *
from zipfile import *
from tarfile import *
# File Formats
from csv import *
from configparser import *
from netrc import *
from xdrlib import *
from plistlib import *
# Cryptographic Services
from hashlib import *
from hmac import *
from secrets import *
# Generic Operating System Services
from os import *
from io import *
from time import *
from argparse import *
from getopt import *
from logging import *
from logging.config import *
from logging.handlers import *
from getpass import *
from curses import *
from curses.textpad import *
from curses.ascii import *
from curses.panel import *
from platform import *
from errno import *
from ctypes import *
# Concurrent Execution
from threading import *
from multiprocessing import *
from concurrent.futures import *
from subprocess import *
from sched import *
from queue import *
from dummy_threading import *
from _thread import *
from _dummy_thread import *
# Interprocess Communication and Networking
from socket import *
from ssl import *
from select import *
from selectors import *
from asyncio import *
from asyncore import *
from asynchat import *
from signal import *
from mmap import *
# Internet Data Handling
from email import *
from json import *
from mailcap import *
from mailbox import *
from mimetypes import *
from base64 import *
from binhex import *
from binascii import *
from quopri import *
from uu import *
# Structured Markup Processing Tools
from html import *
from html.parser import *
from html.entities import *
# XML Processing Modules
from xml.etree.ElementTree import *
from xml.dom import *
from xml.dom.minidom import *
from xml.dom.pulldom import *
from xml.sax import *
from xml.sax.handler import *
from xml.sax.saxutils import *
from xml.sax.xmlreader import *
from xml.parsers.expat import *
# Internet Protocols and Support
from webbrowser import *
from cgi import *
from cgitb import *
from wsgiref import *
from urllib import *
from urllib.request import *
from urllib.response import *
from urllib.parse import *
from urllib.error import *
from urllib.robotparser import *
from http import *
from http.client import *
from ftplib import *
from poplib import *
from imaplib import *
from nntplib import *
from smtplib import *
from smtpd import *
from telnetlib import *
from uuid import *
from socketserver import *
from http.server import *
from http.cookies import *
from http.cookiejar import *
from xmlrpc import *
from xmlrpc.client import *
from xmlrpc.server import *
from ipaddress import *
# Multimedia Services
from audioop import *
from aifc import *
from sunau import *
from wave import *
from chunk import *
from colorsys import *
from imghdr import *
from sndhdr import *
# from ossaudiodev
# Internationalization
from gettext import *
from locale import *
# Program Frameworks
from turtle import *
from cmd import *
from shlex import *
# Graphical User Interfaces with Tk
from tkinter import *
from tkinter.ttk import *
from tkinter.tix import *
from tkinter.scrolledtext import *
# Development Tools
from typing import *
from pydoc import *
from doctest import *
from unittest import *
from unittest.mock import *
from test import *
from test.support import *
# Debugging and Profiling
from bdb import *
from faulthandler import *
from pdb import *
# The Python Profilers
from timeit import *
from trace import *
from tracemalloc import *
# Software Packaging and Distribution
from distutils import *
from ensurepip import *
from venv import *
from zipapp import *
# Python Runtime Services
from sys import *
from sysconfig import *
from builtins import *
from warnings import *
from contextlib import *
from abc import *
from atexit import *
from traceback import *
from gc import *
from inspect import *
from site import *
# from fpectl
# Custom Python Interpreters
from code import *
from codeop import *
# Importing Modules
from zipimport import *
from pkgutil import *
from modulefinder import *
from runpy import *
from importlib import *
# Python Language Services
from parser import *
from ast import *
from symtable import *
from symbol import *
from token import *
from keyword import *
from tokenize import *
from tabnanny import *
from pyclbr import *
from py_compile import *
from compileall import *
from dis import *
from pickletools import *
# Miscellaneous Tools
from formatter import *
from test import *
| en | 0.636387 | from everything available in Python 3.5.2 # Processing Services # Binary Data Services # Data Types # Numeric and Mathematical Modules # Functional Programming Modules # File and Directory Access # Data Persistence # Data Compression and Archiving # File Formats # Cryptographic Services # Generic Operating System Services # Concurrent Execution # Interprocess Communication and Networking # Internet Data Handling # Structured Markup Processing Tools # XML Processing Modules # Internet Protocols and Support # Multimedia Services # from ossaudiodev # Internationalization # Program Frameworks # Graphical User Interfaces with Tk # Development Tools # Debugging and Profiling # The Python Profilers # Software Packaging and Distribution # Python Runtime Services # from fpectl # Custom Python Interpreters # Importing Modules # Python Language Services # Miscellaneous Tools | 2.013052 | 2 |
src/pyrin/globalization/datetime/services.py | wilsonGmn/pyrin | 0 | 6623409 | # -*- coding: utf-8 -*-
"""
datetime services module.
"""
from pyrin.application.services import get_component
from pyrin.globalization.datetime import DateTimePackage
def now(server=True, timezone=None):
    """
    gets the current datetime in the requested timezone.

    :param bool server: when True the server timezone is used,
                        otherwise the client timezone. defaults to True.

    :param str timezone: explicit timezone name; when provided,
                         the `server` flag is ignored. defaults to None.

    :rtype: datetime
    """
    datetime_component = get_component(DateTimePackage.COMPONENT_NAME)
    return datetime_component.now(server, timezone)
def client_now():
    """
    gets the current datetime in the client timezone.

    convenience shortcut for `now` with the client timezone selected.

    :rtype: datetime
    """
    datetime_component = get_component(DateTimePackage.COMPONENT_NAME)
    return datetime_component.client_now()
def today(server=True, timezone=None):
    """
    gets the current date in the requested timezone.

    :param bool server: when True the server timezone is used,
                        otherwise the client timezone. defaults to True.

    :param str timezone: explicit timezone name; when provided,
                         the `server` flag is ignored. defaults to None.

    :rtype: date
    """
    datetime_component = get_component(DateTimePackage.COMPONENT_NAME)
    return datetime_component.today(server, timezone)
def client_today():
    """
    gets the current date in the client timezone.

    convenience shortcut for `today` with the client timezone selected.

    :rtype: date
    """
    datetime_component = get_component(DateTimePackage.COMPONENT_NAME)
    return datetime_component.client_today()
def current_year(server=True, timezone=None):
    """
    gets the current year in the requested timezone.

    :param bool server: when True the server timezone is used,
                        otherwise the client timezone. defaults to True.

    :param str timezone: explicit timezone name; when provided,
                         the `server` flag is ignored. defaults to None.

    :rtype: int
    """
    datetime_component = get_component(DateTimePackage.COMPONENT_NAME)
    return datetime_component.current_year(server, timezone)
def current_client_year():
    """
    gets the current year in the client timezone.

    convenience shortcut for `current_year` with the client timezone selected.

    :rtype: int
    """
    datetime_component = get_component(DateTimePackage.COMPONENT_NAME)
    return datetime_component.current_client_year()
def get_default_client_timezone():
    """
    gets the timezone used for clients by default.

    :rtype: tzinfo
    """
    datetime_component = get_component(DateTimePackage.COMPONENT_NAME)
    return datetime_component.get_default_client_timezone()
def get_current_timezone(server):
    """
    gets the currently effective timezone for server or client.

    :param bool server: when True the server timezone is returned,
                        otherwise the client timezone.

    :rtype: tzinfo
    """
    datetime_component = get_component(DateTimePackage.COMPONENT_NAME)
    return datetime_component.get_current_timezone(server)
def get_timezone(timezone):
"""
gets the timezone based on given timezone name.
:param str timezone: timezone name.
:rtype: tzinfo
"""
return get_component(DateTimePackage.COMPONENT_NAME).get_timezone(timezone)
def get_timezone_name(server):
"""
gets the server or client timezone name.
:param bool server: if set to True, server timezone name will be returned.
if set to False, client timezone name will be returned.
:rtype: str
"""
return get_component(DateTimePackage.COMPONENT_NAME).get_timezone_name(server)
def convert(value, to_server, from_server=None):
    """
    converts a datetime between the server and client timezones.

    :param datetime value: value to be converted.

    :param bool to_server: normalize into the server timezone when True,
                           otherwise into the client timezone.

    :param bool from_server: normalize from the server timezone when True,
                             otherwise from the client timezone. defaults
                             to the opposite of `to_server` when omitted.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.convert(value, to_server, from_server=from_server)


def convert_to_utc(value, from_server):
    """
    converts a datetime into utc.

    :param datetime value: value to be converted.

    :param bool from_server: normalize from the server timezone when True,
                             otherwise from the client timezone.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.convert_to_utc(value, from_server)


def convert_from_utc(value, to_server):
    """
    converts a utc datetime into server or client timezone.

    :param datetime value: value to be converted.

    :param bool to_server: normalize into the server timezone when True,
                           otherwise into the client timezone.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.convert_from_utc(value, to_server)
def as_timezone(value, server):
    """
    applies `astimezone` to the given value.

    :param datetime value: value to be normalized.

    :param bool server: use the server timezone when True, otherwise
                        the client timezone.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.as_timezone(value, server)


def normalize(value, server):
    """
    normalizes the value using the server or client current timezone.

    :param datetime value: value to be normalized.

    :param bool server: use the server timezone when True, otherwise
                        the client timezone.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.normalize(value, server)


def localize(value, server):
    """
    attaches the current timezone to a naive datetime.

    the input value must not carry timezone info already.

    :param datetime value: value to be localized.

    :param bool server: use the server timezone when True, otherwise
                        the client timezone.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.localize(value, server)


def try_add_timezone(value, server):
    """
    attaches the current timezone to the value if it is naive.

    :param datetime value: value to receive timezone info.

    :param bool server: attach the server timezone when True, otherwise
                        the client timezone.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.try_add_timezone(value, server)
def to_datetime_string(value, to_server, from_server=None):
    """
    returns the string representation of a datetime.

    naive values first get the client or server timezone attached,
    depending on `from_server`.
    example: `2015-12-24T22:40:15+01:00`

    :param datetime value: input object to be converted.

    :param bool to_server: normalize into the server timezone when True,
                           otherwise into the client timezone.

    :param bool from_server: normalize from the server timezone when True,
                             otherwise from the client timezone. defaults
                             to the opposite of `to_server` when omitted.

    :rtype: str
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.to_datetime_string(value, to_server, from_server=from_server)


def to_date_string(value):
    """
    returns the string representation of a date.

    example: `2015-12-24`

    :param datetime | date value: input object to be converted.

    :rtype: str
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.to_date_string(value)


def to_time_string(value, to_server, from_server=None):
    """
    returns the string representation of a time.

    naive datetime values first get the client or server timezone
    attached, depending on `from_server`.
    example: `23:40:15`

    :param datetime | time value: input object to be converted.

    :param bool to_server: normalize into the server timezone when True,
                           otherwise into the client timezone.

    :param bool from_server: normalize from the server timezone when True,
                             otherwise from the client timezone. defaults
                             to the opposite of `to_server` when omitted.

    :rtype: str
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.to_time_string(value, to_server, from_server=from_server)
def to_datetime(value, to_server, from_server=None):
    """
    parses a string into its equivalent python datetime.

    naive values first get the client or server timezone attached,
    depending on `from_server`.

    :param str value: string representation of datetime to be converted.

    :param bool to_server: normalize into the server timezone when True,
                           otherwise into the client timezone.

    :param bool from_server: normalize from the server timezone when True,
                             otherwise from the client timezone. defaults
                             to the opposite of `to_server` when omitted.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.to_datetime(value, to_server, from_server=from_server)


def to_date(value):
    """
    parses a string into its equivalent python date.

    :param str value: string representation of date to be converted.

    :rtype: date
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.to_date(value)


def to_time(value):
    """
    parses a string into its equivalent python time.

    :param str value: string representation of time to be converted.

    :rtype: time
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.to_time(value)


def timezone_exists(timezone_name):
    """
    checks whether a timezone with the given name exists.

    :param str timezone_name: timezone name to check for existence.

    :rtype: bool
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.timezone_exists(timezone_name)
def get_timestamp(value, date_sep='-', main_sep=' ',
                  time_sep=':', microsecond=False):
    """
    formats a datetime as a timestamp with the given separators.

    the default format is `YYYY-MM-DD HH:mm:SS`.

    :param datetime value: datetime value to get its timestamp.

    :param str date_sep: separator between date elements.
                         None disables the separator.

    :param str main_sep: separator between the date and time parts.
                         None disables the separator.

    :param str time_sep: separator between time elements.
                         None disables the separator.

    :param bool microsecond: include microseconds in the timestamp.
                             defaults to False.

    :rtype: str
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.get_timestamp(value, date_sep, main_sep,
                                   time_sep, microsecond)


def get_current_timestamp(date_sep='-', main_sep=' ',
                          time_sep=':', server=True,
                          timezone=None, microsecond=False):
    """
    formats the current datetime of the requested timezone as a timestamp.

    the default format is `YYYY-MM-DD HH:mm:SS`.

    :param str date_sep: separator between date elements.
                         None disables the separator.

    :param str main_sep: separator between the date and time parts.
                         None disables the separator.

    :param str time_sep: separator between time elements.
                         None disables the separator.

    :param bool server: use the server timezone when True, otherwise
                        the client timezone. defaults to True.

    :param str timezone: explicit timezone name. when given, the
                         `server` flag is ignored. defaults to None.

    :param bool microsecond: include microseconds in the timestamp.
                             defaults to False.

    :rtype: str
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.get_current_timestamp(date_sep, main_sep, time_sep,
                                           server, timezone, microsecond)
def datetime(year, month, day, hour=0, minute=0,
             second=0, microsecond=0, fold=0,
             server=True, timezone=None):
    """
    builds a new datetime with the given fields in the requested timezone.

    :param int year: year.
    :param int month: month.
    :param int day: day.
    :param int hour: hour.
    :param int minute: minute.
    :param int second: second.
    :param int microsecond: microsecond.

    :param int fold: disambiguates wall times during a repeated
                     interval. it could be set to 0 or 1.

    :param bool server: use the server timezone when True, otherwise
                        the client timezone. defaults to True.

    :param str timezone: explicit timezone name. when given, the
                         `server` flag is ignored. defaults to None.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.datetime(year, month, day, hour, minute, second,
                              microsecond, fold, server, timezone)


def get_timezone_key():
    """
    returns the query string key the application expects for timezone.

    :rtype: str
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.get_timezone_key()
| # -*- coding: utf-8 -*-
"""
datetime services module.
"""
from pyrin.application.services import get_component
from pyrin.globalization.datetime import DateTimePackage
def now(server=True, timezone=None):
    """
    returns the current datetime for the requested timezone.

    :param bool server: use the server timezone when True, otherwise
                        the client timezone. defaults to True.

    :param str timezone: explicit timezone name. when given, the
                         `server` flag is ignored. defaults to None.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.now(server, timezone)


def client_now():
    """
    returns the current datetime in the client timezone.

    shortcut for calling `now` with the client timezone selected.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.client_now()
def today(server=True, timezone=None):
    """
    returns the current date for the requested timezone.

    :param bool server: use the server timezone when True, otherwise
                        the client timezone. defaults to True.

    :param str timezone: explicit timezone name. when given, the
                         `server` flag is ignored. defaults to None.

    :rtype: date
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.today(server, timezone)


def client_today():
    """
    returns the current date in the client timezone.

    shortcut for calling `today` with the client timezone selected.

    :rtype: date
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.client_today()


def current_year(server=True, timezone=None):
    """
    returns the current year for the requested timezone.

    :param bool server: use the server timezone when True, otherwise
                        the client timezone. defaults to True.

    :param str timezone: explicit timezone name. when given, the
                         `server` flag is ignored. defaults to None.

    :rtype: int
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.current_year(server, timezone)


def current_client_year():
    """
    returns the current year in the client timezone.

    shortcut for calling `current_year` with the client timezone selected.

    :rtype: int
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.current_client_year()
def get_default_client_timezone():
    """
    returns the default timezone configured for clients.

    :rtype: tzinfo
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.get_default_client_timezone()


def get_current_timezone(server):
    """
    returns the current timezone of either server or client.

    :param bool server: return the server timezone when True,
                        otherwise the client timezone.

    :rtype: tzinfo
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.get_current_timezone(server)


def get_timezone(timezone):
    """
    resolves a timezone object from its name.

    :param str timezone: timezone name.

    :rtype: tzinfo
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.get_timezone(timezone)


def get_timezone_name(server):
    """
    returns the name of the server or client timezone.

    :param bool server: return the server timezone name when True,
                        otherwise the client timezone name.

    :rtype: str
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.get_timezone_name(server)
def convert(value, to_server, from_server=None):
    """
    converts a datetime between the server and client timezones.

    :param datetime value: value to be converted.

    :param bool to_server: normalize into the server timezone when True,
                           otherwise into the client timezone.

    :param bool from_server: normalize from the server timezone when True,
                             otherwise from the client timezone. defaults
                             to the opposite of `to_server` when omitted.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.convert(value, to_server, from_server=from_server)


def convert_to_utc(value, from_server):
    """
    converts a datetime into utc.

    :param datetime value: value to be converted.

    :param bool from_server: normalize from the server timezone when True,
                             otherwise from the client timezone.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.convert_to_utc(value, from_server)


def convert_from_utc(value, to_server):
    """
    converts a utc datetime into server or client timezone.

    :param datetime value: value to be converted.

    :param bool to_server: normalize into the server timezone when True,
                           otherwise into the client timezone.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.convert_from_utc(value, to_server)
def as_timezone(value, server):
    """
    applies `astimezone` to the given value.

    :param datetime value: value to be normalized.

    :param bool server: use the server timezone when True, otherwise
                        the client timezone.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.as_timezone(value, server)


def normalize(value, server):
    """
    normalizes the value using the server or client current timezone.

    :param datetime value: value to be normalized.

    :param bool server: use the server timezone when True, otherwise
                        the client timezone.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.normalize(value, server)


def localize(value, server):
    """
    attaches the current timezone to a naive datetime.

    the input value must not carry timezone info already.

    :param datetime value: value to be localized.

    :param bool server: use the server timezone when True, otherwise
                        the client timezone.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.localize(value, server)


def try_add_timezone(value, server):
    """
    attaches the current timezone to the value if it is naive.

    :param datetime value: value to receive timezone info.

    :param bool server: attach the server timezone when True, otherwise
                        the client timezone.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.try_add_timezone(value, server)
def to_datetime_string(value, to_server, from_server=None):
    """
    returns the string representation of a datetime.

    naive values first get the client or server timezone attached,
    depending on `from_server`.
    example: `2015-12-24T22:40:15+01:00`

    :param datetime value: input object to be converted.

    :param bool to_server: normalize into the server timezone when True,
                           otherwise into the client timezone.

    :param bool from_server: normalize from the server timezone when True,
                             otherwise from the client timezone. defaults
                             to the opposite of `to_server` when omitted.

    :rtype: str
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.to_datetime_string(value, to_server, from_server=from_server)


def to_date_string(value):
    """
    returns the string representation of a date.

    example: `2015-12-24`

    :param datetime | date value: input object to be converted.

    :rtype: str
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.to_date_string(value)


def to_time_string(value, to_server, from_server=None):
    """
    returns the string representation of a time.

    naive datetime values first get the client or server timezone
    attached, depending on `from_server`.
    example: `23:40:15`

    :param datetime | time value: input object to be converted.

    :param bool to_server: normalize into the server timezone when True,
                           otherwise into the client timezone.

    :param bool from_server: normalize from the server timezone when True,
                             otherwise from the client timezone. defaults
                             to the opposite of `to_server` when omitted.

    :rtype: str
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.to_time_string(value, to_server, from_server=from_server)
def to_datetime(value, to_server, from_server=None):
    """
    parses a string into its equivalent python datetime.

    naive values first get the client or server timezone attached,
    depending on `from_server`.

    :param str value: string representation of datetime to be converted.

    :param bool to_server: normalize into the server timezone when True,
                           otherwise into the client timezone.

    :param bool from_server: normalize from the server timezone when True,
                             otherwise from the client timezone. defaults
                             to the opposite of `to_server` when omitted.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.to_datetime(value, to_server, from_server=from_server)


def to_date(value):
    """
    parses a string into its equivalent python date.

    :param str value: string representation of date to be converted.

    :rtype: date
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.to_date(value)


def to_time(value):
    """
    parses a string into its equivalent python time.

    :param str value: string representation of time to be converted.

    :rtype: time
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.to_time(value)


def timezone_exists(timezone_name):
    """
    checks whether a timezone with the given name exists.

    :param str timezone_name: timezone name to check for existence.

    :rtype: bool
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.timezone_exists(timezone_name)
def get_timestamp(value, date_sep='-', main_sep=' ',
                  time_sep=':', microsecond=False):
    """
    formats a datetime as a timestamp with the given separators.

    the default format is `YYYY-MM-DD HH:mm:SS`.

    :param datetime value: datetime value to get its timestamp.

    :param str date_sep: separator between date elements.
                         None disables the separator.

    :param str main_sep: separator between the date and time parts.
                         None disables the separator.

    :param str time_sep: separator between time elements.
                         None disables the separator.

    :param bool microsecond: include microseconds in the timestamp.
                             defaults to False.

    :rtype: str
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.get_timestamp(value, date_sep, main_sep,
                                   time_sep, microsecond)


def get_current_timestamp(date_sep='-', main_sep=' ',
                          time_sep=':', server=True,
                          timezone=None, microsecond=False):
    """
    formats the current datetime of the requested timezone as a timestamp.

    the default format is `YYYY-MM-DD HH:mm:SS`.

    :param str date_sep: separator between date elements.
                         None disables the separator.

    :param str main_sep: separator between the date and time parts.
                         None disables the separator.

    :param str time_sep: separator between time elements.
                         None disables the separator.

    :param bool server: use the server timezone when True, otherwise
                        the client timezone. defaults to True.

    :param str timezone: explicit timezone name. when given, the
                         `server` flag is ignored. defaults to None.

    :param bool microsecond: include microseconds in the timestamp.
                             defaults to False.

    :rtype: str
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.get_current_timestamp(date_sep, main_sep, time_sep,
                                           server, timezone, microsecond)
def datetime(year, month, day, hour=0, minute=0,
             second=0, microsecond=0, fold=0,
             server=True, timezone=None):
    """
    builds a new datetime with the given fields in the requested timezone.

    :param int year: year.
    :param int month: month.
    :param int day: day.
    :param int hour: hour.
    :param int minute: minute.
    :param int second: second.
    :param int microsecond: microsecond.

    :param int fold: disambiguates wall times during a repeated
                     interval. it could be set to 0 or 1.

    :param bool server: use the server timezone when True, otherwise
                        the client timezone. defaults to True.

    :param str timezone: explicit timezone name. when given, the
                         `server` flag is ignored. defaults to None.

    :rtype: datetime
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.datetime(year, month, day, hour, minute, second,
                              microsecond, fold, server, timezone)


def get_timezone_key():
    """
    returns the query string key the application expects for timezone.

    :rtype: str
    """
    component = get_component(DateTimePackage.COMPONENT_NAME)
    return component.get_timezone_key()
| en | 0.674316 | # -*- coding: utf-8 -*- datetime services module. gets the current datetime based on requested timezone. :param bool server: if set to True, server timezone will be used. if set to False, client timezone will be used. defaults to True. :param str timezone: timezone name to get datetime based on it. if provided, the value of `server` input will be ignored. defaults to None. :rtype: datetime gets the current datetime based on client timezone. this is a helper method to let get the client datetime without providing value to `now` method. :rtype: datetime gets the current date based on requested timezone. :param bool server: if set to True, server timezone will be used. if set to False, client timezone will be used. defaults to True. :param str timezone: timezone name to get date based on it. if provided, the value of `server` input will be ignored. defaults to None. :rtype: date gets the current date based on client timezone. this is a helper method to let get the client date without providing value to `today` method. :rtype: date gets the current year based on requested timezone. :param bool server: if set to True, server timezone will be used. if set to False, client timezone will be used. defaults to True. :param str timezone: timezone name to get year based on it. if provided, the value of `server` input will be ignored. defaults to None. :rtype: int gets the current year based on client timezone. this is a helper method to let get the client year without providing value to `current_year` method. :rtype: int gets the default client timezone. :rtype: tzinfo gets the current timezone for server or client. :param bool server: if set to True, server timezone will be returned. if set to False, client timezone will be returned. :rtype: tzinfo gets the timezone based on given timezone name. :param str timezone: timezone name. :rtype: tzinfo gets the server or client timezone name. 
:param bool server: if set to True, server timezone name will be returned. if set to False, client timezone name will be returned. :rtype: str converts the given datetime between server and client timezones. :param datetime value: value to be converted. :param bool to_server: specifies that value must be normalized to server timezone. if set to False, it will be normalized to client timezone. :param bool from_server: specifies that value must be normalized from server timezone. if set to False, it will be normalized from client timezone. if not provided, it will be set to opposite of `to_server` value. :rtype: datetime converts the given datetime to utc. :param datetime value: value to be converted. :param bool from_server: specifies that value must be normalized from server timezone. if set to False, it will be normalized from client timezone. :rtype: datetime converts the given datetime from utc. :param datetime value: value to be converted. :param bool to_server: specifies that value must be normalized to server timezone. if set to False, it will be normalized to client timezone. :rtype: datetime gets the result of `astimezone` on the given value. :param datetime value: value to get normalized. :param bool server: if set to True, server timezone name will be used. if set to False, client timezone name will be used. :rtype: datetime normalizes input value using server or client current timezone and returns it. :param datetime value: value to get normalized. :param bool server: specifies that server or client timezone must used. :rtype: datetime localizes input datetime with current timezone. input value should not have a timezone info. :param datetime value: value to be localized. :param bool server: specifies that server or client timezone must used. :rtype: datetime adds the current timezone info into input value if it has no timezone info. :param datetime value: value to add timezone info into it. 
:param bool server: specifies that server or client timezone must be added. :rtype: datetime gets the datetime string representation of input value. if the value has no timezone info, it adds the client or server timezone info based on `from_server` value. example: `2015-12-24T22:40:15+01:00` :param datetime value: input object to be converted. :param bool to_server: specifies that value must be normalized to server timezone. if set to False, it will be normalized to client timezone. :param bool from_server: specifies that value must be normalized from server timezone. if set to False, it will be normalized from client timezone. if not provided, it will be set to opposite of `to_server` value. :rtype: str gets the date string representation of input value. example: `2015-12-24` :param datetime | date value: input object to be converted. :rtype: str gets the time string representation of input value. if the value is a datetime and has no timezone info, it adds the client or server timezone info based on `from_server` value. example: `23:40:15` :param datetime | time value: input object to be converted. :param bool to_server: specifies that value must be normalized to server timezone. if set to False, it will be normalized to client timezone. :param bool from_server: specifies that value must be normalized from server timezone. if set to False, it will be normalized from client timezone. if not provided, it will be set to opposite of `to_server` value. :rtype: str converts the input value to it's equivalent python datetime. if the value has no timezone info, it adds the client or server timezone info based on `from_server` value. :param str value: string representation of datetime to be converted. :param bool to_server: specifies that value must be normalized to server timezone. if set to False, it will be normalized to client timezone. :param bool from_server: specifies that value must be normalized from server timezone. 
if set to False, it will be normalized from client timezone. if not provided, it will be set to opposite of `to_server` value. :rtype: datetime converts the input value to it's equivalent python date. :param str value: string representation of date to be converted. :rtype: date converts the input value to it's equivalent python time. :param str value: string representation of time to be converted. :rtype: time gets a value indicating that a timezone with the given name exists. :param str timezone_name: timezone name to check for existence. :rtype: bool gets the timestamp with specified separators for given datetime. default format is `YYYY-MM-DD HH:mm:SS`. :param datetime value: datetime value to get its timestamp. :param str date_sep: a separator to put between date elements. if set to None, no separator will be used. :param str main_sep: a separator to put between date and time part. if set to None, no separator will be used. :param str time_sep: a separator to put between time elements. if set to None, no separator will be used. :param bool microsecond: specifies that timestamp must include microseconds. defaults to False if not provided. :rtype: str gets the current timestamp with specified separators based on requested timezone. default format is `YYYY-MM-DD HH:mm:SS`. :param str date_sep: a separator to put between date elements. if set to None, no separator will be used. :param str main_sep: a separator to put between date and time part. if set to None, no separator will be used. :param str time_sep: a separator to put between time elements. if set to None, no separator will be used. :param bool server: if set to True, server timezone will be used. if set to False, client timezone will be used. defaults to True. :param str timezone: timezone name to get datetime based on it. if provided, the value of `server` input will be ignored. defaults to None. :param bool microsecond: specifies that timestamp must include microseconds. 
defaults to False if not provided. :rtype: str gets a new datetime with given inputs and requested timezone. :param int year: year. :param int month: month. :param int day: day. :param int hour: hour. :param int minute: minute. :param int second: second. :param int microsecond: microsecond. :param int fold: used to disambiguate wall times during a repeated interval. it could be set to 0 or 1. :param bool server: if set to True, server timezone will be used. if set to False, client timezone will be used. defaults to True. :param str timezone: timezone name to be used. if provided, the value of `server` input will be ignored. defaults to None. :rtype: datetime gets the timezone key which application expects in query strings. :rtype: str | 3.160766 | 3 |
python/common.py | phanikumar1210/Automation | 0 | 6623410 | import boto3
import json
import logging
import sys
from sys import platform
from os.path import exists
import os
destination_path="G://Automation//terraform" | import boto3
import json
import logging
import sys
from sys import platform
from os.path import exists
import os
destination_path="G://Automation//terraform" | none | 1 | 1.511771 | 2 | |
cinemanio/core/tests/admin.py | cinemanio/backend | 4 | 6623411 | from parameterized import parameterized
# from django.test import modify_settings
from django.urls.base import reverse
from cinemanio.core.factories import MovieFactory, PersonFactory, CastFactory
from cinemanio.sites.imdb.factories import ImdbMovieFactory, ImdbPersonFactory
from cinemanio.sites.kinopoisk.factories import KinopoiskMovieFactory, KinopoiskPersonFactory
from cinemanio.images.factories import ImageLinkFactory
from cinemanio.core.tests.base import BaseTestCase
from cinemanio.users.factories import UserFactory, User
class AdminBaseTest(BaseTestCase):
    """
    Base test case for admin pages.

    Creates one account per role (two regular users, a staff moderator
    and a superuser admin) and provides login/logout helpers.
    """
    # shared raw password for all test accounts
    password = '<PASSWORD>'

    def setUp(self):
        # call the parent hook so BaseTestCase initialization (if any) runs
        super().setUp()
        users = [UserFactory(username='user', is_staff=False, is_active=True, is_superuser=False),
                 UserFactory(username='user_another', is_staff=False, is_active=True, is_superuser=False),
                 UserFactory(username='moderator', is_staff=True, is_active=True, is_superuser=False),
                 UserFactory(username='admin', is_staff=True, is_active=True, is_superuser=True)]
        for user in users:
            # hash the raw password so client.login() can authenticate
            user.set_password(self.password)
            user.save()

    def _login(self, username):
        """Authenticate the test client as `username` and remember the user.

        :raises ValueError: if authentication fails.
        """
        if not self.client.login(username=username, password=self.password):
            raise ValueError(f'Auth error of user "{username}"')
        self.user = User.objects.get(username=username)

    def _logout(self):
        """Log the test client out."""
        self.client.logout()
class AdminTest(AdminBaseTest):
def setUp(self):
super().setUp()
self._login('admin')
# @modify_settings(MIDDLEWARE={'remove': 'silk.middleware.SilkyMiddleware'})
@parameterized.expand([
('movie', ImdbMovieFactory, KinopoiskMovieFactory),
('person', ImdbPersonFactory, KinopoiskPersonFactory),
])
def test_objects_page(self, object_type, imdb_factory, kinopoisk_factory):
for i in range(100):
kinopoisk_factory(**{object_type: getattr(imdb_factory(), object_type)})
with self.assertNumQueries(9):
response = self.client.get(reverse(f'admin:core_{object_type}_changelist'))
self.assertEqual(response.status_code, 200)
@parameterized.expand([
('movie', MovieFactory, ImdbMovieFactory, KinopoiskMovieFactory, 20),
('person', PersonFactory, ImdbPersonFactory, KinopoiskPersonFactory, 15),
])
def test_object_page(self, object_type, factory, imdb_factory, kinopoisk_factory, queries):
instance = factory()
imdb_factory(**{object_type: instance})
kinopoisk_factory(**{object_type: instance})
for i in range(100):
CastFactory(**{object_type: instance})
for i in range(10):
ImageLinkFactory(object=instance)
# TODO: prefetch thumbnails with one extra query
with self.assertNumQueries(queries + 10):
response = self.client.get(reverse(f'admin:core_{object_type}_change', args=(instance.id,)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, f'Imdb {object_type}s')
self.assertContains(response, f'Kinopoisk {object_type}s')
self.assertContains(response, 'Cast')
self.assertContains(response, 'Image links')
def test_stat_page(self):
for i in range(100):
ImdbMovieFactory(movie=KinopoiskMovieFactory().movie)
ImdbPersonFactory(person=KinopoiskPersonFactory().person)
with self.assertNumQueries(21):
response = self.client.get(reverse(f'admin:statistic'))
self.assertEqual(response.status_code, 200)
| from parameterized import parameterized
# from django.test import modify_settings
from django.urls.base import reverse
from cinemanio.core.factories import MovieFactory, PersonFactory, CastFactory
from cinemanio.sites.imdb.factories import ImdbMovieFactory, ImdbPersonFactory
from cinemanio.sites.kinopoisk.factories import KinopoiskMovieFactory, KinopoiskPersonFactory
from cinemanio.images.factories import ImageLinkFactory
from cinemanio.core.tests.base import BaseTestCase
from cinemanio.users.factories import UserFactory, User
class AdminBaseTest(BaseTestCase):
password = '<PASSWORD>'
def setUp(self):
users = [UserFactory(username='user', is_staff=False, is_active=True, is_superuser=False),
UserFactory(username='user_another', is_staff=False, is_active=True, is_superuser=False),
UserFactory(username='moderator', is_staff=True, is_active=True, is_superuser=False),
UserFactory(username='admin', is_staff=True, is_active=True, is_superuser=True)]
for user in users:
user.set_password(self.password)
user.save()
def _login(self, username):
if self.client.login(username=username, password=self.password):
self.user = User.objects.get(username=username)
else:
raise ValueError('Auth error of user "%s"' % username)
def _logout(self):
self.client.logout()
class AdminTest(AdminBaseTest):
def setUp(self):
super().setUp()
self._login('admin')
# @modify_settings(MIDDLEWARE={'remove': 'silk.middleware.SilkyMiddleware'})
@parameterized.expand([
('movie', ImdbMovieFactory, KinopoiskMovieFactory),
('person', ImdbPersonFactory, KinopoiskPersonFactory),
])
def test_objects_page(self, object_type, imdb_factory, kinopoisk_factory):
for i in range(100):
kinopoisk_factory(**{object_type: getattr(imdb_factory(), object_type)})
with self.assertNumQueries(9):
response = self.client.get(reverse(f'admin:core_{object_type}_changelist'))
self.assertEqual(response.status_code, 200)
@parameterized.expand([
('movie', MovieFactory, ImdbMovieFactory, KinopoiskMovieFactory, 20),
('person', PersonFactory, ImdbPersonFactory, KinopoiskPersonFactory, 15),
])
def test_object_page(self, object_type, factory, imdb_factory, kinopoisk_factory, queries):
instance = factory()
imdb_factory(**{object_type: instance})
kinopoisk_factory(**{object_type: instance})
for i in range(100):
CastFactory(**{object_type: instance})
for i in range(10):
ImageLinkFactory(object=instance)
# TODO: prefetch thumbnails with one extra query
with self.assertNumQueries(queries + 10):
response = self.client.get(reverse(f'admin:core_{object_type}_change', args=(instance.id,)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, f'Imdb {object_type}s')
self.assertContains(response, f'Kinopoisk {object_type}s')
self.assertContains(response, 'Cast')
self.assertContains(response, 'Image links')
def test_stat_page(self):
for i in range(100):
ImdbMovieFactory(movie=KinopoiskMovieFactory().movie)
ImdbPersonFactory(person=KinopoiskPersonFactory().person)
with self.assertNumQueries(21):
response = self.client.get(reverse(f'admin:statistic'))
self.assertEqual(response.status_code, 200)
| en | 0.342628 | # from django.test import modify_settings # @modify_settings(MIDDLEWARE={'remove': 'silk.middleware.SilkyMiddleware'}) # TODO: prefetch thumbnails with one extra query | 2.022157 | 2 |
dipdup/taia_x/utils.py | alvaro-alonso/taia-x | 9 | 6623412 | <filename>dipdup/taia_x/utils.py
import logging
import aiohttp
_logger = logging.getLogger(__name__)
def clean_null_bytes(string: str) -> str:
return ''.join(string.split('\x00'))
def fromhex(hexbytes: str) -> str:
string = ''
try:
string = bytes.fromhex(hexbytes).decode()
except Exception:
try:
string = bytes.fromhex(hexbytes).decode('latin-1')
except Exception:
pass
return clean_null_bytes(string)
async def http_request(session: aiohttp.ClientSession, method: str, **kwargs):
"""Wrapped aiohttp call with preconfigured headers and logging"""
headers = {
**kwargs.pop('headers', {}),
'User-Agent': 'dipdup',
}
request_string = kwargs['url'] + '?' + '&'.join([f'{key}={value}' for key, value in kwargs.get('params', {}).items()])
_logger.debug('Calling `%s`', request_string)
async with getattr(session, method)(
skip_auto_headers={'User-Agent'},
headers=headers,
**kwargs,
) as response:
return await response.json() | <filename>dipdup/taia_x/utils.py
import logging
import aiohttp
_logger = logging.getLogger(__name__)
def clean_null_bytes(string: str) -> str:
return ''.join(string.split('\x00'))
def fromhex(hexbytes: str) -> str:
string = ''
try:
string = bytes.fromhex(hexbytes).decode()
except Exception:
try:
string = bytes.fromhex(hexbytes).decode('latin-1')
except Exception:
pass
return clean_null_bytes(string)
async def http_request(session: aiohttp.ClientSession, method: str, **kwargs):
"""Wrapped aiohttp call with preconfigured headers and logging"""
headers = {
**kwargs.pop('headers', {}),
'User-Agent': 'dipdup',
}
request_string = kwargs['url'] + '?' + '&'.join([f'{key}={value}' for key, value in kwargs.get('params', {}).items()])
_logger.debug('Calling `%s`', request_string)
async with getattr(session, method)(
skip_auto_headers={'User-Agent'},
headers=headers,
**kwargs,
) as response:
return await response.json() | en | 0.656337 | Wrapped aiohttp call with preconfigured headers and logging | 2.373702 | 2 |
lambda/create_jira.py | rhythmictech/terraform-aws-rhythmic-monitoring | 1 | 6623413 | import os
import logging
import urllib
import json
import boto3
import datetime
from jira import JIRA
logger = logging.getLogger()
logger.setLevel(os.environ.get('LOG_LEVEL', logging.DEBUG))
for handler in logger.handlers:
handler.setFormatter(logging.Formatter(
'%(asctime)s [%(levelname)s](%(name)s) %(message)s'))
for lib_logger in ['botocore', 'boto3', 'jira', 'requests_oauthlib', 'oauthlib', 'urllib3']:
logging.getLogger(lib_logger).setLevel(
os.environ.get('LIBRARY_LOG_LEVEL', logging.ERROR))
ISSUE_TYPE = os.environ["ISSUE_TYPE"]
INTEGRATION_NAME = os.environ["INTEGRATION_NAME"]
JIRA_API_TOKEN_SECRET_ARN = os.environ["JIRA_API_TOKEN_SECRET_ARN"]
JIRA_PROJECT = os.environ["JIRA_PROJECT"]
JIRA_URL = os.environ["JIRA_URL"]
JIRA_USERNAME = os.environ["JIRA_USERNAME"]
secrets_manager = boto3.client('secretsmanager')
secret = secrets_manager.get_secret_value(SecretId=JIRA_API_TOKEN_SECRET_ARN)
JIRA_API_TOKEN = secret['SecretString']
# establish connection to jira
jira = JIRA(
basic_auth=(JIRA_USERNAME, JIRA_API_TOKEN),
options={'server': JIRA_URL})
print("Connected to Jira: {}".format(jira.server_info()))
def parse_message(message):
if type(message) is str:
try:
message = json.loads(message)
except json.JSONDecodeError as err:
logger.error(f'JSON decode error: {err}')
return message
def lambda_handler(event, context):
""" receives events from SNS, see https://docs.aws.amazon.com/lambda/latest/dg/with-sns.html """
print("Event received: {}".format(event))
subject = event['Records'][0]['Sns']['Subject']
message = parse_message(event['Records'][0]['Sns']['Message'])
region = event['Records'][0]['Sns']['TopicArn'].split(":")[3]
description = "\n".join([ f'{k}: {v}' for k, v in message.items()])
issue_fields = {
'project': JIRA_PROJECT,
'summary': 'Alert - {}'.format(subject),
'description': description,
'issuetype': {'name': ISSUE_TYPE}}
issue = jira.create_issue(fields=issue_fields)
jira.add_comment(
issue.key, 'Alert triggered by {} aws cloudwatch integration'.format(INTEGRATION_NAME))
return (200, "OK")
| import os
import logging
import urllib
import json
import boto3
import datetime
from jira import JIRA
logger = logging.getLogger()
logger.setLevel(os.environ.get('LOG_LEVEL', logging.DEBUG))
for handler in logger.handlers:
handler.setFormatter(logging.Formatter(
'%(asctime)s [%(levelname)s](%(name)s) %(message)s'))
for lib_logger in ['botocore', 'boto3', 'jira', 'requests_oauthlib', 'oauthlib', 'urllib3']:
logging.getLogger(lib_logger).setLevel(
os.environ.get('LIBRARY_LOG_LEVEL', logging.ERROR))
ISSUE_TYPE = os.environ["ISSUE_TYPE"]
INTEGRATION_NAME = os.environ["INTEGRATION_NAME"]
JIRA_API_TOKEN_SECRET_ARN = os.environ["JIRA_API_TOKEN_SECRET_ARN"]
JIRA_PROJECT = os.environ["JIRA_PROJECT"]
JIRA_URL = os.environ["JIRA_URL"]
JIRA_USERNAME = os.environ["JIRA_USERNAME"]
secrets_manager = boto3.client('secretsmanager')
secret = secrets_manager.get_secret_value(SecretId=JIRA_API_TOKEN_SECRET_ARN)
JIRA_API_TOKEN = secret['SecretString']
# establish connection to jira
jira = JIRA(
basic_auth=(JIRA_USERNAME, JIRA_API_TOKEN),
options={'server': JIRA_URL})
print("Connected to Jira: {}".format(jira.server_info()))
def parse_message(message):
if type(message) is str:
try:
message = json.loads(message)
except json.JSONDecodeError as err:
logger.error(f'JSON decode error: {err}')
return message
def lambda_handler(event, context):
""" receives events from SNS, see https://docs.aws.amazon.com/lambda/latest/dg/with-sns.html """
print("Event received: {}".format(event))
subject = event['Records'][0]['Sns']['Subject']
message = parse_message(event['Records'][0]['Sns']['Message'])
region = event['Records'][0]['Sns']['TopicArn'].split(":")[3]
description = "\n".join([ f'{k}: {v}' for k, v in message.items()])
issue_fields = {
'project': JIRA_PROJECT,
'summary': 'Alert - {}'.format(subject),
'description': description,
'issuetype': {'name': ISSUE_TYPE}}
issue = jira.create_issue(fields=issue_fields)
jira.add_comment(
issue.key, 'Alert triggered by {} aws cloudwatch integration'.format(INTEGRATION_NAME))
return (200, "OK")
| en | 0.915697 | # establish connection to jira receives events from SNS, see https://docs.aws.amazon.com/lambda/latest/dg/with-sns.html | 2.345658 | 2 |
nandboxbots/inmessages/InlineSearch.py | nandbox/nandboxbotsapi-py | 0 | 6623414 | import json
from nandboxbots.data.Chat import Chat
from nandboxbots.data.User import User
class InlineSearch:
__KEY_INLINE_SEARCH = "inlineSearch"
__KEY_DATE = "date"
__KEY_METHOD = "method"
__KEY_CHAT = "chat"
__KEY_FROM = "from"
__KEY_SEARCH_ID = "search_id"
__KEY_OFFSET = "offset"
__KEY_KEYWORDS = "keywords"
date = None
method = None
chat = None
from_ = None
search_id = None
offset = None
keywords = None
def __init__(self, dictionary):
inline_search_dict = dictionary[self.__KEY_INLINE_SEARCH] if self.__KEY_INLINE_SEARCH in dictionary.keys() else {}
from_user = User(inline_search_dict.get(self.__KEY_FROM, {}))
self.chat = Chat(inline_search_dict.get(self.__KEY_CHAT, None))
self.method = str(inline_search_dict[self.__KEY_METHOD]) if self.__KEY_METHOD in inline_search_dict.keys() else None
self.from_ = from_user
self.date = int(inline_search_dict[self.__KEY_DATE]) if self.__KEY_DATE in inline_search_dict.keys() else None
self.search_id = int(inline_search_dict[self.__KEY_SEARCH_ID]) if self.__KEY_SEARCH_ID in inline_search_dict.keys() else None
self.offset = str(inline_search_dict[self.__KEY_OFFSET]) if self.__KEY_OFFSET in inline_search_dict.keys() else None
self.keywords = str(inline_search_dict[self.__KEY_KEYWORDS]) if self.__KEY_KEYWORDS in inline_search_dict.keys() else None
def to_json_obj(self):
dictionary = {}
if self.date is not None:
dictionary[self.__KEY_DATE] = self.date
if self.from_ is not None:
_, from_user_dict = self.from_.to_json_obj()
dictionary[self.__KEY_FROM] = from_user_dict
if self.chat is not None:
_, chat_dict = self.chat.to_json_obj()
dictionary[self.__KEY_CHAT] = chat_dict
if self.method is not None:
dictionary[self.__KEY_METHOD] = self.method
if self.search_id is not None:
dictionary[self.__KEY_SEARCH_ID] = self.search_id
if self.offset is not None:
dictionary[self.__KEY_OFFSET] = self.offset
if self.keywords is not None:
dictionary[self.__KEY_KEYWORDS] = self.keywords
return json.dumps(dictionary), dictionary
| import json
from nandboxbots.data.Chat import Chat
from nandboxbots.data.User import User
class InlineSearch:
__KEY_INLINE_SEARCH = "inlineSearch"
__KEY_DATE = "date"
__KEY_METHOD = "method"
__KEY_CHAT = "chat"
__KEY_FROM = "from"
__KEY_SEARCH_ID = "search_id"
__KEY_OFFSET = "offset"
__KEY_KEYWORDS = "keywords"
date = None
method = None
chat = None
from_ = None
search_id = None
offset = None
keywords = None
def __init__(self, dictionary):
inline_search_dict = dictionary[self.__KEY_INLINE_SEARCH] if self.__KEY_INLINE_SEARCH in dictionary.keys() else {}
from_user = User(inline_search_dict.get(self.__KEY_FROM, {}))
self.chat = Chat(inline_search_dict.get(self.__KEY_CHAT, None))
self.method = str(inline_search_dict[self.__KEY_METHOD]) if self.__KEY_METHOD in inline_search_dict.keys() else None
self.from_ = from_user
self.date = int(inline_search_dict[self.__KEY_DATE]) if self.__KEY_DATE in inline_search_dict.keys() else None
self.search_id = int(inline_search_dict[self.__KEY_SEARCH_ID]) if self.__KEY_SEARCH_ID in inline_search_dict.keys() else None
self.offset = str(inline_search_dict[self.__KEY_OFFSET]) if self.__KEY_OFFSET in inline_search_dict.keys() else None
self.keywords = str(inline_search_dict[self.__KEY_KEYWORDS]) if self.__KEY_KEYWORDS in inline_search_dict.keys() else None
def to_json_obj(self):
dictionary = {}
if self.date is not None:
dictionary[self.__KEY_DATE] = self.date
if self.from_ is not None:
_, from_user_dict = self.from_.to_json_obj()
dictionary[self.__KEY_FROM] = from_user_dict
if self.chat is not None:
_, chat_dict = self.chat.to_json_obj()
dictionary[self.__KEY_CHAT] = chat_dict
if self.method is not None:
dictionary[self.__KEY_METHOD] = self.method
if self.search_id is not None:
dictionary[self.__KEY_SEARCH_ID] = self.search_id
if self.offset is not None:
dictionary[self.__KEY_OFFSET] = self.offset
if self.keywords is not None:
dictionary[self.__KEY_KEYWORDS] = self.keywords
return json.dumps(dictionary), dictionary
| none | 1 | 2.505655 | 3 | |
cycles/__init__.py | pcarolan/cycles | 0 | 6623415 | <gh_stars>0
__title__ = 'cycles'
__version__ = '0.1.0'
__author__ = '<NAME>'
__copyright__ = 'Copyright 2016 <NAME>'
from .hub import Hub
__all__ = ['Hub']
| __title__ = 'cycles'
__version__ = '0.1.0'
__author__ = '<NAME>'
__copyright__ = 'Copyright 2016 <NAME>'
from .hub import Hub
__all__ = ['Hub'] | none | 1 | 1.110821 | 1 | |
py/postit.py | schaabs/sandbox | 0 | 6623416 | <gh_stars>0
from flask import Flask, jsonify, request, abort
import json
app = Flask(__name__)
@app.route('/payload', methods=['POST'])
def create_task():
if not request.json:
abort(400)
with open('d:\\temp\\posted.json', 'a+') as file:
json.dump(request.json, file)
return jsonify(request.json), 201
if __name__ == '__main__':
app.run(debug=True)
| from flask import Flask, jsonify, request, abort
import json
app = Flask(__name__)
@app.route('/payload', methods=['POST'])
def create_task():
if not request.json:
abort(400)
with open('d:\\temp\\posted.json', 'a+') as file:
json.dump(request.json, file)
return jsonify(request.json), 201
if __name__ == '__main__':
app.run(debug=True) | none | 1 | 2.612899 | 3 | |
PokeType/lexer/lexer.py | Daggy1234/PokeType | 2 | 6623417 | from rply import LexerGenerator
from rply.lexer import Lexer
from typing import Dict, List
from .tokens import tokens, ignore_tokens
class PokeLexer:
lexer: LexerGenerator
tokens: Dict[str, str]
ignore_tokens: List[str]
def __init__(self):
self.lexer = LexerGenerator()
self.tokens = tokens
self.ignore_tokens = ignore_tokens
def add_tokens(self):
for key, value in self.tokens.items():
self.lexer.add(key, value)
for item in self.ignore_tokens:
self.lexer.ignore(item)
def get_token_list(self) -> List[str]:
toks = []
for key in self.tokens.keys():
toks.append(key)
return toks
def get_lexer(self):
self.add_tokens()
return self.lexer.build()
| from rply import LexerGenerator
from rply.lexer import Lexer
from typing import Dict, List
from .tokens import tokens, ignore_tokens
class PokeLexer:
lexer: LexerGenerator
tokens: Dict[str, str]
ignore_tokens: List[str]
def __init__(self):
self.lexer = LexerGenerator()
self.tokens = tokens
self.ignore_tokens = ignore_tokens
def add_tokens(self):
for key, value in self.tokens.items():
self.lexer.add(key, value)
for item in self.ignore_tokens:
self.lexer.ignore(item)
def get_token_list(self) -> List[str]:
toks = []
for key in self.tokens.keys():
toks.append(key)
return toks
def get_lexer(self):
self.add_tokens()
return self.lexer.build()
| none | 1 | 2.431667 | 2 | |
apps/contrib/utils/dicts.py | vicobits/django-wise | 5 | 6623418 | <reponame>vicobits/django-wise
# -*- coding: utf-8 -*-
def lower_dict_values(dict_obj):
"""It applies lower to all values of a dict."""
new_dict = {}
for key, value in dict_obj.items():
new_dict[key] = value.lower() if isinstance(value, str) else value
return new_dict
| # -*- coding: utf-8 -*-
def lower_dict_values(dict_obj):
"""It applies lower to all values of a dict."""
new_dict = {}
for key, value in dict_obj.items():
new_dict[key] = value.lower() if isinstance(value, str) else value
return new_dict | en | 0.771932 | # -*- coding: utf-8 -*- It applies lower to all values of a dict. | 4.221361 | 4 |
of_route_processor.py | lijian2020/NDNAPP | 0 | 6623419 | #!/usr/bin/python3
#
# Copyright (C) 2019 Trinity College of Dublin, the University of Dublin.
# Copyright (c) 2019 <NAME>
# Author: <NAME> <<EMAIL>>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''This class process the case that NDN node has no next hop.
It is a trigger that invokes the open flow procedure to search
the local openflow table and send packet-in message to controller'''
import os
import time
import datetime
import pyinotify
import logging
from packetin import PacketIn
from oscommand import OSCommand
from ndnflowtable import NdnFlowTable
class OF_Route_Processor():
def __init__(self):
self.nodeid = OSCommand.getnodeid()
self.unknownprefixtable = set()
def loglistener(self):
'''this method listens the file '/tmp/mininet/node_id/nfd.log',
if there is new log items indecate 'noNextHop', this method
will deal with it'''
pos = 0
log_file = r'/tmp/minindn/{}/nfd.log'.format(self.nodeid, self.nodeid)
while True:
try:
with open(log_file) as f:
if pos != 0:
f.seek(pos, 0)
while True:
line = f.readline()
if line.strip():
# print(line.strip())
linestr = line.strip()
self.noNextHopItems_log_checker(linestr)
pos = pos + len(line)
if not line.strip():
break
except:
print('error in open log file')
def noNextHopItems_log_checker(self, linestr):
'''select out the log items which include 'noNextHop' mark'''
linelist = linestr.split()
try:
if (linelist[5] == 'noNextHop'):
prefix = (linelist[3].split('?'))[0]
if (not (prefix.startswith('/localhop/') \
or prefix.startswith('/ndn/ie/tcd/controller01/ofndn') \
or prefix.startswith('/ndn/{}-site/{}/ofndn'.format(self.nodeid, self.nodeid)) \
or prefix in self.unknownprefixtable)):
self.unknownprefixtable.add(prefix)
print('[No Route in RIB ] for \n {}'.format(prefix))
if (not NdnFlowTable.searchitem(prefix)):
PacketIn().run(prefix)
except:
pass
# def packetin_sender(self, unknown_prefix):
# if (PacketIn().run(unknown_prefix)):
# print("NDN FlowTable has been updated")
#
#
# if __name__ == '__main__':
# OF_Route_Processor().loglistener()
| #!/usr/bin/python3
#
# Copyright (C) 2019 Trinity College of Dublin, the University of Dublin.
# Copyright (c) 2019 <NAME>
# Author: <NAME> <<EMAIL>>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''This class process the case that NDN node has no next hop.
It is a trigger that invokes the open flow procedure to search
the local openflow table and send packet-in message to controller'''
import os
import time
import datetime
import pyinotify
import logging
from packetin import PacketIn
from oscommand import OSCommand
from ndnflowtable import NdnFlowTable
class OF_Route_Processor():
def __init__(self):
self.nodeid = OSCommand.getnodeid()
self.unknownprefixtable = set()
def loglistener(self):
'''this method listens the file '/tmp/mininet/node_id/nfd.log',
if there is new log items indecate 'noNextHop', this method
will deal with it'''
pos = 0
log_file = r'/tmp/minindn/{}/nfd.log'.format(self.nodeid, self.nodeid)
while True:
try:
with open(log_file) as f:
if pos != 0:
f.seek(pos, 0)
while True:
line = f.readline()
if line.strip():
# print(line.strip())
linestr = line.strip()
self.noNextHopItems_log_checker(linestr)
pos = pos + len(line)
if not line.strip():
break
except:
print('error in open log file')
def noNextHopItems_log_checker(self, linestr):
'''select out the log items which include 'noNextHop' mark'''
linelist = linestr.split()
try:
if (linelist[5] == 'noNextHop'):
prefix = (linelist[3].split('?'))[0]
if (not (prefix.startswith('/localhop/') \
or prefix.startswith('/ndn/ie/tcd/controller01/ofndn') \
or prefix.startswith('/ndn/{}-site/{}/ofndn'.format(self.nodeid, self.nodeid)) \
or prefix in self.unknownprefixtable)):
self.unknownprefixtable.add(prefix)
print('[No Route in RIB ] for \n {}'.format(prefix))
if (not NdnFlowTable.searchitem(prefix)):
PacketIn().run(prefix)
except:
pass
# def packetin_sender(self, unknown_prefix):
# if (PacketIn().run(unknown_prefix)):
# print("NDN FlowTable has been updated")
#
#
# if __name__ == '__main__':
# OF_Route_Processor().loglistener()
| en | 0.756007 | #!/usr/bin/python3 # # Copyright (C) 2019 Trinity College of Dublin, the University of Dublin. # Copyright (c) 2019 <NAME> # Author: <NAME> <<EMAIL>> # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This class process the case that NDN node has no next hop. It is a trigger that invokes the open flow procedure to search the local openflow table and send packet-in message to controller this method listens the file '/tmp/mininet/node_id/nfd.log', if there is new log items indecate 'noNextHop', this method will deal with it # print(line.strip()) select out the log items which include 'noNextHop' mark # def packetin_sender(self, unknown_prefix): # if (PacketIn().run(unknown_prefix)): # print("NDN FlowTable has been updated") # # # if __name__ == '__main__': # OF_Route_Processor().loglistener() | 2.273679 | 2 |
kontrasto/wcag_3.py | nimasmi/kontrasto | 13 | 6623420 | <reponame>nimasmi/kontrasto<filename>kontrasto/wcag_3.py
import math
from typing import Iterable
from .convert import to_rgb
from .lookup_table import get_apca_font_styles
# Python port of: https://github.com/Myndex/SAPC-APCA/blob/master/JS/APCAonly.98e_d12e.js
def apca_contrast(background, text):
if isinstance(background, str):
background = to_rgb(background)
if isinstance(text, str):
text = to_rgb(text)
Rbg, Gbg, Bbg = background
Rtxt, Gtxt, Btxt = text
# ///// MAGICAL NUMBERS ///////////////////////////////
# ///// sRGB Conversion to Relative Luminance (Y) /////
mainTRC = 2.4 # Transfer Curve (aka "Gamma") for sRGB linearization
# // Simple power curve vs piecewise described in docs
# // Essentially, 2.4 best models actual display
# // characteristics in combination with the total method
# mainTRCencode = 0.41666666666666666667 # = 1.0/mainTRC;
Rco = 0.2126729 # sRGB Red Coefficient (from matrix)
Gco = 0.7151522 # sRGB Green Coefficient (from matrix)
Bco = 0.072175 # sRGB Blue Coefficient (from matrix)
# ///// For Finding Raw SAPC Contrast from Relative Luminance (Y) /////
normBG = 0.55 # Constants for SAPC Power Curve Exponents
normTXT = 0.58 # One pair for normal text, and one for reverse
revTXT = 0.57 # These are the "beating heart" of SAPC
revBG = 0.62
# ///// For Clamping and Scaling Values /////
blkThrs = 0.03 # Level that triggers the soft black clamp
blkClmp = 1.45 # Exponent for the soft black clamp curve
deltaYmin = 0.0005 # Lint trap
scaleBoW = 1.25 # Scaling for dark text on light
scaleWoB = 1.25 # Scaling for light text on dark
loConThresh = 0.078 # Threshold for new simple offset scale
loConFactor = 12.82051282051282 # = 1/0.078,
loConOffset = 0.06 # The simple offset
loClip = 0.001 # Output clip (lint trap #2)
# // We are only concerned with Y at this point
# // Ybg and Ytxt: divide sRGB to 0.0-1.0 range, linearize,
# // and then apply the standard coefficients and sum to Y.
# // Note that the Y we create here is unique and designed
# // exclusively for SAPC. Do not use Y from other methods.
Ybg = (
math.pow(Rbg / 255.0, mainTRC) * Rco
+ math.pow(Gbg / 255.0, mainTRC) * Gco
+ math.pow(Bbg / 255.0, mainTRC) * Bco
)
Ytxt = (
math.pow(Rtxt / 255.0, mainTRC) * Rco
+ math.pow(Gtxt / 255.0, mainTRC) * Gco
+ math.pow(Btxt / 255.0, mainTRC) * Bco
)
# SAPC = 0.0 # For holding raw SAPC values
# outputContrast = 0.0 # For weighted final values
# ///// TUTORIAL /////
# // Take Y and soft clamp black, return 0 for very close luminances
# // determine polarity, and calculate SAPC raw contrast
# // Then apply the output scaling
# // Note that reverse contrast (white text on black)
# // intentionally returns a negative number
# // Proper polarity is important!
# ////////// BLACK SOFT CLAMP & INPUT CLIP ////////////////////////////////
# // Soft clamp Y when near black.
# // Now clamping all colors to prevent crossover errors
Ytxt = (
Ytxt if (Ytxt > blkThrs) else Ytxt + math.pow(blkThrs - Ytxt, blkClmp)
)
Ybg = Ybg if (Ybg > blkThrs) else Ybg + math.pow(blkThrs - Ybg, blkClmp)
# ///// Return 0 Early for extremely low ∆Y (lint trap #1) /////
if math.fabs(Ybg - Ytxt) < deltaYmin:
return 0.0
# ////////// SAPC CONTRAST ///////////////////////////////////////////////
if Ybg > Ytxt:
# // For normal polarity, black text on white
# ///// Calculate the SAPC contrast value and scale
SAPC = (math.pow(Ybg, normBG) - math.pow(Ytxt, normTXT)) * scaleBoW
# ///// NEW! SAPC SmoothScale™
# // Low Contrast Smooth Scale Rollout to prevent polarity reversal
# // and also a low clip for very low contrasts (lint trap #2)
# // much of this is for very low contrasts, less than 10
# // therefore for most reversing needs, only loConOffset is important
if SAPC < loClip:
outputContrast = 0.0
elif SAPC < loConThresh:
outputContrast = SAPC - SAPC * loConFactor * loConOffset
else:
outputContrast = SAPC - loConOffset
else:
# // For reverse polarity, light text on dark
# // WoB should always return negative value.
SAPC = (math.pow(Ybg, revBG) - math.pow(Ytxt, revTXT)) * scaleWoB
if SAPC > -loClip:
outputContrast = 0.0
elif SAPC > -loConThresh:
outputContrast = SAPC - SAPC * loConFactor * loConOffset
else:
outputContrast = SAPC + loConOffset
return outputContrast * 100
def format_contrast(score: float) -> str:
score = abs(score)
return score
def get_font_styles(score: float, weights: Iterable[int], sizes: Iterable[int]):
styles = get_apca_font_styles(score)
if len(styles) == 0:
return False
matching_styles = []
for style in styles:
if style["weight"] >= weights[0] and style["weight"] <= weights[-1]:
if style["size"] >= sizes[0] and style["size"] <= sizes[-1]:
matching_styles.append((style["size"], style["weight"]))
return matching_styles
| import math
from typing import Iterable
from .convert import to_rgb
from .lookup_table import get_apca_font_styles
# Python port of: https://github.com/Myndex/SAPC-APCA/blob/master/JS/APCAonly.98e_d12e.js
def apca_contrast(background, text):
    """Compute the APCA (SAPC) lightness contrast between two colors.

    ``background`` and ``text`` are (R, G, B) values in 0-255, or color
    strings convertible by ``to_rgb``. Returns a signed score: positive
    for dark text on a light background, negative for light text on a
    dark background, and 0.0 when the luminance difference is below the
    lint-trap threshold or the raw contrast is clipped.

    Port of Myndex SAPC-APCA 0.98e (JS); constants and statement order
    intentionally mirror the reference implementation -- do not reorder.
    """
    if isinstance(background, str):
        background = to_rgb(background)
    if isinstance(text, str):
        text = to_rgb(text)
    Rbg, Gbg, Bbg = background
    Rtxt, Gtxt, Btxt = text
    # ///// MAGICAL NUMBERS ///////////////////////////////
    # ///// sRGB Conversion to Relative Luminance (Y) /////
    mainTRC = 2.4 # Transfer Curve (aka "Gamma") for sRGB linearization
    # // Simple power curve vs piecewise described in docs
    # // Essentially, 2.4 best models actual display
    # // characteristics in combination with the total method
    # mainTRCencode = 0.41666666666666666667 # = 1.0/mainTRC;
    Rco = 0.2126729 # sRGB Red Coefficient (from matrix)
    Gco = 0.7151522 # sRGB Green Coefficient (from matrix)
    Bco = 0.072175 # sRGB Blue Coefficient (from matrix)
    # ///// For Finding Raw SAPC Contrast from Relative Luminance (Y) /////
    normBG = 0.55 # Constants for SAPC Power Curve Exponents
    normTXT = 0.58 # One pair for normal text, and one for reverse
    revTXT = 0.57 # These are the "beating heart" of SAPC
    revBG = 0.62
    # ///// For Clamping and Scaling Values /////
    blkThrs = 0.03 # Level that triggers the soft black clamp
    blkClmp = 1.45 # Exponent for the soft black clamp curve
    deltaYmin = 0.0005 # Lint trap
    scaleBoW = 1.25 # Scaling for dark text on light
    scaleWoB = 1.25 # Scaling for light text on dark
    loConThresh = 0.078 # Threshold for new simple offset scale
    loConFactor = 12.82051282051282 # = 1/0.078,
    loConOffset = 0.06 # The simple offset
    loClip = 0.001 # Output clip (lint trap #2)
    # // We are only concerned with Y at this point
    # // Ybg and Ytxt: divide sRGB to 0.0-1.0 range, linearize,
    # // and then apply the standard coefficients and sum to Y.
    # // Note that the Y we create here is unique and designed
    # // exclusively for SAPC. Do not use Y from other methods.
    Ybg = (
        math.pow(Rbg / 255.0, mainTRC) * Rco
        + math.pow(Gbg / 255.0, mainTRC) * Gco
        + math.pow(Bbg / 255.0, mainTRC) * Bco
    )
    Ytxt = (
        math.pow(Rtxt / 255.0, mainTRC) * Rco
        + math.pow(Gtxt / 255.0, mainTRC) * Gco
        + math.pow(Btxt / 255.0, mainTRC) * Bco
    )
    # SAPC = 0.0 # For holding raw SAPC values
    # outputContrast = 0.0 # For weighted final values
    # ///// TUTORIAL /////
    # // Take Y and soft clamp black, return 0 for very close luminances
    # // determine polarity, and calculate SAPC raw contrast
    # // Then apply the output scaling
    # // Note that reverse contrast (white text on black)
    # // intentionally returns a negative number
    # // Proper polarity is important!
    # ////////// BLACK SOFT CLAMP & INPUT CLIP ////////////////////////////////
    # // Soft clamp Y when near black.
    # // Now clamping all colors to prevent crossover errors
    Ytxt = (
        Ytxt if (Ytxt > blkThrs) else Ytxt + math.pow(blkThrs - Ytxt, blkClmp)
    )
    Ybg = Ybg if (Ybg > blkThrs) else Ybg + math.pow(blkThrs - Ybg, blkClmp)
    # ///// Return 0 Early for extremely low ∆Y (lint trap #1) /////
    if math.fabs(Ybg - Ytxt) < deltaYmin:
        return 0.0
    # ////////// SAPC CONTRAST ///////////////////////////////////////////////
    if Ybg > Ytxt:
        # // For normal polarity, black text on white
        # ///// Calculate the SAPC contrast value and scale
        SAPC = (math.pow(Ybg, normBG) - math.pow(Ytxt, normTXT)) * scaleBoW
        # ///// NEW! SAPC SmoothScale™
        # // Low Contrast Smooth Scale Rollout to prevent polarity reversal
        # // and also a low clip for very low contrasts (lint trap #2)
        # // much of this is for very low contrasts, less than 10
        # // therefore for most reversing needs, only loConOffset is important
        if SAPC < loClip:
            outputContrast = 0.0
        elif SAPC < loConThresh:
            outputContrast = SAPC - SAPC * loConFactor * loConOffset
        else:
            outputContrast = SAPC - loConOffset
    else:
        # // For reverse polarity, light text on dark
        # // WoB should always return negative value.
        SAPC = (math.pow(Ybg, revBG) - math.pow(Ytxt, revTXT)) * scaleWoB
        if SAPC > -loClip:
            outputContrast = 0.0
        elif SAPC > -loConThresh:
            outputContrast = SAPC - SAPC * loConFactor * loConOffset
        else:
            outputContrast = SAPC + loConOffset
    return outputContrast * 100
def format_contrast(score: float) -> float:
    """Return the magnitude of an APCA contrast score.

    APCA scores are signed (negative for light-on-dark polarity); only
    the absolute value is meaningful for display. The original annotation
    claimed ``-> str`` but the function has always returned a number, so
    the annotation is corrected to ``float`` to match actual behavior.
    """
    return abs(score)
def get_font_styles(score: float, weights: Iterable[int], sizes: Iterable[int]):
    """Return (size, weight) pairs usable at the given APCA contrast score.

    ``weights`` and ``sizes`` are indexed with ``[0]`` and ``[-1]``, so
    despite the ``Iterable`` annotation they must be ordered sequences
    interpreted as (min .. max) ranges. Returns ``False`` (not an empty
    list) when the lookup table yields no styles at all -- callers must
    handle both return types.

    Fix: the final ``return`` line was corrupted by extraneous text fused
    onto it (a syntax error); the clean statement is restored here.
    """
    styles = get_apca_font_styles(score)
    if len(styles) == 0:
        return False
    matching_styles = []
    for style in styles:
        # Keep only styles whose weight and size fall inside the
        # caller-supplied [min, max] ranges.
        if style["weight"] >= weights[0] and style["weight"] <= weights[-1]:
            if style["size"] >= sizes[0] and style["size"] <= sizes[-1]:
                matching_styles.append((style["size"], style["weight"]))
    return matching_styles
# // Now clamping all colors to prevent crossover errors
"""Defines all the primitive behaviors for the agents.
This file name is sbehaviors coz `s` stands for swarms.
"""
import numpy as np
from py_trees.trees import BehaviourTree
from py_trees.behaviour import Behaviour
from py_trees.composites import Sequence, Selector, Parallel
from py_trees import common, blackboard
import py_trees
from swarms.utils.distangle import get_direction, check_intersect
from swarms.lib.objects import Pheromones, Signal, Cue
import os
import matplotlib
# If there is $DISPLAY, display the plot
if os.name == 'posix' and "DISPLAY" not in os.environ:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class ObjectsStore:
    """Lookup helper for sensed objects.

    Provides a single static ``find`` method that searches the behavior
    tree blackboard first and falls back to the agent's shared content.
    """

    @staticmethod
    def find(blackboard_content, agent_content, name, agent_name):
        """Return a list of stored objects matching ``name``.

        When ``name`` is None, every value stored in the blackboard is
        returned as a list. Otherwise the blackboard is consulted first;
        if ``name`` is missing there, the agent's shared content is
        tried, and if it is missing there as well an empty list is
        returned. ``agent_name`` is accepted for interface compatibility
        but is not used in the lookup.
        """
        if name is None:
            return list(blackboard_content.values())
        try:
            return list(blackboard_content[name])
        except KeyError:
            pass
        try:
            return list(agent_content[name])
        except KeyError:
            return []
class NeighbourObjects(Behaviour):
    """Sense behavior for the agents.
    Inherits the Behaviors class from py_trees. This
    behavior implements the sense function for the agents. This allows
    the agents to sense the nearby environment based on the their
    sense radius.
    """
    def __init__(self, name):
        """Init method for the sense behavior."""
        super(NeighbourObjects, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Have defined the setup method.
        This method defines the other objects required for the
        behavior. Agent is the actor in the environment,
        item is the name of the item we are trying to find in the
        environment and timeout defines the execution time for the
        behavior.
        """
        self.agent = agent
        self.item = item
        # Per-agent blackboard client; sensed objects are published under
        # the 'neighbourobj' key for other behaviors in the tree to read.
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.WRITE)
    def initialise(self):
        """Everytime initialization. Not required for now."""
        pass
    def receive_signals(self):
        """Receive signals from other agents.
        Since this is the primary behavior for the agents to sense
        the environment, we include the receive signal method here.
        The agents will be able to
        sense the environment and check if
        it receives any signals from other agents.
        """
    def update(self):
        """
        Sense the neighborhood.
        This method gets the grid values based on the current location and
        radius. The grids are used to search the environment. If the agents
        find any objects, it is stored in the behavior tree blackboard which
        is a dictionary with sets as values.

        Returns SUCCESS when at least one object other than the sensing
        agent itself is found, FAILURE otherwise.
        """
        # if self.item is None:
        #     grids = self.agent.model.grid.get_neighborhood(
        #         self.agent.location, self.agent.radius*4)
        # else:
        grids = self.agent.model.grid.get_neighborhood(
            self.agent.location, self.agent.radius)
        objects = self.agent.model.grid.get_objects_from_list_of_grid(
            self.item, grids)
        # Need to reset blackboard contents after each sense
        self.blackboard.neighbourobj = dict()
        if len(objects) >= 1:
            # The sensing agent can appear in its own neighborhood;
            # exclude it from the sensed set.
            if self.agent in objects:
                objects.remove(self.agent)
            if len(objects) >= 1:
                for item in objects:
                    name = type(item).__name__
                    # Is the item is not carrable, its location
                    # and property doesnot change. So we can commit its
                    # information to memory
                    # if item.carryable is False and item.deathable is False:
                    if name in ['Sites', 'Hub', 'Boundary']:
                        # Static landmarks go to the agent's long-lived
                        # shared memory instead of the per-tick blackboard.
                        try:
                            self.agent.shared_content[name].add(item)
                        except KeyError:
                            self.agent.shared_content[name] = {item}
                    else:
                        # name = name + str(self.agent.name)
                        try:
                            self.blackboard.neighbourobj[name].add(item)
                        except KeyError:
                            self.blackboard.neighbourobj = dict()
                            self.blackboard.neighbourobj[name] = {item}
                return common.Status.SUCCESS
            else:
                return common.Status.FAILURE
        else:
            return common.Status.FAILURE
class NeighbourObjectsDist(Behaviour):
    """Sense behavior for the agents.
    Inherits the Behaviors class from py_trees. This
    behavior implements the sense function for the agents. This allows
    the agents to sense the nearby environment based on the their
    sense radius.
    """
    def __init__(self, name):
        """Init method for the sense behavior."""
        super(NeighbourObjectsDist, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Have defined the setup method.
        This method defines the other objects required for the
        behavior. Agent is the actor in the environment,
        item is the name of the item we are trying to find in the
        environment and timeout defines the execution time for the
        behavior.
        """
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.WRITE)
    def initialise(self):
        """Everytime initialization. Not required for now."""
        pass
    def receive_signals(self):
        """Receive signals from other agents.
        Since this is the primary behavior for the agents to sense
        the environment, we include the receive signal method here.
        The agents will be able to
        sense the environment and check if
        it receives any signals from other agents.
        """
    def update(self):
        """
        Sense the neighborhood.
        This method gets the grid values based on the current location and
        radius. The grids are used to search the environment. If the agents
        find any objects, it is stored in the behavior tree blackboard which
        is a dictionary with sets as values.

        Unlike NeighbourObjects, this casts a ray from the agent's
        location along its current heading, one step per cell up to the
        sense radius, recording non-passable objects it encounters.
        """
        # if self.item is None:
        #     grids = self.agent.model.grid.get_neighborhood(
        #         self.agent.location, self.agent.radius*4)
        # else:
        #     grids = self.agent.model.grid.get_neighborhood(
        #         self.agent.location, self.agent.radius)
        grids = []
        # NOTE(review): `grids` is a leftover from the commented-out
        # neighborhood implementation above and is never used.
        # for i in range(1, self.agent.model.grid.grid_size):
        status = common.Status.FAILURE
        for i in range(0, self.agent.radius):
            # Step i cells along the current heading.
            x = int(self.agent.location[0] + np.cos(
                self.agent.direction) * i)
            y = int(self.agent.location[1] + np.sin(
                self.agent.direction) * i)
            new_location, direction = self.agent.model.grid.check_limits(
                (x, y), self.agent.direction)
            # grids += self.agent.model.grid.get_neighborhood(new_location, 1)
            limits, grid = self.agent.model.grid.find_grid(new_location)
            # print(self.agent.name, grid, self.name, round(self.agent.direction, 2), self.id, limits)
            objects = self.agent.model.grid.get_objects(
                self.item, grid)
            # print('nighbourdist', grid, objects, self.agent.location, (new_location), limits)
            # Need to reset blackboard contents after each sense
            self.blackboard.neighbourobj = dict()
            if len(objects) >= 1:
                if self.agent in objects:
                    objects.remove(self.agent)
                for item in objects:
                    name = type(item).__name__
                    # Is the item is not carrable, its location
                    # and property doesnot change. So we can commit its
                    # information to memory
                    # if item.carryable is False and item.deathable is False:
                    # name = name + str(self.agent.name)
                    if item.passable is False:
                        try:
                            self.blackboard.neighbourobj[name].add(item)
                        except KeyError:
                            self.blackboard.neighbourobj[name] = {item}
                        # if status == common.Status.SUCCESS:
                        #     pass
                        # else:
                        status = common.Status.SUCCESS
                # Stop at the first cell that yielded objects; anything
                # behind a non-passable object is not sensed.
                return status
        return status
class GoTo(Behaviour):
    """GoTo behavior for the agents.

    Inherits the Behaviour class from py_trees. This behavior aligns the
    agent's heading towards the object of interest; it only changes the
    direction, it does not move the agent.
    """

    def __init__(self, name):
        """Init method for the GoTo behavior."""
        super(GoTo, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Store the agent, the item type name and attach the blackboard.

        ``agent`` is the actor in the environment, ``item`` is the type
        name of the object to head towards and ``timeout`` is kept for
        interface compatibility.
        """
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Everytime initialization. Not required for now."""
        pass

    def update(self):
        """Align the agent's heading towards a sensed object.

        Searches the blackboard and the agent's shared content for the
        item. If at least one match is found, one is picked at random and
        the agent's direction is set towards it; otherwise FAILURE is
        returned. (The original code indexed an empty list in the no-match
        branch and relied on the resulting IndexError being caught; the
        empty case is now handled explicitly.)
        """
        try:
            objects = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            if not objects:
                return common.Status.FAILURE
            target = self.agent.model.random.choice(objects)
            self.agent.direction = get_direction(
                target.location, self.agent.location) % (2 * np.pi)
            return common.Status.SUCCESS
        except (AttributeError, IndexError):
            return common.Status.FAILURE
# Behavior defined to move towards something
class Towards(Behaviour):
    """Towards behavior.

    Placeholder step in the move sequence: the heading is expected to be
    set elsewhere (e.g. GoTo), so this behavior leaves it untouched and
    always succeeds.
    """

    def __init__(self, name):
        """Initialize."""
        super(Towards, self).__init__(name)

    def setup(self, timeout, agent, item=None):
        """Remember the acting agent."""
        self.agent = agent

    def initialise(self):
        """Nothing to initialise."""
        pass

    def update(self):
        """Always succeed without touching the heading."""
        return common.Status.SUCCESS
# Behavior defined to move away from something
class Away(Behaviour):
    """Away behavior.

    Flips the agent's heading by half a turn so the agent moves away
    from whatever it is currently facing.
    """

    def __init__(self, name):
        """Initialize."""
        super(Away, self).__init__(name)

    def setup(self, timeout, agent, item=None):
        """Remember the acting agent."""
        self.agent = agent

    def initialise(self):
        """Nothing to initialise."""
        pass

    def update(self):
        """Rotate the heading by pi radians, wrapped into [0, 2*pi)."""
        flipped = self.agent.direction + np.pi
        self.agent.direction = flipped % (2 * np.pi)
        return common.Status.SUCCESS
# Behavior defined for Randomwalk
class RandomWalk(Behaviour):
    """Random walk behavior.

    Perturbs the agent's heading with a small Gaussian jitter on every
    tick, producing a correlated random walk.
    """

    def __init__(self, name):
        """Initialize."""
        super(RandomWalk, self).__init__(name)

    def setup(self, timeout, agent, item=None):
        """Remember the acting agent."""
        self.agent = agent

    def initialise(self):
        """Nothing to initialise."""
        pass

    def update(self):
        """Add N(0, 0.1) noise to the heading and wrap into [0, 2*pi)."""
        jitter = self.agent.model.random.normal(0, .1)
        self.agent.direction = (self.agent.direction + jitter) % (2 * np.pi)
        return common.Status.SUCCESS
class IsMoveable(Behaviour):
    """Condition: check that all sensed items of this type are moveable."""

    def __init__(self, name):
        """Initialize."""
        super(IsMoveable, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Remember the agent, the item name and attach the blackboard."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Everytime initialization. Not required for now."""
        pass

    def update(self):
        """Succeed only if every matching object is moveable.

        Bug fix: the loop previously tested ``objects.moveable`` (the
        whole list, which has no such attribute) instead of
        ``obj.moveable`` (the current element), so the AttributeError
        made this condition always FAIL whenever objects were found.
        """
        try:
            objects = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            for obj in objects:
                if not obj.moveable:
                    return common.Status.FAILURE
            return common.Status.SUCCESS
        except (AttributeError, IndexError):
            return common.Status.FAILURE
# Behavior defined to move
class Move(Behaviour):
    """Actually move the agent.
    Move the agent with any other object fully attached or
    partially attached to the agent.
    """
    def __init__(self, name):
        """Initialize."""
        super(Move, self).__init__(name)
    def setup(self, timeout, agent, item=None):
        """Remember the acting agent and the integration time step."""
        self.agent = agent
        self.dt = 0.1
    def initialise(self):
        """Nothing to initialise."""
        pass
    def update_signals(self, old_loc, new_loc):
        """Signal also move along with agents.
        Signal is created by the agent. It has certain broadcast radius. It
        moves along with the agent. So this move behavior should also be
        responsible to move the signals.

        Returns False as soon as any signal fails to move; True otherwise.
        """
        try:
            for signal in self.agent.signals:
                if self.agent.model.grid.move_object(
                        old_loc, signal, new_loc):
                    pass
                else:
                    return False
        except IndexError:
            pass
        return True
    def update_partial_attached_objects(self):
        """Move logic for partially attached objects.

        Each carrier's displacement derives from its claimed share of
        the object's weight. Returns True after moving the first
        partially attached object; implicitly returns None (falsy) when
        there is nothing partially attached, which makes update() take
        the normal-movement branch. Returns False on Index/ValueError.
        """
        try:
            for item in self.agent.partial_attached_objects:
                # NOTE(review): 'accleration' spelling kept as-is; the
                # same spelling is used on the agent attribute below.
                accleration = self.agent.force / item.agents[self.agent]
                velocity = (accleration * self.dt) / len(item.agents)
                direction = self.agent.direction
                """
                if np.cos(direction) > 0:
                    x = int(np.ceil(
                        item.location[0] + np.cos(direction) * velocity))
                    y = int(np.ceil(
                        item.location[1] + np.sin(direction) * velocity))
                else:
                    x = int(np.floor(
                        item.location[0] + np.cos(direction) * velocity))
                    y = int(np.floor(
                        item.location[1] + np.sin(direction) * velocity))
                """
                x = int(self.agent.location[0] + np.cos(
                    direction) * velocity)
                y = int(self.agent.location[1] + np.sin(
                    direction) * velocity)
                # object_agent = list(item.agents.keys())
                # indx = self.agent.model.random.randint(0, len(object_agent))
                # object_agent = object_agent[indx]
                object_agent = self.agent
                # new_location, direction
                # = object_agent.model.grid.check_limits(
                #     (x, y), direction)
                new_location = (x, y)
                object_agent.model.grid.move_object(
                    item.location, item, new_location)
                self.agent.direction = direction
                item.location = new_location
                return True
        except (IndexError, ValueError):
            return False
    def update(self):
        """Move logic for agent and fully carried object.

        Normal branch: integrate force -> acceleration -> velocity, step
        the agent on the grid, then drag fully attached objects and
        signals along. Partial-carry branch: move every co-carrying agent
        to the carried object's new location.
        """
        # Partially carried object
        if not self.update_partial_attached_objects():
            self.agent.accleration = self.agent.force / self.agent.get_weight()
            self.agent.velocity = self.agent.accleration * 1.0
            # print(self.agent.direction, self.agent.velocity, self.agent.location)
            x = int(np.round(self.agent.location[0] + np.cos(
                self.agent.direction) * self.agent.velocity))
            y = int(np.round(self.agent.location[1] + np.sin(
                self.agent.direction) * self.agent.velocity))
            new_location, direction = self.agent.model.grid.check_limits(
                (x, y), self.agent.direction)
            # print('from move', self.name, self.agent.location, new_location, direction)
            if self.agent.model.grid.move_object(
                self.agent.location, self.agent, new_location):
                # Now the agent location has been updated, update the signal grids
                if not self.update_signals(self.agent.location, new_location):
                    return common.Status.FAILURE
                self.agent.location = new_location
                self.agent.direction = direction
                # Full carried object moves along the agent
                for item in self.agent.attached_objects:
                    item.location = self.agent.location
            else:
                return common.Status.FAILURE
        else:
            new_location = self.agent.partial_attached_objects[0].location
            for agent in self.agent.partial_attached_objects[0].agents.keys():
                if agent.model.grid.move_object(
                        agent.location, agent,
                        new_location):
                    agent.location = new_location
                else:
                    return common.Status.FAILURE
            # Now the agent location has been updated, update the signal grids
            if not self.update_signals(self.agent.location, new_location):
                return common.Status.FAILURE
        return common.Status.SUCCESS
# Behavior define for donot move
class DoNotMove(Behaviour):
    """Stand-still behavior: flags the agent as immobile."""

    def __init__(self, name):
        """Initialize."""
        super(DoNotMove, self).__init__(name)

    def setup(self, timeout, agent, item=None):
        """Remember the acting agent."""
        self.agent = agent

    def initialise(self):
        """Nothing to initialise."""
        pass

    def update(self):
        """Mark the agent as not moveable and succeed."""
        self.agent.moveable = False
        return common.Status.SUCCESS
# Behavior to check carryable attribute of an object
class IsCarryable(Behaviour):
    """Condition checking the carryable flag of the sensed item."""

    def __init__(self, name):
        """Initialize."""
        super(IsCarryable, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Remember the agent, the item name and attach the blackboard."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Nothing to initialise."""
        pass

    def update(self):
        """Succeed when the first matching sensed object is carryable."""
        try:
            first = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            return (
                common.Status.SUCCESS if first.carryable
                else common.Status.FAILURE)
        except (AttributeError, IndexError):
            return common.Status.FAILURE
# Behavior to check carryable attribute of an object
class IsDropable(Behaviour):
    """Condition: check whether a sensed item of this type can be dropped."""

    def __init__(self, name):
        """Initialize."""
        super(IsDropable, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Remember the agent, the item name and attach the blackboard."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Everytime initialization. Not required for now."""
        pass

    def update(self):
        """Succeed if any matching object is dropable, or if none found.

        Vacuous success (no objects, or lookup errors) is preserved from
        the original. Bug fix: the loop previously tested
        ``objects.dropable`` on the list instead of ``obj.dropable`` on
        each element; the resulting AttributeError was swallowed by the
        except clause, making the condition succeed unconditionally.
        """
        try:
            objects = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            for obj in objects:
                if obj.dropable:
                    return common.Status.SUCCESS
            if objects:
                # Objects were found but none of them is dropable.
                return common.Status.FAILURE
            return common.Status.SUCCESS
        except (AttributeError, IndexError):
            return common.Status.SUCCESS
# Behavior define to check is the item is carrable on its own
class IsSingleCarry(Behaviour):
    """Condition checking whether one agent alone can carry the item."""

    def __init__(self, name):
        """Initialize."""
        super(IsSingleCarry, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Remember the agent, the item name and attach the blackboard."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Nothing to initialise."""
        pass

    def update(self):
        """Succeed when capacity exceeds the item's remaining weight.

        Weightless (or zero-weight) items fail the check.
        """
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            liftable = (
                bool(target.weight)
                and self.agent.get_capacity() > target.calc_relative_weight())
            return (
                common.Status.SUCCESS if liftable
                else common.Status.FAILURE)
        except (AttributeError, IndexError):
            return common.Status.FAILURE
# Behavior define to check is the item is carrable on its own or not
class IsMultipleCarry(Behaviour):
    """Condition checking whether the item needs more than one carrier."""

    def __init__(self, name):
        """Initialize."""
        super(IsMultipleCarry, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Remember the agent, the item name and attach the blackboard."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Nothing to initialise."""
        pass

    def update(self):
        """Succeed when the item's weight exceeds the agent's capacity.

        A heavier-than-capacity item implies a team carry is required.
        Weightless (or zero-weight) items fail the check.
        """
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            needs_team = (
                bool(target.weight)
                and self.agent.get_capacity() < target.weight)
            return (
                common.Status.SUCCESS if needs_team
                else common.Status.FAILURE)
        except (AttributeError, IndexError):
            return common.Status.FAILURE
class IsCarrying(Behaviour):
    """Condition checking if the agent carries an item of this type."""

    def __init__(self, name):
        """Initialize."""
        super(IsCarrying, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Remember the agent and the item type name."""
        self.agent = agent
        self.item = item

    def initialise(self):
        """Nothing to initialise."""
        pass

    def update(self):
        """Succeed when any fully attached object has the wanted type name."""
        try:
            carried_names = {
                type(obj).__name__ for obj in self.agent.attached_objects}
            return (
                common.Status.SUCCESS if self.item in carried_names
                else common.Status.FAILURE)
        except (AttributeError, IndexError):
            return common.Status.FAILURE
# Behavior defined to drop the items currently carrying
class Drop(Behaviour):
    """Drop behavior to drop items which is being carried."""
    def __init__(self, name):
        """Initialize."""
        super(Drop, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Remember the agent and the item type name to drop."""
        self.agent = agent
        self.item = item
    def initialise(self):
        """Nothing to initialise."""
        pass
    def update(self):
        """Logic to drop the item.

        If a drop-off structure (Hub/Boundary/Obstacles) is within the
        agent's radius, the item is handed to it; otherwise the item is
        placed back on the grid at its own recorded location. The
        activating genome's phenotype is recorded on the object when the
        agent has one (best-effort).
        """
        try:
            # Get the objects from the actuators
            objects = list(filter(
                lambda x: type(x).__name__ == self.item,
                self.agent.attached_objects))[0]
            # Grid
            grid = self.agent.model.grid
            static_grids = grid.get_neighborhood(self.agent.location, self.agent.radius)
            envobjects = self.agent.model.grid.get_objects_from_list_of_grid(None, static_grids)
            dropped = False
            for obj in envobjects:
                # Prefer handing the item to a nearby drop-off structure.
                if type(obj).__name__ in ['Hub', 'Boundary', 'Obstacles']:
                    dropped = True
                    obj.dropped_objects.append(objects)
                    self.agent.attached_objects.remove(objects)
                    objects.agent_name = self.agent.name
                    break
            if not dropped:
                # No structure nearby: place the item back on the grid.
                self.agent.model.grid.add_object_to_grid(objects.location, objects)
                self.agent.attached_objects.remove(objects)
                objects.agent_name = self.agent.name
            # Temporary fix
            # Store the genome which activated the single carry
            try:
                # objects.phenotype['drop'] =
                # self.agent.individual[0].phenotype
                objects.phenotype = {
                    self.agent.individual[0].phenotype: self.agent.individual[
                        0].fitness}
                return common.Status.SUCCESS
            except AttributeError:
                # Agent has no evolved individual; dropping still succeeded.
                pass
            # objects.agents.remove(self.agent)
            return common.Status.SUCCESS
        except (AttributeError, IndexError):
            return common.Status.FAILURE
class DropPartial(Behaviour):
    """Drop behavior for partially attached object."""
    def __init__(self, name):
        """Initialize."""
        super(DropPartial, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Remember the agent and the item type name to release."""
        self.agent = agent
        self.item = item
    def initialise(self):
        """Nothing to initialise."""
        pass
    def update(self):
        """Logic to drop partially attached object.

        Detaches this agent from the shared-carry object. When this
        agent was the last carrier, the object is re-registered on the
        grid at the hub with half the hub's radius, marking it as
        deposited. Phenotype recording is best-effort.
        """
        try:
            objects = list(filter(
                lambda x: type(x).__name__ == self.item,
                self.agent.partial_attached_objects))[0]
            objects.agents.pop(self.agent)
            self.agent.partial_attached_objects.remove(objects)
            # If the agent is last to drop reduce the size of the
            # food to the half the size of the hub. This indicates
            # that the food has been deposited to the hub
            if len(objects.agents) == 0:
                self.agent.model.grid.remove_object_from_grid(
                    objects.location, objects)
                objects.radius = int(self.agent.model.hub.radius / 2)
                objects.location = self.agent.model.hub.location
                self.agent.model.grid.add_object_to_grid(
                    objects.location, objects)
            try:
                objects.phenotype = {
                    self.agent.individual[0].phenotype: self.agent.individual[
                        0].fitness}
                return common.Status.SUCCESS
            except AttributeError:
                # Agent has no evolved individual; the drop still succeeded.
                pass
            return common.Status.SUCCESS
        except (AttributeError, IndexError):
            return common.Status.FAILURE
# Behavior defined to carry the items found
class SingleCarry(Behaviour):
    """Carry behavior."""
    def __init__(self, name):
        """Initialize."""
        super(SingleCarry, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Remember the agent, the item name and attach the blackboard."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """Nothing to initialise."""
        pass
    def update(self):
        """Carry logic to carry the object by the agent.

        Attaches the first matching sensed object to the agent and
        removes it from the grid so it travels with the agent. On a
        ValueError from the grid removal, the attachment is rolled back.
        """
        try:
            objects = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            self.agent.attached_objects.append(objects)
            self.agent.model.grid.remove_object_from_grid(
                objects.location, objects)
            objects.agent_name = self.agent.name
            # Add the agent to the object dict
            # objects.agents[self.agent] = self.agent.get_capacity()
            # Temporary fix
            # Store the genome which activated the single carry
            try:
                objects.phenotype = {
                    self.agent.individual[0].phenotype: self.agent.individual[
                        0].fitness}
            except AttributeError:
                # Agent has no evolved individual; carrying still succeeds.
                pass
            return common.Status.SUCCESS
        except (AttributeError, IndexError):
            return common.Status.FAILURE
        except ValueError:
            # Grid removal failed: undo the attachment so state stays
            # consistent with the environment.
            self.agent.attached_objects.remove(objects)
            return common.Status.FAILURE
class InitiateMultipleCarry(Behaviour):
    """Behavior to initiate the multiple-carry process."""

    def __init__(self, name):
        """Initialize."""
        super(InitiateMultipleCarry, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Remember the agent, the item name and attach the blackboard."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Everytime initialization. Not required for now."""
        pass

    def update(self):
        """Attach this agent to the object for a shared carry.

        If the object still has unclaimed weight, the agent claims up to
        its full capacity; otherwise the object's weight is redistributed
        evenly over all attached agents. In both cases the object is
        appended to the agent's partial_attached_objects and the agent is
        registered in the object's ``agents`` map.

        Note: an unreachable phenotype-recording block that followed both
        ``return`` statements in the original has been removed; it could
        never execute.
        """
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            relative_weight = target.calc_relative_weight()
            if relative_weight > 0:
                # Claim either full capacity or whatever weight remains.
                if relative_weight - self.agent.get_capacity() >= 0:
                    capacity_used = self.agent.get_capacity()
                else:
                    capacity_used = relative_weight
                # Update the partial attached object
                self.agent.partial_attached_objects.append(target)
                # Update the object so that it knows this agent
                # has attached to it
                target.agents[self.agent] = capacity_used
            else:
                # No free weight left: redistribute evenly among carriers.
                average_weight = target.redistribute_weights()
                self.agent.partial_attached_objects.append(target)
                target.agents[self.agent] = average_weight
            return common.Status.SUCCESS
        except (KeyError, AttributeError, IndexError):
            return common.Status.FAILURE
class IsInPartialAttached(Behaviour):
    """Condition node: is the object already partially attached?

    Succeeds when an object of the configured type is present in the
    agent's partial-attachment list and this agent is registered on it.
    """

    def __init__(self, name):
        """Initialize."""
        super(IsInPartialAttached, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Setup."""
        self.agent = agent
        self.item = item

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Check the partial-attachment list for the configured item."""
        try:
            attached = self.agent.partial_attached_objects
            type_names = [type(obj).__name__ for obj in attached]
            # First attached object whose type matches; raises IndexError
            # when none match, which is reported as FAILURE below.
            matching = [
                obj for obj in attached
                if type(obj).__name__ == self.item][0]
            if self.item in set(type_names) and \
                    self.agent in matching.agents:
                return common.Status.SUCCESS
            return common.Status.FAILURE
        except IndexError:
            return common.Status.FAILURE
class IsEnoughStrengthToCarry(Behaviour):
    """Condition node: can this agent carry the object alone?

    Succeeds when the agent's capacity covers the object's remaining
    relative weight.
    """

    def __init__(self, name):
        """Initialize."""
        super(IsEnoughStrengthToCarry, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Compare agent capacity with the object's relative weight."""
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            strong_enough = (
                self.agent.get_capacity() >= target.calc_relative_weight())
            if strong_enough:
                return common.Status.SUCCESS
            return common.Status.FAILURE
        except IndexError:
            return common.Status.FAILURE
class IsMotionTrue(Behaviour):
    """Condition node: is the partially attached object moving?"""

    def __init__(self, name):
        """Initialize."""
        super(IsMotionTrue, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Setup."""
        self.agent = agent
        self.item = item

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Succeed when the first partially attached object is in motion."""
        try:
            moving = self.agent.partial_attached_objects[0].motion is True
        except (AttributeError, IndexError):
            # Nothing attached, or the object has no motion flag.
            return common.Status.FAILURE
        return common.Status.SUCCESS if moving else common.Status.FAILURE
class IsVisitedBefore(Behaviour):
    """Condition node: has the agent already sensed this object?

    Succeeds when an object of the configured type can be found in the
    sensed blackboard or the agent's shared memory.
    """

    def __init__(self, name):
        """Initialize."""
        super(IsVisitedBefore, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Look the item up in blackboard/shared memory."""
        try:
            found = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
        except (AttributeError, IndexError):
            return common.Status.FAILURE
        return common.Status.SUCCESS if found else common.Status.FAILURE
class MultipleCarry(Behaviour):
    """Multiple carry behavior.

    Lifts the jointly carried object off the grid so it travels with
    the attached agents.
    """

    def __init__(self, name):
        """Initialize."""
        super(MultipleCarry, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Remove the carried object from the grid."""
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            self.agent.model.grid.remove_object_from_grid(
                target.location, target)
        except IndexError:
            return common.Status.FAILURE
        return common.Status.SUCCESS
# Lets start some communication behaviors
class SignalDoesNotExists(Behaviour):
    """Signal-exists check behavior.

    Succeeds when the agent has not yet emitted a signal for the
    sensed object, so a fresh signal may be sent.
    """

    def __init__(self, name):
        """Initialize."""
        super(SignalDoesNotExists, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Check this agent's signal list for the sensed object."""
        try:
            # Locate the object the agent wants to signal about.
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            # Objects already advertised by this agent's signals.
            advertised = [
                signal.object_to_communicate
                for signal in self.agent.signals]
            if advertised and target in advertised:
                return common.Status.FAILURE
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
# Lets start some communication behaviors
class IsSignalActive(Behaviour):
    """Condition node: does this agent have any active signal?"""

    def __init__(self, name):
        """Initialize."""
        super(IsSignalActive, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Succeed when the agent's signal list is non-empty."""
        try:
            has_signals = len(self.agent.signals) > 0
        except (IndexError, AttributeError):
            return common.Status.FAILURE
        return (
            common.Status.SUCCESS if has_signals else common.Status.FAILURE)
class SendSignal(Behaviour):
    """Signalling behavior.

    This behavior enables agents to send signals about the information
    they have gathered: location of site, hub, food, obstacles and
    others.
    """

    def __init__(self, name):
        """Initialize."""
        super(SendSignal, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Emit a signal advertising the sensed object."""
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            # Build the signal carrying this agent's knowledge.
            signal = Signal(
                id=self.agent.name, location=self.agent.location,
                radius=self.agent.radius, object_to_communicate=target)
            # Place it on the grid so nearby agents can sense it, and
            # remember it so the same signal is not re-emitted.
            self.agent.model.grid.add_object_to_grid(
                self.agent.location, signal)
            self.agent.signals.append(signal)
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class ReceiveSignal(Behaviour):
    """Receive signals from other agents.

    Senses nearby Signal objects, skips the agent's own, and commits
    the communicated object into the agent's shared memory.
    """

    def __init__(self, name):
        """Initialize."""
        super(ReceiveSignal, self).__init__(name)

    def setup(self, timeout, agent, item='Signal'):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Pick up the first foreign signal and store its payload."""
        try:
            sensed = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            # Skip signals emitted by this agent itself.
            foreign = [s for s in sensed if s.id != self.agent.name][0]
            payload = foreign.communicated_object
            key = type(payload).__name__
            try:
                self.agent.shared_content[key].add(payload)
            except KeyError:
                self.agent.shared_content[key] = {payload}
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class CueDoesNotExists(Behaviour):
    """Cue-does-not-exist check behavior.

    Succeeds when no nearby cue already advertises the sensed object,
    so dropping a new cue is worthwhile.
    """

    def __init__(self, name):
        """Initialize."""
        super(CueDoesNotExists, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Check nearby cues for the sensed object."""
        try:
            grids = self.agent.model.grid.get_neighborhood(
                self.agent.location, self.agent.radius)
            nearby_cues = self.agent.model.grid.get_objects_from_list_of_grid(
                'Cue', grids)
            if not nearby_cues:
                # No cues around at all: safe to drop one.
                return common.Status.SUCCESS
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            advertised = [
                cue.object_to_communicate for cue in nearby_cues]
            if target in advertised:
                return common.Status.FAILURE
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
# Communication behaviors related to cue
class DropCue(Behaviour):
    """Drop cue in the environment.

    A communication behavior: a physical cue object is placed in the
    environment which carries information to agents that sense it.
    """

    def __init__(self, name):
        """Initialize."""
        super(DropCue, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Place a cue advertising the sensed object."""
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            cue = Cue(
                id=self.agent.name, location=self.agent.location,
                radius=self.agent.radius, object_to_communicate=target)
            # Drop the cue on the grid; the agent keeps no reference to
            # it afterwards. A pheromone subclass of Cue could be used
            # here instead.
            self.agent.model.grid.add_object_to_grid(
                cue.location, cue)
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class PickCue(Behaviour):
    """Pick cue in the environment.

    Reads a nearby cue: orients the agent towards the communicated
    location and commits the communicated object to shared memory.
    """

    def __init__(self, name):
        """Initialize."""
        super(PickCue, self).__init__(name)

    def setup(self, timeout, agent, item='Cue'):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Extract information from the first sensed cue."""
        try:
            cue = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            # Point the agent at the location the cue advertises.
            self.agent.direction = get_direction(
                cue.communicated_location, self.agent.location)
            payload = cue.communicated_object
            key = type(payload).__name__
            try:
                self.agent.shared_content[key].add(payload)
            except KeyError:
                self.agent.shared_content[key] = {payload}
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class AvoidSObjects(Behaviour):
    """Avoid static objects in the environment.

    Casts a look-ahead ray from the agent along its heading out to its
    sense radius; if the ray hits the bounding rectangle of a sensed
    static object, the agent's direction is rotated to run parallel to
    the intersected edge so it slides along the obstacle.
    """

    def __init__(self, name):
        """Initialize."""
        super(AvoidSObjects, self).__init__(name)

    def setup(self, timeout, agent, item='Obstacles'):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Steer the agent around the nearest sensed static object.

        Returns SUCCESS after the check (whether or not an intersection
        was found), FAILURE when no matching object is sensed.
        """
        # NOTE(review): the try/except below restores the handler that
        # was commented out; without it an IndexError from an empty
        # sense result would propagate out of the behavior tree, unlike
        # every sibling behavior in this file.
        try:
            item = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            # End point of the agent's look-ahead ray.
            x = int(np.ceil(self.agent.location[0] + np.cos(
                self.agent.direction) * self.agent.radius))
            y = int(np.ceil(self.agent.location[1] + np.sin(
                self.agent.direction) * self.agent.radius))
            # Axis-aligned bounding rectangle of the object, derived
            # from the grid cells it occupies.
            grids = self.agent.model.grid.get_neighborhood(
                item.location, item.radius)
            points = [
                self.agent.model.grid.grid_reverse[grid] for grid in grids]
            p1s, p2s = zip(*points)
            x1s, y1s = zip(*p1s)
            x2s, y2s = zip(*p2s)
            x1, y1 = min(x1s), min(y1s)
            x2, y2 = max(x2s), max(y2s)
            # The four edges of the bounding rectangle.
            lines = [
                [(x1, y1), (x2, y1)],
                [(x2, y1), (x2, y2)],
                [(x2, y2), (x1, y2)],
                [(x1, y2), (x1, y1)]
            ]
            for line in lines:
                intersect = check_intersect(
                    self.agent.location, (x, y), line[0], line[1])
                if intersect:
                    # Align the heading with the intersected edge so the
                    # agent moves along the obstacle instead of into it.
                    dx = line[1][0] - line[0][0]
                    dy = line[1][1] - line[0][1]
                    self.agent.direction = np.arctan2(dy, dx)
                    break
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
# Behavior to check if the agent avoided obj
class DidAvoidedObj(Behaviour):
    """Condition node: did the agent steer away from the object?

    Succeeds when the agent's heading deviates from the direction to
    the object by at least a quarter turn, or when no object is sensed.
    """

    def __init__(self, name):
        """Initialize."""
        super(DidAvoidedObj, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Compare the agent heading against the direction to the object."""
        try:
            obstacle = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            heading_to_obj = get_direction(
                self.agent.location, obstacle.location)
            deviation = np.abs(self.agent.direction - heading_to_obj)
            if deviation < np.pi / 2:
                # Still heading roughly towards the object.
                return common.Status.FAILURE
            return common.Status.SUCCESS
        except (AttributeError, IndexError):
            # Nothing sensed: nothing left to avoid.
            return common.Status.SUCCESS
# Behavior to check if the agent can move
class CanMove(Behaviour):
    """Condition node: may the agent move this tick?

    Succeeds when the agent is movable and not dead.
    """

    def __init__(self, name):
        """Initialize."""
        super(CanMove, self).__init__(name)

    def setup(self, timeout, agent, item=None):
        """Setup."""
        self.agent = agent
        self.item = item

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Check the agent's movable/dead flags."""
        try:
            movable = self.agent.moveable and self.agent.dead is not True
        except (AttributeError, IndexError):
            return common.Status.FAILURE
        return common.Status.SUCCESS if movable else common.Status.FAILURE
# Pheromone related bheaviors
class DropPheromone(Behaviour):
    """Drop pheromone in the environment.

    A communication behavior: a pheromone object is placed in the
    environment which gives other agents a direction to follow (or to
    avoid, when repulsive).
    """

    def __init__(self, name, attractive=True):
        """Initialize.

        Args:
            name: behavior node name.
            attractive: whether dropped pheromones attract (True) or
                repel (False) other agents.
        """
        super(DropPheromone, self).__init__(name)
        self.attractive = attractive

    def setup(self, timeout, agent, item=None):
        """Setup."""
        self.agent = agent
        self.item = item
        # Shared board so the model can track (age/evaporate) every
        # pheromone dropped by any agent.
        self.blackboard = blackboard.Client(name='Pheromones')
        self.blackboard.register_key(key='pheromones', access=common.Access.WRITE)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Drop a pheromone at the agent's current location.

        NOTE(review): the original wrapped this in a dead
        ``if True:/else`` scaffold whose else branch was unreachable;
        the scaffold is removed with identical behavior.
        """
        try:
            # Build the pheromone marker carrying this agent's heading.
            pheromone = Pheromones(
                id=self.agent.name, location=self.agent.location,
                radius=self.agent.radius, attractive=self.attractive, direction=self.agent.direction)
            # Repulsive pheromones also block passage.
            pheromone.passable = self.attractive
            # Register on the grid so other agents can sense it, and on
            # the shared blackboard for global bookkeeping.
            self.agent.model.grid.add_object_to_grid(
                pheromone.location, pheromone)
            self.blackboard.pheromones.append(pheromone)
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
# Pheromone related bheaviors
class SensePheromone(Behaviour):
    """Sense pheromone in the environment.

    Reads nearby pheromone objects and steers the agent: when only
    attractive pheromones are sensed, the heading is set to their
    strength-weighted circular mean direction; when any repulsive
    pheromone is present, the agent veers off by a quarter turn.
    """

    def __init__(self, name, attractive=True):
        """Initialize.

        Args:
            name: behavior node name.
            attractive: whether this node is tuned for attractive
                pheromones. NOTE(review): previously this argument was
                silently ignored (``self.attractive`` was hard-coded to
                True); it is now honored.
        """
        super(SensePheromone, self).__init__(name)
        self.attractive = attractive

    def setup(self, timeout, agent, item='Pheromones'):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Steer the agent using the sensed pheromones."""
        try:
            objects = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            repulsive = False
            for obj in objects:
                if obj.attractive is False:
                    repulsive = True
                    break
            if not repulsive:
                # Strength-weighted circular mean of the pheromone
                # directions (weights taken at each pheromone's age).
                angles = [[
                    np.sin(obj.direction), np.cos(obj.direction), obj.strength[obj.current_time]] for obj in objects]
                sin, cos, weight = zip(*angles)
                sin, cos, weight = np.array(sin), np.array(cos), np.array(weight)
                direction = np.arctan2(sum(sin * weight), sum(cos * weight))
                self.agent.direction = direction % (2 * np.pi)
            else:
                # Repulsive pheromone nearby: turn away by 90 degrees.
                direction = self.agent.direction + np.pi/2
                self.agent.direction = direction % (2 * np.pi)
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class PheromoneExists(Behaviour):
    """Condition node: did this agent already drop a pheromone here?

    Succeeds when at least one sensed pheromone was dropped by this
    agent.
    """

    def __init__(self, name):
        """Initialize."""
        super(PheromoneExists, self).__init__(name)

    def setup(self, timeout, agent, item='Pheromones'):
        """Setup."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Look for pheromones carrying this agent's id."""
        try:
            sensed = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            own = [obj for obj in sensed if obj.id == self.agent.name]
            if own:
                return common.Status.SUCCESS
            return common.Status.FAILURE
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class IsAgentDead(Behaviour):
    """Condition node: is the agent dead?"""

    def __init__(self, name):
        """Initialize."""
        super(IsAgentDead, self).__init__(name)

    def setup(self, timeout, agent, item=None):
        """Setup."""
        self.agent = agent
        self.item = item

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Succeed when the agent's dead flag is truthy."""
        try:
            dead = self.agent.dead
        except (IndexError, AttributeError):
            return common.Status.FAILURE
        return common.Status.SUCCESS if dead else common.Status.FAILURE
class IsAttractivePheromone(Behaviour):
    """Condition node: are the sensed pheromones attractive?

    Succeeds when none of the sensed pheromones is repulsive.
    """

    def __init__(self, name):
        """Initialize."""
        super(IsAttractivePheromone, self).__init__(name)

    def setup(self, timeout, agent, item='Pheromones'):
        """Setup.

        NOTE(review): the item default was ``None``, which made
        ObjectsStore.find return raw blackboard groups instead of
        pheromone objects; defaulted to 'Pheromones' for consistency
        with SensePheromone and PheromoneExists. Explicit callers are
        unaffected.
        """
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Succeed when no sensed pheromone is repulsive."""
        try:
            objects = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            repulsive = False
            for obj in objects:
                if obj.attractive is False:
                    repulsive = True
                    break
            if not repulsive:
                return common.Status.SUCCESS
            else:
                return common.Status.FAILURE
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class IsRepulsivePheromone(Behaviour):
    """Condition node: is any sensed pheromone repulsive?

    Succeeds when at least one sensed pheromone is repulsive.
    """

    def __init__(self, name):
        """Initialize."""
        super(IsRepulsivePheromone, self).__init__(name)

    def setup(self, timeout, agent, item='Pheromones'):
        """Setup.

        NOTE(review): the item default was ``None``, which made
        ObjectsStore.find return raw blackboard groups instead of
        pheromone objects; defaulted to 'Pheromones' for consistency
        with SensePheromone and PheromoneExists. Explicit callers are
        unaffected.
        """
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Pass."""
        pass

    def update(self):
        """Succeed when a repulsive pheromone is sensed."""
        try:
            objects = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            repulsive = False
            for obj in objects:
                if obj.attractive is False:
                    repulsive = True
                    break
            if repulsive:
                return common.Status.SUCCESS
            else:
                return common.Status.FAILURE
        except (IndexError, AttributeError):
            return common.Status.FAILURE
# Dummy node
class DummyNode(Behaviour):
"""Dummy node.
BT node that always returns Success.
"""
def __init__(self, name):
"""Initialize."""
super(DummyNode, self).__init__(name)
def setup(self, timeout, agent, item=None):
"""Setup."""
self.agent = agent
self.item = item
def initialise(self):
"""Pass."""
pass
def update(self):
"""Nothing much to do."""
return common.Status.SUCCESS | """Defines all the primitive behaviors for the agents.
This file name is sbehaviors coz `s` stands for swarms.
"""
import numpy as np
from py_trees.trees import BehaviourTree
from py_trees.behaviour import Behaviour
from py_trees.composites import Sequence, Selector, Parallel
from py_trees import common, blackboard
import py_trees
from swarms.utils.distangle import get_direction, check_intersect
from swarms.lib.objects import Pheromones, Signal, Cue
import os
import matplotlib
# If there is $DISPLAY, display the plot
if os.name == 'posix' and "DISPLAY" not in os.environ:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class ObjectsStore:
    """Static class to search.

    This class provides a find method to search through
    Behavior Tree blackboard and agent content.
    """

    @staticmethod
    def find(blackboard_content, agent_content, name, agent_name):
        """Search for ``name``, blackboard first, then agent memory.

        Looks ``name`` up in ``blackboard_content``; if the key is
        absent there, falls back to ``agent_content``. A ``name`` of
        ``None`` returns every stored object group from the blackboard.
        Returns an empty list when the key exists in neither store.
        """
        if name is None:
            return list(blackboard_content.values())
        # Blackboard (fresh sense data) takes precedence over the
        # agent's long-term shared memory.
        for store in (blackboard_content, agent_content):
            try:
                return list(store[name])
            except KeyError:
                continue
        return []
class NeighbourObjects(Behaviour):
    """Sense behavior for the agents.

    Inherits the Behaviors class from py_trees. This
    behavior implements the sense function for the agents. This allows
    the agents to sense the nearby environment based on the their
    sense radius.
    """

    def __init__(self, name):
        """Init method for the sense behavior."""
        super(NeighbourObjects, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Have defined the setup method.

        This method defines the other objects required for the
        behavior. Agent is the actor in the environment,
        item is the name of the item we are trying to find in the
        environment and timeout defines the execution time for the
        behavior.
        """
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.WRITE)

    def initialise(self):
        """Everytime initialization. Not required for now."""
        pass

    def receive_signals(self):
        """Receive signals from other agents.

        Since this is the primary behavior for the agents to sense
        the environment, we include the receive signal method here.
        The agents will be able to
        sense the environment and check if
        it receives any signals from other agents.
        """

    def update(self):
        """
        Sense the neighborhood.

        This method gets the grid values based on the current location and
        radius. The grids are used to search the environment. If the agents
        find any objects, it is stored in the behavior tree blackboard which
        is a dictionary with sets as values.
        """
        grids = self.agent.model.grid.get_neighborhood(
            self.agent.location, self.agent.radius)
        objects = self.agent.model.grid.get_objects_from_list_of_grid(
            self.item, grids)
        # Need to reset blackboard contents after each sense
        self.blackboard.neighbourobj = dict()
        if len(objects) >= 1:
            # Never report the sensing agent itself.
            if self.agent in objects:
                objects.remove(self.agent)
            if len(objects) >= 1:
                for item in objects:
                    name = type(item).__name__
                    # Is the item is not carrable, its location
                    # and property doesnot change. So we can commit its
                    # information to memory: sites, hub and boundary are
                    # fixed landmarks, stored in the agent's long-term
                    # shared memory rather than the per-tick blackboard.
                    if name in ['Sites', 'Hub', 'Boundary']:
                        try:
                            self.agent.shared_content[name].add(item)
                        except KeyError:
                            self.agent.shared_content[name] = {item}
                    else:
                        # Transient objects go on the blackboard, keyed
                        # by their type name, rebuilt on every sense.
                        try:
                            self.blackboard.neighbourobj[name].add(item)
                        except KeyError:
                            self.blackboard.neighbourobj = dict()
                            self.blackboard.neighbourobj[name] = {item}
                return common.Status.SUCCESS
            else:
                # Only the agent itself was in range.
                return common.Status.FAILURE
        else:
            return common.Status.FAILURE
class NeighbourObjectsDist(Behaviour):
    """Sense behavior for the agents.

    Inherits the Behaviors class from py_trees. This
    behavior implements the sense function for the agents. This allows
    the agents to sense the nearby environment based on the their
    sense radius.
    """

    def __init__(self, name):
        """Init method for the sense behavior."""
        super(NeighbourObjectsDist, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Have defined the setup method.

        This method defines the other objects required for the
        behavior. Agent is the actor in the environment,
        item is the name of the item we are trying to find in the
        environment and timeout defines the execution time for the
        behavior.
        """
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.WRITE)

    def initialise(self):
        """Everytime initialization. Not required for now."""
        pass

    def receive_signals(self):
        """Receive signals from other agents.

        Since this is the primary behavior for the agents to sense
        the environment, we include the receive signal method here.
        The agents will be able to
        sense the environment and check if
        it receives any signals from other agents.
        """

    def update(self):
        """
        Sense the neighborhood.

        This method gets the grid values based on the current location and
        radius. The grids are used to search the environment. If the agents
        find any objects, it is stored in the behavior tree blackboard which
        is a dictionary with sets as values.

        Unlike NeighbourObjects, this walks cell by cell along the
        agent's current heading (a ray of length ``radius``) and stops
        at the first cell containing a non-passable matching object.
        """
        grids = []  # unused; kept from an earlier neighborhood-based version
        status = common.Status.FAILURE
        for i in range(0, self.agent.radius):
            # Point at distance i along the agent's heading.
            x = int(self.agent.location[0] + np.cos(
                self.agent.direction) * i)
            y = int(self.agent.location[1] + np.sin(
                self.agent.direction) * i)
            # Wrap/clamp the point to the environment limits.
            new_location, direction = self.agent.model.grid.check_limits(
                (x, y), self.agent.direction)
            limits, grid = self.agent.model.grid.find_grid(new_location)
            objects = self.agent.model.grid.get_objects(
                self.item, grid)
            # Need to reset blackboard contents after each sense
            # NOTE(review): this reset runs on every ray step, so only
            # objects from the final step examined survive — confirm
            # this is intended.
            self.blackboard.neighbourobj = dict()
            if len(objects) >= 1:
                if self.agent in objects:
                    objects.remove(self.agent)
                for item in objects:
                    name = type(item).__name__
                    # Only non-passable (blocking) objects are reported.
                    if item.passable is False:
                        try:
                            self.blackboard.neighbourobj[name].add(item)
                        except KeyError:
                            self.blackboard.neighbourobj[name] = {item}
                        status = common.Status.SUCCESS
                # NOTE(review): returns at the first occupied cell along
                # the ray; status stays FAILURE if everything there was
                # passable.
                return status
        return status
class GoTo(Behaviour):
    """GoTo behavior for the agents.

    Inherits the Behaviors class from py_trees. This
    behavior implements the GoTo function for the agents. This allows
    the agents direct towards the object they want to reach. This behavior
    is only concerned with direction alignment not with movement.
    """

    def __init__(self, name):
        """Init method for the GoTo behavior."""
        super(GoTo, self).__init__(name)

    def setup(self, timeout, agent, item):
        """Have defined the setup method.

        This method defines the other objects required for the
        behavior. Agent is the actor in the environment,
        item is the name of the item we are trying to find in the
        environment and timeout defines the execution time for the
        behavior.
        """
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)

    def initialise(self):
        """Everytime initialization. Not required for now."""
        pass

    def update(self):
        """
        Goto towards the object of interest.

        Searches the sensed blackboard objects and the agent's shared
        memory for the configured item, picks one at random, and sets
        the agent's direction towards it. Returns FAILURE when nothing
        matching was sensed.
        """
        try:
            objects = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            if not objects:
                # NOTE(review): previously this path indexed an empty
                # list to raise IndexError into the handler below; fail
                # explicitly instead (identical observable behavior).
                return common.Status.FAILURE
            target = self.agent.model.random.choice(objects)
            self.agent.direction = get_direction(
                target.location, self.agent.location) % (2 * np.pi)
            return common.Status.SUCCESS
        except (AttributeError, IndexError):
            return common.Status.FAILURE
# Behavior defined to move towards something
class Towards(Behaviour):
    """Towards behavior.
    Keeps the agent's current heading so it keeps moving
    towards the object of interest.
    """
    def __init__(self, name):
        """Initialize."""
        super(Towards, self).__init__(name)
    def setup(self, timeout, agent, item=None):
        """Store the acting agent."""
        self.agent = agent
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Heading is already aligned; simply report success."""
        return common.Status.SUCCESS
# Behavior defined to move away from something
class Away(Behaviour):
    """Away behavior.
    Reverses the agent's heading so it moves away from the object.
    """
    def __init__(self, name):
        """Initialize."""
        super(Away, self).__init__(name)
    def setup(self, timeout, agent, item=None):
        """Store the acting agent."""
        self.agent = agent
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Rotate the heading by pi and wrap it into [0, 2*pi)."""
        self.agent.direction = (self.agent.direction + np.pi) % (2 * np.pi)
        return common.Status.SUCCESS
# Behavior defined for Randomwalk
class RandomWalk(Behaviour):
    """Random walk behavior.
    Perturbs the agent's heading with small Gaussian noise each tick.
    """
    def __init__(self, name):
        """Initialize."""
        super(RandomWalk, self).__init__(name)
    def setup(self, timeout, agent, item=None):
        """Store the acting agent."""
        self.agent = agent
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Add zero-mean Gaussian noise (sigma=0.1) to the heading."""
        jitter = self.agent.model.random.normal(0, .1)
        self.agent.direction = (self.agent.direction + jitter) % (2 * np.pi)
        return common.Status.SUCCESS
class IsMoveable(Behaviour):
    """Check if the item is moveable."""
    def __init__(self, name):
        """Initialize."""
        super(IsMoveable, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Setup the agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """Pass."""
        pass
    def update(self):
        """Succeed only if every matching sensed object is moveable."""
        try:
            objects = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            for obj in objects:
                # Bug fix: the original tested ``objects.moveable`` (the
                # list) instead of ``obj.moveable``; the resulting
                # AttributeError was swallowed below, so the check never
                # actually ran.
                if not obj.moveable:
                    return common.Status.FAILURE
            return common.Status.SUCCESS
        except (AttributeError, IndexError):
            return common.Status.FAILURE
# Behavior defined to move
class Move(Behaviour):
    """Actually move the agent.
    Move the agent with any other object fully attached or
    partially attached to the agent.
    """
    def __init__(self, name):
        """Initialize."""
        super(Move, self).__init__(name)
    def setup(self, timeout, agent, item=None):
        """Setup.
        Stores the acting agent and the integration time step used when
        moving partially attached objects.
        """
        self.agent = agent
        # Time step used in update_partial_attached_objects velocity
        # integration.
        self.dt = 0.1
    def initialise(self):
        """Pass."""
        pass
    def update_signals(self, old_loc, new_loc):
        """Signal also move along with agents.
        Signal is created by the agent. It has certain broadcast radius. It
        moves along with the agent. So this move behavior should also be
        responsible to move the signals.
        Returns False as soon as any signal fails to move; True otherwise.
        """
        try:
            for signal in self.agent.signals:
                if self.agent.model.grid.move_object(
                        old_loc, signal, new_loc):
                    pass
                else:
                    # Grid refused the move; report failure to the caller.
                    return False
        except IndexError:
            # No signals (or signal list exhausted); treat as success.
            pass
        return True
    def update_partial_attached_objects(self):
        """Move logic for partially attached objects.
        Integrates a velocity from the agent's force shared across all
        attached agents, moves the object on the grid, and returns True.
        Returns False when the agent has no partial attachments (IndexError)
        or the computation fails (ValueError), letting update() fall back to
        moving the agent alone.
        """
        try:
            for item in self.agent.partial_attached_objects:
                # Share of force this agent contributes, divided over all
                # agents attached to the object.
                accleration = self.agent.force / item.agents[self.agent]
                velocity = (accleration * self.dt) / len(item.agents)
                direction = self.agent.direction
                """
                if np.cos(direction) > 0:
                    x = int(np.ceil(
                        item.location[0] + np.cos(direction) * velocity))
                    y = int(np.ceil(
                        item.location[1] + np.sin(direction) * velocity))
                else:
                    x = int(np.floor(
                        item.location[0] + np.cos(direction) * velocity))
                    y = int(np.floor(
                        item.location[1] + np.sin(direction) * velocity))
                """
                # New object location projected from the *agent's* location
                # along the agent's heading.
                x = int(self.agent.location[0] + np.cos(
                    direction) * velocity)
                y = int(self.agent.location[1] + np.sin(
                    direction) * velocity)
                # object_agent = list(item.agents.keys())
                # indx = self.agent.model.random.randint(0, len(object_agent))
                # object_agent = object_agent[indx]
                object_agent = self.agent
                # new_location, direction
                # = object_agent.model.grid.check_limits(
                # (x, y), direction)
                # NOTE(review): unlike update(), this path does not clamp
                # (x, y) with check_limits — confirm whether that is
                # intentional.
                new_location = (x, y)
                object_agent.model.grid.move_object(
                    item.location, item, new_location)
                self.agent.direction = direction
                item.location = new_location
            return True
        except (IndexError, ValueError):
            return False
    def update(self):
        """Move logic for agent and fully carried object.
        If no partial attachments exist, the agent itself moves (carrying
        fully attached objects along); otherwise every agent attached to the
        partially carried object is relocated to the object's new position.
        """
        # Partially carried object
        if not self.update_partial_attached_objects():
            # No partial attachments: move the agent under its own force.
            self.agent.accleration = self.agent.force / self.agent.get_weight()
            self.agent.velocity = self.agent.accleration * 1.0
            # print(self.agent.direction, self.agent.velocity, self.agent.location)
            x = int(np.round(self.agent.location[0] + np.cos(
                self.agent.direction) * self.agent.velocity))
            y = int(np.round(self.agent.location[1] + np.sin(
                self.agent.direction) * self.agent.velocity))
            # Clamp to the environment bounds; may also flip the heading.
            new_location, direction = self.agent.model.grid.check_limits(
                (x, y), self.agent.direction)
            # print('from move', self.name, self.agent.location, new_location, direction)
            if self.agent.model.grid.move_object(
                    self.agent.location, self.agent, new_location):
                # Now the agent location has been updated, update the signal grids
                if not self.update_signals(self.agent.location, new_location):
                    return common.Status.FAILURE
                self.agent.location = new_location
                self.agent.direction = direction
                # Full carried object moves along the agent
                for item in self.agent.attached_objects:
                    item.location = self.agent.location
            else:
                return common.Status.FAILURE
        else:
            # Partial attachments were moved; bring every attached agent to
            # the object's new location.
            new_location = self.agent.partial_attached_objects[0].location
            for agent in self.agent.partial_attached_objects[0].agents.keys():
                if agent.model.grid.move_object(
                        agent.location, agent,
                        new_location):
                    agent.location = new_location
                else:
                    return common.Status.FAILURE
            # Now the agent location has been updated, update the signal grids
            if not self.update_signals(self.agent.location, new_location):
                return common.Status.FAILURE
        return common.Status.SUCCESS
# Behavior define for donot move
class DoNotMove(Behaviour):
    """Stand-still behavior."""
    def __init__(self, name):
        """Initialize."""
        super(DoNotMove, self).__init__(name)
    def setup(self, timeout, agent, item=None):
        """Store the acting agent."""
        self.agent = agent
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Flag the agent as immobile via its moveable property."""
        self.agent.moveable = False
        return common.Status.SUCCESS
# Behavior to check carryable attribute of an object
class IsCarryable(Behaviour):
    """Check the carryable attribute of the item."""
    def __init__(self, name):
        """Initialize."""
        super(IsCarryable, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Succeed when the first matching sensed object is carryable."""
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            if target.carryable:
                return common.Status.SUCCESS
            return common.Status.FAILURE
        except (AttributeError, IndexError):
            return common.Status.FAILURE
# Behavior to check dropable attribute of an object
class IsDropable(Behaviour):
    """Check dropable property."""
    def __init__(self, name):
        """Initialize."""
        super(IsDropable, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Setup the agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """Pass."""
        pass
    def update(self):
        """Check the dropable attribute.
        Succeeds if any matching sensed object is dropable, or when no
        matching objects are found at all (nothing prevents dropping).
        """
        try:
            objects = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            if len(objects) >= 1:
                # Bug fix: the original tested ``objects.dropable`` (the
                # list) instead of ``obj.dropable``; the AttributeError was
                # swallowed below, so the loop never performed the check.
                for obj in objects:
                    if obj.dropable:
                        return common.Status.SUCCESS
                return common.Status.FAILURE
            else:
                return common.Status.SUCCESS
        except (AttributeError, IndexError):
            return common.Status.SUCCESS
# Behavior define to check is the item is carrable on its own
class IsSingleCarry(Behaviour):
    """Single carry condition.
    Succeeds when one agent alone has the capacity to carry the object.
    """
    def __init__(self, name):
        """Initialize."""
        super(IsSingleCarry, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Compare agent capacity against the object's relative weight."""
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            if not target.weight:
                return common.Status.FAILURE
            if self.agent.get_capacity() > target.calc_relative_weight():
                return common.Status.SUCCESS
            return common.Status.FAILURE
        except (AttributeError, IndexError):
            return common.Status.FAILURE
# Behavior define to check is the item is carrable on its own or not
class IsMultipleCarry(Behaviour):
    """Multiple carry condition.
    Succeeds when the object is too heavy for this agent alone.
    """
    def __init__(self, name):
        """Initialize."""
        super(IsMultipleCarry, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Compare agent capacity against the object's full weight."""
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            if not target.weight:
                return common.Status.FAILURE
            if self.agent.get_capacity() < target.weight:
                return common.Status.SUCCESS
            return common.Status.FAILURE
        except (AttributeError, IndexError):
            return common.Status.FAILURE
class IsCarrying(Behaviour):
    """Condition check if the agent is carrying something."""
    def __init__(self, name):
        """Initialize."""
        super(IsCarrying, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent and item name."""
        self.agent = agent
        self.item = item
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Succeed if an attached object's type name matches self.item."""
        try:
            carried_types = {
                type(thing).__name__
                for thing in self.agent.attached_objects}
            if self.item in carried_types:
                return common.Status.SUCCESS
            return common.Status.FAILURE
        except (AttributeError, IndexError):
            return common.Status.FAILURE
# Behavior defined to drop the items currently carrying
class Drop(Behaviour):
    """Drop behavior to drop items which is being carried.
    If a Hub, Boundary or Obstacles object is nearby, the item is handed
    to it; otherwise the item is placed back on the grid at its location.
    """
    def __init__(self, name):
        """Initialize."""
        super(Drop, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Setup the agent and the type name of the item to drop."""
        self.agent = agent
        self.item = item
    def initialise(self):
        """Pass."""
        pass
    def update(self):
        """Logic to drop the item.
        Returns SUCCESS when the item was dropped (to an environment
        object or back onto the grid), FAILURE when no matching item is
        attached.
        """
        try:
            # Get the objects from the actuators
            # (first attached object whose type name matches self.item;
            # IndexError here -> FAILURE below)
            objects = list(filter(
                lambda x: type(x).__name__ == self.item,
                self.agent.attached_objects))[0]
            # Grid
            grid = self.agent.model.grid
            static_grids = grid.get_neighborhood(self.agent.location, self.agent.radius)
            envobjects = self.agent.model.grid.get_objects_from_list_of_grid(None, static_grids)
            dropped = False
            # Prefer handing the item to a nearby environment object.
            for obj in envobjects:
                if type(obj).__name__ in ['Hub', 'Boundary', 'Obstacles']:
                    dropped = True
                    obj.dropped_objects.append(objects)
                    self.agent.attached_objects.remove(objects)
                    objects.agent_name = self.agent.name
                    break
            if not dropped:
                # No suitable environment object; put it back on the grid.
                self.agent.model.grid.add_object_to_grid(objects.location, objects)
                self.agent.attached_objects.remove(objects)
                objects.agent_name = self.agent.name
            # Temporary fix
            # Store the genome which activated the single carry
            try:
                # objects.phenotype['drop'] =
                # self.agent.individual[0].phenotype
                objects.phenotype = {
                    self.agent.individual[0].phenotype: self.agent.individual[
                        0].fitness}
                return common.Status.SUCCESS
            except AttributeError:
                # Agent has no `individual` attribute; still a successful
                # drop.
                pass
            # objects.agents.remove(self.agent)
            return common.Status.SUCCESS
        except (AttributeError, IndexError):
            return common.Status.FAILURE
class DropPartial(Behaviour):
    """Drop behavior for partially attached object.
    Detaches this agent from the object; the last agent to detach
    deposits the object at the hub.
    """
    def __init__(self, name):
        """Initialize."""
        super(DropPartial, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Setup the agent and the type name of the item to drop."""
        self.agent = agent
        self.item = item
    def initialise(self):
        """Pass."""
        pass
    def update(self):
        """Logic to drop partially attached object.
        Returns SUCCESS on detachment, FAILURE when no matching object is
        partially attached.
        """
        try:
            # First partially attached object whose type name matches;
            # IndexError here -> FAILURE below.
            objects = list(filter(
                lambda x: type(x).__name__ == self.item,
                self.agent.partial_attached_objects))[0]
            objects.agents.pop(self.agent)
            self.agent.partial_attached_objects.remove(objects)
            # If the agent is last to drop reduce the size of the
            # food to the half the size of the hub. This indicates
            # that the food has been deposited to the hub
            if len(objects.agents) == 0:
                self.agent.model.grid.remove_object_from_grid(
                    objects.location, objects)
                objects.radius = int(self.agent.model.hub.radius / 2)
                objects.location = self.agent.model.hub.location
                self.agent.model.grid.add_object_to_grid(
                    objects.location, objects)
            # Record the genome that triggered the drop, when available.
            try:
                objects.phenotype = {
                    self.agent.individual[0].phenotype: self.agent.individual[
                        0].fitness}
                return common.Status.SUCCESS
            except AttributeError:
                # Agent has no `individual` attribute; drop still succeeded.
                pass
            return common.Status.SUCCESS
        except (AttributeError, IndexError):
            return common.Status.FAILURE
# Behavior defined to carry the items found
class SingleCarry(Behaviour):
    """Carry behavior.
    A single agent picks the sensed object up: the object is attached to
    the agent and removed from the grid.
    """
    def __init__(self, name):
        """Initialize."""
        super(SingleCarry, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Setup the agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """Pass."""
        pass
    def update(self):
        """Carry logic to carry the object by the agent.
        Attaches the object, removes it from the grid, and records the
        triggering genome when available. A ValueError from the grid
        removal rolls back the attachment.
        """
        try:
            objects = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            self.agent.attached_objects.append(objects)
            self.agent.model.grid.remove_object_from_grid(
                objects.location, objects)
            objects.agent_name = self.agent.name
            # Add the agent to the object dict
            # objects.agents[self.agent] = self.agent.get_capacity()
            # Temporary fix
            # Store the genome which activated the single carry
            try:
                objects.phenotype = {
                    self.agent.individual[0].phenotype: self.agent.individual[
                        0].fitness}
            except AttributeError:
                # Agent has no `individual` attribute; carry still succeeds.
                pass
            return common.Status.SUCCESS
        except (AttributeError, IndexError):
            return common.Status.FAILURE
        except ValueError:
            # Grid removal failed after attachment; undo the attachment so
            # the agent is not left carrying an object still on the grid.
            self.agent.attached_objects.remove(objects)
            return common.Status.FAILURE
class InitiateMultipleCarry(Behaviour):
    """Behavior to initiate multiple carry process.
    Attaches this agent to the object and records how much of the
    object's weight the agent takes on.
    """
    def __init__(self, name):
        """Initialize."""
        super(InitiateMultipleCarry, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Setup the agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """Pass."""
        pass
    def update(self):
        """Logic to initiate multiple carry process."""
        try:
            # objects = self.blackboard.neighbourobj[self.thing].pop()
            objects = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            relative_weight = objects.calc_relative_weight()
            if relative_weight > 0:
                # Take as much of the remaining weight as capacity allows.
                if relative_weight - self.agent.get_capacity() >= 0:
                    capacity_used = self.agent.get_capacity()
                else:
                    capacity_used = relative_weight
                # Update the partial attached object
                self.agent.partial_attached_objects.append(objects)
                # Update the object so that it knows this agent
                # has attached to it
                objects.agents[self.agent] = capacity_used
            else:
                # Redistribute the weights to all the attached objects
                average_weight = objects.redistribute_weights()
                self.agent.partial_attached_objects.append(objects)
                objects.agents[self.agent] = average_weight
            # Bug fix: this phenotype bookkeeping was unreachable in the
            # original (placed after both return statements); record it
            # before returning, matching Drop/DropPartial.
            try:
                objects.phenotype = {
                    self.agent.individual[0].phenotype: self.agent.individual[
                        0].fitness}
            except AttributeError:
                pass
            return common.Status.SUCCESS
        except (KeyError, AttributeError, IndexError):
            return common.Status.FAILURE
class IsInPartialAttached(Behaviour):
    """Condition to check if the object is in partially attached list."""
    def __init__(self, name):
        """Initialize."""
        super(IsInPartialAttached, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent and item name."""
        self.agent = agent
        self.item = item
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Succeed when the object is partially attached and knows this agent."""
        # objects = self.blackboard.neighbourobj[self.thing].pop()
        try:
            attached = self.agent.partial_attached_objects
            names = {type(thing).__name__ for thing in attached}
            target = [thing for thing in attached
                      if type(thing).__name__ == self.item][0]
            if self.item in names and self.agent in target.agents:
                return common.Status.SUCCESS
            return common.Status.FAILURE
        except IndexError:
            return common.Status.FAILURE
class IsEnoughStrengthToCarry(Behaviour):
    """Condition to check if the agent has enough strength to carry."""
    def __init__(self, name):
        """Initialize."""
        super(IsEnoughStrengthToCarry, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Compare agent capacity with the object's relative weight."""
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            strong_enough = (
                self.agent.get_capacity() >= target.calc_relative_weight())
            return (common.Status.SUCCESS if strong_enough
                    else common.Status.FAILURE)
        except IndexError:
            return common.Status.FAILURE
class IsMotionTrue(Behaviour):
    """Condition to check if the object is moving."""
    def __init__(self, name):
        """Initialize."""
        super(IsMotionTrue, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent and item name."""
        self.agent = agent
        self.item = item
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Succeed when the first partially attached object is in motion."""
        try:
            if self.agent.partial_attached_objects[0].motion is True:
                return common.Status.SUCCESS
            return common.Status.FAILURE
        except (AttributeError, IndexError):
            return common.Status.FAILURE
class IsVisitedBefore(Behaviour):
    """Condition to check if the object was visited before."""
    def __init__(self, name):
        """Initialize."""
        super(IsVisitedBefore, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Succeed when a matching object exists in sensed/shared storage."""
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            if target:
                return common.Status.SUCCESS
            return common.Status.FAILURE
        except (AttributeError, IndexError):
            return common.Status.FAILURE
class MultipleCarry(Behaviour):
    """Multiple carry behavior.
    Removes the object from the grid so the group of attached agents
    can move it.
    """
    def __init__(self, name):
        """Initialize."""
        super(MultipleCarry, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Take the object off the grid for group transport."""
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            self.agent.model.grid.remove_object_from_grid(
                target.location, target)
            return common.Status.SUCCESS
        except IndexError:
            return common.Status.FAILURE
# Lets start some communication behaviors
class SignalDoesNotExists(Behaviour):
    """Signal-does-not-exist condition.
    Succeeds when the agent is not already signalling about the sensed
    object.
    """
    def __init__(self, name):
        """Initialize."""
        super(SignalDoesNotExists, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Fail when the agent already signals about this object."""
        try:
            # The object the agent wants to signal about.
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            # Objects already being communicated by this agent's signals.
            already_signalled = [
                sig.object_to_communicate for sig in self.agent.signals]
            if target in already_signalled:
                return common.Status.FAILURE
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
# Lets start some communication behaviors
class IsSignalActive(Behaviour):
    """Is a signal active?
    Succeeds when this agent has at least one active signal.
    """
    def __init__(self, name):
        """Initialize."""
        super(IsSignalActive, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Check whether the agent's signal list is non-empty."""
        try:
            if self.agent.signals:
                return common.Status.SUCCESS
            return common.Status.FAILURE
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class SendSignal(Behaviour):
    """Signalling behavior.
    Lets the agent broadcast information it has gathered (site, hub,
    food, obstacles, ...) by placing a Signal object on the grid.
    """
    def __init__(self, name):
        """Initialize."""
        super(SendSignal, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Create a signal about the sensed object and broadcast it."""
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            # Build the signal carrying the information to share.
            signal = Signal(
                id=self.agent.name, location=self.agent.location,
                radius=self.agent.radius, object_to_communicate=target)
            # Place it on the grid so nearby agents can sense it, and
            # remember it so it moves along with this agent.
            self.agent.model.grid.add_object_to_grid(
                self.agent.location, signal)
            self.agent.signals.append(signal)
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class ReceiveSignal(Behaviour):
    """Receive signals from other agents.
    Checks for signals sensed in the environment and stores the
    information they carry into the agent's shared memory.
    """
    def __init__(self, name):
        """Initialize."""
        super(ReceiveSignal, self).__init__(name)
    def setup(self, timeout, agent, item='Signal'):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Extract information from a sensed signal into agent memory."""
        try:
            sensed = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            # Skip the agent's own signals; take the first foreign one.
            foreign = [sig for sig in sensed if sig.id != self.agent.name][0]
            # NOTE(review): reads `communicated_object` while
            # SignalDoesNotExists reads `object_to_communicate` — confirm
            # the Signal class exposes both.
            payload = foreign.communicated_object
            key = type(payload).__name__
            try:
                self.agent.shared_content[key].add(payload)
            except KeyError:
                self.agent.shared_content[key] = {payload}
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class CueDoesNotExists(Behaviour):
    """Cue-does-not-exist condition.
    Succeeds when no nearby cue already communicates the sensed object.
    """
    def __init__(self, name):
        """Initialize."""
        super(CueDoesNotExists, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Fail when an equivalent cue already exists nearby."""
        try:
            # Cues within the agent's sensing radius.
            nearby = self.agent.model.grid.get_neighborhood(
                self.agent.location, self.agent.radius)
            cues = self.agent.model.grid.get_objects_from_list_of_grid(
                'Cue', nearby)
            if not cues:
                return common.Status.SUCCESS
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            communicated = [cue.object_to_communicate for cue in cues]
            if target in communicated:
                return common.Status.FAILURE
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
# Communication behaviors related to cue
class DropCue(Behaviour):
    """Drop a cue in the environment.
    A physical object is placed in the environment which gives a
    particular piece of information to agents sensing it.
    """
    def __init__(self, name):
        """Initialize."""
        super(DropCue, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Place a cue about the sensed object into the environment."""
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            cue = Cue(
                id=self.agent.name, location=self.agent.location,
                radius=self.agent.radius, object_to_communicate=target)
            # The cue is left on the grid; the agent keeps no reference to
            # it after dropping. A pheromone class could be derived from
            # Cue for decaying cues.
            self.agent.model.grid.add_object_to_grid(
                cue.location, cue)
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class PickCue(Behaviour):
    """Pick up a cue from the environment.
    Reads the information carried by a sensed cue: orients the agent
    towards the communicated location and stores the communicated object
    in shared memory.
    """
    def __init__(self, name):
        """Initialize."""
        super(PickCue, self).__init__(name)
    def setup(self, timeout, agent, item='Cue'):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Absorb the information carried by a nearby cue."""
        try:
            cue = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            # Orient towards the location the cue communicates.
            self.agent.direction = get_direction(
                cue.communicated_location, self.agent.location)
            payload = cue.communicated_object
            key = type(payload).__name__
            try:
                self.agent.shared_content[key].add(payload)
            except KeyError:
                self.agent.shared_content[key] = {payload}
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class AvoidSObjects(Behaviour):
    """Avoid static objects in the environment.
    Casts a ray from the agent along its heading and, if the ray crosses
    an edge of the obstacle's bounding rectangle, re-aligns the heading
    with that edge so the agent slides along the obstacle.
    """
    def __init__(self, name):
        """Initialize."""
        super(AvoidSObjects, self).__init__(name)
    def setup(self, timeout, agent, item='Obstacles'):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Steer along the first intersected obstacle edge.
        Note: no try/except here — an IndexError from the lookup
        propagates to the caller, matching the original behavior.
        """
        obstacle = ObjectsStore.find(
            self.blackboard.neighbourobj, self.agent.shared_content,
            self.item, self.agent.name)[0]
        # End point of a ray one sensing radius long, cast along the
        # agent's heading.
        ray_x = int(np.ceil(self.agent.location[0] + np.cos(
            self.agent.direction) * self.agent.radius))
        ray_y = int(np.ceil(self.agent.location[1] + np.sin(
            self.agent.direction) * self.agent.radius))
        # Axis-aligned bounding rectangle of the obstacle, built from the
        # grid cells it occupies.
        cells = self.agent.model.grid.get_neighborhood(
            obstacle.location, obstacle.radius)
        corners = [self.agent.model.grid.grid_reverse[cell] for cell in cells]
        lows, highs = zip(*corners)
        low_xs, low_ys = zip(*lows)
        high_xs, high_ys = zip(*highs)
        x1 = min(low_xs)
        y1 = min(low_ys)
        x2 = max(high_xs)
        y2 = max(high_ys)
        # The rectangle's four edges, in a fixed order (bottom, right,
        # top, left) — the first intersected edge wins.
        edges = [
            [(x1, y1), (x2, y1)],
            [(x2, y1), (x2, y2)],
            [(x2, y2), (x1, y2)],
            [(x1, y2), (x1, y1)]
        ]
        for edge in edges:
            if check_intersect(
                    self.agent.location, (ray_x, ray_y), edge[0], edge[1]):
                # Align the heading with the intersected edge.
                self.agent.direction = np.arctan2(
                    edge[1][1] - edge[0][1], edge[1][0] - edge[0][0])
                break
        return common.Status.SUCCESS
# Behavior to check if the agent avoided obj
class DidAvoidedObj(Behaviour):
    """Check whether the agent avoided the object.
    Succeeds when the agent's heading deviates from the bearing to the
    object by at least pi/2, or when the object cannot be sensed at all.
    """
    def __init__(self, name):
        """Initialize."""
        super(DidAvoidedObj, self).__init__(name)
    def setup(self, timeout, agent, item):
        """Store agent, item name and blackboard client."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-tick initialization required."""
        pass
    def update(self):
        """Compare the heading with the bearing to the object."""
        try:
            target = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)[0]
            bearing = get_direction(self.agent.location, target.location)
            # NOTE(review): raw difference is not wrapped into [0, pi],
            # so e.g. headings 0.1 and 2*pi - 0.1 count as far apart.
            deviation = np.abs(self.agent.direction - bearing)
            if deviation < np.pi / 2:
                return common.Status.FAILURE
            return common.Status.SUCCESS
        except (AttributeError, IndexError):
            return common.Status.SUCCESS
# Behavior to check if the agent can move
class CanMove(Behaviour):
    """Condition node: succeeds when the agent is free to move.

    An agent may move only while its ``moveable`` flag is set and it is
    not dead.
    """
    def __init__(self, name):
        """Initialize the behaviour node."""
        super(CanMove, self).__init__(name)
    def setup(self, timeout, agent, item=None):
        """Store the agent (and optional item) this node operates on."""
        self.agent = agent
        self.item = item
    def initialise(self):
        """Nothing to do per activation."""
        pass
    def update(self):
        """Return SUCCESS when movement is allowed, FAILURE otherwise."""
        try:
            movable = self.agent.moveable and self.agent.dead is not True
        except (AttributeError, IndexError):
            return common.Status.FAILURE
        return common.Status.SUCCESS if movable else common.Status.FAILURE
# Pheromone related behaviors
class DropPheromone(Behaviour):
    """Drop pheromone in the environment.

    This is a communication behavior where a pheromone object
    is placed in the environment which gives a direction to follow for the
    agents.
    """
    def __init__(self, name, attractive=True):
        """Initialize; *attractive* selects attractive vs repulsive marks."""
        super(DropPheromone, self).__init__(name)
        self.attractive = attractive
    def setup(self, timeout, agent, item=None):
        """Bind the agent and the shared 'Pheromones' blackboard."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name='Pheromones')
        self.blackboard.register_key(key='pheromones', access=common.Access.WRITE)
    def initialise(self):
        """Pass."""
        pass
    def update(self):
        """Drop a pheromone at the agent's current location."""
        # Cleanup: the original wrapped this in a vacuous `if True: ... else`
        # whose else-branch was unreachable; the dead branch is removed.
        try:
            # Pheromone carries the agent's current heading so followers
            # can recover a direction from it.
            pheromone = Pheromones(
                id=self.agent.name, location=self.agent.location,
                radius=self.agent.radius, attractive=self.attractive,
                direction=self.agent.direction)
            # Repulsive pheromones act as non-passable markers.
            pheromone.passable = self.attractive
            # Register it on the grid so other agents can sense it.
            self.agent.model.grid.add_object_to_grid(
                pheromone.location, pheromone)
            self.blackboard.pheromones.append(pheromone)
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
# Pheromone related behaviors
class SensePheromone(Behaviour):
    """Sense pheromone in the environment.

    This is a communication behavior where pheromones are sensed from
    the environment and a direction is computed to follow.
    """
    def __init__(self, name, attractive=True):
        """Initialize.

        Bug fix: the constructor previously hard-coded
        ``self.attractive = True``, silently ignoring the *attractive*
        argument.
        """
        super(SensePheromone, self).__init__(name)
        self.attractive = attractive
    def setup(self, timeout, agent, item='Pheromones'):
        """Bind the agent, the item name and blackboard read access."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """Pass."""
        pass
    def update(self):
        """Steer the agent using the sensed pheromones.

        If any sensed pheromone is repulsive, veer off by 90 degrees;
        otherwise follow the strength-weighted mean direction of the
        sensed pheromones.
        """
        try:
            objects = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            repulsive = any(obj.attractive is False for obj in objects)
            if not repulsive:
                # Weighted circular mean: weight each pheromone's unit
                # direction vector by its current strength.
                angles = [[
                    np.sin(obj.direction), np.cos(obj.direction),
                    obj.strength[obj.current_time]] for obj in objects]
                sin, cos, weight = zip(*angles)
                sin, cos, weight = np.array(sin), np.array(cos), np.array(weight)
                direction = np.arctan2(sum(sin * weight), sum(cos * weight))
            else:
                # Repulsive pheromone nearby: turn perpendicular to heading.
                direction = self.agent.direction + np.pi / 2
            self.agent.direction = direction % (2 * np.pi)
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class PheromoneExists(Behaviour):
    """Condition node: succeeds when a pheromone dropped by *this* agent
    is sensed at its current neighbourhood.
    """
    def __init__(self, name):
        """Initialize the node."""
        super(PheromoneExists, self).__init__(name)
    def setup(self, timeout, agent, item='Pheromones'):
        """Bind the agent, the item name and blackboard read access."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-activation state."""
        pass
    def update(self):
        """Look for a sensed pheromone whose id matches this agent."""
        try:
            sensed = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            own = [ph for ph in sensed if ph.id == self.agent.name]
        except (IndexError, AttributeError):
            return common.Status.FAILURE
        if own:
            return common.Status.SUCCESS
        return common.Status.FAILURE
class IsAgentDead(Behaviour):
    """Condition node: succeeds when the agent is dead."""
    def __init__(self, name):
        """Initialize the node."""
        super(IsAgentDead, self).__init__(name)
    def setup(self, timeout, agent, item=None):
        """Store the agent (and optional item) this node operates on."""
        self.agent = agent
        self.item = item
    def initialise(self):
        """No per-activation state."""
        pass
    def update(self):
        """Return SUCCESS when the agent's dead flag is set."""
        try:
            dead = bool(self.agent.dead)
        except (IndexError, AttributeError):
            return common.Status.FAILURE
        return common.Status.SUCCESS if dead else common.Status.FAILURE
class IsAttractivePheromone(Behaviour):
    """Condition node: succeeds when no sensed pheromone is repulsive."""
    def __init__(self, name):
        """Initialize the node."""
        super(IsAttractivePheromone, self).__init__(name)
    def setup(self, timeout, agent, item=None):
        """Bind the agent, the item name and blackboard read access."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-activation state."""
        pass
    def update(self):
        """Return SUCCESS when every sensed pheromone is attractive."""
        try:
            sensed = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            # `is False` mirrors the original test: only an explicit False
            # marks a pheromone as repulsive.
            if any(ph.attractive is False for ph in sensed):
                return common.Status.FAILURE
            return common.Status.SUCCESS
        except (IndexError, AttributeError):
            return common.Status.FAILURE
class IsRepulsivePheromone(Behaviour):
    """Condition node: succeeds when at least one sensed pheromone is
    repulsive.
    """
    def __init__(self, name):
        """Initialize the node."""
        super(IsRepulsivePheromone, self).__init__(name)
    def setup(self, timeout, agent, item=None):
        """Bind the agent, the item name and blackboard read access."""
        self.agent = agent
        self.item = item
        self.blackboard = blackboard.Client(name=str(agent.name))
        self.blackboard.register_key(key='neighbourobj', access=common.Access.READ)
    def initialise(self):
        """No per-activation state."""
        pass
    def update(self):
        """Return SUCCESS when any sensed pheromone is repulsive."""
        try:
            sensed = ObjectsStore.find(
                self.blackboard.neighbourobj, self.agent.shared_content,
                self.item, self.agent.name)
            # `is False` mirrors the original test: only an explicit False
            # marks a pheromone as repulsive.
            if any(ph.attractive is False for ph in sensed):
                return common.Status.SUCCESS
            return common.Status.FAILURE
        except (IndexError, AttributeError):
            return common.Status.FAILURE
# Dummy node
class DummyNode(Behaviour):
    """Placeholder behaviour that always succeeds.

    Useful where the tree structure requires a node but no action is
    needed.
    """
    def __init__(self, name):
        """Initialize the node."""
        super(DummyNode, self).__init__(name)
    def setup(self, timeout, agent, item=None):
        """Store the agent (and optional item) for interface parity."""
        self.agent = agent
        self.item = item
    def initialise(self):
        """No per-activation state."""
        pass
    def update(self):
        """Always succeed."""
        return common.Status.SUCCESS
So we can commit its # information to memory # if item.carryable is False and item.deathable is False: # name = name + str(self.agent.name) Sense behavior for the agents. Inherits the Behaviors class from py_trees. This behavior implements the sense function for the agents. This allows the agents to sense the nearby environment based on the their sense radius. Init method for the sense behavior. Have defined the setup method. This method defines the other objects required for the behavior. Agent is the actor in the environment, item is the name of the item we are trying to find in the environment and timeout defines the execution time for the behavior. Everytime initialization. Not required for now. Receive signals from other agents. Since this is the primary behavior for the agents to sense the environment, we include the receive signal method here. The agents will be able to sense the environment and check if it receives any signals from other agents. Sense the neighborhood. This method gets the grid values based on the current location and radius. The grids are used to search the environment. If the agents find any objects, it is stored in the behavior tree blackboard which is a dictionary with sets as values. # if self.item is None: # grids = self.agent.model.grid.get_neighborhood( # self.agent.location, self.agent.radius*4) # else: # grids = self.agent.model.grid.get_neighborhood( # self.agent.location, self.agent.radius) # for i in range(1, self.agent.model.grid.grid_size): # grids += self.agent.model.grid.get_neighborhood(new_location, 1) # print(self.agent.name, grid, self.name, round(self.agent.direction, 2), self.id, limits) # print('nighbourdist', grid, objects, self.agent.location, (new_location), limits) # Need to reset blackboard contents after each sense # Is the item is not carrable, its location # and property doesnot change. 
So we can commit its # information to memory # if item.carryable is False and item.deathable is False: # name = name + str(self.agent.name) # if status == common.Status.SUCCESS: # pass # else: GoTo behavior for the agents. Inherits the Behaviors class from py_trees. This behavior implements the GoTo function for the agents. This allows the agents direct towards the object they want to reach. This behavior is only concerned with direction alignment not with movement. Init method for the GoTo behavior. # self.blackboard = Blackboard() # self.blackboard.neighbourobj = dict() Have defined the setup method. This method defines the other objects required for the behavior. Agent is the actor in the environment, item is the name of the item we are trying to find in the environment and timeout defines the execution time for the behavior. Everytime initialization. Not required for now. Goto towards the object of interest. This method uses the ObjectsStore abstract class to find the objects sensed before and agent shared storage. If the agent find the object of interst in the store then, direction to the object of interest is computed and agent direction is set to that direction. # Behavior defined to move towards something Towards behaviors. Changes the direction to go towards the object. Initialize. Setup. Pass. Nothing much to do. # Behavior defined to move away from something Away behavior. Initialize. Setup. Pass. Compute direction and negate it. # Behavior defined for Randomwalk Random walk behaviors. Initialize. Setup. Pass. Compute random direction and set it to agent direction. Check is the item is moveable. Initialize. # self.blackboard = Blackboard() Setup. Pass. Get the object and check its movelable property. # Behavior defined to move Actually move the agent. Move the agent with any other object fully attached or partially attached to the agent. Initialize. Setup. Pass. Signal also move along with agents. Signal is created by the agent. 
It has certain broadcast radius. It moves along with the agent. So this move behavior should also be responsible to move the signals. Move logic for partially attached objects. if np.cos(direction) > 0: x = int(np.ceil( item.location[0] + np.cos(direction) * velocity)) y = int(np.ceil( item.location[1] + np.sin(direction) * velocity)) else: x = int(np.floor( item.location[0] + np.cos(direction) * velocity)) y = int(np.floor( item.location[1] + np.sin(direction) * velocity)) # object_agent = list(item.agents.keys()) # indx = self.agent.model.random.randint(0, len(object_agent)) # object_agent = object_agent[indx] # new_location, direction # = object_agent.model.grid.check_limits( # (x, y), direction) Move logic for agent and fully carried object. # Partially carried object # print(self.agent.direction, self.agent.velocity, self.agent.location) # print('from move', self.name, self.agent.location, new_location, direction) # Now the agent location has been updated, update the signal grids # Full carried object moves along the agent # Now the agent location has been updated, update the signal grids # Behavior define for donot move Stand still behaviors. Initialize. Setup. Pass. Update agent moveable property. # Behavior to check carryable attribute of an object Check carryable attribute of the item. Initialize. Setup. Pass. Check carryable property. # Behavior to check carryable attribute of an object Check dropable property. Initialize. Setup. Pass. Check the dropable attribute. # Behavior define to check is the item is carrable on its own Single carry behavior. Initialize. Setup. Pass. Logic to check if the object can be carried by single agent. # Logic to carry # Behavior define to check is the item is carrable on its own or not Multiple carry behaviour. Initialize. Setup. Pass. Logic for multiple carry by checking the weights. # Logic to carry # objects = self.blackboard.neighbourobj[self.thing].pop() Condition check if the agent is carrying something. Initialize. 
Setup. Pass. Logic for object carrying check. # Behavior defined to drop the items currently carrying Drop behavior to drop items which is being carried. Initialize. Setup. Pass. Logic to drop the item. # Get the objects from the actuators # Grid # Temporary fix # Store the genome which activated the single carry # objects.phenotype['drop'] = # self.agent.individual[0].phenotype # objects.agents.remove(self.agent) Drop behavior for partially attached object. Initialize. Setup. Pass. Logic to drop partially attached object. # If the agent is last to drop reduce the size of the # food to the half the size of the hub. This indicates # that the food has been deposited to the hub # Behavior defined to carry the items found Carry behavior. Initialize. Setup. Pass. Carry logic to carry the object by the agent. # Add the agent to the object dict # objects.agents[self.agent] = self.agent.get_capacity() # Temporary fix # Store the genome which activated the single carry Behavior to initiate multiple carry process. Initialize. Setup. Pass. Logic to initiaite multiple carry process. # objects = self.blackboard.neighbourobj[self.thing].pop() # Update the partial attached object # Update the object so that it knows this agent # has attached to it # Redistribute the weights to all the attached objects Condition to check if the object is in partially attached list. Initialize. Setup. Pass. Logic to check if the object is in partially attached list. # objects = self.blackboard.neighbourobj[self.thing].pop() Condition to check if the agent has enough strength to carry. Initialize. Setup. Pass. Logic to check if the agent has enough strength to carry. Condition to check is the object is moving. Initialize. Setup. Pass. Logic to check if the object is moving. Condition to check is the object is visited before. Initialize. Setup. Pass. Logic to check is the object is visited before. Multiple carry behavior. Initialize. Setup. Pass. Logic for multiple carry. 
# Lets start some communication behaviors Signal exists behavior. This behavior enables agents to check it that signal already exists. Initialize. Setup. Pass. Logic for sending signal. # Find the object the agent is trying to signal # Check the agetns signals array for its exitance # Lets start some communication behaviors Is Signal active? This behavior enables agents to check it that signal is already active. Initialize. Setup. Pass. Logic for sending signal. # Find the object the agent is trying to signal. Signalling behavior. This behavior enables agents to send signals about the information they have gathered. The information could be about location of site, hub, food, obstacles and others. Initialize. Setup. Pass. Logic for sending signal. # Initialize the signal object # Add the signal to the grids so it could be sensed by # other agents # Append the signal object to the agent signal list Receive signals from other agents. Since this is the primary behavior for the agents to sense the environment, we include the receive signal method here. The agents will be able to sense the environment and check if it receives any signals from other agents. Initialize. Setup. Pass. Logic for receiving signal. # Extract the information from the signal object and # store into the agent memory Cue does not exists behavior. This behavior enables agents to check if that cue already exists. Initialize. Setup. Pass. Logic for cue checking. # Find the object the agent is trying to cue # Check the agetns cue list for its exitance # Communication behaviors related to cue Drop cue in the environment. This is a communication behavior where a physical object is placed in the environment which gives a particular information to the agents sensing this cue. Initialize. Setup. Pass. Logic for dropping cue. # Initialize the cue object # Add the cue to the grids so it could be sensed by # other agents # We just drop the cue on the environment and don't keep track # of it. 
Instead of using cue here we can derive a class from cue # and call it pheromonone Pick cue in the environment. This is a communication behavior where the information from the cue object in the environment is pickedup. Initialize. Setup. Pass. Logic for pickup cue. # Get information from the cue. For now, the agents orients # its direction towards the object that is communicated Avoid Static objects in the environment. This is a avoid behaviors where the agents avoids the static objects that are not passable. Initialize. Setup. Pass. Logic for avoid static objects. # try: # alpha = get_direction(self.agent.location, objects.location) # theta = self.agent.direction # angle_diff = theta-alpha # print('From', self.agent.name, self.name, item, self.agent.direction, item.location, item.radius) # agent_A = y - self.agent.location[1] # agent_B = self.agent.location[0] - x # agent_C = agent_A * self.agent.location[0] + agent_B * self.agent.location[1] # print('agetn ABC', agent_A, agent_B, agent_C) # obj_x2 = int(np.ceil(item.location[0] + (np.cos(np.pi/4) * (item.radius)))) # obj_y2 = int(np.ceil(item.location[1] + (np.sin(np.pi/4) * (item.radius)))) # obj_loc_2 = self.agent.model.grid.find_upperbound((obj_x2, obj_y2)) # # print('obj x2', obj_x2, obj_y2, obj_loc_2) # obj_x0 = int(np.floor(item.location[0] + (np.cos(np.pi/4 + np.pi) * (item.radius)))) # obj_y0 = int(np.floor(item.location[1] + (np.sin(np.pi/4 + np.pi) * (item.radius)))) # obj_loc_0 = self.agent.model.grid.find_lowerbound((obj_x0, obj_y0)) # # print('obj x1', obj_x0, obj_y0, obj_loc_0) # obj_loc_1 = (obj_loc_2[0], obj_loc_0[1]) # obj_loc_3 = (obj_loc_0[0], obj_loc_2[1]) # print(grids, [self.agent.model.grid.grid_reverse[grid] for grid in grids]) # for grid in grids: # p1, p2 = self.agent.model.grid.grid_reverse[grid] # x1, y1 = p1 # x2, y2 = p2 # print('agent ray', self.agent.location, (x,y)) # print('rectangle obstacle',lines) # plt.plot([self.agent.location[0], x], [self.agent.location[1], y], 'r--') # 
plt.plot([lines[0][0][0], lines[0][1][0]], [lines[0][0][1], lines[0][1][1]],'b.-') # plt.plot([lines[1][0][0], lines[1][1][0]], [lines[1][0][1], lines[1][1][1]],'b.-') # plt.plot([lines[2][0][0], lines[2][1][0]], [lines[2][0][1], lines[2][1][1]],'b.-') # plt.plot([lines[3][0][0], lines[3][1][0]], [lines[3][0][1], lines[3][1][1]],'b.-') # # plt.xticks(range(-20, 20, 1)) # # plt.yticks(range(-20, 20, 1)) # plt.show() # if intersect: # break # line_A = line[1][1] - line[0][1] # line_B = line[0][0] - line[1][0] # line_C = line_A * line[0][0] + line_B * line[0][1] # slope = round(agent_A * line_B - line_A * agent_B, 2) # # print('slope', slope) # if slope == 0.0: # break # else: # intersection_x = int((line_B * agent_C - agent_B * line_C) / slope) # intersection_y = int((agent_A * line_C - line_A * agent_C) / slope) # print('itersection point', intersection_x, intersection_y, self.agent.location, x, y, line) # if ( # (intersection_x <= x) and ( intersection_x >= self.agent.location[0]) and # (intersection_y <= y) and ( intersection_y >= self.agent.location[1])): # # ((intersection_x <= line[1][0]) and ( intersection_x >= line[0][0]) and # # (intersection_y <= line[1][1]) and ( intersection_y >= line[0][1]))): # direction = np.arctan2(line[1][1] - line[0][1], line[1][0] - line[0][0]) # print('computed direction', direction) # self.agent.direction = (direction + 2*np.pi) % (2*np.pi) # break # direction = self.agent.direction + np.pi/2 # self.agent.direction = direction % (2 * np.pi) # print(self.agent.name, direction, self.agent.direction) # except (IndexError, AttributeError): # return common.Status.FAILURE # Behavior to check if the agent avoided obj Logic to check if the agent avoided the objects. Initialize. Setup. Pass. Check if it can sense the object and its direction. # Behavior to check if the agent can move Logic to check if the agent can move in the intended direction. Initialize. Setup. Pass. Check if it can sense the object and its direction. 
# Pheromone related bheaviors Drop pheromone in the environment. This is a communication behavior where a pheromone object is placed in the environment which gives a direction to follow for the agents. Initialize. Setup. # self.blackboard = blackboard.Client(name=str(agent.name)) # self.blackboard.register_key(key='neighbourobj', access=common.Access.READ) Pass. Logic for dropping pheromone. # Initialize the pheromone object # Add the pheromone to the grids so it could be sensed by # other agents # Pheromone related bheaviors Sense pheromone in the environment. This is a communication behavior where pheromones are sensed from the environment and a direction is computed to follow. Initialize. Setup. Pass. Logic for dropping pheromone. # Initialize the pheromone object Check if pheromone exists at the location where agent is. This is a communication behavior where pheromones are sensed from the environment and a direction is computed to follow. Initialize. Setup. Pass. Logic for dropping pheromone. Check if agent is dead. Initialize. Setup. Pass. Check agent is dead Check if the pheromone is attractive. Initialize. Setup. Pass. Check if the pheromone is attractive or repulsive. Check if the pheromone is attractive. Initialize. Setup. Pass. Check if the pheromone is attractive or repulsive. # print('repusive pheroment', objects, objects[0].attractive) # Dummy node Dummy node. BT node that always returns Success. Initialize. Setup. Pass. Nothing much to do. | 3.150385 | 3 |
script.py | IraKorshunova/kaggle-seizure-detection | 1 | 6623422 | <filename>script.py
import os
import numpy as np
import scipy as sc
import scipy.signal
from scipy import interpolate
from scipy.io import loadmat, savemat
def get_files_paths(directory, extension):
    """Recursively collect paths of files under *directory* whose names
    end with *extension*, skipping hidden files (names starting with '.').
    Paths are joined with '/' and returned in os.walk order.
    """
    matches = []
    for root, _dirs, names in os.walk(directory):
        for name in names:
            if name.endswith(extension) and not name.startswith('.'):
                matches.append(root + '/' + name)
    return matches
if __name__ == '__main__':
    read_dir = 'clips'
    write_dir = 'xclips'

    def preprocess(d, is_ictal):
        """Low-pass filter one EEG clip at 25 Hz and resample it to 50 Hz.

        Extracted per the in-code TODO ("split this processing into
        reusable functions").

        Parameters
        ----------
        d : dict
            Clip loaded via ``loadmat(..., squeeze_me=True)`` with at
            least ``data`` (channels x samples) and ``channels`` entries.
        is_ictal : bool
            Whether the clip is an ictal segment (its ``latency`` field
            is preserved).

        Returns
        -------
        dict
            New clip dict with filtered/resampled ``data`` and the
            post-resampling ``freq``.
        """
        x = d['data']
        # NOTE(review): assumes every clip is exactly 1 s long, so the
        # sample count equals the sampling rate (this overwrote d['freq']
        # in the original too) -- TODO confirm.
        sampling_frequency = x.shape[1]
        highcut = 25  # low-pass cutoff in Hz
        nyq = 0.5 * sampling_frequency
        b, a = sc.signal.butter(3, highcut / nyq)
        x_filt = sc.signal.lfilter(b, a, x, axis=1)
        # Resample the filtered signal at twice the cutoff frequency.
        sampling_frequency2 = 2 * highcut
        t = np.linspace(0, x.shape[1] - 1, sampling_frequency)
        t2 = np.linspace(0, x.shape[1], sampling_frequency2, endpoint=False)
        x2 = interpolate.interp1d(t, x_filt, axis=1)(t2)
        # Bug fix: record the *post*-resampling rate; the original stored
        # the pre-resampling rate alongside the resampled data.
        d2 = {'data': x2, 'freq': sampling_frequency2, 'channels': d['channels']}
        if is_ictal:
            # Ictal clips carry a seizure-onset latency that must survive.
            d2['latency'] = d['latency']
        return d2

    for raw_file_path in get_files_paths(
            '../EEG/Volumes/Seagate/seizure_detection/competition_data/clips/Patient_4',
            '.mat'):
        print(raw_file_path)
        preprocessed_file_path = raw_file_path.replace(read_dir, write_dir)
        dir_path = os.path.dirname(preprocessed_file_path)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        d = loadmat(raw_file_path, squeeze_me=True)
        d2 = preprocess(d, '_ictal_' in raw_file_path)
        savemat(preprocessed_file_path, d2)
| <filename>script.py
import os
import numpy as np
import scipy as sc
import scipy.signal
from scipy import interpolate
from scipy.io import loadmat, savemat
def get_files_paths(directory, extension):
    """Recursively collect paths of files under *directory* whose names
    end with *extension*, skipping hidden files (names starting with '.')."""
    files_with_extension = list()
    for root, dirs, files in os.walk(directory):
        files_with_extension += [root + '/' + file_name for file_name in files if
                                 file_name.endswith(extension) and not file_name.startswith('.')]
    return files_with_extension
if __name__ == '__main__':
    # Preprocess every Patient_4 clip: low-pass filter at 25 Hz, resample
    # to 50 Hz, and write the result under 'xclips' in place of 'clips'.
    read_dir = 'clips'
    write_dir = 'xclips'
    for raw_file_path in get_files_paths('../EEG/Volumes/Seagate/seizure_detection/competition_data/clips/Patient_4',
                                         '.mat'):
        print raw_file_path
        preprocessed_file_path = raw_file_path.replace(read_dir, write_dir)
        dir_path = os.path.dirname(preprocessed_file_path)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        # =========================You should split this processing into reusable functions============================
        d = loadmat(raw_file_path, squeeze_me=True)
        x = d['data']  # channels x samples matrix
        sampling_frequency = d['freq']
        n_channels = d['channels']
        # NOTE(review): this overwrites d['freq']; it presumably assumes each
        # clip is exactly 1 s long so sample count == sampling rate -- confirm.
        sampling_frequency = x.shape[1]
        lowcut = 0  # NOTE(review): unused -- only a low-pass filter is applied
        highcut = 25  # low-pass cutoff in Hz
        nyq = 0.5 * sampling_frequency
        high = highcut / nyq
        b, a = sc.signal.butter(3, high)
        x_filt = sc.signal.lfilter(b, a, x, axis=1)
        # Resample the filtered signal at twice the cutoff frequency (50 Hz).
        t = np.linspace(0, x.shape[1] - 1, sampling_frequency)
        sampling_frequency2 = 2 * highcut
        t2 = np.linspace(0, x.shape[1], sampling_frequency2, endpoint=False)
        f = interpolate.interp1d(t, x_filt, axis=1)
        x2 = f(t2)
        # Ictal clips carry a seizure-onset 'latency' field to preserve.
        # NOTE(review): 'freq' stored below is the *pre*-resampling rate while
        # 'data' holds the resampled signal -- looks inconsistent; verify what
        # downstream consumers expect.
        if '_ictal_' in raw_file_path:
            d2 = {'data': x2, 'latency': d['latency'], 'freq': sampling_frequency, 'channels': d['channels']}
        else:
            d2 = {'data': x2, 'freq': sampling_frequency, 'channels': d['channels']}
        # =============================================================================================================
        savemat(preprocessed_file_path, d2)
        print raw_file_path
| en | 0.450622 | # =========================You should split this processing into reusable functions============================ # ============================================================================================================= | 2.471315 | 2 |
app/db/base.py | SanchithHegde/decrypto-api | 0 | 6623423 | <reponame>SanchithHegde/decrypto-api
"""
Imports all the models, so that `Base` has them before being imported by Alembic.
"""
# pylint: disable=unused-import
from app.db.base_class import Base
from app.models.question import Question
from app.models.question_order_item import QuestionOrderItem
from app.models.user import User
| """
Imports all the models, so that `Base` has them before being imported by Alembic.
"""
# pylint: disable=unused-import
from app.db.base_class import Base
from app.models.question import Question
from app.models.question_order_item import QuestionOrderItem
from app.models.user import User | en | 0.931598 | Imports all the models, so that `Base` has them before being imported by Alembic. # pylint: disable=unused-import | 1.650393 | 2 |
PassPredict.py | Krytic/PassPredict | 7 | 6623424 | <reponame>Krytic/PassPredict
import time
import twitter
import arrow
import requests
import os, random
from orbit_predictor.sources import EtcTLESource
from orbit_predictor.locations import NZ2
from orbit_predictor.predictors.base import Position
import configparser
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from apscheduler.schedulers.blocking import BlockingScheduler
sched = BlockingScheduler()  # scheduler that periodically drives main_loop
plt.ioff()  # non-interactive plotting: figures are saved to disk, never shown
plt.rcParams["font.family"] = "serif"
config = configparser.ConfigParser()
config.read("config.ini")  # twitter credentials and tracking settings
def get_tracked_satellites():
    """Fetch the list of satellite names to track.

    The list is downloaded from the URL configured under
    ``[tracking] update_url``: one satellite name per non-blank line.

    Returns
    -------
    list of str
        Stripped satellite names, blank lines removed.
    """
    response = requests.get(config['tracking']['update_url'])
    names = [entry.strip() for entry in response.text.split("\n")]
    return [entry for entry in names if entry != '']
def download_TLEs():
    """Download the current TLE set for all active satellites.

    Fetches celestrak's ``active.txt`` and writes one three-line element
    set per satellite into ``tles/<name>.tle``, stripping characters that
    are invalid in file names.

    Returns
    -------
    None.
    """
    url = "http://celestrak.com/NORAD/elements/active.txt"
    r = requests.get(url)
    lines = r.text.split("\r\n")
    # Walk the file three lines at a time: name, TLE line 1, TLE line 2.
    # Bug fix: the original used range(0, len(lines) - 3, 3), which
    # silently dropped the final element set whenever the payload had no
    # trailing blank line.
    for i in range(0, len(lines) - 2, 3):
        satname = lines[i].strip()
        # Strip characters that are not allowed in (Windows) file names.
        invalid_chars = ["\\", "/", ":", "*", "?", '"', "<", ">", "|"]
        fname = satname
        for char in invalid_chars:
            fname = fname.replace(char, "")
        tle_lines = [
            satname + "\n",
            lines[i + 1].strip() + "\n",
            lines[i + 2].strip(),
        ]
        with open(f"tles/{fname}.tle", "w") as f:
            f.writelines(tle_lines)
def connect_twitter():
    """Build and verify a connection to the twitter API.

    Credentials are read from the ``[twitter_api]`` section of the
    module-level config.

    Raises
    ------
    Exception
        If credential verification fails (typically a bad consumer key,
        secret, or access token).

    Returns
    -------
    twitter.api.Api
        A verified API client.
    """
    handle = twitter.Api(**config['twitter_api'])
    if handle.VerifyCredentials():
        return handle
    raise Exception("Twitter Validation Failed.")
def tweet(apiHandler, msg, media):
    """
    Post a tweet with an attached image.

    Parameters
    ----------
    apiHandler : twitter.api.Api
        An instance of the Twitter API.
    msg : string
        The status text to post.
    media : file
        A file pointer to an image to attach.

    Returns
    -------
    None.
    """
    apiHandler.PostUpdate(msg, media=media)
checked = dict()  # satellite name -> AOS time of the pass already tweeted

@sched.scheduled_job('interval', minutes=int(config['tracking']['time_between_checks']))
def main_loop():
    """
    The main loop of PassPredict.

    Refreshes stale TLEs, predicts the next pass of every tracked
    satellite over the ground station and, when a pass starts within the
    next two hours, tweets a ground-track image and the pass details.

    Parameters
    ----------
    None.

    Returns
    -------
    None.
    """
    tracking = get_tracked_satellites()
    api = connect_twitter()
    # NOTE(review): staleness is probed on one *random* TLE file; if that
    # file is older than a week the whole set is refreshed.
    fname = random.choice(os.listdir("tles/"))
    if time.time() - os.path.getmtime(f"tles/{fname}") > 604800:  # 1 week
        download_TLEs()
    now = arrow.utcnow()
    for sat in tracking:
        source = EtcTLESource(filename=f"tles/{sat}.tle")
        predictor = source.get_predictor(sat)
        predicted_pass = predictor.get_next_pass(NZ2)
        if sat in checked:
            if now > checked[sat]:
                # AOS already occurred; forget the satellite so its next
                # pass can be announced on a later run.
                checked.pop(sat)
            # Bug fix: skip this satellite either way. The original only
            # `continue`d on the pop branch, so an upcoming pass that was
            # already tweeted would be tweeted again every interval.
            continue
        AOS_utc = arrow.get(predicted_pass.aos)
        if AOS_utc <= now.shift(minutes=120):
            LOS_utc = arrow.get(predicted_pass.los)
            # Draw the ground track on a globe centred on the station.
            fig = plt.figure()
            lat, long = NZ2.position_llh[0], NZ2.position_llh[1]
            ax = plt.axes(projection=ccrs.NearsidePerspective(
                central_longitude=long, central_latitude=lat,
                satellite_height=35785831/3))
            ax.stock_img()
            AOS_for_image = AOS_utc.to("Pacific/Auckland").format("DD/MM/YYYY, HH:mm:ss")
            x = []
            y = []
            # Sample the sub-satellite point once per second over the pass.
            for t in arrow.Arrow.range('second', AOS_utc, LOS_utc):
                pos = predictor.get_position(t)
                x.append(pos.position_llh[1])  # longitude
                y.append(pos.position_llh[0])  # latitude
            plt.plot(x, y, 'r-', transform=ccrs.Geodetic())
            plt.title(f"Pass of {sat} on {AOS_for_image}")
            plt.savefig(f"images/{sat}.png", dpi=500)
            # Bug fix: close the figure -- the original leaked one figure
            # per tweeted pass, growing memory in this long-running job.
            plt.close(fig)
            AOS_nzt = AOS_utc.to("Pacific/Auckland").format("HH:mm:ss")
            max_el = predicted_pass.max_elevation_deg
            # Bug fix: close the image file handle after the tweet is sent.
            with open(f"images/{sat}.png", 'rb') as img:
                tweet(api, f"There's a pass of {sat} over UoA, with maximum elevation {max_el:.2f}°, commencing at {AOS_nzt}.", img)
            print(f"Tweeted about {sat}")
            checked[sat] = AOS_utc
sched.start() | import time
import twitter
import arrow
import requests
import os, random
from orbit_predictor.sources import EtcTLESource
from orbit_predictor.locations import NZ2
from orbit_predictor.predictors.base import Position
import configparser
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from apscheduler.schedulers.blocking import BlockingScheduler
sched = BlockingScheduler()
plt.ioff()
plt.rcParams["font.family"] = "serif"
config = configparser.ConfigParser()
config.read("config.ini")
def get_tracked_satellites():
url = config['tracking']['update_url']
r = requests.get(url)
sats = []
for line in r.text.split("\n"):
line = line.strip()
if line != '':
sats.append(line)
return sats
def download_TLEs():
"""
Download the TLE files from celestrak.
Returns
-------
None.
"""
url = "http://celestrak.com/NORAD/elements/active.txt"
r = requests.get(url)
lines = r.text.split("\r\n")
for i in range(0, len(lines)-3, 3):
satname = lines[i].strip()
invalid_chars = ["\\", "/", ":", "*", "?", '"', "<", ">", "|"]
fname = satname
for char in invalid_chars:
fname = fname.replace(char, "")
line1 = satname + "\n"
line2 = lines[i+1].strip() + "\n"
line3 = lines[i+2].strip()
with open(f"tles/{fname}.tle", "w") as f:
f.writelines([line1, line2, line3])
def connect_twitter():
"""
Generate a connection to the twitter API.
Raises
------
Exception
If the validation with twitter failed.
Typically this is due to an incorrect consumer key, etc.
Returns
-------
api : twitter.api.Api
An instance of the twitter API.
"""
global config
api = twitter.Api(**config['twitter_api'])
if not api.VerifyCredentials():
raise Exception("Twitter Validation Failed.")
return api
def tweet(apiHandler, msg, media):
"""
Post a tweet.
Parameters
----------
apiHandler : twitter.api.Api
An instance of the Twitter API.
tweet : string
The tweet to send.
media : file
A file pointer to an image to attach.
Returns
-------
None.
"""
apiHandler.PostUpdate(msg, media=media)
checked = dict()
@sched.scheduled_job('interval', minutes=int(config['tracking']['time_between_checks']))
def main_loop():
"""
The main loop of PassPredict.
Parameters
----------
None.
Returns
-------
None.
"""
tracking = get_tracked_satellites()
api = connect_twitter()
fname = random.choice(os.listdir("tles/"))
if time.time() - os.path.getmtime(f"tles/{fname}") > 604800: # 1 week
download_TLEs()
now = arrow.utcnow()
for sat in tracking:
source = EtcTLESource(filename=f"tles/{sat}.tle")
predictor = source.get_predictor(sat)
predicted_pass = predictor.get_next_pass(NZ2)
if sat in checked.keys():
if now > checked[sat]:
# AOS already occured, can safely remove the sat from the list
checked.pop(sat)
continue
AOS_utc = arrow.get(predicted_pass.aos)
if AOS_utc <= now.shift(minutes=120):
LOS_utc = arrow.get(predicted_pass.los)
plt.figure()
lat, long = NZ2.position_llh[0], NZ2.position_llh[1]
ax = plt.axes(projection=ccrs.NearsidePerspective(central_longitude=long, central_latitude=lat, satellite_height=35785831/3))
ax.stock_img()
AOS_for_image = AOS_utc.to("Pacific/Auckland").format("DD/MM/YYYY, HH:mm:ss")
x = []
y = []
for t in arrow.Arrow.range('second', AOS_utc, LOS_utc):
pos = predictor.get_position(t)
lat = pos.position_llh[0]
long = pos.position_llh[1]
x.append(long)
y.append(lat)
plt.plot(x, y, 'r-', transform=ccrs.Geodetic())
plt.title(f"Pass of {sat} on {AOS_for_image}")
plt.savefig(f"images/{sat}.png", dpi=500)
img = open(f"images/{sat}.png", 'rb')
AOS_nzt = AOS_utc.to("Pacific/Auckland").format("HH:mm:ss")
max_el = predicted_pass.max_elevation_deg
tweet(api, f"There's a pass of {sat} over UoA, with maximum elevation {max_el:.2f}°, commencing at {AOS_nzt}.", img)
print(f"Tweeted about {sat}")
checked[sat] = AOS_utc
sched.start() | en | 0.606501 | Download the TLE files from celestrak. Returns ------- None. Generate a connection to the twitter API. Raises ------ Exception If the validation with twitter failed. Typically this is due to an incorrect consumer key, etc. Returns ------- api : twitter.api.Api An instance of the twitter API. Post a tweet. Parameters ---------- apiHandler : twitter.api.Api An instance of the Twitter API. tweet : string The tweet to send. media : file A file pointer to an image to attach. Returns ------- None. The main loop of PassPredict. Parameters ---------- None. Returns ------- None. # 1 week # AOS already occured, can safely remove the sat from the list | 2.547455 | 3 |
Basic programs/ascending.py | gurusabarish/python-programs | 2 | 6623425 | lst=[]
n=int(input("Enter how many elements want to insert in list :"))
for i in range(n):
a=int(input())
lst.append(a)
for i in range(n):
for j in range(n):
if lst[i]<=lst[j]:
lst[i],lst[j]=lst[j],lst[i]
print("Asending order of the list :")
for i in lst:
print(i)
| lst=[]
n=int(input("Enter how many elements want to insert in list :"))
for i in range(n):
a=int(input())
lst.append(a)
for i in range(n):
for j in range(n):
if lst[i]<=lst[j]:
lst[i],lst[j]=lst[j],lst[i]
print("Asending order of the list :")
for i in lst:
print(i)
| none | 1 | 3.941313 | 4 | |
K means clustering/color_conpression.py | rpotter12/Learn_Machine-Learning | 0 | 6623426 | <gh_stars>0
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
from sklearn.datasets import load_sample_image
china = load_sample_image("flower.jpg")
ax=plt.axes(xticks=[], yticks=[])
print(ax.imshow(china))
print(china.shape)
data=china/255 # use 0...1 scale
data = data.reshape(427*640,3)
print(data.shape)
def plot_pixels(data, title, colors=None, N=10000):
if colors is None:
colors=data
# choose random subset
rng = np.random.RandomState(0)
i=rng.permutation(data.shape[0])[:N]
colors = colors[i]
R,G,B=data[i].T
fig, ax = plt.subplots(1,2,figsize=(16,6))
ax[0].scatter(R,G,color=colors, marker='.')
ax[0].set(xlabel='Red', ylabel='Green', xlim=(0,1), ylim=(0,1))
ax[1].scatter(R,G,color=colors, marker='.')
ax[1].set(xlabel='Red', ylabel='Blue', xlim=(0,1), ylim=(0,1))
print(fig.suptitle(title,size=20))
plot_pixels(data, title='input color space: 16 million possible colors')
import warnings; warnings.simplefilter('ignore') #fix numpy issues
from sklearn.cluster import MiniBatchKMeans
kmeans=MiniBatchKMeans(16)
kmeans.fit(data)
new_colors=kmeans.cluster_centers_[kmeans.predict(data)]
plot_pixels(data, colors=new_colors, title = "reduced color space: 16 colors")
china_recolored=new_colors.reshape(china.shape)
fig, ax = plt.subplots(1,2,figsize=(16,6),subplot_kw=dict(xticks=[],yticks=[]))
fig.subplots_adjust(wspace=0.05)
ax[0].imshow(china)
ax[0].set_title('Original image', size=16)
ax[1].imshow(china_recolored)
ax[1].set_title('16 color Image', size=16)
| import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
from sklearn.datasets import load_sample_image
china = load_sample_image("flower.jpg")
ax=plt.axes(xticks=[], yticks=[])
print(ax.imshow(china))
print(china.shape)
data=china/255 # use 0...1 scale
data = data.reshape(427*640,3)
print(data.shape)
def plot_pixels(data, title, colors=None, N=10000):
if colors is None:
colors=data
# choose random subset
rng = np.random.RandomState(0)
i=rng.permutation(data.shape[0])[:N]
colors = colors[i]
R,G,B=data[i].T
fig, ax = plt.subplots(1,2,figsize=(16,6))
ax[0].scatter(R,G,color=colors, marker='.')
ax[0].set(xlabel='Red', ylabel='Green', xlim=(0,1), ylim=(0,1))
ax[1].scatter(R,G,color=colors, marker='.')
ax[1].set(xlabel='Red', ylabel='Blue', xlim=(0,1), ylim=(0,1))
print(fig.suptitle(title,size=20))
plot_pixels(data, title='input color space: 16 million possible colors')
import warnings; warnings.simplefilter('ignore') #fix numpy issues
from sklearn.cluster import MiniBatchKMeans
kmeans=MiniBatchKMeans(16)
kmeans.fit(data)
new_colors=kmeans.cluster_centers_[kmeans.predict(data)]
plot_pixels(data, colors=new_colors, title = "reduced color space: 16 colors")
china_recolored=new_colors.reshape(china.shape)
fig, ax = plt.subplots(1,2,figsize=(16,6),subplot_kw=dict(xticks=[],yticks=[]))
fig.subplots_adjust(wspace=0.05)
ax[0].imshow(china)
ax[0].set_title('Original image', size=16)
ax[1].imshow(china_recolored)
ax[1].set_title('16 color Image', size=16) | en | 0.414036 | # use 0...1 scale # choose random subset #fix numpy issues | 2.910125 | 3 |
dictionary.py | VersatileVishal/Dictionary | 2 | 6623427 | <reponame>VersatileVishal/Dictionary
import json
from difflib import get_close_matches
data = json.load(open("data.json"))
def translate(word):
word = word.lower()
if word in data:
return data[word]
elif word.title() in data:
return data[word.title()]
elif word.upper() in data:
return data[word.upper()]
elif len(get_close_matches(word,data.keys())) > 0:
decide="y"
while decide!="n":
print("\nHey, Did you mean %s instead ?" %get_close_matches(word,data.keys())[0])
print("Press y for Yes or n for No",end=' ')
decide=input()
if decide == "y":
return data[get_close_matches(word,data.keys())[0]]
elif decide =="n":
print("\n🔥 Buddy have entered a wrong word please check it again!!!\n")
else:
print("You have entered wrong input please enter y or n")
else:
print("\n🔥 Buddy have entered a wrong word please check it again!!!\n")
word="start"
while(word!="n"):
print("Enter the word you want to search (Type n to exit):-",end=' ' )
word = input()
if word!="n":
meanings = translate(word)
if type(meanings)==list:
print("\nMeanings:-")
index=1
for meaning in meanings:
print( str(index) + " " + meaning)
index+=1
print("---------------------------------------------------------------\n")
else:
print("Thanks for Surfing 😇")
| import json
from difflib import get_close_matches
data = json.load(open("data.json"))
def translate(word):
word = word.lower()
if word in data:
return data[word]
elif word.title() in data:
return data[word.title()]
elif word.upper() in data:
return data[word.upper()]
elif len(get_close_matches(word,data.keys())) > 0:
decide="y"
while decide!="n":
print("\nHey, Did you mean %s instead ?" %get_close_matches(word,data.keys())[0])
print("Press y for Yes or n for No",end=' ')
decide=input()
if decide == "y":
return data[get_close_matches(word,data.keys())[0]]
elif decide =="n":
print("\n🔥 Buddy have entered a wrong word please check it again!!!\n")
else:
print("You have entered wrong input please enter y or n")
else:
print("\n🔥 Buddy have entered a wrong word please check it again!!!\n")
word="start"
while(word!="n"):
print("Enter the word you want to search (Type n to exit):-",end=' ' )
word = input()
if word!="n":
meanings = translate(word)
if type(meanings)==list:
print("\nMeanings:-")
index=1
for meaning in meanings:
print( str(index) + " " + meaning)
index+=1
print("---------------------------------------------------------------\n")
else:
print("Thanks for Surfing 😇") | none | 1 | 3.704291 | 4 | |
testtf.py | MaxKinny/kaggle_SMILE-Kinship-Recognizing | 1 | 6623428 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import tensorflow as tf
sess = tf.Session()
a = tf.constant(1)
b = tf.constant(2)
print(sess.run(a+b))
| # -*- coding: utf-8 -*-
import tensorflow as tf
sess = tf.Session()
a = tf.constant(1)
b = tf.constant(2)
print(sess.run(a+b)) | en | 0.769321 | # -*- coding: utf-8 -*- | 2.849198 | 3 |
wagtail_webstories/markup.py | sebastianbenz/wagtail-webstories | 9 | 6623429 | <filename>wagtail_webstories/markup.py
import re
from django.apps import apps
from django.utils.html import escape
from django.utils.safestring import mark_safe
from wagtail.images import get_image_model_string
from wagtail.images.shortcuts import get_rendition_or_not_found
# Retrieve the (possibly custom) image model. Can't use get_image_model as of Wagtail 2.11, as we
# need require_ready=False: https://github.com/wagtail/wagtail/pull/6568
Image = apps.get_model(get_image_model_string(), require_ready=False)
def _replace_image_id(match):
try:
image = Image.objects.get(id=match.group(1))
except Image.DoesNotExist:
return ''
rendition = get_rendition_or_not_found(image, 'original')
return 'src="%s"' % escape(rendition.url)
def _replace_media_id(match):
from wagtailmedia.models import get_media_model
Media = get_media_model()
try:
media = Media.objects.get(id=match.group(1))
except Media.DoesNotExist:
return ''
return 'src="%s"' % escape(media.url)
FIND_DATA_WAGTAIL_IMAGE_ID_ATTR = re.compile(r'''\bdata-wagtail-image-id=["'](\d+)["']''')
FIND_DATA_WAGTAIL_MEDIA_ID_ATTR = re.compile(r'''\bdata-wagtail-media-id=["'](\d+)["']''')
def expand_entities(html):
"""
Expand symbolic references in a string of AMP markup - e.g. convert
data-wagtail-image-id="123" to src="/path/to/image.html"
"""
html = FIND_DATA_WAGTAIL_IMAGE_ID_ATTR.sub(_replace_image_id, html)
html = FIND_DATA_WAGTAIL_MEDIA_ID_ATTR.sub(_replace_media_id, html)
return html
class AMPText:
"""Equivalent of Wagtail's RichText - performs entity expansion when rendered"""
def __init__(self, source):
self.source = (source or '')
def __html__(self):
return expand_entities(self.source)
def __str__(self):
return mark_safe(self.__html__())
def __bool__(self):
return bool(self.source)
| <filename>wagtail_webstories/markup.py
import re
from django.apps import apps
from django.utils.html import escape
from django.utils.safestring import mark_safe
from wagtail.images import get_image_model_string
from wagtail.images.shortcuts import get_rendition_or_not_found
# Retrieve the (possibly custom) image model. Can't use get_image_model as of Wagtail 2.11, as we
# need require_ready=False: https://github.com/wagtail/wagtail/pull/6568
Image = apps.get_model(get_image_model_string(), require_ready=False)
def _replace_image_id(match):
try:
image = Image.objects.get(id=match.group(1))
except Image.DoesNotExist:
return ''
rendition = get_rendition_or_not_found(image, 'original')
return 'src="%s"' % escape(rendition.url)
def _replace_media_id(match):
from wagtailmedia.models import get_media_model
Media = get_media_model()
try:
media = Media.objects.get(id=match.group(1))
except Media.DoesNotExist:
return ''
return 'src="%s"' % escape(media.url)
FIND_DATA_WAGTAIL_IMAGE_ID_ATTR = re.compile(r'''\bdata-wagtail-image-id=["'](\d+)["']''')
FIND_DATA_WAGTAIL_MEDIA_ID_ATTR = re.compile(r'''\bdata-wagtail-media-id=["'](\d+)["']''')
def expand_entities(html):
"""
Expand symbolic references in a string of AMP markup - e.g. convert
data-wagtail-image-id="123" to src="/path/to/image.html"
"""
html = FIND_DATA_WAGTAIL_IMAGE_ID_ATTR.sub(_replace_image_id, html)
html = FIND_DATA_WAGTAIL_MEDIA_ID_ATTR.sub(_replace_media_id, html)
return html
class AMPText:
"""Equivalent of Wagtail's RichText - performs entity expansion when rendered"""
def __init__(self, source):
self.source = (source or '')
def __html__(self):
return expand_entities(self.source)
def __str__(self):
return mark_safe(self.__html__())
def __bool__(self):
return bool(self.source)
| en | 0.706935 | # Retrieve the (possibly custom) image model. Can't use get_image_model as of Wagtail 2.11, as we # need require_ready=False: https://github.com/wagtail/wagtail/pull/6568 \bdata-wagtail-image-id=["'](\d+)["'] \bdata-wagtail-media-id=["'](\d+)["'] Expand symbolic references in a string of AMP markup - e.g. convert data-wagtail-image-id="123" to src="/path/to/image.html" Equivalent of Wagtail's RichText - performs entity expansion when rendered | 2.296018 | 2 |
tkm/main_widgets.py | Inno97/tkm | 0 | 6623430 | <filename>tkm/main_widgets.py
import tkinter as tk
from tkinter import *
class GUI:
"""The GUI class for an app.
"""
def __init__(self, title, geometry, maxsize):
"""
Args:
title (string): The title of the GUI.
geometry (string): The string of 'AxB', where A and B are the X and
Y size of the GUI respectively.
maxsize (List): The Tuple of ints of the maximum X, Y size of the GUI.
"""
self.__window = Tk()
self.__title = title
self.__geometry = geometry
self.__maxsize = maxsize
self._widgets = {}
self.__set_attributes()
def add_widget(self, widget):
"""Adds a Widget object to the Frame.
"""
self._widgets[widget.get_id()] = widget
widget.setup(self.get_frame())
def __set_attributes(self):
# set the attributes of the GUI
self.set_title(self.__title)
self.set_geometry(self.__geometry)
self.set_maxsize(self.__maxsize)
self.set_unresizable()
def start(self):
self.__window.mainloop()
def exit(self):
"""Exits the application.
Program exit should be called via this.
"""
self.__window.quit()
# setters
def set_title(self, value):
self.__title = value
self.__window.title(value)
def set_geometry(self, value):
self.__geometry = value
self.__window.geometry(value)
def set_maxsize(self, value):
"""
Args:
value (List): The List of int of X, Y size.
"""
self.__maxsize = value
self.__window.maxsize(self.__maxsize[0], self.__maxsize[1])
def set_unresizable(self):
self.__window.resizable(width=False, height=False)
# getters
def get_parent_frame(self):
return self.__window
def get_widget_keys(self):
return self._widgets.keys()
def get_widget(self, widget_name):
"""
Returns:
The widget specified, or None if not found.
"""
try:
return self._widgets[widget_name]
except KeyError:
return None
def get_frame(self):
return self.__window
| <filename>tkm/main_widgets.py
import tkinter as tk
from tkinter import *
class GUI:
"""The GUI class for an app.
"""
def __init__(self, title, geometry, maxsize):
"""
Args:
title (string): The title of the GUI.
geometry (string): The string of 'AxB', where A and B are the X and
Y size of the GUI respectively.
maxsize (List): The Tuple of ints of the maximum X, Y size of the GUI.
"""
self.__window = Tk()
self.__title = title
self.__geometry = geometry
self.__maxsize = maxsize
self._widgets = {}
self.__set_attributes()
def add_widget(self, widget):
"""Adds a Widget object to the Frame.
"""
self._widgets[widget.get_id()] = widget
widget.setup(self.get_frame())
def __set_attributes(self):
# set the attributes of the GUI
self.set_title(self.__title)
self.set_geometry(self.__geometry)
self.set_maxsize(self.__maxsize)
self.set_unresizable()
def start(self):
self.__window.mainloop()
def exit(self):
"""Exits the application.
Program exit should be called via this.
"""
self.__window.quit()
# setters
def set_title(self, value):
self.__title = value
self.__window.title(value)
def set_geometry(self, value):
self.__geometry = value
self.__window.geometry(value)
def set_maxsize(self, value):
"""
Args:
value (List): The List of int of X, Y size.
"""
self.__maxsize = value
self.__window.maxsize(self.__maxsize[0], self.__maxsize[1])
def set_unresizable(self):
self.__window.resizable(width=False, height=False)
# getters
def get_parent_frame(self):
return self.__window
def get_widget_keys(self):
return self._widgets.keys()
def get_widget(self, widget_name):
"""
Returns:
The widget specified, or None if not found.
"""
try:
return self._widgets[widget_name]
except KeyError:
return None
def get_frame(self):
return self.__window
| en | 0.826123 | The GUI class for an app. Args: title (string): The title of the GUI. geometry (string): The string of 'AxB', where A and B are the X and Y size of the GUI respectively. maxsize (List): The Tuple of ints of the maximum X, Y size of the GUI. Adds a Widget object to the Frame. # set the attributes of the GUI Exits the application. Program exit should be called via this. # setters Args: value (List): The List of int of X, Y size. # getters Returns: The widget specified, or None if not found. | 4.036814 | 4 |
index/entity.py | livi2000/FundSpider | 3 | 6623431 | <reponame>livi2000/FundSpider
# -*- coding: utf-8 -*-
__author__ = 'study_sun'
import sys
from spider_base.entity import *
reload(sys)
sys.setdefaultencoding('utf-8')
#因为获取数据所限,指数没有太多数据结构
class IndexInfo(SBObject):
CODE_KEY = u'code'
CODE_CHINESE_KEY= u"指数编号"
FULL_CODE_KEY = u'full_code'
FULL_CODE_CHINESE_KEY = u'指数代码'
NAME_KEY = u'name'
NAME_CHINESE_KEY = u'指数名称'
SHORT_NAME_KEY = u'short_name'
SHORT_NAME_CHINESE_KEY = u'指数简写'
BEGIN_TIME_KEY = u'begin_time'
BEGIN_TIME_CHINESE_KEY = u'启用日期'
WEAVE_KEY = u'weave'
WEAVE_CHINESE_KEY = u'编制方案'
def __init__(self):
self.code = u'' #就是 399978
self.full_code = u'' #形如000001.XSHG,有的地方接口非要这个
self.name = u''
self.short_name = u''
self.begin_time = u''
self.weave = u'' #编制方式,一般是个url
def parse_sqlresult(self, sql_result):
self.code = sql_result[0]
self.full_code = sql_result[1]
self.name = sql_result[2]
self.short_name = sql_result[3]
self.begin_time = sql_result[4]
self.weave = sql_result[5]
@classmethod
def all_keys(cls):
return [IndexInfo.CODE_KEY, IndexInfo.FULL_CODE_KEY, IndexInfo.NAME_KEY, IndexInfo.SHORT_NAME_KEY, IndexInfo.BEGIN_TIME_KEY, IndexInfo.WEAVE_KEY]
@classmethod
def all_desc_keys(cls):
return [IndexInfo.CODE_CHINESE_KEY, IndexInfo.FULL_CODE_CHINESE_KEY, IndexInfo.NAME_CHINESE_KEY, IndexInfo.SHORT_NAME_CHINESE_KEY, IndexInfo.BEGIN_TIME_CHINESE_KEY, IndexInfo.WEAVE_CHINESE_KEY]
#指数成分股,处于合理性考虑,不可能将一个指数的每日的成分股都记录下来,过于冗余了,目前的想法有两个,一种是记录每个成分股的纳入和剔除日期
#一种是记录成分股变化日及所有的成分股
class IndexConstituent(SBObject):
DATE_KEY = u'c_date'
DATE_CHINESE_KEY = u''
CONSTITUENTS_KEY = u'constituents'
CONSTITUENTS_CHINESE_KEY = u'成分股列表'
def __init__(self):
self.c_date = ''
self.constituents = []
def parse_sqlresult(self, sql_result):
self.c_date = sql_result[0]
self.constituents = sql_result[1]
@classmethod
def all_keys(cls):
return [IndexConstituent.DATE_KEY, IndexConstituent.CONSTITUENTS_KEY]
@classmethod
def all_desc_keys(cls):
return [IndexConstituent.DATE_CHINESE_KEY, IndexConstituent.CONSTITUENTS_CHINESE_KEY]
| # -*- coding: utf-8 -*-
__author__ = 'study_sun'
import sys
from spider_base.entity import *
reload(sys)
sys.setdefaultencoding('utf-8')
#因为获取数据所限,指数没有太多数据结构
class IndexInfo(SBObject):
CODE_KEY = u'code'
CODE_CHINESE_KEY= u"指数编号"
FULL_CODE_KEY = u'full_code'
FULL_CODE_CHINESE_KEY = u'指数代码'
NAME_KEY = u'name'
NAME_CHINESE_KEY = u'指数名称'
SHORT_NAME_KEY = u'short_name'
SHORT_NAME_CHINESE_KEY = u'指数简写'
BEGIN_TIME_KEY = u'begin_time'
BEGIN_TIME_CHINESE_KEY = u'启用日期'
WEAVE_KEY = u'weave'
WEAVE_CHINESE_KEY = u'编制方案'
def __init__(self):
self.code = u'' #就是 399978
self.full_code = u'' #形如000001.XSHG,有的地方接口非要这个
self.name = u''
self.short_name = u''
self.begin_time = u''
self.weave = u'' #编制方式,一般是个url
def parse_sqlresult(self, sql_result):
self.code = sql_result[0]
self.full_code = sql_result[1]
self.name = sql_result[2]
self.short_name = sql_result[3]
self.begin_time = sql_result[4]
self.weave = sql_result[5]
@classmethod
def all_keys(cls):
return [IndexInfo.CODE_KEY, IndexInfo.FULL_CODE_KEY, IndexInfo.NAME_KEY, IndexInfo.SHORT_NAME_KEY, IndexInfo.BEGIN_TIME_KEY, IndexInfo.WEAVE_KEY]
@classmethod
def all_desc_keys(cls):
return [IndexInfo.CODE_CHINESE_KEY, IndexInfo.FULL_CODE_CHINESE_KEY, IndexInfo.NAME_CHINESE_KEY, IndexInfo.SHORT_NAME_CHINESE_KEY, IndexInfo.BEGIN_TIME_CHINESE_KEY, IndexInfo.WEAVE_CHINESE_KEY]
#指数成分股,处于合理性考虑,不可能将一个指数的每日的成分股都记录下来,过于冗余了,目前的想法有两个,一种是记录每个成分股的纳入和剔除日期
#一种是记录成分股变化日及所有的成分股
class IndexConstituent(SBObject):
DATE_KEY = u'c_date'
DATE_CHINESE_KEY = u''
CONSTITUENTS_KEY = u'constituents'
CONSTITUENTS_CHINESE_KEY = u'成分股列表'
def __init__(self):
self.c_date = ''
self.constituents = []
def parse_sqlresult(self, sql_result):
self.c_date = sql_result[0]
self.constituents = sql_result[1]
@classmethod
def all_keys(cls):
return [IndexConstituent.DATE_KEY, IndexConstituent.CONSTITUENTS_KEY]
@classmethod
def all_desc_keys(cls):
return [IndexConstituent.DATE_CHINESE_KEY, IndexConstituent.CONSTITUENTS_CHINESE_KEY] | zh | 0.973458 | # -*- coding: utf-8 -*- #因为获取数据所限,指数没有太多数据结构 #就是 399978 #形如000001.XSHG,有的地方接口非要这个 #编制方式,一般是个url #指数成分股,处于合理性考虑,不可能将一个指数的每日的成分股都记录下来,过于冗余了,目前的想法有两个,一种是记录每个成分股的纳入和剔除日期 #一种是记录成分股变化日及所有的成分股 | 2.345886 | 2 |
apisql/blueprint.py | dataspot/apisql | 0 | 6623432 | import codecs
import os
import csv
import urllib
import tempfile
from io import StringIO
from flask import Blueprint, Response, request, send_file, abort
from flask_jsonpify import jsonpify
import xlsxwriter
from .controllers import Controllers
from .logger import logger, logging
MAX_ROWS = int(os.environ.get('APISQL__MAX_ROWS', 100))
CONNECTION_STRING = os.environ.get('APISQL__DATABASE_URL')
class APISQLBlueprint(Blueprint):
def __init__(self, connection_string=CONNECTION_STRING, max_rows=MAX_ROWS, debug=False):
super().__init__('apisql', 'apisql')
self.controllers = Controllers(
connection_string, max_rows, debug
)
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
self.max_rows = max_rows
self.add_url_rule(
'/query',
'query',
self.query,
methods=['GET']
)
self.add_url_rule(
'/download',
'download',
self.download,
methods=['GET']
)
def query(self):
results = dict(total=0, rows=[])
if not self.detect_bot():
try:
num_rows = int(request.values.get('num_rows', self.max_rows))
page_size = int(request.values.get('page_size', num_rows))
page = int(request.values.get('page', 0))
except Exception:
abort(400)
sql = request.values.get('query')
try:
sql = codecs.decode(sql.encode('ascii'), 'base64').decode('utf8')
except Exception:
pass
results = self.controllers.query_db(sql, num_rows=num_rows, page_size=page_size, page=page)
return jsonpify(results)
def download(self):
format = request.values.get('format', 'csv')
file_name = request.values.get('filename')
# Create a default value here in case this parameter is not provided
if file_name is None:
file_name = 'query-results'
formatters = request.values.get('headers').split(';')
if format not in ('csv', 'xlsx'):
abort(400)
if self.detect_bot():
headers = {
'Content-Type': 'text/csv',
'Content-Disposition': 'attachment; filename=bot-detected.csv'
}
return Response('', content_type='text/csv', headers=headers)
mime = {
'csv': 'text/csv',
'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
}[format]
sql = request.values.get('query')
try:
sql = codecs.decode(sql.encode('ascii'), 'base64').decode('utf8')
except Exception:
pass
results = self.controllers.query_db_streaming(sql, formatters)
if format == 'csv':
def generate():
buffer = StringIO()
writer = csv.writer(buffer)
for row in results:
writer.writerow(row)
pos = buffer.tell()
buffer.seek(0)
ret = buffer.read(pos)
buffer.seek(0)
yield ret
# Encode the filename in utf-8 and url encoding
file_name_utf8_encoded = file_name.encode('utf-8')
file_name_url_encoded = urllib.parse.quote(file_name_utf8_encoded)
headers = {
'Content-Type': mime,
'Content-Disposition': 'attachment; filename=' + file_name_url_encoded + '.csv'
}
return Response(generate(),
content_type='text/csv', headers=headers)
if format == 'xlsx':
with tempfile.NamedTemporaryFile(mode='w+b', suffix='.xlsx') as out:
try:
workbook = xlsxwriter.Workbook(out.name)
worksheet = workbook.add_worksheet()
for i, row in enumerate(results):
for j, v in enumerate(row):
if v is not None:
try:
worksheet.write_number(i, j, float(v))
except ValueError:
worksheet.write(i, j, str(v))
finally:
workbook.close()
return send_file(out.name, mimetype=mime, as_attachment=True, attachment_filename=file_name + '.xlsx')
def detect_bot(self):
if request.user_agent.browser in ('google', 'aol', 'baidu', 'bing', 'yahoo'):
logger.info('Bot detected %s: %s', request.user_agent.string, request.user_agent.browser)
elif any(x in request.user_agent.string.lower() for x in ('applebot', 'yandexbot', 'petalbot')):
logger.info('Bot detected %s: %s', request.user_agent.string, request.user_agent.browser)
else:
return False
return True
| import codecs
import os
import csv
import urllib
import tempfile
from io import StringIO
from flask import Blueprint, Response, request, send_file, abort
from flask_jsonpify import jsonpify
import xlsxwriter
from .controllers import Controllers
from .logger import logger, logging
MAX_ROWS = int(os.environ.get('APISQL__MAX_ROWS', 100))
CONNECTION_STRING = os.environ.get('APISQL__DATABASE_URL')
class APISQLBlueprint(Blueprint):
def __init__(self, connection_string=CONNECTION_STRING, max_rows=MAX_ROWS, debug=False):
super().__init__('apisql', 'apisql')
self.controllers = Controllers(
connection_string, max_rows, debug
)
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
self.max_rows = max_rows
self.add_url_rule(
'/query',
'query',
self.query,
methods=['GET']
)
self.add_url_rule(
'/download',
'download',
self.download,
methods=['GET']
)
def query(self):
results = dict(total=0, rows=[])
if not self.detect_bot():
try:
num_rows = int(request.values.get('num_rows', self.max_rows))
page_size = int(request.values.get('page_size', num_rows))
page = int(request.values.get('page', 0))
except Exception:
abort(400)
sql = request.values.get('query')
try:
sql = codecs.decode(sql.encode('ascii'), 'base64').decode('utf8')
except Exception:
pass
results = self.controllers.query_db(sql, num_rows=num_rows, page_size=page_size, page=page)
return jsonpify(results)
def download(self):
format = request.values.get('format', 'csv')
file_name = request.values.get('filename')
# Create a default value here in case this parameter is not provided
if file_name is None:
file_name = 'query-results'
formatters = request.values.get('headers').split(';')
if format not in ('csv', 'xlsx'):
abort(400)
if self.detect_bot():
headers = {
'Content-Type': 'text/csv',
'Content-Disposition': 'attachment; filename=bot-detected.csv'
}
return Response('', content_type='text/csv', headers=headers)
mime = {
'csv': 'text/csv',
'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
}[format]
sql = request.values.get('query')
try:
sql = codecs.decode(sql.encode('ascii'), 'base64').decode('utf8')
except Exception:
pass
results = self.controllers.query_db_streaming(sql, formatters)
if format == 'csv':
def generate():
buffer = StringIO()
writer = csv.writer(buffer)
for row in results:
writer.writerow(row)
pos = buffer.tell()
buffer.seek(0)
ret = buffer.read(pos)
buffer.seek(0)
yield ret
# Encode the filename in utf-8 and url encoding
file_name_utf8_encoded = file_name.encode('utf-8')
file_name_url_encoded = urllib.parse.quote(file_name_utf8_encoded)
headers = {
'Content-Type': mime,
'Content-Disposition': 'attachment; filename=' + file_name_url_encoded + '.csv'
}
return Response(generate(),
content_type='text/csv', headers=headers)
if format == 'xlsx':
with tempfile.NamedTemporaryFile(mode='w+b', suffix='.xlsx') as out:
try:
workbook = xlsxwriter.Workbook(out.name)
worksheet = workbook.add_worksheet()
for i, row in enumerate(results):
for j, v in enumerate(row):
if v is not None:
try:
worksheet.write_number(i, j, float(v))
except ValueError:
worksheet.write(i, j, str(v))
finally:
workbook.close()
return send_file(out.name, mimetype=mime, as_attachment=True, attachment_filename=file_name + '.xlsx')
def detect_bot(self):
if request.user_agent.browser in ('google', 'aol', 'baidu', 'bing', 'yahoo'):
logger.info('Bot detected %s: %s', request.user_agent.string, request.user_agent.browser)
elif any(x in request.user_agent.string.lower() for x in ('applebot', 'yandexbot', 'petalbot')):
logger.info('Bot detected %s: %s', request.user_agent.string, request.user_agent.browser)
else:
return False
return True
| en | 0.567026 | # Create a default value here in case this parameter is not provided # Encode the filename in utf-8 and url encoding | 2.541344 | 3 |
bullhorn/pipeline_methods/pre.py | jjorissen52/python-bullhorn | 0 | 6623433 | import ast
import time
import logging
from bullhorn.api.exceptions import APICallError
REST_API_PARAMS = "command method entity select_fields start sort count query entity_id_str body where".split(" ")
VALID_COMMANDS = ['search', 'query', 'entity', 'entityFiles']
ENTITY_ID_REQUIRED_METHODS = ['UPDATE', 'DELETE']
VALID_METHODS = ['GET', 'POST'] + ENTITY_ID_REQUIRED_METHODS
def keep_authenticated(params):
"""
"""
request_start = time.time()
self = params.get('self')
auth_details = self.auth_details
expiration_time = auth_details.get("expiration_time", 0) if auth_details else 0
if self.leader and not auth_details:
self.authenticate()
auth_details = self.auth_details
else:
retries = 10
while retries:
if expiration_time - request_start <= 0:
time.sleep(1)
auth_details = self.auth_details
if auth_details:
break
retries -= 1
return params
def clean_api_call_input(params):
problems = []
command, method, entity = params.get('command', None), params.get('method', None), params.get('entity', None)
select_fields, query, body = params.get('select_fields', None), params.get('query', None), params.get('body', '')
entity_id = params.pop('entity_id', None)
entity_id_str = f'/{entity_id}' if entity_id else ''
if method and method.upper() in ENTITY_ID_REQUIRED_METHODS and not entity_id:
problems.append(f"entity_id is a required field for all {ENTITY_ID_REQUIRED_METHODS} methods.")
if command and command.lower() != 'query':
for param in params.keys():
if param not in REST_API_PARAMS and param != 'self':
logging.warning(f'{param} is not an acceptable api parameter. '
f'You may only filter by keyword arguments when using the query command.')
elif command:
if 'where' not in params:
problems.append('where is a required argument for the query command. It cannot be none.')
if command and command.lower() == 'search':
if 'query' not in params:
problems.append("query is a required argument when using the search command.")
if command and command.lower() == 'entity' and method.upper() != 'CREATE' and not entity_id:
problems.append("entity_id is a required argument when attempting to access existing records.")
if not command or not command.lower() in VALID_COMMANDS:
problems.append(f"{command} is not a valid command. Valid commands are {VALID_COMMANDS}")
if not method or not method.upper() in VALID_METHODS:
problems.append(f"{command} is not a valid method. Valid methods are {VALID_METHODS}")
if not entity:
problems.append(f"{entity} is not a valid entity.")
if not select_fields or not isinstance(select_fields, (str, list)):
problems.append(f"{select_fields} is not a valid argument for select_fields. Must be a str or list")
else:
if isinstance(select_fields, list):
select_fields = ','.join(select_fields)
select_fields = select_fields.replace(' ', '')
if problems:
raise APICallError("\n".join(problems))
params.update({"command": command.lower(), "method": method.upper()})
params.update({"entity_id_str": entity_id_str})
params.update({'select_fields': select_fields})
return params
def clean_api_search_input(params):
required_params = "entity query select_fields".split(" ")
if not all(required in params for required in required_params):
raise APICallError("search command requires entity, query, and select_fields are required arguments.")
return params
def translate_kwargs_to_query(params):
mapping = {'gt': '{}>{}', 'gte': '{}>={}', 'lt': '{}<{}', 'lte': '{}<={}', 'to': '{}:[{} TO {}]', 'eq': '{}:{}',
'ne': 'NOT {}:{}'}
supported_comparisons = ['gt', 'gte', 'lt', 'lte', 'to', 'eq', 'ne']
implicit_and = []
for param in params:
if param not in REST_API_PARAMS:
field, comparison = param, 'eq'
if len(param.split('__')) == 2 and param.split('__')[-1] in supported_comparisons:
param, comparison = param.split('__')[0], param.split('__')[-1]
if comparison not in ['ne', 'to']:
implicit_and.append(mapping[comparison].format(field, params.get(param)))
elif comparison == 'to':
to_list = ast.literal_eval(params.get(param))
if not isinstance(to_list, list):
raise APICallError(f'{param} should be a list of two elements, cannot be {params.get(param)}. '
f'Ex: {param}=[1, 2]')
# implicit_and.append()
raise NotImplementedError('interrupted')
| import ast
import time
import logging
from bullhorn.api.exceptions import APICallError
REST_API_PARAMS = "command method entity select_fields start sort count query entity_id_str body where".split(" ")
VALID_COMMANDS = ['search', 'query', 'entity', 'entityFiles']
ENTITY_ID_REQUIRED_METHODS = ['UPDATE', 'DELETE']
VALID_METHODS = ['GET', 'POST'] + ENTITY_ID_REQUIRED_METHODS
def keep_authenticated(params):
"""
"""
request_start = time.time()
self = params.get('self')
auth_details = self.auth_details
expiration_time = auth_details.get("expiration_time", 0) if auth_details else 0
if self.leader and not auth_details:
self.authenticate()
auth_details = self.auth_details
else:
retries = 10
while retries:
if expiration_time - request_start <= 0:
time.sleep(1)
auth_details = self.auth_details
if auth_details:
break
retries -= 1
return params
def clean_api_call_input(params):
problems = []
command, method, entity = params.get('command', None), params.get('method', None), params.get('entity', None)
select_fields, query, body = params.get('select_fields', None), params.get('query', None), params.get('body', '')
entity_id = params.pop('entity_id', None)
entity_id_str = f'/{entity_id}' if entity_id else ''
if method and method.upper() in ENTITY_ID_REQUIRED_METHODS and not entity_id:
problems.append(f"entity_id is a required field for all {ENTITY_ID_REQUIRED_METHODS} methods.")
if command and command.lower() != 'query':
for param in params.keys():
if param not in REST_API_PARAMS and param != 'self':
logging.warning(f'{param} is not an acceptable api parameter. '
f'You may only filter by keyword arguments when using the query command.')
elif command:
if 'where' not in params:
problems.append('where is a required argument for the query command. It cannot be none.')
if command and command.lower() == 'search':
if 'query' not in params:
problems.append("query is a required argument when using the search command.")
if command and command.lower() == 'entity' and method.upper() != 'CREATE' and not entity_id:
problems.append("entity_id is a required argument when attempting to access existing records.")
if not command or not command.lower() in VALID_COMMANDS:
problems.append(f"{command} is not a valid command. Valid commands are {VALID_COMMANDS}")
if not method or not method.upper() in VALID_METHODS:
problems.append(f"{command} is not a valid method. Valid methods are {VALID_METHODS}")
if not entity:
problems.append(f"{entity} is not a valid entity.")
if not select_fields or not isinstance(select_fields, (str, list)):
problems.append(f"{select_fields} is not a valid argument for select_fields. Must be a str or list")
else:
if isinstance(select_fields, list):
select_fields = ','.join(select_fields)
select_fields = select_fields.replace(' ', '')
if problems:
raise APICallError("\n".join(problems))
params.update({"command": command.lower(), "method": method.upper()})
params.update({"entity_id_str": entity_id_str})
params.update({'select_fields': select_fields})
return params
def clean_api_search_input(params):
required_params = "entity query select_fields".split(" ")
if not all(required in params for required in required_params):
raise APICallError("search command requires entity, query, and select_fields are required arguments.")
return params
def translate_kwargs_to_query(params):
mapping = {'gt': '{}>{}', 'gte': '{}>={}', 'lt': '{}<{}', 'lte': '{}<={}', 'to': '{}:[{} TO {}]', 'eq': '{}:{}',
'ne': 'NOT {}:{}'}
supported_comparisons = ['gt', 'gte', 'lt', 'lte', 'to', 'eq', 'ne']
implicit_and = []
for param in params:
if param not in REST_API_PARAMS:
field, comparison = param, 'eq'
if len(param.split('__')) == 2 and param.split('__')[-1] in supported_comparisons:
param, comparison = param.split('__')[0], param.split('__')[-1]
if comparison not in ['ne', 'to']:
implicit_and.append(mapping[comparison].format(field, params.get(param)))
elif comparison == 'to':
to_list = ast.literal_eval(params.get(param))
if not isinstance(to_list, list):
raise APICallError(f'{param} should be a list of two elements, cannot be {params.get(param)}. '
f'Ex: {param}=[1, 2]')
# implicit_and.append()
raise NotImplementedError('interrupted')
| ro | 0.095283 | # implicit_and.append() | 2.344216 | 2 |
src/cagefight/cagefighter.py | tgates42/cagefight | 0 | 6623434 | """
The base fighter implementation
"""
from __future__ import absolute_import, print_function, division
import os
import json
class CageFighter(object):
"""
Base fighter implementation
"""
colours = [
(55, 255, 255, 255),
(255, 55, 255, 255),
(255, 255, 55, 255),
(55, 55, 255, 255),
(255, 55, 55, 255),
(55, 255, 55, 255),
]
def __init__(self, world, fighterid):
self.world = world
self.fighterid = fighterid
def start(self):
"""
Called prior to the first render to prepare the starting state.
"""
pass
def next(self, filepath):
"""
Progress the game state to the next tick.
"""
pass
def save(self):
"""
Override to save details of current fighter with total knowledge
"""
raise NotImplementedError('Override to save fighter')
def save_view(self):
"""
Override to save details of current fighter with fighter knowledge,
default implementation assumes total knowledge
"""
return self.save()
def load(self, jsonobj):
"""
Override to load details of current fighter
"""
raise NotImplementedError('Override to load fighter')
def render(self, im):
"""
Render the display to an image for the provided game mp4 output
"""
raise NotImplementedError('Override to draw fighter')
def name(self):
"""
Override to name fighters
"""
raise NotImplementedError('Override to name fighter')
def text_result(self):
"""
Override to provide fighter result
"""
raise NotImplementedError('Override to provide fighter result')
def get_instructions(self, filepath):
"""
Load instructions from the filepath
"""
if not os.path.isfile(filepath):
return {}
with open(filepath) as fobj:
return json.load(fobj)
def csv_header(self):
"""
Can be overriden for a custom result
"""
return ['Name', 'Result']
def csv_result(self):
"""
Can be overriden for a custom result
"""
return [self.name(), self.text_result()]
| """
The base fighter implementation
"""
from __future__ import absolute_import, print_function, division
import os
import json
class CageFighter(object):
"""
Base fighter implementation
"""
colours = [
(55, 255, 255, 255),
(255, 55, 255, 255),
(255, 255, 55, 255),
(55, 55, 255, 255),
(255, 55, 55, 255),
(55, 255, 55, 255),
]
def __init__(self, world, fighterid):
self.world = world
self.fighterid = fighterid
def start(self):
"""
Called prior to the first render to prepare the starting state.
"""
pass
def next(self, filepath):
"""
Progress the game state to the next tick.
"""
pass
def save(self):
"""
Override to save details of current fighter with total knowledge
"""
raise NotImplementedError('Override to save fighter')
def save_view(self):
"""
Override to save details of current fighter with fighter knowledge,
default implementation assumes total knowledge
"""
return self.save()
def load(self, jsonobj):
"""
Override to load details of current fighter
"""
raise NotImplementedError('Override to load fighter')
def render(self, im):
"""
Render the display to an image for the provided game mp4 output
"""
raise NotImplementedError('Override to draw fighter')
def name(self):
"""
Override to name fighters
"""
raise NotImplementedError('Override to name fighter')
def text_result(self):
"""
Override to provide fighter result
"""
raise NotImplementedError('Override to provide fighter result')
def get_instructions(self, filepath):
"""
Load instructions from the filepath
"""
if not os.path.isfile(filepath):
return {}
with open(filepath) as fobj:
return json.load(fobj)
def csv_header(self):
"""
Can be overriden for a custom result
"""
return ['Name', 'Result']
def csv_result(self):
"""
Can be overriden for a custom result
"""
return [self.name(), self.text_result()]
| en | 0.807011 | The base fighter implementation Base fighter implementation Called prior to the first render to prepare the starting state. Progress the game state to the next tick. Override to save details of current fighter with total knowledge Override to save details of current fighter with fighter knowledge, default implementation assumes total knowledge Override to load details of current fighter Render the display to an image for the provided game mp4 output Override to name fighters Override to provide fighter result Load instructions from the filepath Can be overriden for a custom result Can be overriden for a custom result | 3.014819 | 3 |
Convert Sorted List to Binary Search Tree.py | sugia/leetcode | 0 | 6623435 | <filename>Convert Sorted List to Binary Search Tree.py
'''
Given a singly linked list where elements are sorted in ascending order, convert it to a height balanced BST.
For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
Example:
Given the sorted linked list: [-10,-3,0,5,9],
One possible answer is: [0,-3,9,-10,null,5], which represents the following height balanced BST:
0
/ \
-3 9
/ /
-10 5
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sortedListToBST(self, head):
"""
:type head: ListNode
:rtype: TreeNode
"""
data = []
while head:
data.append(head.val)
head = head.next
return self.build(data, 0, len(data) - 1)
def build(self, data, left, right):
if left > right:
return None
mid = (left + right) // 2
node = TreeNode(data[mid])
node.left = self.build(data, left, mid - 1)
node.right = self.build(data, mid + 1, right)
return node
| <filename>Convert Sorted List to Binary Search Tree.py
'''
Given a singly linked list where elements are sorted in ascending order, convert it to a height balanced BST.
For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
Example:
Given the sorted linked list: [-10,-3,0,5,9],
One possible answer is: [0,-3,9,-10,null,5], which represents the following height balanced BST:
0
/ \
-3 9
/ /
-10 5
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sortedListToBST(self, head):
"""
:type head: ListNode
:rtype: TreeNode
"""
data = []
while head:
data.append(head.val)
head = head.next
return self.build(data, 0, len(data) - 1)
def build(self, data, left, right):
if left > right:
return None
mid = (left + right) // 2
node = TreeNode(data[mid])
node.left = self.build(data, left, mid - 1)
node.right = self.build(data, mid + 1, right)
return node
| en | 0.762339 | Given a singly linked list where elements are sorted in ascending order, convert it to a height balanced BST. For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1. Example: Given the sorted linked list: [-10,-3,0,5,9], One possible answer is: [0,-3,9,-10,null,5], which represents the following height balanced BST: 0 / \ -3 9 / / -10 5 # Definition for singly-linked list. # class ListNode(object): # def __init__(self, x): # self.val = x # self.next = None # Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None :type head: ListNode :rtype: TreeNode | 4.227838 | 4 |
openstack_app/bin/api/authentication.py | GSLabDev/openstack-app-for-splunk | 2 | 6623436 | <gh_stars>1-10
'''
Openstack App for Splunk
Copyright (c) 2017, Great Software Laboratory Private Limited.
All rights reserved.
Contributor: <NAME> [<EMAIL>], <NAME> [<EMAIL>]
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the "Great Software Laboratory Private Limited" nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
#!/usr/bin/python
'''
This script performs authentication and return authentication token and services information
Author: <NAME>, GSLab
'''
#Import from standard libraries
import sys
import argparse
import requests
import ConfigParser
import os
import io
def login(user_name,password):
#Variable declaration
headers = {'content-type': 'application/json'}
auth_token = None
nova_url = None
auth_response = None
auth_token = None
Config = ConfigParser.ConfigParser()
PATH = os.path.dirname(os.path.realpath(__file__))
with io.open(PATH+"/./../../local/myconf.conf", 'r', encoding='utf_8_sig') as fp:
Config.readfp(fp)
for section in Config.sections():
if section == 'userinfo':
for option in Config.options(section):
if option == 'baseurl':
base_url = Config.get(section,option)
if option == 'tenant':
tenant = Config.get(section,option)
try:
auth_request = ('{ "auth": {"identity": {"methods": ["password"],"password": {"user": {"name": "' + user_name + '","domain": { "id": "default" },"password": "' + password + '"}}},"scope": {"project": {"name": "admin","domain": { "id": "default" }}}}}')
auth_response = requests.post(base_url + '/auth/tokens', data=auth_request,headers=headers);
auth_response_body = auth_response.json();
subject_token = auth_response.headers["x-subject-token"]
auth_token = auth_response.headers["x-subject-token"]
if not auth_response_body['token']['user']['id']:
raise Exception("Authentication failed. Failed to get an auth token.")
except Exception as e:
print ('WARNING: Athentication failed for tenant %s and user %s'
% (tenant, user_name) + '\nInfo: ' + str(e))
return auth_token,auth_response_body
| '''
Openstack App for Splunk
Copyright (c) 2017, Great Software Laboratory Private Limited.
All rights reserved.
Contributor: <NAME> [<EMAIL>], <NAME> [<EMAIL>]
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the "Great Software Laboratory Private Limited" nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
#!/usr/bin/python
'''
This script performs authentication and return authentication token and services information
Author: <NAME>, GSLab
'''
#Import from standard libraries
import sys
import argparse
import requests
import ConfigParser
import os
import io
def login(user_name,password):
#Variable declaration
headers = {'content-type': 'application/json'}
auth_token = None
nova_url = None
auth_response = None
auth_token = None
Config = ConfigParser.ConfigParser()
PATH = os.path.dirname(os.path.realpath(__file__))
with io.open(PATH+"/./../../local/myconf.conf", 'r', encoding='utf_8_sig') as fp:
Config.readfp(fp)
for section in Config.sections():
if section == 'userinfo':
for option in Config.options(section):
if option == 'baseurl':
base_url = Config.get(section,option)
if option == 'tenant':
tenant = Config.get(section,option)
try:
auth_request = ('{ "auth": {"identity": {"methods": ["password"],"password": {"user": {"name": "' + user_name + '","domain": { "id": "default" },"password": "' + password + '"}}},"scope": {"project": {"name": "admin","domain": { "id": "default" }}}}}')
auth_response = requests.post(base_url + '/auth/tokens', data=auth_request,headers=headers);
auth_response_body = auth_response.json();
subject_token = auth_response.headers["x-subject-token"]
auth_token = auth_response.headers["x-subject-token"]
if not auth_response_body['token']['user']['id']:
raise Exception("Authentication failed. Failed to get an auth token.")
except Exception as e:
print ('WARNING: Athentication failed for tenant %s and user %s'
% (tenant, user_name) + '\nInfo: ' + str(e))
return auth_token,auth_response_body | en | 0.726815 | Openstack App for Splunk Copyright (c) 2017, Great Software Laboratory Private Limited. All rights reserved. Contributor: <NAME> [<EMAIL>], <NAME> [<EMAIL>] Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the "Great Software Laboratory Private Limited" nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #!/usr/bin/python This script performs authentication and return authentication token and services information Author: <NAME>, GSLab #Import from standard libraries #Variable declaration | 1.139701 | 1 |
sacrerouge/data/eval_instance.py | danieldeutsch/decomposed-rouge | 81 | 6623437 | <gh_stars>10-100
import jsons
from sacrerouge.data.fields import Fields
class EvalInstance(object):
def __init__(self,
instance_id: str,
summarizer_id: str,
summarizer_type: str,
fields: Fields) -> None:
self.instance_id = instance_id
self.summarizer_id = summarizer_id
self.summarizer_type = summarizer_type
self.fields = fields
def __repr__(self) -> str:
return jsons.dumps(self)
| import jsons
from sacrerouge.data.fields import Fields
class EvalInstance(object):
def __init__(self,
instance_id: str,
summarizer_id: str,
summarizer_type: str,
fields: Fields) -> None:
self.instance_id = instance_id
self.summarizer_id = summarizer_id
self.summarizer_type = summarizer_type
self.fields = fields
def __repr__(self) -> str:
return jsons.dumps(self) | none | 1 | 2.530169 | 3 | |
report/insert/fits/fit.py | DunstanBecht/lpa-workspace | 0 | 6623438 | #!/usr/bin/env python
# coding: utf-8
from lpa.output import analyze
stm = '100_rho5e13m-2_square_3200nm_RDD_d5e-5nm-2_edge_S0_PBC1_output'
ttl = r"100 RDD $ \left( d = 5 \times 10^{-4} \mathrm{nm^{-2}} \right) $"
impdir = '../output'
analyze.export(stm, impdir=impdir, figttl=ttl, fmtfit='pdf')
| #!/usr/bin/env python
# coding: utf-8
from lpa.output import analyze
stm = '100_rho5e13m-2_square_3200nm_RDD_d5e-5nm-2_edge_S0_PBC1_output'
ttl = r"100 RDD $ \left( d = 5 \times 10^{-4} \mathrm{nm^{-2}} \right) $"
impdir = '../output'
analyze.export(stm, impdir=impdir, figttl=ttl, fmtfit='pdf')
| en | 0.325294 | #!/usr/bin/env python # coding: utf-8 | 1.474797 | 1 |
soiq_keys.py | lizmat/soiqbot | 9 | 6623439 | twitter_consumer_key = ''
twitter_consumer_secret = ''
twitter_token_key = ''
twitter_token_secret = ''
so_client_secret = ''
so_key = ''
| twitter_consumer_key = ''
twitter_consumer_secret = ''
twitter_token_key = ''
twitter_token_secret = ''
so_client_secret = ''
so_key = ''
| none | 1 | 1.013278 | 1 | |
rpython/translator/platform/freebsd.py | nanjekyejoannah/pypy | 381 | 6623440 | """Support for FreeBSD."""
import os
from rpython.translator.platform.bsd import BSD
class Freebsd(BSD):
name = "freebsd"
link_flags = tuple(
['-pthread'] +
os.environ.get('LDFLAGS', '').split())
cflags = tuple(
['-O3', '-pthread', '-fomit-frame-pointer'] +
os.environ.get('CFLAGS', '').split())
rpath_flags = ['-Wl,-rpath=\'$$ORIGIN/\'', '-Wl,-z,origin']
class Freebsd_64(Freebsd):
shared_only = ('-fPIC',)
class GNUkFreebsd(Freebsd):
DEFAULT_CC = 'cc'
extra_libs = ('-lrt',)
class GNUkFreebsd_64(Freebsd_64):
DEFAULT_CC = 'cc'
extra_libs = ('-lrt',)
| """Support for FreeBSD."""
import os
from rpython.translator.platform.bsd import BSD
class Freebsd(BSD):
name = "freebsd"
link_flags = tuple(
['-pthread'] +
os.environ.get('LDFLAGS', '').split())
cflags = tuple(
['-O3', '-pthread', '-fomit-frame-pointer'] +
os.environ.get('CFLAGS', '').split())
rpath_flags = ['-Wl,-rpath=\'$$ORIGIN/\'', '-Wl,-z,origin']
class Freebsd_64(Freebsd):
shared_only = ('-fPIC',)
class GNUkFreebsd(Freebsd):
DEFAULT_CC = 'cc'
extra_libs = ('-lrt',)
class GNUkFreebsd_64(Freebsd_64):
DEFAULT_CC = 'cc'
extra_libs = ('-lrt',)
| en | 0.766899 | Support for FreeBSD. | 2.048373 | 2 |
tests/test_field.py | saxix/django-regex | 3 | 6623441 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import re
import pytest
from django.core.exceptions import ValidationError
from django.db import connection
from django_regex.fields import RegexField
from django_regex.forms import RegexFormField, compress
pytestmark = pytest.mark.django_db
def test_type():
f = RegexField()
assert f.db_parameters(connection)['type'] == 'text'
def test_formfield():
f = RegexField()
assert isinstance(f.formfield(), RegexFormField)
def test_validation():
f = RegexField()
with pytest.raises(ValidationError):
f.clean(None, None)
with pytest.raises(ValidationError):
f.clean('*', None)
assert f.clean('.*', None)
def test_flags():
f = RegexField()
regex = f.clean('abc', None)
assert regex.match('abc')
assert not regex.match('ABC')
f = RegexField(flags=re.I)
regex = f.clean('abc', None)
assert regex.match('abc')
assert regex.match('ABC')
f = RegexField(flags=re.I)
regex = f.clean(compress(['abc', '32']), None)
assert regex.match('abc')
assert not regex.match('ABC')
f = RegexField()
regex = f.clean(compress(['abc', '2']), None)
assert regex.match('abc')
assert regex.match('ABC')
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import re
import pytest
from django.core.exceptions import ValidationError
from django.db import connection
from django_regex.fields import RegexField
from django_regex.forms import RegexFormField, compress
pytestmark = pytest.mark.django_db
def test_type():
f = RegexField()
assert f.db_parameters(connection)['type'] == 'text'
def test_formfield():
f = RegexField()
assert isinstance(f.formfield(), RegexFormField)
def test_validation():
f = RegexField()
with pytest.raises(ValidationError):
f.clean(None, None)
with pytest.raises(ValidationError):
f.clean('*', None)
assert f.clean('.*', None)
def test_flags():
f = RegexField()
regex = f.clean('abc', None)
assert regex.match('abc')
assert not regex.match('ABC')
f = RegexField(flags=re.I)
regex = f.clean('abc', None)
assert regex.match('abc')
assert regex.match('ABC')
f = RegexField(flags=re.I)
regex = f.clean(compress(['abc', '32']), None)
assert regex.match('abc')
assert not regex.match('ABC')
f = RegexField()
regex = f.clean(compress(['abc', '2']), None)
assert regex.match('abc')
assert regex.match('ABC')
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.202442 | 2 |
Lang/Python/py_base/data_structure/graph/graph.py | Orig5826/Basics | 5 | 6623442 |
from pydotplus import Dot, Node, Edge
import os
# 该图配置
graph = {'A': ['B', 'C', 'F'],
'B': ['C', 'D'],
'C': ['D'],
'D': ['C'],
'E': ['F', 'D'],
'F': ['C']
}
def dotgraph(graph):
__graph = Dot(rankdir='TB', fontname="Fangsong",
fontcolor='blue', label="有向图的示例")
__graph.set_type('digraph')
__graph.set_name('digraph_demo')
__graph.set_node_defaults(
fontname="Fangsong", style='filled', fillcolor='yellow')
__graph.set_edge_defaults(fontname="Fangsong", color='black')
for key, value in graph.items():
# 若节点没有特殊label或者其他属性需求
# 可以直接以节点名称显示
# 直接标记方向,不用手动添加
# node = Node(key)
# __graph.add_node(node)
for v in value:
edge = Edge(key, v)
__graph.add_edge(edge)
ret = __graph.write_raw("demo.dot")
if ret is not True:
print('生成demo.dot失败')
ret = __graph.write_svg("demo.svg")
if ret is not True:
print('生成graph.svg失败')
# ret = __graph.write_png("demo.png")
# if ret is not True:
# print('生成graph.png失败')
return __graph
def find_path(graph, start, end, path=[]):
"""
在图graph中找路径:
从顶点start到顶点end
走过的路径为path
"""
path = path + [start]
# 3.0 若当找到路径尾部,则返回该路径
if start == end:
return path
# 1.0 判断当前顶点是否在图内
if start not in graph.keys():
return None
for node in graph[start]:
if node not in path:
# 2.0 以当前顶点为起点,继续找路径
newpath = find_path(graph, node, end, path)
# 4.0 返回该路径
if newpath:
return newpath
# 这个没有什么用吗 ?
# return path
def find_all_paths(graph, start, end, path=None, paths=None):
    """Return every simple path from *start* to *end* in *graph*.

    Returns a list of paths (each a list of vertices), or None when no
    path exists or *start* is not a vertex of the graph.

    Bug fixes over the previous version:
    - *path* and *paths* defaulted to shared mutable lists, so results
      accumulated across separate top-level calls;
    - the recursion relied on that shared default instead of passing the
      accumulator down explicitly;
    - the start == end case returned a flat vertex list instead of a
      list of paths.
    """
    if path is None:
        path = []
    if paths is None:
        paths = []
    path = path + [start]
    if start == end:
        paths.append(path)
        return paths
    if start not in graph:
        return None
    for node in graph[start]:
        if node not in path:
            # Pass the accumulator down so every complete path lands in
            # the same list for this call tree only.
            find_all_paths(graph, node, end, path, paths)
    return paths if paths else None
def find_short_path(graph, start, end, path=None):
    """Return a shortest (fewest-vertices) simple path from *start* to *end*.

    Returns the path as a list of vertices, or None when no path exists
    or *start* is not a vertex of the graph.

    Bug fix: the previous version recursed into find_path(), which yields
    the *first* path found from each neighbour rather than the shortest
    one, so the overall result was not guaranteed minimal.  Recursing
    into find_short_path() itself restores the guarantee.  The mutable
    default argument was also replaced with None.
    """
    path = (path or []) + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    shortpath = None
    for node in graph[start]:
        if node not in path:
            # Shortest continuation through this neighbour.
            newpath = find_short_path(graph, node, end, path)
            if newpath and (shortpath is None or len(newpath) < len(shortpath)):
                shortpath = newpath
    return shortpath
def breadth_first_search(graph, start):
    """Breadth-first traversal of *graph* starting at *start*.

    When vertices remain that are unreachable from *start*, the walk
    jumps to the alphabetically smallest unvisited vertex and continues,
    so every key of *graph* ends up in the result.  Returns the list of
    vertices in visitation order.
    """
    visited = [start]
    frontier = [start]
    vertices = set(graph)
    seen = set(visited)
    while seen < vertices:
        # Expand layer by layer until no new vertex shows up.
        while frontier:
            upcoming = []
            for vertex in frontier:
                for neighbour in graph[vertex]:
                    if neighbour not in visited:
                        visited.append(neighbour)
                        upcoming.append(neighbour)
            frontier = upcoming
        # Jump to the smallest vertex of the next unreached component.
        seen = set(visited)
        frontier = sorted(vertices - seen)
        if frontier:
            visited.append(frontier[0])
    return visited
def depth_first_search(graph, start):
    """Depth-first traversal of *graph* starting at *start*.

    When vertices remain that are unreachable from *start*, the walk
    restarts from the alphabetically smallest unvisited vertex, so every
    key of *graph* ends up in the result.  Returns the list of vertices
    in visitation order.
    """
    order = []
    pending = [start]
    vertices = set(graph)
    seen = set(order)
    while seen < vertices:
        while pending:
            vertex = pending.pop()
            if vertex in order:
                continue
            order.append(vertex)
            # Push unvisited successors; the last one pushed is explored first.
            pending.extend(n for n in graph[vertex] if n not in order)
        # Restart from the smallest vertex of the next unreached component.
        seen = set(order)
        remaining = sorted(vertices - seen)
        if remaining:
            pending.append(remaining[0])
    return order
if __name__ == '__main__':
    # Demo: exercise every helper above on the sample graph.
    result = find_path(graph, 'A', 'D')
    print("1. 路径查找结果:", result)
    print('---------------------------------')
    result = find_all_paths(graph, 'A', 'D')
    print("2. 全路径查找结果:", result)
    print("路径个数:", len(result))
    i = 1
    for path in result:
        print('路径{0:2d}为:{1}'.format(i, path))
        i += 1
    print('---------------------------------')
    result = find_short_path(graph, 'A', 'D')
    print("3. 查找最短路径:", result)
    print('---------------------------------')
    # Render the graph to demo.dot / demo.svg.
    dotgraph(graph)
    # Breadth-first traversal.
    result = breadth_first_search(graph, 'A')
    print(result)
    # Depth-first traversal.
    result = depth_first_search(graph, 'A')
    print(result)
|
from pydotplus import Dot, Node, Edge
import os
# 该图配置
graph = {'A': ['B', 'C', 'F'],
'B': ['C', 'D'],
'C': ['D'],
'D': ['C'],
'E': ['F', 'D'],
'F': ['C']
}
def dotgraph(graph):
__graph = Dot(rankdir='TB', fontname="Fangsong",
fontcolor='blue', label="有向图的示例")
__graph.set_type('digraph')
__graph.set_name('digraph_demo')
__graph.set_node_defaults(
fontname="Fangsong", style='filled', fillcolor='yellow')
__graph.set_edge_defaults(fontname="Fangsong", color='black')
for key, value in graph.items():
# 若节点没有特殊label或者其他属性需求
# 可以直接以节点名称显示
# 直接标记方向,不用手动添加
# node = Node(key)
# __graph.add_node(node)
for v in value:
edge = Edge(key, v)
__graph.add_edge(edge)
ret = __graph.write_raw("demo.dot")
if ret is not True:
print('生成demo.dot失败')
ret = __graph.write_svg("demo.svg")
if ret is not True:
print('生成graph.svg失败')
# ret = __graph.write_png("demo.png")
# if ret is not True:
# print('生成graph.png失败')
return __graph
def find_path(graph, start, end, path=[]):
"""
在图graph中找路径:
从顶点start到顶点end
走过的路径为path
"""
path = path + [start]
# 3.0 若当找到路径尾部,则返回该路径
if start == end:
return path
# 1.0 判断当前顶点是否在图内
if start not in graph.keys():
return None
for node in graph[start]:
if node not in path:
# 2.0 以当前顶点为起点,继续找路径
newpath = find_path(graph, node, end, path)
# 4.0 返回该路径
if newpath:
return newpath
# 这个没有什么用吗 ?
# return path
def find_all_paths(graph, start, end, path=[], paths=[]):
path = path + [start]
if start == end:
paths.append(path)
return path
if start not in graph.keys():
return None
for node in graph[start]:
if node not in path:
newpaths = find_all_paths(graph, node, end, path)
if paths == []:
return None
return paths
def find_short_path(graph, start, end, path=[]):
path = path + [start]
if start == end:
return path
if start not in graph.keys():
return None
shortpath = None
for node in graph[start]:
if node not in path:
newpath = find_path(graph, node, end, path)
if newpath:
if not shortpath or len(newpath) < len(shortpath):
shortpath = newpath
return shortpath
def breadth_first_search(graph, start):
prenodes = [start] # 前驱节点
travel = [start] # 已遍历过的顶点
graph_sets = set(graph.keys())
travel_sets = set(travel)
while travel_sets < graph_sets:
# 当前驱节点未空时退出
while prenodes:
nextnodes = [] # 当前顶点的邻接点
for prenode in prenodes:
for curnode in graph[prenode]: # 遍历当前层的节点
if curnode not in travel: # 判断当前层节点是否被访问国
travel.append(curnode) # 若没有被访问过,则入队
nextnodes.append(curnode) # 将当前节点追加如新的前驱节点队列
# 当前层的节点都遍历完毕,则开始下一层的遍历
prenodes = nextnodes
travel_sets = set(travel)
prenodes = list(graph_sets - travel_sets)
if prenodes != []:
prenodes.sort()
travel.append(prenodes[0])
return travel
def depth_first_search(graph, start):
travel = []
stack = [start]
graph_sets = set(graph.keys())
travel_sets = set(travel)
while travel_sets < graph_sets:
# 堆栈空的时候退出
while stack:
curnode = stack.pop() # 栈顶弹出
if curnode not in travel: # 判断当前节点是否已经被访问过
travel.append(curnode) # 若没访问过,则入队
for nextnode in graph[curnode]: # 遍历当前节点林邻接点
if nextnode not in travel: # 没有被访问过的顶点全部入栈
stack.append(nextnode)
travel_sets = set(travel)
leftnode = list(graph_sets - travel_sets)
if leftnode != []:
leftnode.sort()
stack.append(leftnode[0])
return travel
if __name__ == '__main__':
result = find_path(graph, 'A', 'D')
print("1. 路径查找结果:", result)
print('---------------------------------')
result = find_all_paths(graph, 'A', 'D')
print("2. 全路径查找结果:", result)
print("路径个数:", len(result))
i = 1
for path in result:
print('路径{0:2d}为:{1}'.format(i, path))
i += 1
print('---------------------------------')
result = find_short_path(graph, 'A', 'D')
print("3. 查找最短路径:", result)
print('---------------------------------')
# 生成图表
dotgraph(graph)
# 广度优先遍历
result = breadth_first_search(graph, 'A')
print(result)
# 深度优先遍历
result = depth_first_search(graph, 'A')
print(result)
| zh | 0.884514 | # 该图配置 # 若节点没有特殊label或者其他属性需求 # 可以直接以节点名称显示 # 直接标记方向,不用手动添加 # node = Node(key) # __graph.add_node(node) # ret = __graph.write_png("demo.png") # if ret is not True: # print('生成graph.png失败') 在图graph中找路径: 从顶点start到顶点end 走过的路径为path # 3.0 若当找到路径尾部,则返回该路径 # 1.0 判断当前顶点是否在图内 # 2.0 以当前顶点为起点,继续找路径 # 4.0 返回该路径 # 这个没有什么用吗 ? # return path # 前驱节点 # 已遍历过的顶点 # 当前驱节点未空时退出 # 当前顶点的邻接点 # 遍历当前层的节点 # 判断当前层节点是否被访问国 # 若没有被访问过,则入队 # 将当前节点追加如新的前驱节点队列 # 当前层的节点都遍历完毕,则开始下一层的遍历 # 堆栈空的时候退出 # 栈顶弹出 # 判断当前节点是否已经被访问过 # 若没访问过,则入队 # 遍历当前节点林邻接点 # 没有被访问过的顶点全部入栈 # 生成图表 # 广度优先遍历 # 深度优先遍历 | 2.905657 | 3 |
src/components/base.py | Mrpatekful/Pytorch-MT | 7 | 6623443 | <filename>src/components/base.py
from torch.nn import Module
from src.utils.utils import Component
class Encoder(Module, Component):
    """
    Abstract base class for the encoder modules of the application. An encoder must
    inherit from this class, otherwise it won't be discoverable by the hierarchy
    builder utility.
    """

    def __init__(self, *args, **kwargs):
        # Only torch.nn.Module needs initialisation here; extra arguments
        # are accepted so subclasses may use any constructor signature.
        super().__init__()

    def forward(self, *args, **kwargs):
        """Run the encoder.  Subclasses must implement this."""
        raise NotImplementedError

    @property
    def optimizers(self):
        """Optimizers owned by this encoder.  Subclasses must implement this."""
        raise NotImplementedError

    @property
    def state(self):
        """Serializable state of the encoder.  Subclasses must implement this."""
        raise NotImplementedError

    @state.setter
    def state(self, value):
        raise NotImplementedError
class Decoder(Module, Component):
    """
    Abstract base class for the decoder modules of the application. A decoder must
    inherit from this class, otherwise it won't be discoverable by the hierarchy
    builder utility.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Size of the decoder output; subclasses are expected to set this.
        self._output_size = None

    def forward(self, *args, **kwargs):
        """Run the decoder.  Subclasses must implement this."""
        raise NotImplementedError

    @property
    def optimizers(self):
        """Optimizers owned by this decoder.  Subclasses must implement this."""
        raise NotImplementedError

    @property
    def state(self):
        """Serializable state of the decoder.  Subclasses must implement this."""
        raise NotImplementedError

    @state.setter
    def state(self, value):
        raise NotImplementedError

    @property
    def output_size(self):
        """Size of the decoder output (None until set by a subclass)."""
        return self._output_size
| <filename>src/components/base.py
from torch.nn import Module
from src.utils.utils import Component
class Encoder(Module, Component):
"""
Abstract base class for the encoder modules of the application. An encoder must
inherit from this class, otherwise it won't be discoverable by the hierarchy
builder utility.
"""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, *args, **kwargs):
raise NotImplementedError
@property
def optimizers(self):
raise NotImplementedError
@property
def state(self):
raise NotImplementedError
@state.setter
def state(self, value):
raise NotImplementedError
class Decoder(Module, Component):
"""
Abstract base class for the decoder modules of the application. A decoder must
inherit from this class, otherwise it won't be discoverable by the hierarchy
builder utility.
"""
def __init__(self, *args, **kwargs):
super().__init__()
self._output_size = None
def forward(self, *args, **kwargs):
raise NotImplementedError
@property
def optimizers(self):
raise NotImplementedError
@property
def state(self):
raise NotImplementedError
@state.setter
def state(self, value):
raise NotImplementedError
@property
def output_size(self):
return self._output_size
| en | 0.874477 | Abstract base class for the encoder modules of the application. An encoder must inherit from this class, otherwise it won't be discoverable by the hierarchy builder utility. Abstract base class for the decoder modules of the application. A decoder must inherit from this class, otherwise it won't be discoverable by the hierarchy builder utility. | 2.644761 | 3 |
ivory/lightgbm/estimator.py | daizutabi/ivory | 1 | 6623444 | <gh_stars>1-10
import lightgbm as lgb
import ivory.core.estimator
from ivory.core import instance
from ivory.core.run import Run
class Estimator(ivory.core.estimator.Estimator):
    """LightGBM estimator: fits a booster once during the training step."""

    def __init__(self, **kwargs):
        # filter_params() partitions **kwargs against lgb.train's signature:
        # self.params becomes lgb.train's params dict, self.kwargs is
        # splatted into the lgb.train call (see fit()).
        self.params, self.kwargs = instance.filter_params(lgb.train, **kwargs)

    def step(self, run: Run, mode: str):  # type:ignore
        """Fit on the "train" step, then defer to the parent implementation."""
        if mode == "train":
            # [:] materialises the dataset; [1:] drops its first component
            # (presumably an index column -- confirm against ivory datasets).
            train_dataset = run.datasets.train[:][1:]
            val_dataset = run.datasets.val[:][1:]
            self.fit(train_dataset, val_dataset)
        super().step(run, mode, training=False)

    def fit(self, train_dataset, val_dataset):
        """Train a booster, evaluating on both the train and validation sets."""
        train_set = lgb.Dataset(*train_dataset)
        val_set = lgb.Dataset(*val_dataset)
        valid_sets = [train_set, val_set]
        self.estimator = lgb.train(
            self.params, train_set, valid_sets=valid_sets, **self.kwargs
        )
class Regressor(Estimator):
    """Estimator preconfigured for regression with the MSE metric."""

    def __init__(self, objective="regression", metric="mse", **kwargs):
        super().__init__(objective=objective, metric=metric, **kwargs)
class Classifier(Estimator):
    """Estimator preconfigured for multiclass classification (log-loss metric)."""

    def __init__(self, objective="multiclass", metric="multi_logloss", **kwargs):
        super().__init__(objective=objective, metric=metric, **kwargs)
| import lightgbm as lgb
import ivory.core.estimator
from ivory.core import instance
from ivory.core.run import Run
class Estimator(ivory.core.estimator.Estimator):
def __init__(self, **kwargs):
self.params, self.kwargs = instance.filter_params(lgb.train, **kwargs)
def step(self, run: Run, mode: str): # type:ignore
if mode == "train":
train_dataset = run.datasets.train[:][1:]
val_dataset = run.datasets.val[:][1:]
self.fit(train_dataset, val_dataset)
super().step(run, mode, training=False)
def fit(self, train_dataset, val_dataset):
train_set = lgb.Dataset(*train_dataset)
val_set = lgb.Dataset(*val_dataset)
valid_sets = [train_set, val_set]
self.estimator = lgb.train(
self.params, train_set, valid_sets=valid_sets, **self.kwargs
)
class Regressor(Estimator):
def __init__(self, objective="regression", metric="mse", **kwargs):
super().__init__(objective=objective, metric=metric, **kwargs)
class Classifier(Estimator):
def __init__(self, objective="multiclass", metric="multi_logloss", **kwargs):
super().__init__(objective=objective, metric=metric, **kwargs) | it | 0.408888 | # type:ignore | 2.354751 | 2 |
setup.py | nguyentientungduong/python_client | 0 | 6623445 | <gh_stars>0
#!/usr/bin/env python
"""
setup.py file for GridDB python client
"""
from distutils.command.build import build
import os
import platform, sysconfig
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
try:
with open('README.rst') as f:
readme = f.read()
except IOError:
readme = ''
os.environ["CXX"] = "g++"
os.environ["CC"] = "g++"
# C++ sources and the SWIG interface compiled into the _griddb_python extension.
SOURCES = [
    'src/AggregationResult.cpp',
    'src/Container.cpp',
    'src/ContainerInfo.cpp',
    'src/Field.cpp',
    'src/PartitionController.cpp',
    'src/Query.cpp',
    'src/QueryAnalysisEntry.cpp',
    'src/RowKeyPredicate.cpp',
    'src/RowList.cpp',
    'src/RowSet.cpp',
    'src/Store.cpp',
    'src/StoreFactory.cpp',
    'src/TimeSeriesProperties.cpp',
    'src/TimestampUtils.cpp',
    'src/griddb.i',
    'src/Util.cpp'
]
# Headers and SWIG interface files the extension depends on; changing any
# of these triggers a rebuild.
DEPENDENTS = [
    'src/AggregationResult.h',
    'src/ContainerInfo.h',
    'src/Container.h',
    'src/ExpirationInfo.h',
    # Bug fix: a missing comma here used to silently concatenate this
    # entry with the next one into 'src/Field.hsrc/GSException.h'.
    'src/Field.h',
    'src/GSException.h',
    'src/PartitionController.h',
    'src/Query.h',
    'src/QueryAnalysisEntry.h',
    'src/RowKeyPredicate.h',
    'src/RowList.h',
    'src/RowSet.h',
    'src/Store.h',
    'src/StoreFactory.h',
    'src/TimeSeriesProperties.h',
    'src/TimestampUtils.h',
    'src/gstype_python.i',
    'src/gstype.i',
    'src/Util.h',
    'include/gridstore.h'
]
site_packages_path = sysconfig.get_path('purelib')

# Header search paths; numpy headers live under site-packages.
INCLUDES = [
    'include',
    'src',
    site_packages_path + '/numpy/core/include/'
]

COMPILE_ARGS = [
    '-std=c++0x'
]

# For MacOS: librt does not exist on Darwin, so link gridstore alone.
if platform.system() == 'Darwin':
    LIBRARIES = [
        'gridstore'
    ]
else:
    LIBRARIES = [
        'rt',
        'gridstore'
    ]

# Options forwarded to SWIG when generating the C++ wrapper.
SWIG_OPTS = [
    '-DSWIGWORDSIZE64',
    '-c++',
    '-outdir',
    '.',
    '-Isrc'
]
class CustomBuild(build):
    """build command with build_ext promoted to run first.

    The extension is built before the pure Python modules, presumably so
    the SWIG-generated wrapper module exists when build_py runs.
    """

    sub_commands = [
        ('build_ext', build.has_ext_modules),
        ('build_py', build.has_pure_modules),
        ('build_clib', build.has_c_libraries),
        ('build_scripts', build.has_scripts)
    ]
# The C++/SWIG extension module wrapping the GridDB C client.
griddb_module = Extension('_griddb_python',
                          sources=SOURCES,
                          include_dirs=INCLUDES,
                          libraries=LIBRARIES,
                          extra_compile_args=COMPILE_ARGS,
                          swig_opts=SWIG_OPTS,
                          depends=DEPENDENTS
                          )

data_files = []
# Version of the bundled GridDB C client artefacts referenced below.
cclient_version = "4.5.1"
# For MacOS: ship the GridDB C client licence texts, headers and shared
# libraries into site-packages alongside the package.  Each data_files
# entry is (target directory, [files to install there]).
if platform.system() == 'Darwin':
    classifiers = [
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: MacOS :: MacOS X",
        "Programming Language :: Python :: 3.6"
    ]
    data_files=[
        (
            os.path.join(
                "lib/python3.6/site-packages/griddb/griddb-c-client", cclient_version
            ),
            [
                os.path.join("c_client-{}".format(cclient_version), "LICENSE"),
                os.path.join("c_client-{}".format(cclient_version), "README.md")
            ]
        ),
        # Third-party licence notices shipped with the C client.
        (
            os.path.join(
                "lib/python3.6/site-packages/griddb/griddb-c-client",
                cclient_version,
                "3rd_party",
            ),
            [
                os.path.join(
                    "c_client-{}".format(cclient_version), "3rd_party/3rd_party.md"
                ),
                os.path.join(
                    "c_client-{}".format(cclient_version),
                    "3rd_party/Apache_License-2.0.txt",
                ),
                os.path.join(
                    "c_client-{}".format(cclient_version), "3rd_party/BSD_License.txt"
                ),
                os.path.join(
                    "c_client-{}".format(cclient_version), "3rd_party/MIT_License.txt"
                ),
            ],
        ),
        (
            os.path.join(
                "lib/python3.6/site-packages/griddb/griddb-c-client",
                cclient_version,
                "3rd_party/ebb",
            ),
            [os.path.join("c_client-{}".format(cclient_version), "3rd_party/ebb/LICENSE")],
        ),
        (
            os.path.join(
                "lib/python3.6/site-packages/griddb/griddb-c-client",
                cclient_version,
                "3rd_party/omaha",
            ),
            [
                os.path.join(
                    "c_client-{}".format(cclient_version), "3rd_party/omaha/COPYING"
                )
            ],
        ),
        (
            os.path.join(
                "lib/python3.6/site-packages/griddb/griddb-c-client",
                cclient_version,
                "3rd_party/picojson",
            ),
            [
                os.path.join(
                    "c_client-{}".format(cclient_version), "3rd_party/picojson/org/include/README.mkdn"
                )
            ],
        ),
        (
            os.path.join(
                "lib/python3.6/site-packages/griddb/griddb-c-client",
                cclient_version,
                "3rd_party/purewell",
            ),
            [
                os.path.join(
                    "c_client-{}".format(cclient_version),
                    "3rd_party/purewell/purewell.txt",
                )
            ],
        ),
        (
            os.path.join(
                "lib/python3.6/site-packages/griddb/griddb-c-client",
                cclient_version,
                "3rd_party/sha2",
            ),
            [os.path.join("c_client-{}".format(cclient_version), "3rd_party/sha2/README")],
        ),
        (
            os.path.join(
                "lib/python3.6/site-packages/griddb/griddb-c-client",
                cclient_version,
                "3rd_party/uuid",
            ),
            [
                os.path.join(
                    "c_client-{}".format(cclient_version), "3rd_party/uuid/uuid/COPYING"
                )
            ],
        ),
        (
            os.path.join(
                "lib/python3.6/site-packages/griddb/griddb-c-client",
                cclient_version,
                "3rd_party/yield",
            ),
            [
                os.path.join(
                    "c_client-{}".format(cclient_version), "3rd_party/yield/yield.txt"
                )
            ],
        ),
        # Shared libraries taken from the Homebrew (/usr/local/Cellar)
        # install of the C client rather than the bundled bin/ copies.
        (
            f"lib/python3.6/site-packages",
            [
                #f"c_client-{cclient_version}/bin/libgridstore.0.0.0.dylib",
                #f"c_client-{cclient_version}/bin/libgridstore.0.dylib",
                #f"c_client-{cclient_version}/bin/libgridstore.dylib"
                os.path.join(
                    "/usr/local/Cellar/griddb-c-client", cclient_version, "lib/libgridstore.0.dylib"
                ),
                os.path.join("/usr/local/Cellar/griddb-c-client", cclient_version, "lib/libgridstore.a"),
                os.path.join(
                    "/usr/local/Cellar/griddb-c-client", cclient_version, "lib/libgridstore.dylib"
                ),
            ]
        ),
        (
            os.path.join(
                "lib/python3.6/site-packages/griddb/griddb-c-client",
                cclient_version,
                "include"
            ),
            [os.path.join("c_client-{}".format(cclient_version), "client/c/include/gridstore.h")],
        ),
        (
            os.path.join("lib/python3.6/site-packages/griddb/Sample"),
            ["sample/sample1.py"],
        )
    ]
else:
    classifiers = [
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3.6"
    ]
# Package metadata; the distribution is built around the SWIG extension,
# using CustomBuild so build_ext runs before build_py.
setup(name='griddb_python',
      version='0.8.3',
      author='<NAME>',
      author_email='<EMAIL>',
      description='GridDB Python Client Library built using SWIG',
      long_description=readme,
      ext_modules=[griddb_module],
      py_modules=['griddb_python'],
      data_files=data_files,
      url='https://github.com/griddb/python_client/',
      license='Apache Software License',
      cmdclass={'build': CustomBuild},
      long_description_content_type = 'text/x-rst',
      classifiers=classifiers
      )
| #!/usr/bin/env python
"""
setup.py file for GridDB python client
"""
from distutils.command.build import build
import os
import platform, sysconfig
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
try:
with open('README.rst') as f:
readme = f.read()
except IOError:
readme = ''
os.environ["CXX"] = "g++"
os.environ["CC"] = "g++"
SOURCES = [
'src/AggregationResult.cpp',
'src/Container.cpp',
'src/ContainerInfo.cpp',
'src/Field.cpp',
'src/PartitionController.cpp',
'src/Query.cpp',
'src/QueryAnalysisEntry.cpp',
'src/RowKeyPredicate.cpp',
'src/RowList.cpp',
'src/RowSet.cpp',
'src/Store.cpp',
'src/StoreFactory.cpp',
'src/TimeSeriesProperties.cpp',
'src/TimestampUtils.cpp',
'src/griddb.i',
'src/Util.cpp'
]
DEPENDENTS = [
'src/AggregationResult.h',
'src/ContainerInfo.h',
'src/Container.h',
'src/ExpirationInfo.h',
'src/Field.h'
'src/GSException.h',
'src/PartitionController.h',
'src/Query.h',
'src/QueryAnalysisEntry.h',
'src/RowKeyPredicate.h',
'src/RowList.h',
'src/RowSet.h',
'src/Store.h',
'src/StoreFactory.h',
'src/TimeSeriesProperties.h',
'src/TimestampUtils.h',
'src/gstype_python.i',
'src/gstype.i',
'src/Util.h',
'include/gridstore.h'
]
site_packages_path = sysconfig.get_path('purelib')
INCLUDES = [
'include',
'src',
site_packages_path + '/numpy/core/include/'
]
COMPILE_ARGS = [
'-std=c++0x'
]
# For MacOS
if platform.system() == 'Darwin':
LIBRARIES = [
'gridstore'
]
else:
LIBRARIES = [
'rt',
'gridstore'
]
SWIG_OPTS = [
'-DSWIGWORDSIZE64',
'-c++',
'-outdir',
'.',
'-Isrc'
]
class CustomBuild(build):
sub_commands = [
('build_ext', build.has_ext_modules),
('build_py', build.has_pure_modules),
('build_clib', build.has_c_libraries),
('build_scripts', build.has_scripts)
]
griddb_module = Extension('_griddb_python',
sources=SOURCES,
include_dirs=INCLUDES,
libraries=LIBRARIES,
extra_compile_args=COMPILE_ARGS,
swig_opts=SWIG_OPTS,
depends=DEPENDENTS
)
data_files = []
cclient_version = "4.5.1"
# For MacOS
if platform.system() == 'Darwin':
classifiers = [
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python :: 3.6"
]
data_files=[
(
os.path.join(
"lib/python3.6/site-packages/griddb/griddb-c-client", cclient_version
),
[
os.path.join("c_client-{}".format(cclient_version), "LICENSE"),
os.path.join("c_client-{}".format(cclient_version), "README.md")
]
),
(
os.path.join(
"lib/python3.6/site-packages/griddb/griddb-c-client",
cclient_version,
"3rd_party",
),
[
os.path.join(
"c_client-{}".format(cclient_version), "3rd_party/3rd_party.md"
),
os.path.join(
"c_client-{}".format(cclient_version),
"3rd_party/Apache_License-2.0.txt",
),
os.path.join(
"c_client-{}".format(cclient_version), "3rd_party/BSD_License.txt"
),
os.path.join(
"c_client-{}".format(cclient_version), "3rd_party/MIT_License.txt"
),
],
),
(
os.path.join(
"lib/python3.6/site-packages/griddb/griddb-c-client",
cclient_version,
"3rd_party/ebb",
),
[os.path.join("c_client-{}".format(cclient_version), "3rd_party/ebb/LICENSE")],
),
(
os.path.join(
"lib/python3.6/site-packages/griddb/griddb-c-client",
cclient_version,
"3rd_party/omaha",
),
[
os.path.join(
"c_client-{}".format(cclient_version), "3rd_party/omaha/COPYING"
)
],
),
(
os.path.join(
"lib/python3.6/site-packages/griddb/griddb-c-client",
cclient_version,
"3rd_party/picojson",
),
[
os.path.join(
"c_client-{}".format(cclient_version), "3rd_party/picojson/org/include/README.mkdn"
)
],
),
(
os.path.join(
"lib/python3.6/site-packages/griddb/griddb-c-client",
cclient_version,
"3rd_party/purewell",
),
[
os.path.join(
"c_client-{}".format(cclient_version),
"3rd_party/purewell/purewell.txt",
)
],
),
(
os.path.join(
"lib/python3.6/site-packages/griddb/griddb-c-client",
cclient_version,
"3rd_party/sha2",
),
[os.path.join("c_client-{}".format(cclient_version), "3rd_party/sha2/README")],
),
(
os.path.join(
"lib/python3.6/site-packages/griddb/griddb-c-client",
cclient_version,
"3rd_party/uuid",
),
[
os.path.join(
"c_client-{}".format(cclient_version), "3rd_party/uuid/uuid/COPYING"
)
],
),
(
os.path.join(
"lib/python3.6/site-packages/griddb/griddb-c-client",
cclient_version,
"3rd_party/yield",
),
[
os.path.join(
"c_client-{}".format(cclient_version), "3rd_party/yield/yield.txt"
)
],
),
(
f"lib/python3.6/site-packages",
[
#f"c_client-{cclient_version}/bin/libgridstore.0.0.0.dylib",
#f"c_client-{cclient_version}/bin/libgridstore.0.dylib",
#f"c_client-{cclient_version}/bin/libgridstore.dylib"
os.path.join(
"/usr/local/Cellar/griddb-c-client", cclient_version, "lib/libgridstore.0.dylib"
),
os.path.join("/usr/local/Cellar/griddb-c-client", cclient_version, "lib/libgridstore.a"),
os.path.join(
"/usr/local/Cellar/griddb-c-client", cclient_version, "lib/libgridstore.dylib"
),
]
),
(
os.path.join(
"lib/python3.6/site-packages/griddb/griddb-c-client",
cclient_version,
"include"
),
[os.path.join("c_client-{}".format(cclient_version), "client/c/include/gridstore.h")],
),
(
os.path.join("lib/python3.6/site-packages/griddb/Sample"),
["sample/sample1.py"],
)
]
else:
classifiers = [
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.6"
]
setup(name='griddb_python',
version='0.8.3',
author='<NAME>',
author_email='<EMAIL>',
description='GridDB Python Client Library built using SWIG',
long_description=readme,
ext_modules=[griddb_module],
py_modules=['griddb_python'],
data_files=data_files,
url='https://github.com/griddb/python_client/',
license='Apache Software License',
cmdclass={'build': CustomBuild},
long_description_content_type = 'text/x-rst',
classifiers=classifiers
) | en | 0.29832 | #!/usr/bin/env python setup.py file for GridDB python client # For MacOS # For MacOS #f"c_client-{cclient_version}/bin/libgridstore.0.0.0.dylib", #f"c_client-{cclient_version}/bin/libgridstore.0.dylib", #f"c_client-{cclient_version}/bin/libgridstore.dylib" | 1.677372 | 2 |
examples/simplest.py | wilfredinni/mary | 4 | 6623446 | """
Very basic example that show how to build a simple CLI.
"""
import noodle
class Main(noodle.Master):
    """
    Simple CLI app written with Noodle.
    """

    # Application metadata shown by the CLI.  The docstring above is left
    # as-is since it may double as the app description in help output.
    app_name = "Simplest"  # if not specified, defaults to the filename
    version = "0.1.1"  # if not specified, defaults to 0.1.0
class Greet(noodle.Command):
    """
    Greets someone
    """

    command_name = "greet"
    # Positional arguments: mapping of argument name -> help text.
    arguments = {"name": "Who do you want to greet?"}
    # Flags: mapping of option name -> help text.
    options = {
        "yell": "Yell in uppercase letters",
        "shh": "Shh in lowercase letters",
    }

    def handler(self):
        # NOTE(review): self.arguments here appears to resolve to the parsed
        # value of the single "name" argument -- confirm against noodle.Command.
        greet = f"Hello {self.arguments}"
        if self.option("yell"):
            noodle.output.danger(greet.upper())
        elif self.option("shh"):
            noodle.output.info(greet.lower())
        else:
            noodle.output(greet)
# Instantiate the application, register its commands and start it.
app = Main()
app.register(Greet)

if __name__ == "__main__":
    app.run()
| """
Very basic example that show how to build a simple CLI.
"""
import noodle
class Main(noodle.Master):
"""
Simple CLI app written with Noodle.
"""
app_name = "Simplest" # if not specified, defaults to the filename
version = "0.1.1" # if not specified, defaults to 0.1.0
class Greet(noodle.Command):
"""
Greets someone
"""
command_name = "greet"
arguments = {"name": "Who do you want to greet?"}
options = {
"yell": "Yell in uppercase letters",
"shh": "Shh in lowercase letters",
}
def handler(self):
greet = f"Hello {self.arguments}"
if self.option("yell"):
noodle.output.danger(greet.upper())
elif self.option("shh"):
noodle.output.info(greet.lower())
else:
noodle.output(greet)
app = Main()
app.register(Greet)
if __name__ == "__main__":
app.run()
| en | 0.629919 | Very basic example that show how to build a simple CLI. Simple CLI app written with Noodle. # if not specified, defaults to the filename # if not specified, defaults to 0.1.0 Greets someone | 3.509338 | 4 |
platipy/dicom/tests/test_convert_rtstruct.py | SimonBiggs/platipy | 0 | 6623447 | # Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import SimpleITK as sitk
from platipy.dicom.rtstruct_to_nifti.convert import convert_rtstruct
def test_convert_rtstruct():
    """End-to-end check of convert_rtstruct() on the bundled phantom data:
    converts the CT series plus RTStruct to NIfTI and verifies geometry
    and voxel-sum fingerprints of the outputs."""
    phantom_dir = os.path.dirname(__file__)
    rtstruct_in = os.path.join(
        phantom_dir, r"../data/phantom/RTStruct.dcm"
    )  # Path to RTStruct file
    ct_in = os.path.join(phantom_dir, r"../data/phantom/CT")  # Path to CT directory
    pre = "Test_"
    output_dir = "test_output_nifti"
    output_img = "img.nii.gz"

    # Run the function
    convert_rtstruct(
        ct_in, rtstruct_in, prefix=pre, output_dir=output_dir, output_img=output_img
    )

    # Check some of the output files for sanity: image + converted structures.
    assert len(os.listdir(output_dir)) == 12

    # Check the converted image series
    im = sitk.ReadImage(os.path.join(output_dir, output_img), sitk.sitkInt64)
    print(os.path.join(output_dir, output_img))
    assert im.GetOrigin() == (-211.12600708007812, -422.1260070800781, -974.5)
    assert im.GetSize() == (512, 512, 88)
    assert im.GetSpacing() == (0.8263229727745056, 0.8263229727745056, 3.0)
    nda = sitk.GetArrayFromImage(im)
    print(nda.sum())
    # Voxel-sum fingerprint of the converted CT volume.
    assert nda.sum() == -19933669253

    # Check a converted contour mask
    mask = sitk.ReadImage(os.path.join(output_dir, "Test_BRAINSTEM_PRI.nii.gz"))
    assert mask.GetOrigin() == (-211.12600708007812, -422.1260070800781, -974.5)
    assert mask.GetSize() == (512, 512, 88)
    assert mask.GetSpacing() == (0.8263229727745056, 0.8263229727745056, 3.0)
    nda = sitk.GetArrayFromImage(mask)
    # Number of voxels inside the binary brainstem mask.
    assert nda.sum() == 13606
| # Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import SimpleITK as sitk
from platipy.dicom.rtstruct_to_nifti.convert import convert_rtstruct
def test_convert_rtstruct():
phantom_dir = os.path.dirname(__file__)
rtstruct_in = os.path.join(
phantom_dir, r"../data/phantom/RTStruct.dcm"
) # Path to RTStruct file
ct_in = os.path.join(phantom_dir, r"../data/phantom/CT") # Path to CT directory
pre = "Test_"
output_dir = "test_output_nifti"
output_img = "img.nii.gz"
# Run the function
convert_rtstruct(
ct_in, rtstruct_in, prefix=pre, output_dir=output_dir, output_img=output_img
)
# Check some of the output files for sanity
assert len(os.listdir(output_dir)) == 12
# Check the converted image series
im = sitk.ReadImage(os.path.join(output_dir, output_img), sitk.sitkInt64)
print(os.path.join(output_dir, output_img))
assert im.GetOrigin() == (-211.12600708007812, -422.1260070800781, -974.5)
assert im.GetSize() == (512, 512, 88)
assert im.GetSpacing() == (0.8263229727745056, 0.8263229727745056, 3.0)
nda = sitk.GetArrayFromImage(im)
print(nda.sum())
assert nda.sum() == -19933669253
# Check a converted contour mask
mask = sitk.ReadImage(os.path.join(output_dir, "Test_BRAINSTEM_PRI.nii.gz"))
assert mask.GetOrigin() == (-211.12600708007812, -422.1260070800781, -974.5)
assert mask.GetSize() == (512, 512, 88)
assert mask.GetSpacing() == (0.8263229727745056, 0.8263229727745056, 3.0)
nda = sitk.GetArrayFromImage(mask)
assert nda.sum() == 13606
| en | 0.833968 | # Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Path to RTStruct file # Path to CT directory # Run the function # Check some of the output files for sanity # Check the converted image series # Check a converted contour mask | 2.055789 | 2 |
pyxel/editor/__init__.py | grewn0uille/pyxel | 1 | 6623448 | <filename>pyxel/editor/__init__.py
from . import canvas_expansion # noqa: F401
from .app import App # noqa: F401
| <filename>pyxel/editor/__init__.py
from . import canvas_expansion # noqa: F401
from .app import App # noqa: F401
| uz | 0.476978 | # noqa: F401 # noqa: F401 | 0.951236 | 1 |
crypt/gamma_cipher.py | 5x/cryptography-gui-app | 1 | 6623449 | <filename>crypt/gamma_cipher.py
from random import Random
from crypt.trithemius_cipher import TrithemiusCipher, TrithemiusHandleABC
from crypt.cipher_abc import CipherABC
class SimplePRNG(TrithemiusHandleABC):
    """Deterministic keystream source for GammaCipher.

    Seeded with the cipher key, so encryption and decryption reproduce
    the same gamma sequence.  NOTE(review): changing the generation
    scheme would break decryption of existing ciphertexts.
    """

    # Constant additive shift mixed into every generated value.
    SHIFT_C1 = 53

    def __init__(self, symbols_collection, key, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._symbols_len = len(symbols_collection)
        # Dedicated Random instance: the stream is reproducible per key and
        # independent of the global random state.
        self._random_inst = Random(x=key)

    def __iter__(self):
        # Infinite stream of pseudo-random symbol indices in [0, symbols_len).
        # NOTE(review): randint's upper bound is inclusive, so two raw values
        # collapse onto the same index after the modulo; harmless for
        # correctness but slightly non-uniform.
        while True:
            rand_int = self._random_inst.randint(0, self._symbols_len)
            next_val = (rand_int + SimplePRNG.SHIFT_C1) % self._symbols_len
            yield next_val

    def get_code(self, index):
        # *index* is ignored: each call builds a fresh generator but the
        # shared Random state still advances, yielding the next value.
        return next(self.__iter__())
class GammaCipher(TrithemiusCipher):
    """Gamma (keystream) cipher: Trithemius machinery driven by SimplePRNG."""

    def __init__(self, key, alphabet='EN'):
        super().__init__(key, SimplePRNG, alphabet)

    @CipherABC.key.setter
    def key(self, value):
        # Keys are coerced to int because they seed the PRNG.
        self._key = int(value)
if __name__ == "__main__":
cipher = GammaCipher("54")
crt_text = cipher.encrypt("the quick brown fox jumps over the lazy dog.")
plain_text = cipher.decrypt(crt_text)
print(crt_text)
print(plain_text)
| <filename>crypt/gamma_cipher.py
from random import Random
from crypt.trithemius_cipher import TrithemiusCipher, TrithemiusHandleABC
from crypt.cipher_abc import CipherABC
class SimplePRNG(TrithemiusHandleABC):
    """Pseudo-random gamma (keystream) source seeded by the cipher key.

    Yields an endless sequence of symbol indices in
    ``[0, len(symbols_collection))`` drawn from Python's Mersenne-Twister
    PRNG and offset by a fixed constant.  NOTE: ``random.Random`` is
    predictable — this keystream is NOT cryptographically secure.
    """

    # Fixed additive offset applied to every raw random value.
    SHIFT_C1 = 53

    def __init__(self, symbols_collection, key, *args, **kwargs):
        """Seed the PRNG with *key* over the given symbol alphabet."""
        super().__init__(*args, **kwargs)
        self._symbols_len = len(symbols_collection)
        self._random_inst = Random(x=key)
        # Cache one keystream iterator instead of constructing a fresh
        # generator object on every get_code() call; the underlying shared
        # Random instance makes the emitted sequence identical either way,
        # so this is purely an allocation saving.
        self._stream = iter(self)

    def __iter__(self):
        while True:
            # NOTE(review): randint's upper bound is inclusive, so the raw
            # draw spans symbols_len + 1 outcomes before the modulo; the
            # result is always a valid index but very slightly biased.
            # Kept as-is so previously produced ciphertexts still decrypt.
            rand_int = self._random_inst.randint(0, self._symbols_len)
            yield (rand_int + SimplePRNG.SHIFT_C1) % self._symbols_len

    def get_code(self, index):
        """Return the next keystream code; *index* is ignored (stream-based)."""
        return next(self._stream)
class GammaCipher(TrithemiusCipher):
    """Gamma (keystream) cipher: a Trithemius cipher whose shift sequence
    is produced by :class:`SimplePRNG` seeded with the integer key."""

    def __init__(self, key, alphabet='EN'):
        """Create a cipher over *alphabet* whose keystream is seeded by *key*."""
        super().__init__(key, SimplePRNG, alphabet)

    @CipherABC.key.setter
    def key(self, value):
        # Keys are coerced to int because they seed the PRNG.
        self._key = int(value)
if __name__ == "__main__":
cipher = GammaCipher("54")
crt_text = cipher.encrypt("the quick brown fox jumps over the lazy dog.")
plain_text = cipher.decrypt(crt_text)
print(crt_text)
print(plain_text)
| none | 1 | 3.029503 | 3 | |
odp/lib/auth.py | SAEONData/Open-Data-Platform | 2 | 6623450 | from dataclasses import dataclass
from typing import Union, Set, Literal, Dict, Optional, List
from odp.db import Session
from odp.db.models import User, Client
from odp.lib import exceptions as x
@dataclass
class Authorization:
    """An Authorization object represents the effective set of permissions
    for a user or a client. It consists of a dictionary of scope ids (OAuth2
    scope identifiers), where the value for each id is either:

    - '*' if the scope is applicable across all relevant platform entities; or
    - a set of provider ids to which the scope's usage is limited
    """

    # Maps OAuth2 scope id -> '*' (platform-wide) or a set of provider ids.
    scopes: Dict[str, Union[Literal['*'], Set[str]]]
@dataclass
class UserInfo:
    """User profile claims, as returned by :func:`get_user_info`, which may
    be linked with a user's ID token for a client application."""

    sub: str                # subject: the user's unique id
    email: str
    email_verified: bool
    name: Optional[str]
    picture: Optional[str]  # profile picture reference, if any
    roles: List[str]        # role ids visible to the requesting client
def get_client_auth(client_id: str) -> Authorization:
    """Build the effective authorization for a client application.

    :raises x.ODPClientNotFound: if *client_id* is unknown
    """
    client = Session.get(Client, client_id)
    if not client:
        raise x.ODPClientNotFound

    scopes = {}
    for scope in client.scopes:
        # A provider-bound client is limited to its own provider for every
        # scope; an unbound client gets platform-wide ('*') access.
        scopes[scope.id] = {client.provider_id} if client.provider else '*'

    return Authorization(scopes=scopes)
def get_user_auth(user_id: str, client_id: str) -> Authorization:
    """Compute the effective authorization for *user_id* acting through
    *client_id*; this may be linked with the user's access token for that
    client application.

    Platform-wide scopes come only from provider-less roles, and only when
    the client itself is not provider-bound; all other grants are collected
    per provider.

    :raises x.ODPUserNotFound: if *user_id* is unknown
    :raises x.ODPClientNotFound: if *client_id* is unknown
    """
    user = Session.get(User, user_id)
    if not user:
        raise x.ODPUserNotFound

    client = Session.get(Client, client_id)
    if not client:
        raise x.ODPClientNotFound

    # Platform-wide grants: provider-less roles used through a
    # provider-less client, restricted to scopes the client exposes.
    platform_scopes = set()
    if not client.provider:
        platform_scopes = {
            scope.id
            for role in user.roles if not role.provider
            for scope in role.scopes if scope in client.scopes
        }

    # Provider-limited grants: scope id -> set of provider ids.
    provider_scopes = {}
    for role in user.roles:
        if not (role.provider or client.provider):
            continue
        # A provider-bound role used through a provider-bound client must
        # belong to that same provider.
        if role.provider and client.provider and role.provider_id != client.provider_id:
            continue
        granting_provider = role.provider_id if role.provider else client.provider_id
        for scope in role.scopes:
            # Skip scopes already granted platform-wide, and scopes the
            # client does not expose at all.
            if scope.id in platform_scopes or scope not in client.scopes:
                continue
            provider_scopes.setdefault(scope.id, set()).add(granting_provider)

    return Authorization(
        scopes={scope_id: '*' for scope_id in platform_scopes} | provider_scopes
    )
def get_user_info(user_id: str, client_id: str) -> UserInfo:
    """Assemble profile info for *user_id*, which may be linked with the
    user's ID token for the given client application.

    Provider-bound roles are reported only when the client is unbound or
    bound to the same provider.

    TODO: we should limit the returned info based on the claims
    allowed for the client

    :raises x.ODPUserNotFound: if *user_id* is unknown
    :raises x.ODPClientNotFound: if *client_id* is unknown
    """
    user = Session.get(User, user_id)
    if not user:
        raise x.ODPUserNotFound

    client = Session.get(Client, client_id)
    if not client:
        raise x.ODPClientNotFound

    visible_roles = []
    for role in user.roles:
        # Keep provider-agnostic roles; keep provider roles only if the
        # client is unbound or pinned to the same provider.
        if not role.provider or not client.provider or role.provider_id == client.provider_id:
            visible_roles.append(role.id)

    return UserInfo(
        sub=user_id,
        email=user.email,
        email_verified=user.verified,
        name=user.name,
        picture=user.picture,
        roles=visible_roles,
    )
| from dataclasses import dataclass
from typing import Union, Set, Literal, Dict, Optional, List
from odp.db import Session
from odp.db.models import User, Client
from odp.lib import exceptions as x
@dataclass
class Authorization:
    """An Authorization object represents the effective set of permissions
    for a user or a client. It consists of a dictionary of scope ids (OAuth2
    scope identifiers), where the value for each id is either:

    - '*' if the scope is applicable across all relevant platform entities; or
    - a set of provider ids to which the scope's usage is limited
    """

    # Maps OAuth2 scope id -> '*' (platform-wide) or a set of provider ids.
    scopes: Dict[str, Union[Literal['*'], Set[str]]]
@dataclass
class UserInfo:
    """User profile claims, as returned by :func:`get_user_info`, which may
    be linked with a user's ID token for a client application."""

    sub: str                # subject: the user's unique id
    email: str
    email_verified: bool
    name: Optional[str]
    picture: Optional[str]  # profile picture reference, if any
    roles: List[str]        # role ids visible to the requesting client
def get_client_auth(client_id: str) -> Authorization:
    """Build the effective authorization for a client application.

    :raises x.ODPClientNotFound: if *client_id* is unknown
    """
    client = Session.get(Client, client_id)
    if not client:
        raise x.ODPClientNotFound

    scopes = {}
    for scope in client.scopes:
        # A provider-bound client is limited to its own provider for every
        # scope; an unbound client gets platform-wide ('*') access.
        scopes[scope.id] = {client.provider_id} if client.provider else '*'

    return Authorization(scopes=scopes)
def get_user_auth(user_id: str, client_id: str) -> Authorization:
    """Compute the effective authorization for *user_id* acting through
    *client_id*; this may be linked with the user's access token for that
    client application.

    Platform-wide scopes come only from provider-less roles, and only when
    the client itself is not provider-bound; all other grants are collected
    per provider.

    :raises x.ODPUserNotFound: if *user_id* is unknown
    :raises x.ODPClientNotFound: if *client_id* is unknown
    """
    user = Session.get(User, user_id)
    if not user:
        raise x.ODPUserNotFound

    client = Session.get(Client, client_id)
    if not client:
        raise x.ODPClientNotFound

    # Platform-wide grants: provider-less roles used through a
    # provider-less client, restricted to scopes the client exposes.
    platform_scopes = set()
    if not client.provider:
        platform_scopes = {
            scope.id
            for role in user.roles if not role.provider
            for scope in role.scopes if scope in client.scopes
        }

    # Provider-limited grants: scope id -> set of provider ids.
    provider_scopes = {}
    for role in user.roles:
        if not (role.provider or client.provider):
            continue
        # A provider-bound role used through a provider-bound client must
        # belong to that same provider.
        if role.provider and client.provider and role.provider_id != client.provider_id:
            continue
        granting_provider = role.provider_id if role.provider else client.provider_id
        for scope in role.scopes:
            # Skip scopes already granted platform-wide, and scopes the
            # client does not expose at all.
            if scope.id in platform_scopes or scope not in client.scopes:
                continue
            provider_scopes.setdefault(scope.id, set()).add(granting_provider)

    return Authorization(
        scopes={scope_id: '*' for scope_id in platform_scopes} | provider_scopes
    )
def get_user_info(user_id: str, client_id: str) -> UserInfo:
    """Assemble profile info for *user_id*, which may be linked with the
    user's ID token for the given client application.

    Provider-bound roles are reported only when the client is unbound or
    bound to the same provider.

    TODO: we should limit the returned info based on the claims
    allowed for the client

    :raises x.ODPUserNotFound: if *user_id* is unknown
    :raises x.ODPClientNotFound: if *client_id* is unknown
    """
    user = Session.get(User, user_id)
    if not user:
        raise x.ODPUserNotFound

    client = Session.get(Client, client_id)
    if not client:
        raise x.ODPClientNotFound

    visible_roles = []
    for role in user.roles:
        # Keep provider-agnostic roles; keep provider roles only if the
        # client is unbound or pinned to the same provider.
        if not role.provider or not client.provider or role.provider_id == client.provider_id:
            visible_roles.append(role.id)

    return UserInfo(
        sub=user_id,
        email=user.email,
        email_verified=user.verified,
        name=user.name,
        picture=user.picture,
        roles=visible_roles,
    )
| en | 0.878787 | An Authorization object represents the effective set of permissions for a user or a client. It consists of a dictionary of scope ids (OAuth2 scope identifiers), where the value for each id is either: - '*' if the scope is applicable across all relevant platform entities; or - a set of provider ids to which the scope's usage is limited Return client authorization info. Return user authorization info, which may be linked with a user's access token for a given client application. Return user profile info, which may be linked with a user's ID token for a given client application. TODO: we should limit the returned info based on the claims allowed for the client | 2.970142 | 3 |